-rw-r--r--  COPYING | 2
-rw-r--r--  CREDITS | 5
-rw-r--r--  Documentation/admin-guide/acpi/fan_performance_states.rst | 4
-rw-r--r--  Documentation/admin-guide/bootconfig.rst | 34
-rw-r--r--  Documentation/admin-guide/kernel-parameters.txt | 4
-rw-r--r--  Documentation/arm64/memory.rst | 2
-rw-r--r--  Documentation/arm64/tagged-address-abi.rst | 11
-rw-r--r--  Documentation/dev-tools/kunit/usage.rst | 1
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scmi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/arm,scpi.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/cpus.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/fsl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/omap/mpu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/arm/psci.yaml | 36
-rw-r--r--  Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml | 12
-rw-r--r--  Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/anx6345.yaml | 10
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/ps8640.yaml | 112
-rw-r--r--  Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml | 159
-rw-r--r--  Documentation/devicetree/bindings/display/ilitek,ili9486.yaml | 73
-rw-r--r--  Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml | 69
-rw-r--r--  Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml | 122
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,g133han01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,g185han01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt | 28
-rw-r--r--  Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml | 80
-rw-r--r--  Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/display-timing.txt | 124
-rw-r--r--  Documentation/devicetree/bindings/display/panel/display-timings.yaml | 77
-rw-r--r--  Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/edt,et-series.txt | 55
-rw-r--r--  Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml | 49
-rw-r--r--  Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml | 55
-rw-r--r--  Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt | 32
-rw-r--r--  Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt | 42
-rw-r--r--  Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml | 56
-rw-r--r--  Documentation/devicetree/bindings/display/panel/nvd,9128.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt | 23
-rw-r--r--  Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml | 53
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt | 11
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt | 20
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-common.yaml | 15
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-dpi.txt | 50
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-dpi.yaml | 81
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml | 67
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-simple.yaml | 209
-rw-r--r--  Documentation/devicetree/bindings/display/panel/panel-timing.yaml | 227
-rw-r--r--  Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml | 56
-rw-r--r--  Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt | 25
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml | 50
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt | 7
-rw-r--r--  Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt | 29
-rw-r--r--  Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt | 8
-rw-r--r--  Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt | 16
-rw-r--r--  Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt | 12
-rw-r--r--  Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt | 48
-rw-r--r--  Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt | 19
-rw-r--r--  Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml | 40
-rw-r--r--  Documentation/devicetree/bindings/display/simple-framebuffer.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/display/sitronix,st7735r.txt | 35
-rw-r--r--  Documentation/devicetree/bindings/display/sitronix,st7735r.yaml | 78
-rw-r--r--  Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml | 152
-rw-r--r--  Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml | 208
-rw-r--r--  Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml | 106
-rw-r--r--  Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/dma/ti/k3-udma.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml | 14
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt | 3
-rw-r--r--  Documentation/devicetree/bindings/input/touchscreen/goodix.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/common.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/leds/register-bit-led.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml | 40
-rw-r--r--  Documentation/devicetree/bindings/media/ti,cal.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml | 20
-rw-r--r--  Documentation/devicetree/bindings/memory-controllers/ti/emif.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mfd/max77650.yaml | 4
-rw-r--r--  Documentation/devicetree/bindings/mfd/tps65910.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/mfd/twl-family.txt (renamed from Documentation/devicetree/bindings/mfd/twl-familly.txt) | 0
-rw-r--r--  Documentation/devicetree/bindings/mfd/zii,rave-sp.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mmc/mmc-controller.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/net/mdio.yaml | 1
-rw-r--r--  Documentation/devicetree/bindings/nvmem/nvmem.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/power/domain-idle-state.txt | 33
-rw-r--r--  Documentation/devicetree/bindings/power/domain-idle-state.yaml | 64
-rw-r--r--  Documentation/devicetree/bindings/power/power-domain.yaml | 24
-rw-r--r--  Documentation/devicetree/bindings/power/power_domain.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/regulator/regulator.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml | 6
-rw-r--r--  Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-sai.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/spi/st,stm32-spi.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml | 2
-rw-r--r--  Documentation/devicetree/bindings/vendor-prefixes.yaml | 10
-rw-r--r--  Documentation/driver-api/dmaengine/client.rst | 14
-rw-r--r--  Documentation/driver-api/ipmb.rst | 4
-rw-r--r--  Documentation/filesystems/debugfs.txt | 6
-rw-r--r--  Documentation/filesystems/zonefs.txt | 20
-rw-r--r--  Documentation/gpu/drm-kms-helpers.rst | 18
-rw-r--r--  Documentation/gpu/i915.rst | 8
-rw-r--r--  Documentation/gpu/todo.rst | 53
-rw-r--r--  Documentation/hwmon/adm1177.rst | 3
-rw-r--r--  Documentation/hwmon/xdpe12284.rst | 1
-rw-r--r--  Documentation/kbuild/makefiles.rst | 5
-rw-r--r--  Documentation/networking/phy.rst | 5
-rw-r--r--  Documentation/power/index.rst | 1
-rw-r--r--  Documentation/process/embargoed-hardware-issues.rst | 8
-rw-r--r--  Documentation/sphinx/parallel-wrapper.sh | 2
-rw-r--r--  Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst | 2
-rw-r--r--  Documentation/virt/guest-halt-polling.rst (renamed from Documentation/virtual/guest-halt-polling.txt) | 12
-rw-r--r--  Documentation/virt/index.rst | 2
-rw-r--r--  Documentation/virt/kvm/api.rst (renamed from Documentation/virt/kvm/api.txt) | 3383
-rw-r--r--  Documentation/virt/kvm/arm/hyp-abi.rst (renamed from Documentation/virt/kvm/arm/hyp-abi.txt) | 28
-rw-r--r--  Documentation/virt/kvm/arm/index.rst | 12
-rw-r--r--  Documentation/virt/kvm/arm/psci.rst (renamed from Documentation/virt/kvm/arm/psci.txt) | 46
-rw-r--r--  Documentation/virt/kvm/devices/arm-vgic-its.rst (renamed from Documentation/virt/kvm/devices/arm-vgic-its.txt) | 106
-rw-r--r--  Documentation/virt/kvm/devices/arm-vgic-v3.rst (renamed from Documentation/virt/kvm/devices/arm-vgic-v3.txt) | 132
-rw-r--r--  Documentation/virt/kvm/devices/arm-vgic.rst (renamed from Documentation/virt/kvm/devices/arm-vgic.txt) | 89
-rw-r--r--  Documentation/virt/kvm/devices/index.rst | 19
-rw-r--r--  Documentation/virt/kvm/devices/mpic.rst (renamed from Documentation/virt/kvm/devices/mpic.txt) | 11
-rw-r--r--  Documentation/virt/kvm/devices/s390_flic.rst (renamed from Documentation/virt/kvm/devices/s390_flic.txt) | 70
-rw-r--r--  Documentation/virt/kvm/devices/vcpu.rst | 114
-rw-r--r--  Documentation/virt/kvm/devices/vcpu.txt | 76
-rw-r--r--  Documentation/virt/kvm/devices/vfio.rst (renamed from Documentation/virt/kvm/devices/vfio.txt) | 25
-rw-r--r--  Documentation/virt/kvm/devices/vm.rst (renamed from Documentation/virt/kvm/devices/vm.txt) | 206
-rw-r--r--  Documentation/virt/kvm/devices/xics.rst (renamed from Documentation/virt/kvm/devices/xics.txt) | 28
-rw-r--r--  Documentation/virt/kvm/devices/xive.rst (renamed from Documentation/virt/kvm/devices/xive.txt) | 152
-rw-r--r--  Documentation/virt/kvm/halt-polling.rst (renamed from Documentation/virt/kvm/halt-polling.txt) | 90
-rw-r--r--  Documentation/virt/kvm/hypercalls.rst (renamed from Documentation/virt/kvm/hypercalls.txt) | 129
-rw-r--r--  Documentation/virt/kvm/index.rst | 16
-rw-r--r--  Documentation/virt/kvm/locking.rst | 243
-rw-r--r--  Documentation/virt/kvm/locking.txt | 215
-rw-r--r--  Documentation/virt/kvm/mmu.rst (renamed from Documentation/virt/kvm/mmu.txt) | 62
-rw-r--r--  Documentation/virt/kvm/msr.rst (renamed from Documentation/virt/kvm/msr.txt) | 147
-rw-r--r--  Documentation/virt/kvm/nested-vmx.rst (renamed from Documentation/virt/kvm/nested-vmx.txt) | 37
-rw-r--r--  Documentation/virt/kvm/ppc-pv.rst (renamed from Documentation/virt/kvm/ppc-pv.txt) | 26
-rw-r--r--  Documentation/virt/kvm/review-checklist.rst (renamed from Documentation/virt/kvm/review-checklist.txt) | 3
-rw-r--r--  Documentation/virt/kvm/s390-diag.rst (renamed from Documentation/virt/kvm/s390-diag.txt) | 13
-rw-r--r--  Documentation/virt/kvm/timekeeping.rst (renamed from Documentation/virt/kvm/timekeeping.txt) | 223
-rw-r--r--  Documentation/virt/uml/user_mode_linux.rst (renamed from Documentation/virt/uml/UserModeLinux-HOWTO.txt) | 1810
-rw-r--r--  Documentation/x86/index.rst | 1
-rw-r--r--  MAINTAINERS | 156
-rw-r--r--  Makefile | 6
-rw-r--r--  arch/Kconfig | 5
-rw-r--r--  arch/arm/boot/dts/am437x-idk-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/bcm2711-rpi-4-b.dts | 3
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts | 2
-rw-r--r--  arch/arm/boot/dts/dra7-evm.dts | 4
-rw-r--r--  arch/arm/boot/dts/dra7-l4.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/dra7.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/dra76x.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/dra7xx-clocks.dtsi | 12
-rw-r--r--  arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts | 4
-rw-r--r--  arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx7-colibri.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/imx7d.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/ls1021a.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/motorola-mapphone-common.dtsi | 13
-rw-r--r--  arch/arm/boot/dts/r8a7779.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/stih410-b2260.dts | 3
-rw-r--r--  arch/arm/boot/dts/stihxxx-b2120.dtsi | 2
-rw-r--r--  arch/arm/configs/am200epdkit_defconfig | 2
-rw-r--r--  arch/arm/configs/axm55xx_defconfig | 1
-rw-r--r--  arch/arm/configs/bcm2835_defconfig | 1
-rw-r--r--  arch/arm/configs/clps711x_defconfig | 1
-rw-r--r--  arch/arm/configs/cns3420vb_defconfig | 2
-rw-r--r--  arch/arm/configs/colibri_pxa300_defconfig | 1
-rw-r--r--  arch/arm/configs/collie_defconfig | 2
-rw-r--r--  arch/arm/configs/davinci_all_defconfig | 4
-rw-r--r--  arch/arm/configs/efm32_defconfig | 2
-rw-r--r--  arch/arm/configs/ep93xx_defconfig | 1
-rw-r--r--  arch/arm/configs/eseries_pxa_defconfig | 2
-rw-r--r--  arch/arm/configs/ezx_defconfig | 1
-rw-r--r--  arch/arm/configs/h3600_defconfig | 2
-rw-r--r--  arch/arm/configs/h5000_defconfig | 1
-rw-r--r--  arch/arm/configs/imote2_defconfig | 1
-rw-r--r--  arch/arm/configs/imx_v4_v5_defconfig | 2
-rw-r--r--  arch/arm/configs/integrator_defconfig | 2
-rw-r--r--  arch/arm/configs/lpc18xx_defconfig | 4
-rw-r--r--  arch/arm/configs/magician_defconfig | 2
-rw-r--r--  arch/arm/configs/moxart_defconfig | 1
-rw-r--r--  arch/arm/configs/multi_v7_defconfig | 2
-rw-r--r--  arch/arm/configs/mxs_defconfig | 2
-rw-r--r--  arch/arm/configs/omap1_defconfig | 2
-rw-r--r--  arch/arm/configs/omap2plus_defconfig | 8
-rw-r--r--  arch/arm/configs/palmz72_defconfig | 2
-rw-r--r--  arch/arm/configs/pcm027_defconfig | 2
-rw-r--r--  arch/arm/configs/pleb_defconfig | 2
-rw-r--r--  arch/arm/configs/realview_defconfig | 1
-rw-r--r--  arch/arm/configs/sama5_defconfig | 3
-rw-r--r--  arch/arm/configs/shmobile_defconfig | 2
-rw-r--r--  arch/arm/configs/socfpga_defconfig | 1
-rw-r--r--  arch/arm/configs/stm32_defconfig | 2
-rw-r--r--  arch/arm/configs/sunxi_defconfig | 3
-rw-r--r--  arch/arm/configs/u300_defconfig | 2
-rw-r--r--  arch/arm/configs/versatile_defconfig | 2
-rw-r--r--  arch/arm/configs/vexpress_defconfig | 2
-rw-r--r--  arch/arm/configs/viper_defconfig | 1
-rw-r--r--  arch/arm/configs/zeus_defconfig | 2
-rw-r--r--  arch/arm/configs/zx_defconfig | 1
-rw-r--r--  arch/arm/include/asm/kvm_host.h | 3
-rw-r--r--  arch/arm/kernel/ftrace.c | 7
-rw-r--r--  arch/arm/kernel/patch.c | 19
-rw-r--r--  arch/arm/mach-imx/Makefile | 2
-rw-r--r--  arch/arm/mach-imx/common.h | 4
-rw-r--r--  arch/arm/mach-imx/resume-imx6.S | 24
-rw-r--r--  arch/arm/mach-imx/suspend-imx6.S | 14
-rw-r--r--  arch/arm/mach-meson/Kconfig | 1
-rw-r--r--  arch/arm/mach-npcm/Kconfig | 2
-rw-r--r--  arch/arm/mach-omap2/Makefile | 2
-rw-r--r--  arch/arm/mach-omap2/io.c | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts | 2
-rw-r--r--  arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts | 1
-rw-r--r--  arch/arm64/boot/dts/arm/fvp-base-revc.dts | 8
-rw-r--r--  arch/arm64/boot/dts/freescale/imx8qxp-mek.dts | 5
-rw-r--r--  arch/arm64/boot/dts/intel/socfpga_agilex.dtsi | 6
-rw-r--r--  arch/arm64/configs/defconfig | 6
-rw-r--r--  arch/arm64/include/asm/arch_gicv3.h | 2
-rw-r--r--  arch/arm64/include/asm/cache.h | 2
-rw-r--r--  arch/arm64/include/asm/cacheflush.h | 2
-rw-r--r--  arch/arm64/include/asm/cpufeature.h | 10
-rw-r--r--  arch/arm64/include/asm/exception.h | 4
-rw-r--r--  arch/arm64/include/asm/io.h | 4
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h | 48
-rw-r--r--  arch/arm64/include/asm/kvm_host.h | 32
-rw-r--r--  arch/arm64/include/asm/kvm_hyp.h | 7
-rw-r--r--  arch/arm64/include/asm/kvm_mmu.h | 3
-rw-r--r--  arch/arm64/include/asm/lse.h | 2
-rw-r--r--  arch/arm64/include/asm/memory.h | 2
-rw-r--r--  arch/arm64/include/asm/spinlock.h | 6
-rw-r--r--  arch/arm64/include/asm/virt.h | 2
-rw-r--r--  arch/arm64/kernel/kaslr.c | 1
-rw-r--r--  arch/arm64/kernel/process.c | 13
-rw-r--r--  arch/arm64/kernel/time.c | 2
-rw-r--r--  arch/arm64/kvm/hyp/switch.c | 39
-rw-r--r--  arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c | 4
-rw-r--r--  arch/arm64/mm/context.c | 20
-rw-r--r--  arch/csky/Kconfig | 51
-rw-r--r--  arch/csky/Kconfig.platforms | 9
-rw-r--r--  arch/csky/abiv1/inc/abi/cacheflush.h | 5
-rw-r--r--  arch/csky/abiv1/inc/abi/entry.h | 19
-rw-r--r--  arch/csky/abiv2/cacheflush.c | 84
-rw-r--r--  arch/csky/abiv2/inc/abi/cacheflush.h | 33
-rw-r--r--  arch/csky/abiv2/inc/abi/entry.h | 11
-rw-r--r--  arch/csky/configs/defconfig | 8
-rw-r--r--  arch/csky/include/asm/Kbuild | 1
-rw-r--r--  arch/csky/include/asm/cache.h | 1
-rw-r--r--  arch/csky/include/asm/cacheflush.h | 1
-rw-r--r--  arch/csky/include/asm/fixmap.h | 9
-rw-r--r--  arch/csky/include/asm/memory.h | 25
-rw-r--r--  arch/csky/include/asm/mmu.h | 1
-rw-r--r--  arch/csky/include/asm/mmu_context.h | 2
-rw-r--r--  arch/csky/include/asm/pci.h | 34
-rw-r--r--  arch/csky/include/asm/pgtable.h | 6
-rw-r--r--  arch/csky/include/asm/stackprotector.h | 29
-rw-r--r--  arch/csky/include/asm/tcm.h | 24
-rw-r--r--  arch/csky/include/uapi/asm/unistd.h | 3
-rw-r--r--  arch/csky/kernel/atomic.S | 8
-rw-r--r--  arch/csky/kernel/process.c | 13
-rw-r--r--  arch/csky/kernel/setup.c | 5
-rw-r--r--  arch/csky/kernel/smp.c | 2
-rw-r--r--  arch/csky/kernel/time.c | 2
-rw-r--r--  arch/csky/kernel/vmlinux.lds.S | 49
-rw-r--r--  arch/csky/mm/Makefile | 3
-rw-r--r--  arch/csky/mm/cachev1.c | 5
-rw-r--r--  arch/csky/mm/cachev2.c | 45
-rw-r--r--  arch/csky/mm/highmem.c | 64
-rw-r--r--  arch/csky/mm/init.c | 92
-rw-r--r--  arch/csky/mm/syscache.c | 13
-rw-r--r--  arch/csky/mm/tcm.c | 169
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4740.dtsi | 17
-rw-r--r--  arch/mips/boot/dts/ingenic/jz4780.dtsi | 17
-rw-r--r--  arch/mips/boot/dts/ingenic/x1000.dtsi | 6
-rw-r--r--  arch/mips/include/asm/sync.h | 4
-rw-r--r--  arch/mips/kernel/vpe.c | 2
-rw-r--r--  arch/mips/vdso/Makefile | 28
-rw-r--r--  arch/powerpc/include/asm/page.h | 5
-rw-r--r--  arch/powerpc/include/asm/processor.h | 4
-rw-r--r--  arch/powerpc/kernel/asm-offsets.c | 12
-rw-r--r--  arch/powerpc/kernel/cputable.c | 4
-rw-r--r--  arch/powerpc/kernel/eeh_driver.c | 21
-rw-r--r--  arch/powerpc/kernel/entry_32.S | 13
-rw-r--r--  arch/powerpc/kernel/head_32.S | 155
-rw-r--r--  arch/powerpc/kernel/head_32.h | 21
-rw-r--r--  arch/powerpc/kernel/head_8xx.S | 2
-rw-r--r--  arch/powerpc/kernel/hw_breakpoint.c | 12
-rw-r--r--  arch/powerpc/kernel/idle_6xx.S | 8
-rw-r--r--  arch/powerpc/kernel/signal.c | 17
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 28
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 22
-rw-r--r--  arch/powerpc/kernel/vmlinux.lds.S | 6
-rw-r--r--  arch/powerpc/mm/book3s32/hash_low.S | 52
-rw-r--r--  arch/powerpc/mm/book3s32/mmu.c | 10
-rw-r--r--  arch/powerpc/mm/hugetlbpage.c | 29
-rw-r--r--  arch/powerpc/mm/kasan/kasan_init_32.c | 3
-rw-r--r--  arch/powerpc/mm/mem.c | 2
-rw-r--r--  arch/powerpc/xmon/xmon.c | 5
-rw-r--r--  arch/riscv/Kconfig | 1
-rw-r--r--  arch/riscv/Kconfig.socs | 24
-rw-r--r--  arch/riscv/Makefile | 6
-rw-r--r--  arch/riscv/boot/.gitignore | 2
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts | 5
-rw-r--r--  arch/riscv/configs/defconfig | 17
-rw-r--r--  arch/riscv/configs/rv32_defconfig | 18
-rw-r--r--  arch/riscv/include/asm/csr.h | 12
-rw-r--r--  arch/riscv/include/asm/syscall.h | 7
-rw-r--r--  arch/riscv/kernel/entry.S | 11
-rw-r--r--  arch/riscv/kernel/head.S | 6
-rw-r--r--  arch/riscv/kernel/module.c | 16
-rw-r--r--  arch/riscv/kernel/ptrace.c | 11
-rw-r--r--  arch/riscv/kernel/traps.c | 4
-rw-r--r--  arch/riscv/mm/init.c | 2
-rw-r--r--  arch/riscv/mm/kasan_init.c | 53
-rw-r--r--  arch/s390/Makefile | 2
-rw-r--r--  arch/s390/boot/Makefile | 2
-rw-r--r--  arch/s390/boot/kaslr.c | 2
-rw-r--r--  arch/s390/boot/uv.c | 3
-rw-r--r--  arch/s390/configs/debug_defconfig | 28
-rw-r--r--  arch/s390/configs/defconfig | 11
-rw-r--r--  arch/s390/include/asm/page.h | 2
-rw-r--r--  arch/s390/include/asm/pgtable.h | 6
-rw-r--r--  arch/s390/include/asm/processor.h | 1
-rw-r--r--  arch/s390/include/asm/qdio.h | 6
-rw-r--r--  arch/s390/include/asm/timex.h | 2
-rw-r--r--  arch/s390/pci/pci.c | 4
-rw-r--r--  arch/x86/boot/compressed/kaslr_64.c | 3
-rw-r--r--  arch/x86/events/amd/core.c | 1
-rw-r--r--  arch/x86/events/intel/core.c | 1
-rw-r--r--  arch/x86/events/intel/cstate.c | 22
-rw-r--r--  arch/x86/events/intel/ds.c | 2
-rw-r--r--  arch/x86/events/msr.c | 3
-rw-r--r--  arch/x86/include/asm/io_bitmap.h | 9
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h | 13
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 19
-rw-r--r--  arch/x86/include/asm/msr-index.h | 2
-rw-r--r--  arch/x86/include/asm/paravirt.h | 7
-rw-r--r--  arch/x86/include/asm/paravirt_types.h | 4
-rw-r--r--  arch/x86/include/asm/vmx.h | 2
-rw-r--r--  arch/x86/include/asm/vmxfeatures.h | 1
-rw-r--r--  arch/x86/include/uapi/asm/kvm.h | 1
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 14
-rw-r--r--  arch/x86/kernel/cpu/common.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mce/amd.c | 50
-rw-r--r--  arch/x86/kernel/ima_arch.c | 6
-rw-r--r--  arch/x86/kernel/kvm.c | 65
-rw-r--r--  arch/x86/kernel/paravirt.c | 5
-rw-r--r--  arch/x86/kernel/process.c | 2
-rw-r--r--  arch/x86/kvm/Kconfig | 13
-rw-r--r--  arch/x86/kvm/Makefile | 1
-rw-r--r--  arch/x86/kvm/emulate.c | 36
-rw-r--r--  arch/x86/kvm/irq_comm.c | 2
-rw-r--r--  arch/x86/kvm/lapic.c | 12
-rw-r--r--  arch/x86/kvm/mmu.h | 13
-rw-r--r--  arch/x86/kvm/mmu/mmu.c | 11
-rw-r--r--  arch/x86/kvm/mmu/paging_tmpl.h | 2
-rw-r--r--  arch/x86/kvm/mmutrace.h | 2
-rw-r--r--  arch/x86/kvm/svm.c | 72
-rw-r--r--  arch/x86/kvm/vmx/capabilities.h | 1
-rw-r--r--  arch/x86/kvm/vmx/nested.c | 122
-rw-r--r--  arch/x86/kvm/vmx/nested.h | 10
-rw-r--r--  arch/x86/kvm/vmx/vmx.c | 152
-rw-r--r--  arch/x86/kvm/vmx/vmx.h | 3
-rw-r--r--  arch/x86/kvm/x86.c | 58
-rw-r--r--  arch/x86/mm/dump_pagetables.c | 7
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 151
-rw-r--r--  arch/x86/xen/enlighten_pv.c | 32
-rw-r--r--  block/bfq-cgroup.c | 9
-rw-r--r--  block/blk-core.c | 6
-rw-r--r--  block/blk-flush.c | 2
-rw-r--r--  block/blk-mq-sched.c | 22
-rw-r--r--  block/blk-mq-tag.c | 4
-rw-r--r--  block/blk-mq-tag.h | 4
-rw-r--r--  block/blk-mq.c | 28
-rw-r--r--  block/blk-mq.h | 5
-rw-r--r--  crypto/Kconfig | 4
-rw-r--r--  crypto/hash_info.c | 2
-rw-r--r--  crypto/testmgr.c | 36
-rw-r--r--  drivers/acpi/acpi_watchdog.c | 15
-rw-r--r--  drivers/acpi/acpica/achware.h | 2
-rw-r--r--  drivers/acpi/acpica/evevent.c | 45
-rw-r--r--  drivers/acpi/acpica/evxfgpe.c | 32
-rw-r--r--  drivers/acpi/acpica/hwgpe.c | 71
-rw-r--r--  drivers/acpi/ec.c | 44
-rw-r--r--  drivers/acpi/sleep.c | 57
-rw-r--r--  drivers/android/binder.c | 9
-rw-r--r--  drivers/android/binder_internal.h | 2
-rw-r--r--  drivers/android/binderfs.c | 7
-rw-r--r--  drivers/base/core.c | 27
-rw-r--r--  drivers/base/swnode.c | 14
-rw-r--r--  drivers/block/floppy.c | 7
-rw-r--r--  drivers/block/null_blk.h | 3
-rw-r--r--  drivers/block/null_blk_main.c | 2
-rw-r--r--  drivers/block/paride/pcd.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 80
-rw-r--r--  drivers/bus/moxtet.c | 2
-rw-r--r--  drivers/bus/ti-sysc.c | 4
-rw-r--r--  drivers/cdrom/gdrom.c | 2
-rw-r--r--  drivers/char/ipmi/ipmb_dev_int.c | 33
-rw-r--r--  drivers/char/ipmi/ipmi_ssif.c | 10
-rw-r--r--  drivers/char/tpm/Makefile | 8
-rw-r--r--  drivers/char/tpm/tpm2-cmd.c | 2
-rw-r--r--  drivers/char/tpm/tpm_tis_spi_main.c (renamed from drivers/char/tpm/tpm_tis_spi.c) | 0
-rw-r--r--  drivers/cpufreq/cpufreq.c | 17
-rw-r--r--  drivers/dax/super.c | 2
-rw-r--r--  drivers/devfreq/devfreq.c | 4
-rw-r--r--  drivers/dma-buf/Kconfig | 12
-rw-r--r--  drivers/dma-buf/dma-buf.c | 111
-rw-r--r--  drivers/dma/coh901318.c | 4
-rw-r--r--  drivers/dma/idxd/cdev.c | 4
-rw-r--r--  drivers/dma/idxd/sysfs.c | 27
-rw-r--r--  drivers/dma/imx-sdma.c | 5
-rw-r--r--  drivers/dma/tegra20-apb-dma.c | 6
-rw-r--r--  drivers/dma/ti/k3-udma.c | 493
-rw-r--r--  drivers/edac/edac_mc.c | 12
-rw-r--r--  drivers/edac/edac_mc_sysfs.c | 18
-rw-r--r--  drivers/edac/synopsys_edac.c | 22
-rw-r--r--  drivers/firmware/efi/efi.c | 4
-rw-r--r--  drivers/firmware/imx/imx-scu.c | 27
-rw-r--r--  drivers/firmware/imx/misc.c | 8
-rw-r--r--  drivers/firmware/imx/scu-pd.c | 2
-rw-r--r--  drivers/fsi/Kconfig | 1
-rw-r--r--  drivers/gpio/gpio-bd71828.c | 10
-rw-r--r--  drivers/gpio/gpio-sifive.c | 6
-rw-r--r--  drivers/gpio/gpio-xilinx.c | 5
-rw-r--r--  drivers/gpio/gpiolib.c | 30
-rw-r--r--  drivers/gpu/drm/Kconfig | 8
-rw-r--r--  drivers/gpu/drm/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/acp/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 34
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 95
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 169
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 183
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 144
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c | 124
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 80
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 249
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 17
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 98
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 93
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c | 51
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 111
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 191
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 250
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atom.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/atombios_dp.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/dce_virtual.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 282
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 148
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 504
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c | 67
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 146
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 12
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h | 338
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/navi10_ih.c | 36
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 15
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/nv.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 20
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 14
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.c | 46
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/soc15_common.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c | 265
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 152
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 11
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 150
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_topology.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 388
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 37
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 91
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c | 242
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 46
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c | 26
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 41
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 162
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 197
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 60
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 309
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 200
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 48
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_stream.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c | 23
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c | 2204
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c | 25
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 92
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 58
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c | 172
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 129
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c | 78
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 126
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 318
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c | 138
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c | 39
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c | 22
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 214
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dm_cp_psp.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 12
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c | 11
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/opp.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 72
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h | 13
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h | 75
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h | 51
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c | 69
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c | 67
-rw-r--r--  drivers/gpu/drm/amd/display/include/dal_asic_id.h | 28
-rw-r--r--  drivers/gpu/drm/amd/display/include/dpcd_defs.h | 8
-rw-r--r--  drivers/gpu/drm/amd/display/include/logger_types.h | 63
-rw-r--r--  drivers/gpu/drm/amd/display/modules/freesync/freesync.c | 8
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c | 69
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h | 60
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c | 15
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c | 36
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h | 17
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c | 183
-rw-r--r--  drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h | 32
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h | 10
-rw-r--r--  drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/modules/vmid/vmid.c | 16
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h | 2
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h | 69
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h | 29
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h | 69
-rw-r--r--  drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h | 29
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 31
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amdgpu_smu.c | 168
-rw-r--r--  drivers/gpu/drm/amd/powerplay/arcturus_ppt.c | 56
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c | 7
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h | 6
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/pp_debug.h | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h | 3
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h | 10
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h | 46
-rw-r--r--  drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/navi10_ppt.c | 95
-rw-r--r--  drivers/gpu/drm/amd/powerplay/renoir_ppt.c | 54
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_internal.h | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v11_0.c | 136
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smu_v12_0.c | 91
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/powerplay/vega20_ppt.c | 70
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_crtc.c | 16
-rw-r--r--  drivers/gpu/drm/arc/arcpgu_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 8
-rw-r--r--  drivers/gpu/drm/ast/ast_drv.h | 6
-rw-r--r--  drivers/gpu/drm/ast/ast_main.c | 24
-rw-r--r--  drivers/gpu/drm/ast/ast_mode.c | 27
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 2
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 6
-rw-r--r--  drivers/gpu/drm/bochs/bochs_hw.c | 24
-rw-r--r--  drivers/gpu/drm/bochs/bochs_kms.c | 34
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 51
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 6
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/Kconfig | 13
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/Makefile | 3
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511.h | 40
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 28
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx6345.c | 23
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c | 13
-rw-r--r--  drivers/gpu/drm/bridge/analogix/analogix_dp_core.c | 54
-rw-r--r--  drivers/gpu/drm/bridge/cdns-dsi.c | 6
-rw-r--r--  drivers/gpu/drm/bridge/display-connector.c | 295
-rw-r--r--  drivers/gpu/drm/bridge/dumb-vga-dac.c | 300
-rw-r--r--  drivers/gpu/drm/bridge/lvds-codec.c | 21
-rw-r--r--  drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/nxp-ptn3460.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/panel.c | 23
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8622.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/parade-ps8640.c | 349
-rw-r--r--  drivers/gpu/drm/bridge/sii902x.c | 8
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 3
-rw-r--r--  drivers/gpu/drm/bridge/simple-bridge.c | 342
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-hdmi.c | 329
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c | 11
-rw-r--r--  drivers/gpu/drm/bridge/tc358764.c | 11
-rw-r--r--  drivers/gpu/drm/bridge/tc358767.c | 17
-rw-r--r--  drivers/gpu/drm/bridge/tc358768.c | 1046
-rw-r--r--  drivers/gpu/drm/bridge/thc63lvd1024.c | 5
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 267
-rw-r--r--  drivers/gpu/drm/bridge/ti-tfp410.c | 238
-rw-r--r--  drivers/gpu/drm/bridge/ti-tpd12s015.c | 211
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus.c | 51
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 117
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 83
-rw-r--r--  drivers/gpu/drm/drm_atomic_state_helper.c | 102
-rw-r--r--  drivers/gpu/drm/drm_auth.c | 8
-rw-r--r--  drivers/gpu/drm/drm_bridge.c | 751
-rw-r--r--  drivers/gpu/drm/drm_bridge_connector.c | 379
-rw-r--r--  drivers/gpu/drm/drm_bufs.c | 40
-rw-r--r--  drivers/gpu/drm/drm_client.c | 2
-rw-r--r--  drivers/gpu/drm/drm_client_modeset.c | 15
-rw-r--r--  drivers/gpu/drm/drm_connector.c | 96
-rw-r--r--  drivers/gpu/drm/drm_context.c | 28
-rw-r--r--  drivers/gpu/drm/drm_crtc_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_crtc_internal.h | 2
-rw-r--r--  drivers/gpu/drm/drm_debugfs.c | 3
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 2
-rw-r--r--  drivers/gpu/drm/drm_dma.c | 21
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 141
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 203
-rw-r--r--  drivers/gpu/drm/drm_drv.c | 20
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 213
-rw-r--r--  drivers/gpu/drm/drm_fb_helper.c | 22
-rw-r--r--  drivers/gpu/drm/drm_file.c | 90
-rw-r--r--  drivers/gpu/drm/drm_format_helper.c | 2
-rw-r--r--  drivers/gpu/drm/drm_framebuffer.c | 122
-rw-r--r--  drivers/gpu/drm/drm_gem.c | 2
-rw-r--r--  drivers/gpu/drm/drm_gem_shmem_helper.c | 16
-rw-r--r--  drivers/gpu/drm/drm_gem_vram_helper.c | 61
-rw-r--r--  drivers/gpu/drm/drm_hdcp.c | 158
-rw-r--r--  drivers/gpu/drm/drm_internal.h | 5
-rw-r--r--  drivers/gpu/drm/drm_ioctl.c | 1
-rw-r--r--  drivers/gpu/drm/drm_irq.c | 4
-rw-r--r--  drivers/gpu/drm/drm_lock.c | 11
-rw-r--r--  drivers/gpu/drm/drm_mipi_dbi.c | 39
-rw-r--r--  drivers/gpu/drm/drm_mm.c | 10
-rw-r--r--  drivers/gpu/drm/drm_modes.c | 7
-rw-r--r--  drivers/gpu/drm/drm_pci.c | 82
-rw-r--r--  drivers/gpu/drm/drm_scatter.c | 3
-rw-r--r--  drivers/gpu/drm/drm_simple_kms_helper.c | 46
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 87
-rw-r--r--  drivers/gpu/drm/drm_sysfs.c | 4
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 177
-rw-r--r--  drivers/gpu/drm/drm_vm.c | 26
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 5
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 17
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fbdev.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 24
-rw-r--r--  drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c | 2
-rw-r--r--  drivers/gpu/drm/gma500/cdv_intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 6
-rw-r--r--  drivers/gpu/drm/gma500/intel_bios.h | 2
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.c | 4
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 6
-rw-r--r--  drivers/gpu/drm/gma500/psb_intel_display.c | 3
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.c | 12
-rw-r--r--  drivers/gpu/drm/gma500/psb_irq.h | 7
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c | 79
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c | 9
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h | 13
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c | 11
-rw-r--r--  drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h | 1
-rw-r--r--  drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c | 20
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 10
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 12
-rw-r--r--  drivers/gpu/drm/i915/Kconfig.profile | 25
-rw-r--r--  drivers/gpu/drm/i915/Makefile | 21
-rw-r--r--  drivers/gpu/drm/i915/display/icl_dsi.c | 406
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.c | 89
-rw-r--r--  drivers/gpu/drm/i915/display/intel_acpi.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.c | 57
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic.h | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.c | 97
-rw-r--r--  drivers/gpu/drm/i915/display/intel_atomic_plane.h | 8
-rw-r--r--  drivers/gpu/drm/i915/display/intel_audio.c | 255
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.c | 450
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bios.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.c | 63
-rw-r--r--  drivers/gpu/drm/i915/display/intel_bw.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.c | 1106
-rw-r--r--  drivers/gpu/drm/i915/display/intel_cdclk.h | 73
-rw-r--r--  drivers/gpu/drm/i915/display/intel_color.c | 602
-rw-r--r--  drivers/gpu/drm/i915/display/intel_combo_phy.c | 163
-rw-r--r--  drivers/gpu/drm/i915/display/intel_connector.c | 5
-rw-r--r--  drivers/gpu/drm/i915/display/intel_crt.c | 128
-rw-r--r--  drivers/gpu/drm/i915/display/intel_csr.c (renamed from drivers/gpu/drm/i915/intel_csr.c) | 46
-rw-r--r--  drivers/gpu/drm/i915/display/intel_csr.h (renamed from drivers/gpu/drm/i915/intel_csr.h) | 0
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.c | 1387
-rw-r--r--  drivers/gpu/drm/i915/display/intel_ddi.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_de.h | 72
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.c | 3606
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display.h | 17
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.c | 2134
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_debugfs.h | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.c | 751
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_power.h | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_display_types.h | 119
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.c | 876
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c | 193
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_link_training.c | 75
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dp_mst.c | 96
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpio_phy.c | 108
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 1521
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 14
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsb.c | 58
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c | 6
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 218
-rw-r--r--  drivers/gpu/drm/i915/display/intel_dvo.c | 50
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.c | 264
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbc.h | 13
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fbdev.c | 18
-rw-r--r--  drivers/gpu/drm/i915/display/intel_fifo_underrun.c | 66
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.c | 223
-rw-r--r--  drivers/gpu/drm/i915/display/intel_global_state.h | 87
-rw-r--r--  drivers/gpu/drm/i915/display/intel_gmbus.c | 111
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.c | 527
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdcp.h | 7
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.c | 433
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.c | 203
-rw-r--r--  drivers/gpu/drm/i915/display/intel_hotplug.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lpe_audio.c | 39
-rw-r--r--  drivers/gpu/drm/i915/display/intel_lvds.c | 108
-rw-r--r--  drivers/gpu/drm/i915/display/intel_opregion.c | 223
-rw-r--r--  drivers/gpu/drm/i915/display/intel_overlay.c | 69
-rw-r--r--  drivers/gpu/drm/i915/display/intel_panel.c | 407
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_pipe_crc.h | 4
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.c | 388
-rw-r--r--  drivers/gpu/drm/i915/display/intel_psr.h | 1
-rw-r--r--  drivers/gpu/drm/i915/display/intel_quirks.c | 20
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.c | 114
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sdvo.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_sprite.c | 465
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tc.c | 51
-rw-r--r--  drivers/gpu/drm/i915/display/intel_tv.c | 165
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vbt_defs.h | 2
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vdsc.c | 445
-rw-r--r--  drivers/gpu/drm/i915/display/intel_vga.c | 14
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi.c | 494
-rw-r--r--  drivers/gpu/drm/i915/display/vlv_dsi_pll.c | 96
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_client_blt.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.c | 520
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_context_types.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 825
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_internal.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_mman.c | 134
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_blt.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_object_types.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pages.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_phys.c | 105
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_pm.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shmem.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_shrinker.c | 18
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.c | 136
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_stolen.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 1
-rw-r--r--  drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 21
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/huge_pages.c | 102
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c | 178
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c | 74
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gem/selftests/mock_context.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.c | 402
-rw-r--r--  drivers/gpu/drm/i915/gt/gen7_renderclear.h | 15
-rw-r--r--  drivers/gpu/drm/i915/gt/gen8_ppgtt.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gt/hsw_clear_kernel.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_breadcrumbs.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.c | 60
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context.h | 20
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_param.c | 63
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_param.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_sseu.c | 98
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_context_types.h | 16
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine.h | 37
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_cs.c | 187
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_pm.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_types.h | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_engine_user.c | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ggtt.c | 106
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gpu_commands.h | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.c | 68
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.c | 117
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_irq.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_pm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gt_requests.c | 17
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.c | 65
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_gtt.h | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_llc.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc.c | 818
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 1
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_mocs.c | 76
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rc6.c | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_reset.c | 104
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_submission.c | 236
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_ring_types.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_rps.c | 65
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_timeline.c | 14
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds.c | 288
-rw-r--r--  drivers/gpu/drm/i915/gt/intel_workarounds_types.h | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/ivb_clear_kernel.c | 61
-rw-r--r--  drivers/gpu/drm/i915/gt/mock_engine.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_llc.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_lrc.c | 2100
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_mocs.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_rc6.c | 5
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_reset.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_ring_submission.c | 296
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_timeline.c | 188
-rw-r--r--  drivers/gpu/drm/i915/gt/selftest_workarounds.c | 9
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.c | 445
-rw-r--r--  drivers/gpu/drm/i915/gt/sysfs_engines.h | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.c | 30
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc.h | 23
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c | 255
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 13
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h | 19
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc.h | 8
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.c | 69
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc.h | 62
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c | 11
-rw-r--r--  drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h | 18
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 84
-rw-r--r--  drivers/gpu/drm/i915/gvt/cfg_space.c | 27
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 208
-rw-r--r--  drivers/gpu/drm/i915/gvt/debugfs.c | 45
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/dmabuf.c | 10
-rw-r--r--  drivers/gpu/drm/i915/gvt/edid.c | 25
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.c103
-rw-r--r--drivers/gpu/drm/i915/gvt/execlist.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/fb_decoder.c6
-rw-r--r--drivers/gpu/drm/i915/gvt/firmware.c20
-rw-r--r--drivers/gpu/drm/i915/gvt/gtt.c67
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.c43
-rw-r--r--drivers/gpu/drm/i915/gvt/gvt.h62
-rw-r--r--drivers/gpu/drm/i915/gvt/handlers.c211
-rw-r--r--drivers/gpu/drm/i915/gvt/interrupt.c21
-rw-r--r--drivers/gpu/drm/i915/gvt/kvmgt.c309
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.c32
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio.h4
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.c127
-rw-r--r--drivers/gpu/drm/i915/gvt/mmio_context.h5
-rw-r--r--drivers/gpu/drm/i915/gvt/sched_policy.c25
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.c256
-rw-r--r--drivers/gpu/drm/i915/gvt/scheduler.h9
-rw-r--r--drivers/gpu/drm/i915/gvt/vgpu.c20
-rw-r--r--drivers/gpu/drm/i915/i915_active.c174
-rw-r--r--drivers/gpu/drm/i915/i915_active.h17
-rw-r--r--drivers/gpu/drm/i915/i915_buddy.c3
-rw-r--r--drivers/gpu/drm/i915/i915_cmd_parser.c29
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2428
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.h8
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.c250
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs_params.h14
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c1206
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h237
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c32
-rw-r--r--drivers/gpu/drm/i915/i915_gem_evict.c17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_fence_reg.c16
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c5
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.c36
-rw-r--r--drivers/gpu/drm/i915/i915_gpu_error.h12
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.c7
-rw-r--r--drivers/gpu/drm/i915/i915_ioc32.h17
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c287
-rw-r--r--drivers/gpu/drm/i915/i915_irq.h6
-rw-r--r--drivers/gpu/drm/i915/i915_params.c11
-rw-r--r--drivers/gpu/drm/i915/i915_params.h74
-rw-r--r--drivers/gpu/drm/i915/i915_pci.c22
-rw-r--r--drivers/gpu/drm/i915/i915_perf.c153
-rw-r--r--drivers/gpu/drm/i915/i915_perf_types.h3
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.c77
-rw-r--r--drivers/gpu/drm/i915/i915_pmu.h13
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h66
-rw-r--r--drivers/gpu/drm/i915/i915_request.c352
-rw-r--r--drivers/gpu/drm/i915/i915_request.h74
-rw-r--r--drivers/gpu/drm/i915/i915_scheduler.c48
-rw-r--r--drivers/gpu/drm/i915/i915_suspend.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.c17
-rw-r--r--drivers/gpu/drm/i915/i915_sw_fence.h2
-rw-r--r--drivers/gpu/drm/i915/i915_switcheroo.c2
-rw-r--r--drivers/gpu/drm/i915/i915_sysfs.c22
-rw-r--r--drivers/gpu/drm/i915/i915_trace.h66
-rw-r--r--drivers/gpu/drm/i915/i915_utils.c4
-rw-r--r--drivers/gpu/drm/i915/i915_utils.h27
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.c72
-rw-r--r--drivers/gpu/drm/i915/i915_vgpu.h25
-rw-r--r--drivers/gpu/drm/i915/i915_vma.c83
-rw-r--r--drivers/gpu/drm/i915/i915_vma.h2
-rw-r--r--drivers/gpu/drm/i915/i915_vma_types.h11
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.c45
-rw-r--r--drivers/gpu/drm/i915/intel_device_info.h4
-rw-r--r--drivers/gpu/drm/i915/intel_dram.c500
-rw-r--r--drivers/gpu/drm/i915/intel_dram.h14
-rw-r--r--drivers/gpu/drm/i915/intel_gvt.c21
-rw-r--r--drivers/gpu/drm/i915/intel_memory_region.c4
-rw-r--r--drivers/gpu/drm/i915/intel_pch.c66
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c765
-rw-r--r--drivers/gpu/drm/i915/intel_pm.h5
-rw-r--r--drivers/gpu/drm/i915/intel_sideband.c11
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c54
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_active.c78
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_buddy.c25
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_gem.c6
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_live_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/i915_perf_selftests.h1
-rw-r--r--drivers/gpu/drm/i915/selftests/igt_spinner.c2
-rw-r--r--drivers/gpu/drm/i915/selftests/intel_memory_region.c203
-rw-r--r--drivers/gpu/drm/i915/selftests/mock_gem_device.c1
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.c489
-rw-r--r--drivers/gpu/drm/i915/vlv_suspend.h18
-rw-r--r--drivers/gpu/drm/imx/imx-ldb.c2
-rw-r--r--drivers/gpu/drm/imx/ipuv3-plane.c2
-rw-r--r--drivers/gpu/drm/imx/parallel-display.c176
-rw-r--r--drivers/gpu/drm/ingenic/ingenic-drm.c2
-rw-r--r--drivers/gpu/drm/lima/lima_drv.c16
-rw-r--r--drivers/gpu/drm/lima/lima_drv.h1
-rw-r--r--drivers/gpu/drm/lima/lima_gem.c134
-rw-r--r--drivers/gpu/drm/lima/lima_gem.h4
-rw-r--r--drivers/gpu/drm/lima/lima_gp.c63
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.c5
-rw-r--r--drivers/gpu/drm/lima/lima_mmu.h1
-rw-r--r--drivers/gpu/drm/lima/lima_regs.h1
-rw-r--r--drivers/gpu/drm/lima/lima_sched.c35
-rw-r--r--drivers/gpu/drm/lima/lima_sched.h6
-rw-r--r--drivers/gpu/drm/lima/lima_vm.c46
-rw-r--r--drivers/gpu/drm/lima/lima_vm.h1
-rw-r--r--drivers/gpu/drm/mcde/mcde_drv.c9
-rw-r--r--drivers/gpu/drm/mcde/mcde_dsi.c5
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dpi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_crtc.c30
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c1
-rw-r--r--drivers/gpu/drm/mediatek/mtk_drm_plane.c7
-rw-r--r--drivers/gpu/drm/mediatek/mtk_dsi.c2
-rw-r--r--drivers/gpu/drm/mediatek/mtk_hdmi.c10
-rw-r--r--drivers/gpu/drm/meson/meson_dw_hdmi.c180
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.c93
-rw-r--r--drivers/gpu/drm/meson/meson_vclk.h7
-rw-r--r--drivers/gpu/drm/meson/meson_venc.c10
-rw-r--r--drivers/gpu/drm/meson/meson_venc.h4
-rw-r--r--drivers/gpu/drm/meson/meson_venc_cvbs.c6
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.h9
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_mode.c86
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gmu.c37
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_gpu.c65
-rw-r--r--drivers/gpu/drm/msm/adreno/a6xx_hfi.c85
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c1
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c4
-rw-r--r--drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c58
-rw-r--r--drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c2
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c86
-rw-r--r--drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c95
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/phy/dsi_phy.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c6
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c2
-rw-r--r--drivers/gpu/drm/msm/edp/edp_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c2
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c18
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h3
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c6
-rw-r--r--drivers/gpu/drm/nouveau/dispnv04/crtc.c4
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/disp.c26
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/head.c5
-rw-r--r--drivers/gpu/drm/nouveau/dispnv50/wndw.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_display.h11
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_vga.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/device/base.c4
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c26
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c2
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Kconfig22
-rw-r--r--drivers/gpu/drm/omapdrm/displays/Makefile4
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c97
-rw-r--r--drivers/gpu/drm/omapdrm/displays/connector-hdmi.c183
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-opa362.c137
-rw-r--r--drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c217
-rw-r--r--drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c6
-rw-r--r--drivers/gpu/drm/omapdrm/dss/Makefile2
-rw-r--r--drivers/gpu/drm/omapdrm/dss/base.c55
-rw-r--r--drivers/gpu/drm/omapdrm/dss/display.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dpi.c349
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dsi.c4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss-of.c28
-rw-r--r--drivers/gpu/drm/omapdrm/dss/dss.c46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4.c313
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.c59
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi4_core.h4
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5.c295
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.c48
-rw-r--r--drivers/gpu/drm/omapdrm/dss/hdmi5_core.h5
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c9
-rw-r--r--drivers/gpu/drm/omapdrm/dss/omapdss.h46
-rw-r--r--drivers/gpu/drm/omapdrm/dss/output.c53
-rw-r--r--drivers/gpu/drm/omapdrm/dss/sdi.c178
-rw-r--r--drivers/gpu/drm/omapdrm/dss/venc.c269
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.c247
-rw-r--r--drivers/gpu/drm/omapdrm/omap_connector.h3
-rw-r--r--drivers/gpu/drm/omapdrm/omap_crtc.c2
-rw-r--r--drivers/gpu/drm/omapdrm/omap_dmm_tiler.c4
-rw-r--r--drivers/gpu/drm/omapdrm/omap_drv.c88
-rw-r--r--drivers/gpu/drm/omapdrm/omap_encoder.c83
-rw-r--r--drivers/gpu/drm/omapdrm/omap_fbdev.c6
-rw-r--r--drivers/gpu/drm/panel/Kconfig44
-rw-r--r--drivers/gpu/drm/panel/Makefile5
-rw-r--r--drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c854
-rw-r--r--drivers/gpu/drm/panel/panel-elida-kd35t133.c352
-rw-r--r--drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c526
-rw-r--r--drivers/gpu/drm/panel/panel-ilitek-ili9322.c14
-rw-r--r--drivers/gpu/drm/panel/panel-lg-lg4573.c2
-rw-r--r--drivers/gpu/drm/panel/panel-novatek-nt35510.c1098
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-ld9040.c6
-rw-r--r--drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c293
-rw-r--r--drivers/gpu/drm/panel/panel-simple.c332
-rw-r--r--drivers/gpu/drm/panel/panel-sony-acx424akp.c4
-rw-r--r--drivers/gpu/drm/panel/panel-tpo-td028ttec1.c17
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.c123
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_device.h26
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_drv.c31
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem.h6
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c3
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_gpu.c18
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_job.c15
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_mmu.c57
-rw-r--r--drivers/gpu/drm/panfrost/panfrost_perfcnt.c11
-rw-r--r--drivers/gpu/drm/pl111/pl111_versatile.c73
-rw-r--r--drivers/gpu/drm/qxl/qxl_cmd.c2
-rw-r--r--drivers/gpu/drm/qxl/qxl_display.c43
-rw-r--r--drivers/gpu/drm/qxl/qxl_drv.c26
-rw-r--r--drivers/gpu/drm/qxl/qxl_kms.c4
-rw-r--r--drivers/gpu/drm/qxl/qxl_ttm.c6
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c1
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c25
-rw-r--r--drivers/gpu/drm/radeon/radeon_dp_mst.c27
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c61
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c35
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h9
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c6
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_du_encoder.c2
-rw-r--r--drivers/gpu/drm/rcar-du/rcar_lvds.c22
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c9
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_gem.c1
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_drm_vop.h2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_lvds.c2
-rw-r--r--drivers/gpu/drm/rockchip/rockchip_rgb.c5
-rw-r--r--drivers/gpu/drm/scheduler/gpu_scheduler_trace.h27
-rw-r--r--drivers/gpu/drm/scheduler/sched_entity.c56
-rw-r--r--drivers/gpu/drm/scheduler/sched_main.c86
-rw-r--r--drivers/gpu/drm/selftests/drm_cmdline_selftests.h1
-rw-r--r--drivers/gpu/drm/selftests/test-drm_cmdline_parser.c15
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.c11
-rw-r--r--drivers/gpu/drm/sti/sti_crtc.h2
-rw-r--r--drivers/gpu/drm/sti/sti_drv.c4
-rw-r--r--drivers/gpu/drm/sti/sti_dvo.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hda.c2
-rw-r--r--drivers/gpu/drm/sti/sti_hdmi.c2
-rw-r--r--drivers/gpu/drm/stm/drv.c2
-rw-r--r--drivers/gpu/drm/stm/dw_mipi_dsi-stm.c4
-rw-r--r--drivers/gpu/drm/stm/ltdc.c103
-rw-r--r--drivers/gpu/drm/stm/ltdc.h6
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_drv.c1
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_lvds.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_rgb.c2
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.c104
-rw-r--r--drivers/gpu/drm/sun4i/sun4i_tcon.h14
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c129
-rw-r--r--drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h2
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.c104
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_mixer.h11
-rw-r--r--drivers/gpu/drm/sun4i/sun8i_vi_layer.c66
-rw-r--r--drivers/gpu/drm/tegra/dc.c20
-rw-r--r--drivers/gpu/drm/tegra/fb.c8
-rw-r--r--drivers/gpu/drm/tegra/hdmi.c34
-rw-r--r--drivers/gpu/drm/tidss/Kconfig14
-rw-r--r--drivers/gpu/drm/tidss/Makefile12
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.c432
-rw-r--r--drivers/gpu/drm/tidss/tidss_crtc.h48
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.c2753
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc.h137
-rw-r--r--drivers/gpu/drm/tidss/tidss_dispc_regs.h243
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.c285
-rw-r--r--drivers/gpu/drm/tidss/tidss_drv.h39
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.c88
-rw-r--r--drivers/gpu/drm/tidss/tidss_encoder.h17
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.c146
-rw-r--r--drivers/gpu/drm/tidss/tidss_irq.h77
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.c299
-rw-r--r--drivers/gpu/drm/tidss/tidss_kms.h15
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.c217
-rw-r--r--drivers/gpu/drm/tidss/tidss_plane.h25
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.c202
-rw-r--r--drivers/gpu/drm/tidss/tidss_scale_coefs.h22
-rw-r--r--drivers/gpu/drm/tilcdc/tilcdc_external.c2
-rw-r--r--drivers/gpu/drm/tiny/Kconfig22
-rw-r--r--drivers/gpu/drm/tiny/Makefile1
-rw-r--r--drivers/gpu/drm/tiny/gm12u320.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9225.c9
-rw-r--r--drivers/gpu/drm/tiny/ili9486.c286
-rw-r--r--drivers/gpu/drm/tiny/repaper.c21
-rw-r--r--drivers/gpu/drm/tiny/st7586.c9
-rw-r--r--drivers/gpu/drm/tiny/st7735r.c76
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c271
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_page_alloc_dma.c2
-rw-r--r--drivers/gpu/drm/udl/udl_connector.c1
-rw-r--r--drivers/gpu/drm/udl/udl_modeset.c11
-rw-r--r--drivers/gpu/drm/v3d/v3d_drv.h41
-rw-r--r--drivers/gpu/drm/vboxvideo/vbox_mode.c13
-rw-r--r--drivers/gpu/drm/vboxvideo/vboxvideo.h2
-rw-r--r--drivers/gpu/drm/vc4/vc4_crtc.c13
-rw-r--r--drivers/gpu/drm/vc4/vc4_dpi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.c3
-rw-r--r--drivers/gpu/drm/vc4/vc4_drv.h49
-rw-r--r--drivers/gpu/drm/vc4/vc4_dsi.c2
-rw-r--r--drivers/gpu/drm/vc4/vc4_plane.c2
-rw-r--r--drivers/gpu/drm/vgem/vgem_drv.c9
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_debugfs.c1
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_display.c12
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.c6
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_drv.h36
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_gem.c2
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_ioctl.c90
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_kms.c41
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_object.c114
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_plane.c7
-rw-r--r--drivers/gpu/drm/virtio/virtgpu_vq.c369
-rw-r--r--drivers/gpu/drm/vkms/vkms_crtc.c9
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.c1
-rw-r--r--drivers/gpu/drm/vkms/vkms_drv.h4
-rw-r--r--drivers/gpu/drm/vkms/vkms_plane.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.c14
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_drv.h34
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_kms.c87
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c31
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c5
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_surface.c2
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c6
-rw-r--r--drivers/gpu/drm/xen/xen_drm_front_kms.c19
-rw-r--r--drivers/gpu/drm/zte/zx_plane.c4
-rw-r--r--drivers/hid/hid-alps.c2
-rw-r--r--drivers/hid/hid-apple.c3
-rw-r--r--drivers/hid/hid-bigbenff.c31
-rw-r--r--drivers/hid/hid-core.c4
-rw-r--r--drivers/hid/hid-hyperv.c6
-rw-r--r--drivers/hid/hid-ite.c5
-rw-r--r--drivers/hid/hid-logitech-hidpp.c43
-rw-r--r--drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c8
-rw-r--r--drivers/hid/usbhid/hiddev.c2
-rw-r--r--drivers/hwmon/acpi_power_meter.c16
-rw-r--r--drivers/hwmon/adt7462.c2
-rw-r--r--drivers/hwmon/pmbus/ltc2978.c4
-rw-r--r--drivers/hwmon/pmbus/xdpe12284.c58
-rw-r--r--drivers/hwmon/w83627ehf.c7
-rw-r--r--drivers/i2c/busses/i2c-altera.c2
-rw-r--r--drivers/i2c/busses/i2c-jz4780.c36
-rw-r--r--drivers/ide/ide-gd.c2
-rw-r--r--drivers/infiniband/core/cm.c1
-rw-r--r--drivers/infiniband/core/cma.c15
-rw-r--r--drivers/infiniband/core/core_priv.h14
-rw-r--r--drivers/infiniband/core/iwcm.c4
-rw-r--r--drivers/infiniband/core/nldev.c2
-rw-r--r--drivers/infiniband/core/rw.c31
-rw-r--r--drivers/infiniband/core/security.c28
-rw-r--r--drivers/infiniband/core/umem_odp.c24
-rw-r--r--drivers/infiniband/core/user_mad.c5
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c24
-rw-r--r--drivers/infiniband/core/uverbs_std_types.c1
-rw-r--r--drivers/infiniband/core/verbs.c10
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/cxgb4/qp.c4
-rw-r--r--drivers/infiniband/hw/hfi1/affinity.c2
-rw-r--r--drivers/infiniband/hw/hfi1/file_ops.c52
-rw-r--r--drivers/infiniband/hw/hfi1/hfi.h5
-rw-r--r--drivers/infiniband/hw/hfi1/user_exp_rcv.c5
-rw-r--r--drivers/infiniband/hw/hfi1/user_sdma.c17
-rw-r--r--drivers/infiniband/hw/hfi1/verbs.c4
-rw-r--r--drivers/infiniband/hw/mlx5/devx.c51
-rw-r--r--drivers/infiniband/hw/mlx5/main.c6
-rw-r--r--drivers/infiniband/hw/mlx5/mlx5_ib.h1
-rw-r--r--drivers/infiniband/hw/mlx5/odp.c17
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c9
-rw-r--r--drivers/infiniband/hw/qib/qib_verbs.c2
-rw-r--r--drivers/infiniband/sw/rdmavt/qp.c84
-rw-r--r--drivers/infiniband/sw/rxe/rxe_comp.c8
-rw-r--r--drivers/infiniband/sw/siw/siw_cm.c5
-rw-r--r--drivers/infiniband/sw/siw/siw_main.c6
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c12
-rw-r--r--drivers/input/keyboard/goldfish_events.c2
-rw-r--r--drivers/input/keyboard/gpio_keys.c2
-rw-r--r--drivers/input/keyboard/gpio_keys_polled.c2
-rw-r--r--drivers/input/keyboard/tca6416-keypad.c4
-rw-r--r--drivers/input/mouse/cyapa_gen5.c8
-rw-r--r--drivers/input/mouse/psmouse-smbus.c8
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/touchscreen/ili210x.c34
-rw-r--r--drivers/interconnect/core.c9
-rw-r--r--drivers/iommu/Makefile4
-rw-r--r--drivers/iommu/amd_iommu_init.c13
-rw-r--r--drivers/iommu/intel-iommu.c41
-rw-r--r--drivers/iommu/qcom_iommu.c28
-rw-r--r--drivers/macintosh/therm_windtunnel.c52
-rw-r--r--drivers/md/bcache/journal.c7
-rw-r--r--drivers/md/bcache/super.c17
-rw-r--r--drivers/md/dm-bio-record.h15
-rw-r--r--drivers/md/dm-cache-target.c6
-rw-r--r--drivers/md/dm-integrity.c84
-rw-r--r--drivers/md/dm-mpath.c2
-rw-r--r--drivers/md/dm-thin-metadata.c2
-rw-r--r--drivers/md/dm-verity-target.c2
-rw-r--r--drivers/md/dm-writecache.c16
-rw-r--r--drivers/md/dm-zoned-target.c10
-rw-r--r--drivers/md/dm.c22
-rw-r--r--drivers/media/mc/mc-entity.c4
-rw-r--r--drivers/media/platform/vicodec/codec-v4l2-fwht.c34
-rw-r--r--drivers/media/usb/pulse8-cec/pulse8-cec.c12
-rw-r--r--drivers/media/v4l2-core/v4l2-mem2mem.c4
-rw-r--r--drivers/misc/altera-stapl/altera.c12
-rw-r--r--drivers/misc/habanalabs/device.c5
-rw-r--r--drivers/misc/habanalabs/goya/goya.c44
-rw-r--r--drivers/net/bonding/bond_main.c55
-rw-r--r--drivers/net/bonding/bond_options.c2
-rw-r--r--drivers/net/dsa/b53/b53_common.c3
-rw-r--r--drivers/net/dsa/bcm_sf2.c3
-rw-r--r--drivers/net/dsa/mv88e6xxx/chip.h12
-rw-r--r--drivers/net/dsa/mv88e6xxx/global1.c4
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.c96
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_com.h9
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_ethtool.c46
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.c6
-rw-r--r--drivers/net/ethernet/amazon/ena/ena_netdev.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c5
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_filters.c2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_hw.h2
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_nic.c8
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c13
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.c10
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/aq_ring.h3
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c22
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c19
-rw-r--r--drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c12
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt.c16
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c10
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c48
-rw-r--r--drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c10
-rw-r--r--drivers/net/ethernet/broadcom/cnic_defs.h4
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmmii.c1
-rw-r--r--drivers/net/ethernet/cadence/macb.h1
-rw-r--r--drivers/net/ethernet/cadence/macb_main.c66
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c62
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.h9
-rw-r--r--drivers/net/ethernet/cisco/enic/enic_main.c2
-rw-r--r--drivers/net/ethernet/davicom/dm9000.c2
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c22
-rw-r--r--drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h2
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_if.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h1
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_main.c3
-rw-r--r--drivers/net/ethernet/huawei/hinic/hinic_rx.c5
-rw-r--r--drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c4
-rw-r--r--drivers/net/ethernet/intel/ice/ice_adminq_cmd.h1
-rw-r--r--drivers/net/ethernet/intel/ice/ice_base.c35
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.c37
-rw-r--r--drivers/net/ethernet/intel/ice/ice_common.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb.c8
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_lib.c99
-rw-r--r--drivers/net/ethernet/intel/ice/ice_dcb_nl.c20
-rw-r--r--drivers/net/ethernet/intel/ice/ice_ethtool.c409
-rw-r--r--drivers/net/ethernet/intel/ice/ice_hw_autogen.h7
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.c71
-rw-r--r--drivers/net/ethernet/intel/ice/ice_lib.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_main.c195
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.c11
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx.h6
-rw-r--r--drivers/net/ethernet/intel/ice/ice_txrx_lib.c2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_type.h2
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c201
-rw-r--r--drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h3
-rw-r--r--drivers/net/ethernet/intel/ice/ice_xsk.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/health.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h8
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_main.c6
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/en_rep.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch.c20
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.c11
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/lag.h1
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c9
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.c39
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/wq.h2
-rw-r--r--drivers/net/ethernet/mellanox/mlxsw/pci_hw.h2
-rw-r--r--drivers/net/ethernet/micrel/ks8851_mll.c67
-rw-r--r--drivers/net/ethernet/mscc/ocelot_board.c8
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_dev.c11
-rw-r--r--drivers/net/ethernet/pensando/ionic/ionic_if.h1
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede.h2
-rw-r--r--drivers/net/ethernet/qlogic/qede/qede_rdma.c29
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c186
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h3
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c7
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c8
-rw-r--r--drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h1
-rw-r--r--drivers/net/ethernet/sfc/ptp.c38
-rw-r--r--drivers/net/ethernet/socionext/sni_ave.c9
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c13
-rw-r--r--drivers/net/ethernet/sun/sunvnet_common.c23
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac.h4
-rw-r--r--drivers/net/ethernet/xilinx/ll_temac_main.c209
-rw-r--r--drivers/net/gtp.c4
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/hyperv/netvsc_drv.c3
-rw-r--r--drivers/net/phy/broadcom.c4
-rw-r--r--drivers/net/phy/marvell.c5
-rw-r--r--drivers/net/phy/mdio-bcm-iproc.c20
-rw-r--r--drivers/net/phy/mscc.c4
-rw-r--r--drivers/net/phy/phy-c45.c6
-rw-r--r--drivers/net/phy/phy_device.c11
-rw-r--r--drivers/net/slip/slip.c3
-rw-r--r--drivers/net/usb/qmi_wwan.c46
-rw-r--r--drivers/net/wireguard/device.c11
-rw-r--r--drivers/net/wireguard/receive.c7
-rw-r--r--drivers/net/wireguard/send.c16
-rw-r--r--drivers/net/wireguard/socket.c1
-rw-r--r--drivers/nfc/pn544/i2c.c1
-rw-r--r--drivers/nfc/pn544/pn544.c2
-rw-r--r--drivers/nvme/host/core.c14
-rw-r--r--drivers/nvme/host/multipath.c1
-rw-r--r--drivers/nvme/host/pci.c40
-rw-r--r--drivers/nvme/host/rdma.c2
-rw-r--r--drivers/nvme/host/tcp.c9
-rw-r--r--drivers/pci/controller/pcie-brcmstb.c2
-rw-r--r--drivers/perf/arm_pmu_acpi.c7
-rw-r--r--drivers/perf/arm_smmuv3_pmu.c2
-rw-r--r--drivers/perf/fsl_imx8_ddr_perf.c10
-rw-r--r--drivers/phy/allwinner/phy-sun50i-usb3.c2
-rw-r--r--drivers/phy/broadcom/phy-brcm-sata.c148
-rw-r--r--drivers/phy/motorola/phy-mapphone-mdm6600.c27
-rw-r--r--drivers/phy/phy-core.c18
-rw-r--r--drivers/phy/ti/phy-gmii-sel.c10
-rw-r--r--drivers/platform/chrome/wilco_ec/properties.c2
-rw-r--r--drivers/regulator/stm32-vrefbuf.c3
-rw-r--r--drivers/reset/Kconfig3
-rw-r--r--drivers/s390/cio/blacklist.c5
-rw-r--r--drivers/s390/cio/chp.c4
-rw-r--r--drivers/s390/cio/qdio.h4
-rw-r--r--drivers/s390/cio/qdio_debug.c5
-rw-r--r--drivers/s390/cio/qdio_main.c29
-rw-r--r--drivers/s390/cio/qdio_setup.c5
-rw-r--r--drivers/s390/cio/vfio_ccw_trace.h4
-rw-r--r--drivers/s390/crypto/ap_bus.h4
-rw-r--r--drivers/s390/crypto/ap_card.c8
-rw-r--r--drivers/s390/crypto/ap_queue.c6
-rw-r--r--drivers/s390/crypto/pkey_api.c2
-rw-r--r--drivers/s390/crypto/zcrypt_api.c16
-rw-r--r--drivers/s390/crypto/zcrypt_ep11misc.c4
-rw-r--r--drivers/s390/net/qeth_core_main.c49
-rw-r--r--drivers/s390/net/qeth_l2_main.c29
-rw-r--r--drivers/s390/scsi/zfcp_fsf.c2
-rw-r--r--drivers/s390/scsi/zfcp_fsf.h2
-rw-r--r--drivers/s390/scsi/zfcp_qdio.c6
-rw-r--r--drivers/s390/scsi/zfcp_qdio.h6
-rw-r--r--drivers/s390/scsi/zfcp_sysfs.c2
-rw-r--r--drivers/scsi/libfc/fc_disc.c2
-rw-r--r--drivers/scsi/megaraid/megaraid_sas_fusion.c5
-rw-r--r--drivers/scsi/sd_zbc.c7
-rw-r--r--drivers/scsi/sr.c2
-rw-r--r--drivers/soc/imx/soc-imx-scu.c2
-rw-r--r--drivers/soc/tegra/fuse/fuse-tegra30.c3
-rw-r--r--drivers/spi/atmel-quadspi.c11
-rw-r--r--drivers/spi/spi-bcm63xx-hsspi.c1
-rw-r--r--drivers/spi/spi-omap2-mcspi.c103
-rw-r--r--drivers/spi/spi-pxa2xx.c23
-rw-r--r--drivers/spi/spi-qup.c11
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c3
-rw-r--r--drivers/spi/spi.c32
-rw-r--r--drivers/spi/spidev.c5
-rw-r--r--drivers/spmi/spmi-pmic-arb.c4
-rw-r--r--drivers/staging/android/Kconfig8
-rw-r--r--drivers/staging/android/Makefile1
-rw-r--r--drivers/staging/android/TODO9
-rw-r--r--drivers/staging/android/ashmem.c28
-rw-r--r--drivers/staging/android/uapi/vsoc_shm.h295
-rw-r--r--drivers/staging/android/vsoc.c1149
-rw-r--r--drivers/staging/greybus/audio_manager.c2
-rw-r--r--drivers/staging/media/hantro/hantro_drv.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c40
-rw-r--r--drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c5
-rw-r--r--drivers/staging/rtl8723bs/os_dep/ioctl_linux.c47
-rw-r--r--drivers/staging/speakup/selection.c2
-rw-r--r--drivers/staging/vt6656/dpc.c2
-rw-r--r--drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt2
-rw-r--r--drivers/target/iscsi/iscsi_target.c16
-rw-r--r--drivers/target/target_core_transport.c31
-rw-r--r--drivers/tee/amdtee/Kconfig2
-rw-r--r--drivers/tee/amdtee/core.c48
-rw-r--r--drivers/thunderbolt/switch.c7
-rw-r--r--drivers/tty/serdev/core.c10
-rw-r--r--drivers/tty/serdev/serdev-ttyport.c6
-rw-r--r--drivers/tty/serial/8250/8250_aspeed_vuart.c1
-rw-r--r--drivers/tty/serial/8250/8250_core.c5
-rw-r--r--drivers/tty/serial/8250/8250_exar.c33
-rw-r--r--drivers/tty/serial/8250/8250_of.c1
-rw-r--r--drivers/tty/serial/8250/8250_port.c4
-rw-r--r--drivers/tty/serial/ar933x_uart.c8
-rw-r--r--drivers/tty/serial/atmel_serial.c3
-rw-r--r--drivers/tty/serial/cpm_uart/cpm_uart_core.c1
-rw-r--r--drivers/tty/serial/fsl_lpuart.c41
-rw-r--r--drivers/tty/serial/imx.c2
-rw-r--r--drivers/tty/serial/mvebu-uart.c2
-rw-r--r--drivers/tty/serial/qcom_geni_serial.c18
-rw-r--r--drivers/tty/serial/serial-tegra.c35
-rw-r--r--drivers/tty/tty_port.c5
-rw-r--r--drivers/tty/vt/selection.c35
-rw-r--r--drivers/tty/vt/vt.c17
-rw-r--r--drivers/tty/vt/vt_ioctl.c17
-rw-r--r--drivers/usb/cdns3/gadget.c19
-rw-r--r--drivers/usb/core/config.c31
-rw-r--r--drivers/usb/core/hub.c26
-rw-r--r--drivers/usb/core/hub.h1
-rw-r--r--drivers/usb/core/port.c10
-rw-r--r--drivers/usb/core/quirks.c43
-rw-r--r--drivers/usb/core/usb.h3
-rw-r--r--drivers/usb/dwc2/gadget.c40
-rw-r--r--drivers/usb/dwc3/debug.h39
-rw-r--r--drivers/usb/dwc3/gadget.c12
-rw-r--r--drivers/usb/gadget/composite.c30
-rw-r--r--drivers/usb/gadget/function/f_fs.c5
-rw-r--r--drivers/usb/gadget/function/u_audio.c10
-rw-r--r--drivers/usb/gadget/function/u_serial.c4
-rw-r--r--drivers/usb/gadget/udc/udc-xilinx.c1
-rw-r--r--drivers/usb/host/xhci-hub.c25
-rw-r--r--drivers/usb/host/xhci-mem.c71
-rw-r--r--drivers/usb/host/xhci-pci.c10
-rw-r--r--drivers/usb/host/xhci.h14
-rw-r--r--drivers/usb/misc/iowarrior.c31
-rw-r--r--drivers/usb/misc/usb251xb.c20
-rw-r--r--drivers/usb/phy/phy-tegra-usb.c8
-rw-r--r--drivers/usb/serial/ch341.c10
-rw-r--r--drivers/usb/serial/ir-usb.c2
-rw-r--r--drivers/usb/storage/uas.c23
-rw-r--r--drivers/usb/storage/unusual_devs.h6
-rw-r--r--drivers/vhost/net.c10
-rw-r--r--drivers/video/backlight/Kconfig15
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/led_bl.c260
-rw-r--r--drivers/video/console/Kconfig76
-rw-r--r--drivers/video/console/vgacon.c3
-rw-r--r--drivers/video/fbdev/Kconfig9
-rw-r--r--drivers/video/fbdev/aty/mach64_gx.c3
-rw-r--r--drivers/video/fbdev/aty/radeon_base.c26
-rw-r--r--drivers/video/fbdev/cg14.c3
-rw-r--r--drivers/video/fbdev/core/Makefile1
-rw-r--r--drivers/video/fbdev/core/fbcon.c27
-rw-r--r--drivers/video/fbdev/core/fbmem.c38
-rw-r--r--drivers/video/fbdev/hyperv_fb.c4
-rw-r--r--drivers/video/fbdev/kyro/STG4000OverlayDevice.c3
-rw-r--r--drivers/video/fbdev/matrox/matroxfb_base.c15
-rw-r--r--drivers/video/fbdev/mmp/hw/mmp_ctrl.h2
-rw-r--r--drivers/video/fbdev/nvidia/nvidia.c41
-rw-r--r--drivers/video/fbdev/omap2/omapfb/omapfb-main.c4
-rw-r--r--drivers/video/fbdev/pxa168fb.c5
-rw-r--r--drivers/video/fbdev/s1d13xxxfb.c16
-rw-r--r--drivers/video/fbdev/sa1100fb.c2
-rw-r--r--drivers/video/fbdev/sh_mobile_lcdcfb.c4
-rw-r--r--drivers/video/fbdev/ssd1307fb.c4
-rw-r--r--drivers/video/fbdev/w100fb.c18
-rw-r--r--drivers/video/fbdev/wm8505fb.c2
-rw-r--r--drivers/video/hdmi.c11
-rw-r--r--drivers/watchdog/Kconfig2
-rw-r--r--drivers/watchdog/da9062_wdt.c19
-rw-r--r--drivers/watchdog/wdat_wdt.c25
-rw-r--r--drivers/xen/preempt.c4
-rw-r--r--drivers/xen/xen-pciback/pciback.h2
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c4
-rw-r--r--drivers/xen/xenbus/xenbus_probe.c10
-rw-r--r--drivers/xen/xenbus/xenbus_probe_backend.c5
-rw-r--r--drivers/xen/xenbus/xenbus_xs.c9
-rw-r--r--fs/btrfs/disk-io.c4
-rw-r--r--fs/btrfs/extent-tree.c2
-rw-r--r--fs/btrfs/extent_map.c11
-rw-r--r--fs/btrfs/inode.c30
-rw-r--r--fs/btrfs/ordered-data.c7
-rw-r--r--fs/btrfs/qgroup.c13
-rw-r--r--fs/btrfs/qgroup.h1
-rw-r--r--fs/btrfs/ref-verify.c5
-rw-r--r--fs/btrfs/super.c2
-rw-r--r--fs/btrfs/sysfs.c17
-rw-r--r--fs/btrfs/transaction.c2
-rw-r--r--fs/btrfs/volumes.h1
-rw-r--r--fs/ceph/file.c17
-rw-r--r--fs/ceph/super.c129
-rw-r--r--fs/ceph/super.h2
-rw-r--r--fs/cifs/cifs_dfs_ref.c2
-rw-r--r--fs/cifs/cifsacl.c4
-rw-r--r--fs/cifs/cifsfs.c8
-rw-r--r--fs/cifs/cifsglob.h7
-rw-r--r--fs/cifs/cifsproto.h5
-rw-r--r--fs/cifs/cifssmb.c3
-rw-r--r--fs/cifs/connect.c2
-rw-r--r--fs/cifs/file.c19
-rw-r--r--fs/cifs/inode.c18
-rw-r--r--fs/cifs/smb1ops.c2
-rw-r--r--fs/cifs/smb2inode.c4
-rw-r--r--fs/cifs/smb2ops.c39
-rw-r--r--fs/cifs/smb2pdu.c1
-rw-r--r--fs/dax.c11
-rw-r--r--fs/debugfs/file.c17
-rw-r--r--fs/ecryptfs/crypto.c6
-rw-r--r--fs/ecryptfs/ecryptfs_kernel.h2
-rw-r--r--fs/ecryptfs/keystore.c2
-rw-r--r--fs/ecryptfs/main.c2
-rw-r--r--fs/ecryptfs/messaging.c3
-rw-r--r--fs/ext2/inode.c5
-rw-r--r--fs/ext4/balloc.c14
-rw-r--r--fs/ext4/block_validity.c1
-rw-r--r--fs/ext4/dir.c14
-rw-r--r--fs/ext4/ext4.h44
-rw-r--r--fs/ext4/ialloc.c23
-rw-r--r--fs/ext4/inode.c30
-rw-r--r--fs/ext4/mballoc.c61
-rw-r--r--fs/ext4/migrate.c27
-rw-r--r--fs/ext4/mmp.c12
-rw-r--r--fs/ext4/namei.c8
-rw-r--r--fs/ext4/resize.c62
-rw-r--r--fs/ext4/super.c153
-rw-r--r--fs/fat/inode.c19
-rw-r--r--fs/fcntl.c6
-rw-r--r--fs/io-wq.c139
-rw-r--r--fs/io-wq.h18
-rw-r--r--fs/io_uring.c507
-rw-r--r--fs/jbd2/commit.c46
-rw-r--r--fs/jbd2/transaction.c26
-rw-r--r--fs/locks.c14
-rw-r--r--fs/nfs/delegation.c50
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c126
-rw-r--r--fs/nfs/inode.c1
-rw-r--r--fs/nfs/nfs4file.c1
-rw-r--r--fs/nfs/nfs4proc.c20
-rw-r--r--fs/pipe.c18
-rw-r--r--fs/xfs/xfs_aops.c2
-rw-r--r--fs/zonefs/Kconfig1
-rw-r--r--fs/zonefs/super.c8
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actypes.h3
-rw-r--r--include/drm/bridge/dw_hdmi.h1
-rw-r--r--include/drm/bridge/mhl.h4
-rw-r--r--include/drm/drm_atomic.h76
-rw-r--r--include/drm/drm_atomic_helper.h8
-rw-r--r--include/drm/drm_atomic_state_helper.h13
-rw-r--r--include/drm/drm_bridge.h405
-rw-r--r--include/drm/drm_bridge_connector.h18
-rw-r--r--include/drm/drm_client.h7
-rw-r--r--include/drm/drm_connector.h46
-rw-r--r--include/drm/drm_crtc.h80
-rw-r--r--include/drm/drm_device.h2
-rw-r--r--include/drm/drm_dp_helper.h26
-rw-r--r--include/drm/drm_dp_mst_helper.h17
-rw-r--r--include/drm/drm_drv.h194
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_encoder.h3
-rw-r--r--include/drm/drm_fb_helper.h27
-rw-r--r--include/drm/drm_file.h1
-rw-r--r--include/drm/drm_gem_shmem_helper.h5
-rw-r--r--include/drm/drm_gem_vram_helper.h9
-rw-r--r--include/drm/drm_hdcp.h6
-rw-r--r--include/drm/drm_legacy.h6
-rw-r--r--include/drm/drm_mipi_dbi.h12
-rw-r--r--include/drm/drm_mm.h2
-rw-r--r--include/drm/drm_modes.h11
-rw-r--r--include/drm/drm_modeset_helper_vtables.h63
-rw-r--r--include/drm/drm_panel.h3
-rw-r--r--include/drm/drm_pci.h11
-rw-r--r--include/drm/drm_print.h78
-rw-r--r--include/drm/drm_simple_kms_helper.h11
-rw-r--r--include/drm/drm_vblank.h36
-rw-r--r--include/drm/gpu_scheduler.h13
-rw-r--r--include/drm/i915_mei_hdcp_interface.h1
-rw-r--r--include/drm/ttm/ttm_bo_api.h11
-rw-r--r--include/drm/ttm/ttm_bo_driver.h15
-rw-r--r--include/linux/blkdev.h3
-rw-r--r--include/linux/blktrace_api.h18
-rw-r--r--include/linux/bootconfig.h3
-rw-r--r--include/linux/compat.h29
-rw-r--r--include/linux/cpufreq.h3
-rw-r--r--include/linux/dax.h14
-rw-r--r--include/linux/debugfs.h13
-rw-r--r--include/linux/device.h11
-rw-r--r--include/linux/dma-buf.h97
-rw-r--r--include/linux/hdmi.h2
-rw-r--r--include/linux/hid.h2
-rw-r--r--include/linux/icmpv6.h10
-rw-r--r--include/linux/intel-svm.h2
-rw-r--r--include/linux/irqdomain.h2
-rw-r--r--include/linux/ktime.h37
-rw-r--r--include/linux/kvm_host.h4
-rw-r--r--include/linux/mlx5/mlx5_ifc.h5
-rw-r--r--include/linux/mm.h4
-rw-r--r--include/linux/netdevice.h23
-rw-r--r--include/linux/netfilter/ipset/ip_set.h11
-rw-r--r--include/linux/nfs_fs.h26
-rw-r--r--include/linux/pipe_fs_i.h3
-rw-r--r--include/linux/platform_data/simplefb.h2
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h1
-rw-r--r--include/linux/rculist_nulls.h7
-rw-r--r--include/linux/sched/nohz.h2
-rw-r--r--include/linux/skbuff.h30
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/time32.h154
-rw-r--r--include/linux/timekeeping32.h32
-rw-r--r--include/linux/trace_events.h2
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/types.h5
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/net/flow_dissector.h10
-rw-r--r--include/net/icmp.h6
-rw-r--r--include/net/mac80211.h11
-rw-r--r--include/net/sock.h38
-rw-r--r--include/scsi/iscsi_proto.h1
-rw-r--r--include/sound/rawmidi.h6
-rw-r--r--include/sound/soc-dapm.h2
-rw-r--r--include/sound/soc.h2
-rw-r--r--include/uapi/asm-generic/posix_types.h2
-rw-r--r--include/uapi/drm/amdgpu_drm.h5
-rw-r--r--include/uapi/drm/drm.h2
-rw-r--r--include/uapi/drm/i915_drm.h21
-rw-r--r--include/uapi/drm/lima_drm.h9
-rw-r--r--include/uapi/linux/bpf.h16
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h12
-rw-r--r--include/uapi/linux/swab.h4
-rw-r--r--include/uapi/linux/time.h22
-rw-r--r--include/uapi/linux/usb/charger.h16
-rw-r--r--include/video/mmp_disp.h2
-rw-r--r--include/video/samsung_fimd.h2
-rw-r--r--include/xen/interface/io/tpmif.h2
-rw-r--r--include/xen/xenbus.h3
-rw-r--r--init/Kconfig6
-rw-r--r--init/main.c75
-rw-r--r--ipc/sem.c6
-rw-r--r--kernel/audit.c40
-rw-r--r--kernel/auditfilter.c71
-rw-r--r--kernel/bpf/btf.c6
-rw-r--r--kernel/bpf/hashtab.c58
-rw-r--r--kernel/bpf/offload.c2
-rw-r--r--kernel/cgroup/cgroup.c13
-rw-r--r--kernel/compat.c64
-rw-r--r--kernel/dma/contiguous.c9
-rw-r--r--kernel/dma/direct.c61
-rw-r--r--kernel/dma/swiotlb.c42
-rw-r--r--kernel/exit.c4
-rw-r--r--kernel/fork.c2
-rw-r--r--kernel/irq/internals.h2
-rw-r--r--kernel/irq/manage.c18
-rw-r--r--kernel/irq/proc.c22
-rw-r--r--kernel/power/snapshot.c2
-rw-r--r--kernel/power/suspend.c9
-rw-r--r--kernel/sched/core.c63
-rw-r--r--kernel/sched/fair.c58
-rw-r--r--kernel/sched/loadavg.c33
-rw-r--r--kernel/sched/psi.c3
-rw-r--r--kernel/sched/sched.h15
-rw-r--r--kernel/signal.c23
-rw-r--r--kernel/sysctl.c9
-rw-r--r--kernel/time/time.c43
-rw-r--r--kernel/trace/Kconfig4
-rw-r--r--kernel/trace/blktrace.c117
-rw-r--r--kernel/trace/synth_event_gen_test.c44
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/trace/trace_events_hist.c319
-rw-r--r--kernel/trace/trace_kprobe.c2
-rw-r--r--lib/Kconfig3
-rw-r--r--lib/Makefile2
-rw-r--r--lib/bootconfig.c51
-rw-r--r--lib/crypto/chacha20poly1305.c3
-rw-r--r--lib/stackdepot.c8
-rw-r--r--lib/string.c16
-rw-r--r--mm/huge_memory.c3
-rw-r--r--mm/memcontrol.c4
-rw-r--r--mm/memory.c35
-rw-r--r--mm/memory_hotplug.c8
-rw-r--r--mm/mmap.c4
-rw-r--r--mm/mprotect.c38
-rw-r--r--mm/mremap.c1
-rw-r--r--mm/shmem.c2
-rw-r--r--mm/sparse.c2
-rw-r--r--mm/swapfile.c2
-rw-r--r--mm/vmscan.c9
-rw-r--r--mm/z3fold.c1
-rw-r--r--net/Kconfig1
-rw-r--r--net/bridge/br_device.c6
-rw-r--r--net/bridge/br_stp.c3
-rw-r--r--net/core/dev.c40
-rw-r--r--net/core/devlink.c38
-rw-r--r--net/core/fib_rules.c2
-rw-r--r--net/core/page_pool.c22
-rw-r--r--net/core/rtnetlink.c26
-rw-r--r--net/core/skbuff.c6
-rw-r--r--net/dsa/tag_ar9331.c2
-rw-r--r--net/dsa/tag_qca.c2
-rw-r--r--net/ethtool/bitset.c6
-rw-r--r--net/ethtool/bitset.h2
-rw-r--r--net/hsr/hsr_framereg.c3
-rw-r--r--net/ipv4/cipso_ipv4.c7
-rw-r--r--net/ipv4/icmp.c33
-rw-r--r--net/ipv4/tcp_input.c6
-rw-r--r--net/ipv4/udp.c6
-rw-r--r--net/ipv6/ip6_fib.c7
-rw-r--r--net/ipv6/ip6_gre.c8
-rw-r--r--net/ipv6/ip6_icmp.c34
-rw-r--r--net/ipv6/ip6_tunnel.c81
-rw-r--r--net/ipv6/ipv6_sockglue.c10
-rw-r--r--net/ipv6/route.c1
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/mlme.c14
-rw-r--r--net/mac80211/rx.c2
-rw-r--r--net/mac80211/tx.c2
-rw-r--r--net/mac80211/util.c34
-rw-r--r--net/mptcp/Kconfig1
-rw-r--r--net/mptcp/protocol.c56
-rw-r--r--net/mptcp/protocol.h4
-rw-r--r--net/netfilter/ipset/ip_set_core.c34
-rw-r--r--net/netfilter/ipset/ip_set_hash_gen.h635
-rw-r--r--net/netfilter/nf_conntrack_core.c192
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c20
-rw-r--r--net/netfilter/nf_flow_table_offload.c6
-rw-r--r--net/netfilter/nft_set_pipapo.c12
-rw-r--r--net/netfilter/xt_hashlimit.c38
-rw-r--r--net/netlabel/netlabel_domainhash.c3
-rw-r--r--net/netlabel/netlabel_unlabeled.c3
-rw-r--r--net/netlink/af_netlink.c5
-rw-r--r--net/netlink/genetlink.c5
-rw-r--r--net/openvswitch/datapath.c9
-rw-r--r--net/openvswitch/flow_netlink.c18
-rw-r--r--net/openvswitch/flow_table.c6
-rw-r--r--net/openvswitch/meter.c3
-rw-r--r--net/openvswitch/vport.c3
-rw-r--r--net/rds/rdma.c24
-rw-r--r--net/sched/act_api.c1
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/cls_matchall.c1
-rw-r--r--net/sctp/sm_statefuns.c29
-rw-r--r--net/smc/af_smc.c27
-rw-r--r--net/smc/smc_clc.c4
-rw-r--r--net/smc/smc_core.c12
-rw-r--r--net/smc/smc_core.h2
-rw-r--r--net/smc/smc_diag.c5
-rw-r--r--net/smc/smc_ib.c2
-rw-r--r--net/sunrpc/xprtrdma/frwr_ops.c13
-rw-r--r--net/tipc/node.c7
-rw-r--r--net/tipc/socket.c2
-rw-r--r--net/tls/tls_device.c20
-rw-r--r--net/unix/af_unix.c4
-rw-r--r--net/vmw_vsock/af_vsock.c20
-rw-r--r--net/vmw_vsock/hyperv_transport.c3
-rw-r--r--net/vmw_vsock/virtio_transport_common.c2
-rw-r--r--net/wireless/ethtool.c8
-rw-r--r--net/wireless/nl80211.c6
-rw-r--r--net/wireless/reg.c2
-rw-r--r--net/xdp/xsk.c2
-rw-r--r--net/xdp/xsk_queue.h3
-rw-r--r--net/xfrm/xfrm_interface.c6
-rw-r--r--scripts/Makefile.lib6
-rwxr-xr-xscripts/get_maintainer.pl32
-rw-r--r--scripts/kallsyms.c4
-rwxr-xr-xscripts/link-vmlinux.sh2
-rwxr-xr-x[-rw-r--r--]scripts/parse-maintainers.pl0
-rw-r--r--security/integrity/ima/Kconfig5
-rw-r--r--security/integrity/platform_certs/load_uefi.c40
-rw-r--r--security/selinux/hooks.c2
-rw-r--r--security/selinux/ss/sidtab.c12
-rw-r--r--sound/core/pcm_native.c3
-rw-r--r--sound/core/seq/seq_clientmgr.c4
-rw-r--r--sound/core/seq/seq_queue.c29
-rw-r--r--sound/core/seq/seq_timer.c13
-rw-r--r--sound/core/seq/seq_timer.h3
-rw-r--r--sound/hda/ext/hdac_ext_controller.c9
-rw-r--r--sound/hda/hdmi_chmap.c2
-rw-r--r--sound/mips/sgio2audio.c6
-rw-r--r--sound/pci/hda/hda_codec.c2
-rw-r--r--sound/pci/hda/hda_eld.c2
-rw-r--r--sound/pci/hda/hda_sysfs.c4
-rw-r--r--sound/pci/hda/patch_realtek.c37
-rw-r--r--sound/soc/amd/raven/acp3x-i2s.c8
-rw-r--r--sound/soc/amd/raven/acp3x-pcm-dma.c7
-rw-r--r--sound/soc/amd/raven/pci-acp3x.c23
-rw-r--r--sound/soc/atmel/Kconfig4
-rw-r--r--sound/soc/atmel/Makefile10
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/hdmi-codec.c10
-rw-r--r--sound/soc/codecs/max98090.c434
-rw-r--r--sound/soc/codecs/max98090.h3
-rw-r--r--sound/soc/codecs/pcm512x.c8
-rw-r--r--sound/soc/codecs/rt1015.c3
-rw-r--r--sound/soc/codecs/tas2562.c5
-rw-r--r--sound/soc/fsl/fsl_sai.c22
-rw-r--r--sound/soc/intel/skylake/skl-debug.c32
-rw-r--r--sound/soc/intel/skylake/skl-ssp-clk.c4
-rw-r--r--sound/soc/meson/g12a-tohdmitx.c6
-rw-r--r--sound/soc/soc-component.c2
-rw-r--r--sound/soc/soc-compress.c2
-rw-r--r--sound/soc/soc-dapm.c59
-rw-r--r--sound/soc/soc-pcm.c18
-rw-r--r--sound/soc/soc-topology.c17
-rw-r--r--sound/soc/sof/intel/hda-codec.c12
-rw-r--r--sound/soc/sof/intel/hda-dsp.c11
-rw-r--r--sound/soc/sof/intel/hda.c19
-rw-r--r--sound/soc/sof/ipc.c2
-rw-r--r--sound/soc/stm/stm32_sai_sub.c18
-rw-r--r--sound/soc/sunxi/sun8i-codec.c3
-rw-r--r--sound/usb/clock.c91
-rw-r--r--sound/usb/clock.h4
-rw-r--r--sound/usb/format.c36
-rw-r--r--sound/usb/mixer.c12
-rw-r--r--sound/usb/quirks.c1
-rw-r--r--tools/arch/arm64/include/uapi/asm/kvm.h12
-rw-r--r--tools/arch/arm64/include/uapi/asm/unistd.h1
-rw-r--r--tools/arch/x86/include/asm/cpufeatures.h2
-rw-r--r--tools/arch/x86/include/asm/disabled-features.h8
-rw-r--r--tools/arch/x86/include/asm/msr-index.h2
-rw-r--r--tools/arch/x86/include/uapi/asm/kvm.h1
-rw-r--r--tools/bootconfig/include/linux/memblock.h12
-rw-r--r--tools/bootconfig/include/linux/printk.h5
-rw-r--r--tools/bootconfig/main.c79
-rw-r--r--tools/bootconfig/samples/bad-mixed-kv1.bconf3
-rw-r--r--tools/bootconfig/samples/bad-mixed-kv2.bconf3
-rw-r--r--tools/bootconfig/samples/bad-samekey.bconf6
-rwxr-xr-xtools/bootconfig/test-bootconfig.sh25
-rw-r--r--tools/include/uapi/asm-generic/mman-common.h2
-rw-r--r--tools/include/uapi/asm-generic/unistd.h7
-rw-r--r--tools/include/uapi/drm/i915_drm.h32
-rw-r--r--tools/include/uapi/linux/bpf.h16
-rw-r--r--tools/include/uapi/linux/fcntl.h2
-rw-r--r--tools/include/uapi/linux/fscrypt.h14
-rw-r--r--tools/include/uapi/linux/kvm.h5
-rw-r--r--tools/include/uapi/linux/openat2.h39
-rw-r--r--tools/include/uapi/linux/prctl.h4
-rw-r--r--tools/include/uapi/linux/sched.h6
-rw-r--r--tools/include/uapi/sound/asound.h155
-rw-r--r--tools/lib/bpf/libbpf.c8
-rw-r--r--tools/perf/Documentation/perf-config.txt74
-rw-r--r--tools/perf/arch/arm/util/cs-etm.c18
-rw-r--r--tools/perf/arch/arm64/util/arm-spe.c17
-rw-r--r--tools/perf/arch/arm64/util/header.c63
-rw-r--r--tools/perf/arch/powerpc/entry/syscalls/syscall.tbl2
-rw-r--r--tools/perf/arch/x86/entry/syscalls/syscall_64.tbl2
-rw-r--r--tools/perf/arch/x86/util/intel-bts.c17
-rw-r--r--tools/perf/arch/x86/util/intel-pt.c17
-rw-r--r--tools/perf/builtin-annotate.c4
-rw-r--r--tools/perf/builtin-probe.c6
-rw-r--r--tools/perf/builtin-report.c2
-rw-r--r--tools/perf/builtin-top.c4
-rw-r--r--tools/perf/builtin-trace.c4
-rwxr-xr-xtools/perf/check-headers.sh1
-rw-r--r--tools/perf/include/bpf/pid_filter.h2
-rw-r--r--tools/perf/include/bpf/stdio.h2
-rw-r--r--tools/perf/include/bpf/unistd.h2
-rw-r--r--tools/perf/tests/shell/lib/probe_vfs_getname.sh2
-rw-r--r--tools/perf/trace/beauty/beauty.h2
-rw-r--r--tools/perf/trace/beauty/prctl.c3
-rw-r--r--tools/perf/ui/browsers/annotate.c19
-rw-r--r--tools/perf/ui/gtk/annotate.c2
-rw-r--r--tools/perf/util/annotate.c194
-rw-r--r--tools/perf/util/annotate.h9
-rw-r--r--tools/perf/util/auxtrace.c22
-rw-r--r--tools/perf/util/auxtrace.h6
-rw-r--r--tools/perf/util/config.c12
-rw-r--r--tools/perf/util/config.h1
-rw-r--r--tools/perf/util/llvm-utils.c1
-rw-r--r--tools/perf/util/machine.c26
-rw-r--r--tools/perf/util/map.c17
-rw-r--r--tools/perf/util/probe-file.c28
-rw-r--r--tools/perf/util/stat-shadow.c6
-rw-r--r--tools/perf/util/symbol.c17
-rwxr-xr-xtools/testing/kunit/kunit.py12
-rw-r--r--tools/testing/kunit/kunit_kernel.py28
-rw-r--r--tools/testing/selftests/Makefile12
-rw-r--r--tools/testing/selftests/bpf/prog_tests/select_reuseport.c8
-rw-r--r--tools/testing/selftests/bpf/prog_tests/sockmap_basic.c5
-rw-r--r--tools/testing/selftests/ftrace/Makefile2
-rw-r--r--tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc8
-rw-r--r--tools/testing/selftests/futex/functional/Makefile2
-rw-r--r--tools/testing/selftests/kvm/Makefile3
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/processor.h44
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm.h297
-rw-r--r--tools/testing/selftests/kvm/include/x86_64/svm_util.h38
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/svm.c161
-rw-r--r--tools/testing/selftests/kvm/lib/x86_64/vmx.c6
-rw-r--r--tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c79
-rw-r--r--tools/testing/selftests/lib.mk23
-rw-r--r--tools/testing/selftests/livepatch/Makefile2
-rw-r--r--tools/testing/selftests/lkdtm/.gitignore2
-rw-r--r--tools/testing/selftests/net/Makefile4
-rwxr-xr-xtools/testing/selftests/net/fib_tests.sh6
-rwxr-xr-xtools/testing/selftests/net/forwarding/mirror_gre.sh25
-rwxr-xr-xtools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh6
-rw-r--r--tools/testing/selftests/net/mptcp/Makefile2
-rwxr-xr-xtools/testing/selftests/netfilter/nft_concat_range.sh55
-rw-r--r--tools/testing/selftests/openat2/helpers.c2
-rw-r--r--tools/testing/selftests/openat2/resolve_test.c2
-rw-r--r--tools/testing/selftests/pidfd/.gitignore1
-rw-r--r--tools/testing/selftests/rseq/Makefile4
-rw-r--r--tools/testing/selftests/rtc/Makefile4
-rw-r--r--tools/testing/selftests/timens/Makefile2
-rwxr-xr-xtools/testing/selftests/tpm2/test_smoke.sh13
-rwxr-xr-xtools/testing/selftests/tpm2/test_space.sh9
-rwxr-xr-xtools/testing/selftests/vm/run_vmtests33
-rwxr-xr-xtools/testing/selftests/wireguard/netns.sh11
-rw-r--r--tools/testing/selftests/wireguard/qemu/Makefile38
-rw-r--r--virt/kvm/arm/arm.c2
-rw-r--r--virt/kvm/arm/trace.h1
-rw-r--r--virt/kvm/arm/vgic/vgic-mmio.c12
-rw-r--r--virt/kvm/kvm_main.c16
2179 files changed, 71561 insertions, 38586 deletions
diff --git a/COPYING b/COPYING
index da4cb28febe6..a635a38ef940 100644
--- a/COPYING
+++ b/COPYING
@@ -16,3 +16,5 @@ In addition, other licenses may also apply. Please see:
Documentation/process/license-rules.rst
for more details.
+
+All contributions to the Linux Kernel are subject to this COPYING file.
diff --git a/CREDITS b/CREDITS
index a97d3280a627..032b5994f476 100644
--- a/CREDITS
+++ b/CREDITS
@@ -567,6 +567,11 @@ D: Original author of Amiga FFS filesystem
S: Orlando, Florida
S: USA
+N: Paul Burton
+W: https://pburton.com
+D: MIPS maintainer 2018-2020
+
N: Lennert Buytenhek
D: Original (2.4) rewrite of the ethernet bridging code
diff --git a/Documentation/admin-guide/acpi/fan_performance_states.rst b/Documentation/admin-guide/acpi/fan_performance_states.rst
index 21d233ca50d8..98fe5c333121 100644
--- a/Documentation/admin-guide/acpi/fan_performance_states.rst
+++ b/Documentation/admin-guide/acpi/fan_performance_states.rst
@@ -18,7 +18,7 @@ may look as follows::
$ ls -l /sys/bus/acpi/devices/INT3404:00/
total 0
-...
+ ...
-r--r--r-- 1 root root 4096 Dec 13 20:38 state0
-r--r--r-- 1 root root 4096 Dec 13 20:38 state1
-r--r--r-- 1 root root 4096 Dec 13 20:38 state10
@@ -38,7 +38,7 @@ where each of the "state*" files represents one performance state of the fan
and contains a colon-separated list of 5 integer numbers (fields) with the
following interpretation::
-control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw
+ control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw
* ``control_percent``: The percent value to be used to set the fan speed to a
specific level using the _FSL object (0-100).
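
The five colon-separated fields documented above can be consumed directly
from userspace. What follows is a minimal C sketch, not part of the patch:
it assumes the INT3404:00 device path and the state0 file shown in the hunk,
and keeps error handling illustrative.

	#include <stdio.h>

	int main(void)
	{
		unsigned int control, trip, rpm, noise, power;
		FILE *f = fopen("/sys/bus/acpi/devices/INT3404:00/state0", "r");

		if (!f)
			return 1;

		/* control_percent:trip_point_index:speed_rpm:noise_level_mdb:power_mw */
		if (fscanf(f, "%u:%u:%u:%u:%u",
			   &control, &trip, &rpm, &noise, &power) == 5)
			printf("state0: %u%% duty, %u rpm, %u mdB, %u mW\n",
			       control, rpm, noise, power);

		fclose(f);
		return 0;
	}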
diff --git a/Documentation/admin-guide/bootconfig.rst b/Documentation/admin-guide/bootconfig.rst
index b342a6796392..cf2edcd09183 100644
--- a/Documentation/admin-guide/bootconfig.rst
+++ b/Documentation/admin-guide/bootconfig.rst
@@ -62,6 +62,30 @@ Or more shorter, written as following::
In both styles, same key words are automatically merged when parsing it
at boot time. So you can append similar trees or key-values.
+Same-key Values
+---------------
+
+Two or more values or arrays must not share the same key.
+For example::
+
+ foo = bar, baz
+ foo = qux # !ERROR! we cannot re-define the same key
+
+If you want to append a value to an existing key as an array member,
+you can use the ``+=`` operator. For example::
+
+ foo = bar, baz
+ foo += qux
+
+In this case, the key ``foo`` holds the values ``bar``, ``baz`` and ``qux``.
+
+However, a sub-key and a value cannot co-exist under a parent key.
+For example, the following config is NOT allowed::
+
+ foo = value1
+ foo.bar = value2 # !ERROR! subkey "bar" and value "value1" can NOT co-exist
+
+
Comments
--------
@@ -102,9 +126,13 @@ Boot Kernel With a Boot Config
==============================
Since the boot configuration file is loaded with initrd, it will be added
-to the end of the initrd (initramfs) image file. The Linux kernel decodes
-the last part of the initrd image in memory to get the boot configuration
-data.
+to the end of the initrd (initramfs) image file, followed by its size,
+a checksum and a 12-byte magic word, as shown below.
+
+[initrd][bootconfig][size(u32)][checksum(u32)][#BOOTCONFIG\n]
+
+The Linux kernel decodes the last part of the initrd image in memory to
+get the boot configuration data.
Because of this "piggyback" method, there is no need to change or
update the boot loader and the kernel image itself.
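
The piggyback layout described in this hunk can be reproduced with a short
helper. The C sketch below is illustrative and is not the bundled
tools/bootconfig utility: it assumes the checksum is a plain 32-bit sum of
the appended data bytes and that both u32 trailer fields are written in the
host's (little-endian) byte order.

	#include <stdint.h>
	#include <stdio.h>

	static const char magic[12] = "#BOOTCONFIG\n";

	/* Append [data][size(u32)][checksum(u32)][#BOOTCONFIG\n] to the initrd. */
	static int append_bootconfig(FILE *initrd, const uint8_t *data, uint32_t size)
	{
		uint32_t csum = 0;

		for (uint32_t i = 0; i < size; i++)
			csum += data[i];	/* assumed: plain 32-bit byte sum */

		if (fwrite(data, 1, size, initrd) != size ||
		    fwrite(&size, sizeof(size), 1, initrd) != 1 ||
		    fwrite(&csum, sizeof(csum), 1, initrd) != 1 ||
		    fwrite(magic, sizeof(magic), 1, initrd) != 1)
			return -1;

		return 0;
	}

	int main(int argc, char **argv)
	{
		uint8_t buf[32768];	/* bootconfig data is limited to 32KB */
		FILE *bc, *initrd;
		size_t n;

		if (argc != 3) {
			fprintf(stderr, "usage: %s <initrd> <bootconfig>\n", argv[0]);
			return 1;
		}

		bc = fopen(argv[2], "rb");
		initrd = fopen(argv[1], "ab");
		if (!bc || !initrd)
			return 1;

		n = fread(buf, 1, sizeof(buf), bc);
		if (append_bootconfig(initrd, buf, (uint32_t)n))
			return 1;

		fclose(bc);
		fclose(initrd);
		return 0;
	}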
diff --git a/Documentation/admin-guide/kernel-parameters.txt b/Documentation/admin-guide/kernel-parameters.txt
index dbc22d684627..c07815d230bc 100644
--- a/Documentation/admin-guide/kernel-parameters.txt
+++ b/Documentation/admin-guide/kernel-parameters.txt
@@ -136,6 +136,10 @@
dynamic table installation which will install SSDT
tables to /sys/firmware/acpi/tables/dynamic.
+ acpi_no_watchdog [HW,ACPI,WDT]
+ Ignore the ACPI-based watchdog interface (WDAT) and let
+ a native driver control the watchdog device instead.
+
acpi_rsdp= [ACPI,EFI,KEXEC]
Pass the RSDP address to the kernel, mostly used
on machines running EFI runtime service to boot the
diff --git a/Documentation/arm64/memory.rst b/Documentation/arm64/memory.rst
index 02e02175e6f5..cf03b3290800 100644
--- a/Documentation/arm64/memory.rst
+++ b/Documentation/arm64/memory.rst
@@ -129,7 +129,7 @@ this logic.
As a single binary will need to support both 48-bit and 52-bit VA
spaces, the VMEMMAP must be sized large enough for 52-bit VAs and
-also must be sized large enought to accommodate a fixed PAGE_OFFSET.
+also must be sized large enough to accommodate a fixed PAGE_OFFSET.
Most code in the kernel should not need to consider the VA_BITS, for
code that does need to know the VA size the variables are
diff --git a/Documentation/arm64/tagged-address-abi.rst b/Documentation/arm64/tagged-address-abi.rst
index d4a85d535bf9..4a9d9c794ee5 100644
--- a/Documentation/arm64/tagged-address-abi.rst
+++ b/Documentation/arm64/tagged-address-abi.rst
@@ -44,8 +44,15 @@ The AArch64 Tagged Address ABI has two stages of relaxation depending
how the user addresses are used by the kernel:
1. User addresses not accessed by the kernel but used for address space
- management (e.g. ``mmap()``, ``mprotect()``, ``madvise()``). The use
- of valid tagged pointers in this context is always allowed.
+ management (e.g. ``mprotect()``, ``madvise()``). The use of valid
+ tagged pointers in this context is allowed with the exception of
+ ``brk()``, ``mmap()`` and the ``new_address`` argument to
+ ``mremap()`` as these have the potential to alias with existing
+ user addresses.
+
+ NOTE: This behaviour changed in v5.6 and so some earlier kernels may
+ incorrectly accept valid tagged pointers for the ``brk()``,
+ ``mmap()`` and ``mremap()`` system calls.
2. User addresses accessed by the kernel (e.g. ``write()``). This ABI
relaxation is disabled by default and the application thread needs to
diff --git a/Documentation/dev-tools/kunit/usage.rst b/Documentation/dev-tools/kunit/usage.rst
index 7cd56a1993b1..607758a66a99 100644
--- a/Documentation/dev-tools/kunit/usage.rst
+++ b/Documentation/dev-tools/kunit/usage.rst
@@ -551,6 +551,7 @@ options to your ``.config``:
Once the kernel is built and installed, a simple
.. code-block:: bash
+
modprobe example-test
...will run the tests.
diff --git a/Documentation/devicetree/bindings/arm/arm,scmi.txt b/Documentation/devicetree/bindings/arm/arm,scmi.txt
index f493d69e6194..dc102c4e4a78 100644
--- a/Documentation/devicetree/bindings/arm/arm,scmi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scmi.txt
@@ -102,7 +102,7 @@ Required sub-node properties:
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/power/power-domain.yaml
[3] Documentation/devicetree/bindings/thermal/thermal.txt
-[4] Documentation/devicetree/bindings/sram/sram.txt
+[4] Documentation/devicetree/bindings/sram/sram.yaml
[5] Documentation/devicetree/bindings/reset/reset.txt
Example:
diff --git a/Documentation/devicetree/bindings/arm/arm,scpi.txt b/Documentation/devicetree/bindings/arm/arm,scpi.txt
index 7b83ef43b418..dd04d9d9a1b8 100644
--- a/Documentation/devicetree/bindings/arm/arm,scpi.txt
+++ b/Documentation/devicetree/bindings/arm/arm,scpi.txt
@@ -109,7 +109,7 @@ Required properties:
[0] http://infocenter.arm.com/help/topic/com.arm.doc.dui0922b/index.html
[1] Documentation/devicetree/bindings/clock/clock-bindings.txt
[2] Documentation/devicetree/bindings/thermal/thermal.txt
-[3] Documentation/devicetree/bindings/sram/sram.txt
+[3] Documentation/devicetree/bindings/sram/sram.yaml
[4] Documentation/devicetree/bindings/power/power-domain.yaml
Example:
diff --git a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
index b82b6a0ae6f7..8c7a4908a849 100644
--- a/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
+++ b/Documentation/devicetree/bindings/arm/bcm/brcm,bcm63138.txt
@@ -62,7 +62,7 @@ Timer node:
Syscon reboot node:
-See Documentation/devicetree/bindings/power/reset/syscon-reboot.txt for the
+See Documentation/devicetree/bindings/power/reset/syscon-reboot.yaml for the
detailed list of properties, the two values defined below are specific to the
BCM6328-style timer:
diff --git a/Documentation/devicetree/bindings/arm/cpus.yaml b/Documentation/devicetree/bindings/arm/cpus.yaml
index 7a9c3ce2dbef..0d5b61056b10 100644
--- a/Documentation/devicetree/bindings/arm/cpus.yaml
+++ b/Documentation/devicetree/bindings/arm/cpus.yaml
@@ -216,7 +216,7 @@ properties:
$ref: '/schemas/types.yaml#/definitions/phandle-array'
description: |
List of phandles to idle state nodes supported
- by this cpu (see ./idle-states.txt).
+ by this cpu (see ./idle-states.yaml).
capacity-dmips-mhz:
$ref: '/schemas/types.yaml#/definitions/uint32'
diff --git a/Documentation/devicetree/bindings/arm/fsl.yaml b/Documentation/devicetree/bindings/arm/fsl.yaml
index a8e0b4a813ed..0e17e1f6fb80 100644
--- a/Documentation/devicetree/bindings/arm/fsl.yaml
+++ b/Documentation/devicetree/bindings/arm/fsl.yaml
@@ -160,7 +160,7 @@ properties:
items:
- enum:
- armadeus,imx6dl-apf6 # APF6 (Solo) SoM
- - armadeus,imx6dl-apf6dldev # APF6 (Solo) SoM on APF6Dev board
+ - armadeus,imx6dl-apf6dev # APF6 (Solo) SoM on APF6Dev board
- eckelmann,imx6dl-ci4x10
- emtrion,emcon-mx6 # emCON-MX6S or emCON-MX6DL SoM
- emtrion,emcon-mx6-avari # emCON-MX6S or emCON-MX6DL SoM on Avari Base
diff --git a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
index 115c5be0bd0b..8defacc44dd5 100644
--- a/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
+++ b/Documentation/devicetree/bindings/arm/hisilicon/hi3519-sysctrl.txt
@@ -1,7 +1,7 @@
* Hisilicon Hi3519 System Controller Block
This bindings use the following binding:
-Documentation/devicetree/bindings/mfd/syscon.txt
+Documentation/devicetree/bindings/mfd/syscon.yaml
Required properties:
- compatible: "hisilicon,hi3519-sysctrl".
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
index 06df04cc827a..6ce0b212ec6d 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,idle-state.txt
@@ -81,4 +81,4 @@ Example:
};
};
-[1]. Documentation/devicetree/bindings/arm/idle-states.txt
+[1]. Documentation/devicetree/bindings/arm/idle-states.yaml
diff --git a/Documentation/devicetree/bindings/arm/omap/mpu.txt b/Documentation/devicetree/bindings/arm/omap/mpu.txt
index f301e636fd52..e41490e6979c 100644
--- a/Documentation/devicetree/bindings/arm/omap/mpu.txt
+++ b/Documentation/devicetree/bindings/arm/omap/mpu.txt
@@ -17,7 +17,7 @@ am335x and am437x only:
- pm-sram: Phandles to ocmcram nodes to be used for power management.
First should be type 'protect-exec' for the driver to use to copy
and run PM functions, second should be regular pool to be used for
- data region for code. See Documentation/devicetree/bindings/sram/sram.txt
+ data region for code. See Documentation/devicetree/bindings/sram/sram.yaml
for more details.
Examples:
diff --git a/Documentation/devicetree/bindings/arm/psci.yaml b/Documentation/devicetree/bindings/arm/psci.yaml
index 8ef85420b2ab..5e66934455bb 100644
--- a/Documentation/devicetree/bindings/arm/psci.yaml
+++ b/Documentation/devicetree/bindings/arm/psci.yaml
@@ -100,13 +100,14 @@ properties:
bindings in [1]) must specify this property.
[1] Kernel documentation - ARM idle states bindings
- Documentation/devicetree/bindings/arm/idle-states.txt
-
- "#power-domain-cells":
- description:
- The number of cells in a PM domain specifier as per binding in [3].
- Must be 0 as to represent a single PM domain.
+ Documentation/devicetree/bindings/arm/idle-states.yaml
+patternProperties:
+ "^power-domain-":
+ allOf:
+ - $ref: "../power/power-domain.yaml#"
+ type: object
+ description: |
      ARM systems can have multiple cores, sometimes in a hierarchical
arrangement. This often, but not always, maps directly to the processor
power topology of the system. Individual nodes in a topology have their
@@ -122,14 +123,8 @@ properties:
helps to implement support for OSI mode and OS implementations may choose
to mandate it.
- [3] Documentation/devicetree/bindings/power/power_domain.txt
- [4] Documentation/devicetree/bindings/power/domain-idle-state.txt
-
- power-domains:
- $ref: '/schemas/types.yaml#/definitions/phandle-array'
- description:
- List of phandles and PM domain specifiers, as defined by bindings of the
- PM domain provider.
+ [3] Documentation/devicetree/bindings/power/power-domain.yaml
+ [4] Documentation/devicetree/bindings/power/domain-idle-state.yaml
required:
- compatible
@@ -199,7 +194,7 @@ examples:
CPU0: cpu@0 {
device_type = "cpu";
- compatible = "arm,cortex-a53", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x0>;
enable-method = "psci";
power-domains = <&CPU_PD0>;
@@ -208,7 +203,7 @@ examples:
CPU1: cpu@1 {
device_type = "cpu";
- compatible = "arm,cortex-a57", "arm,armv8";
+ compatible = "arm,cortex-a53";
reg = <0x100>;
enable-method = "psci";
power-domains = <&CPU_PD1>;
@@ -224,6 +219,9 @@ examples:
exit-latency-us = <10>;
min-residency-us = <100>;
};
+ };
+
+ domain-idle-states {
CLUSTER_RET: cluster-retention {
compatible = "domain-idle-state";
@@ -247,19 +245,19 @@ examples:
compatible = "arm,psci-1.0";
method = "smc";
- CPU_PD0: cpu-pd0 {
+ CPU_PD0: power-domain-cpu0 {
#power-domain-cells = <0>;
domain-idle-states = <&CPU_PWRDN>;
power-domains = <&CLUSTER_PD>;
};
- CPU_PD1: cpu-pd1 {
+ CPU_PD1: power-domain-cpu1 {
#power-domain-cells = <0>;
domain-idle-states = <&CPU_PWRDN>;
power-domains = <&CLUSTER_PD>;
};
- CLUSTER_PD: cluster-pd {
+ CLUSTER_PD: power-domain-cluster {
#power-domain-cells = <0>;
domain-idle-states = <&CLUSTER_RET>, <&CLUSTER_PWRDN>;
};
diff --git a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
index 68917bb7c7e8..55f7938c4826 100644
--- a/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
+++ b/Documentation/devicetree/bindings/arm/stm32/st,mlahb.yaml
@@ -52,7 +52,7 @@ required:
examples:
- |
- mlahb: ahb {
+ mlahb: ahb@38000000 {
compatible = "st,mlahb", "simple-bus";
#address-cells = <1>;
#size-cells = <1>;
diff --git a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
index 9fe11ceecdba..80973619342d 100644
--- a/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
+++ b/Documentation/devicetree/bindings/bus/allwinner,sun8i-a23-rsb.yaml
@@ -70,7 +70,6 @@ examples:
#size-cells = <0>;
pmic@3e3 {
- compatible = "...";
reg = <0x3e3>;
/* ... */
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
index 69cfa4a3d562..c604822cda07 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun4i-a10-osc-clk.yaml
@@ -40,7 +40,7 @@ additionalProperties: false
examples:
- |
- osc24M: clk@01c20050 {
+ osc24M: clk@1c20050 {
#clock-cells = <0>;
compatible = "allwinner,sun4i-a10-osc-clk";
reg = <0x01c20050 0x4>;
diff --git a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
index 07f38def7dc3..43963c3062c8 100644
--- a/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
+++ b/Documentation/devicetree/bindings/clock/allwinner,sun9i-a80-gt-clk.yaml
@@ -41,7 +41,7 @@ additionalProperties: false
examples:
- |
- clk@0600005c {
+ clk@600005c {
#clock-cells = <0>;
compatible = "allwinner,sun9i-a80-gt-clk";
reg = <0x0600005c 0x4>;
diff --git a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
index 17f87178f6b8..3647007f82ca 100644
--- a/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
+++ b/Documentation/devicetree/bindings/clock/qcom,gcc-apq8064.yaml
@@ -42,7 +42,7 @@ properties:
be part of GCC and hence the TSENS properties can also be part
of the GCC/clock-controller node.
For more details on the TSENS properties please refer
- Documentation/devicetree/bindings/thermal/qcom-tsens.txt
+ Documentation/devicetree/bindings/thermal/qcom-tsens.yaml
nvmem-cell-names:
minItems: 1
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
index 86ad617d2327..e5344c4ae226 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tcon.yaml
@@ -36,6 +36,12 @@ properties:
- items:
- enum:
+ - allwinner,sun7i-a20-tcon0
+ - allwinner,sun7i-a20-tcon1
+ - const: allwinner,sun7i-a20-tcon
+
+ - items:
+ - enum:
- allwinner,sun50i-a64-tcon-lcd
- const: allwinner,sun8i-a83t-tcon-lcd
@@ -43,9 +49,13 @@ properties:
- enum:
- allwinner,sun8i-h3-tcon-tv
- allwinner,sun50i-a64-tcon-tv
- - allwinner,sun50i-h6-tcon-tv
- const: allwinner,sun8i-a83t-tcon-tv
+ - items:
+ - enum:
+ - allwinner,sun50i-h6-tcon-tv
+ - const: allwinner,sun8i-r40-tcon-tv
+
reg:
maxItems: 1
diff --git a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
index 5d5d39665119..6009324be967 100644
--- a/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
+++ b/Documentation/devicetree/bindings/display/allwinner,sun4i-a10-tv-encoder.yaml
@@ -49,11 +49,7 @@ examples:
resets = <&tcon_ch0_clk 0>;
port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- tve0_in_tcon0: endpoint@0 {
- reg = <0>;
+ tve0_in_tcon0: endpoint {
remote-endpoint = <&tcon0_out_tve0>;
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
index 2c887536258c..e8ddec5d9d91 100644
--- a/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
+++ b/Documentation/devicetree/bindings/display/bridge/adi,adv7511.txt
@@ -1,10 +1,10 @@
-Analog Device ADV7511(W)/13/33 HDMI Encoders
+Analog Devices ADV7511(W)/13/33/35 HDMI Encoders
-----------------------------------------
-The ADV7511, ADV7511W, ADV7513 and ADV7533 are HDMI audio and video transmitters
-compatible with HDMI 1.4 and DVI 1.0. They support color space conversion,
-S/PDIF, CEC and HDCP. ADV7533 supports the DSI interface for input pixels, while
-the others support RGB interface.
+The ADV7511, ADV7511W, ADV7513, ADV7533 and ADV7535 are HDMI audio and video
+transmitters compatible with HDMI 1.4 and DVI 1.0. They support color space
+conversion, S/PDIF, CEC and HDCP. The ADV7533 and ADV7535 support the DSI
+interface for input pixels, while the others support an RGB interface.
Required properties:
@@ -13,6 +13,7 @@ Required properties:
"adi,adv7511w"
"adi,adv7513"
"adi,adv7533"
+ "adi,adv7535"
- reg: I2C slave addresses
The ADV7511 internal registers are split into four pages exposed through
@@ -52,14 +53,14 @@ The following input format properties are required except in "rgb 1x" and
- bgvdd-supply: A 1.8V supply that powers up the BGVDD pin. This is
needed only for ADV7511.
-The following properties are required for ADV7533:
+The following properties are required for ADV7533 and ADV7535:
- adi,dsi-lanes: Number of DSI data lanes connected to the DSI host. It should
be one of 1, 2, 3 or 4.
- a2vdd-supply: 1.8V supply that powers up the A2VDD pin on the chip.
- v3p3-supply: A 3.3V supply that powers up the V3P3 pin on the chip.
- v1p2-supply: A supply that powers up the V1P2 pin on the chip. It can be
- either 1.2V or 1.8V.
+ either 1.2V or 1.8V for ADV7533 but only 1.8V for ADV7535.
Optional properties:
@@ -71,9 +72,9 @@ Optional properties:
- adi,embedded-sync: The input uses synchronization signals embedded in the
data stream (similar to BT.656). Defaults to separate H/V synchronization
signals.
-- adi,disable-timing-generator: Only for ADV7533. Disables the internal timing
- generator. The chip will rely on the sync signals in the DSI data lanes,
- rather than generate its own timings for HDMI output.
+- adi,disable-timing-generator: Only for ADV7533 and ADV7535. Disables the
+ internal timing generator. The chip will rely on the sync signals in the
+ DSI data lanes, rather than generate its own timings for HDMI output.
- clocks: from common clock binding: reference to the CEC clock.
- clock-names: from common clock binding: must be "cec".
- reg-names : Names of maps with programmable addresses.
@@ -85,7 +86,7 @@ Required nodes:
The ADV7511 has two video ports. Their connections are modelled using the OF
graph bindings specified in Documentation/devicetree/bindings/graph.txt.
-- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533, the
+- Video port 0 for the RGB, YUV or DSI input. In the case of ADV7533/5, the
remote endpoint phandle should be a reference to a valid mipi_dsi_host device
node.
- Video port 1 for the HDMI output
diff --git a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
index 6d72b3d11fbc..c21103869923 100644
--- a/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
+++ b/Documentation/devicetree/bindings/display/bridge/anx6345.yaml
@@ -79,21 +79,15 @@ examples:
#size-cells = <0>;
anx6345_in: port@0 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <0>;
- anx6345_in_tcon0: endpoint@0 {
- reg = <0>;
+ anx6345_in_tcon0: endpoint {
remote-endpoint = <&tcon0_out_anx6345>;
};
};
anx6345_out: port@1 {
- #address-cells = <1>;
- #size-cells = <0>;
reg = <1>;
- anx6345_out_panel: endpoint@0 {
- reg = <0>;
+ anx6345_out_panel: endpoint {
remote-endpoint = <&panel_in_edp>;
};
};
diff --git a/Documentation/devicetree/bindings/display/bridge/ps8640.yaml b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
new file mode 100644
index 000000000000..5dff93641bea
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/ps8640.yaml
@@ -0,0 +1,112 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/ps8640.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: MIPI DSI to eDP Video Format Converter Device Tree Bindings
+
+maintainers:
+ - Nicolas Boichat <[email protected]>
+ - Enric Balletbo i Serra <[email protected]>
+
+description: |
+ The PS8640 is a low power MIPI-to-eDP video format converter supporting
+ mobile devices with embedded panel resolutions up to 2048 x 1536. The
+ device accepts a single channel of MIPI DSI v1.1, with up to four lanes
+ plus clock, at a transmission rate up to 1.5Gbit/sec per lane. The
+ device outputs eDP v1.4, one or two lanes, at a link rate of up to
+ 3.24Gbit/sec per lane.
+
+properties:
+ compatible:
+ const: parade,ps8640
+
+ reg:
+ maxItems: 1
+ description: Base I2C address of the device.
+
+ powerdown-gpios:
+ maxItems: 1
+ description: GPIO connected to active low powerdown.
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to active low reset.
+
+ vdd12-supply:
+ maxItems: 1
+ description: Regulator for 1.2V digital core power.
+
+ vdd33-supply:
+ maxItems: 1
+ description: Regulator for 3.3V digital core power.
+
+ ports:
+ type: object
+ description:
+ A node containing DSI input & output port nodes with endpoint
+ definitions as documented in
+ Documentation/devicetree/bindings/media/video-interfaces.txt
+ Documentation/devicetree/bindings/graph.txt
+ properties:
+ port@0:
+ type: object
+ description: |
+ Video port for DSI input
+
+ port@1:
+ type: object
+ description: |
+ Video port for eDP output (panel or connector).
+
+ required:
+ - port@0
+
+required:
+ - compatible
+ - reg
+ - powerdown-gpios
+ - reset-gpios
+ - vdd12-supply
+ - vdd33-supply
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+ i2c0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ ps8640: edp-bridge@18 {
+ compatible = "parade,ps8640";
+ reg = <0x18>;
+ powerdown-gpios = <&pio 116 GPIO_ACTIVE_LOW>;
+ reset-gpios = <&pio 115 GPIO_ACTIVE_LOW>;
+ vdd12-supply = <&ps8640_fixed_1v2>;
+ vdd33-supply = <&mt6397_vgp2_reg>;
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ ps8640_in: endpoint {
+ remote-endpoint = <&dsi0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ ps8640_out: endpoint {
+ remote-endpoint = <&panel_in>;
+ };
+ };
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
new file mode 100644
index 000000000000..c036a75db8f7
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/bridge/toshiba,tc358768.yaml
@@ -0,0 +1,159 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/bridge/toshiba,tc358768.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Toshiba TC358768/TC358778 Parallel RGB to MIPI DSI bridge
+
+maintainers:
+ - Peter Ujfalusi <[email protected]>
+
+description: |
+ The TC358768/TC358778 is a bridge device which converts RGB to DSI.
+
+properties:
+ compatible:
+ enum:
+ - toshiba,tc358768
+ - toshiba,tc358778
+
+ reg:
+ maxItems: 1
+ description: base I2C address of the device
+
+ reset-gpios:
+ maxItems: 1
+ description: GPIO connected to active low RESX pin
+
+ vddc-supply:
+ description: Regulator for 1.2V internal core power.
+
+ vddmipi-supply:
+ description: Regulator for 1.2V for the MIPI.
+
+ vddio-supply:
+ description: Regulator for 1.8V - 3.3V IO power.
+
+ clocks:
+ maxItems: 1
+
+ clock-names:
+ const: refclk
+
+ ports:
+ type: object
+
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ additionalProperties: false
+
+ description: |
+ Video port for RGB input
+
+ properties:
+ reg:
+ const: 0
+
+ patternProperties:
+ endpoint:
+ type: object
+ additionalProperties: false
+
+ properties:
+ data-lines:
+ enum: [ 16, 18, 24 ]
+
+ remote-endpoint: true
+
+ required:
+ - reg
+
+ port@1:
+ type: object
+ additionalProperties: false
+
+ description: |
+ Video port for DSI output (panel or connector).
+
+ properties:
+ reg:
+ const: 1
+
+ patternProperties:
+ endpoint:
+ type: object
+ additionalProperties: false
+
+ properties:
+ remote-endpoint: true
+
+ required:
+ - reg
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+ - port@0
+ - port@1
+
+required:
+ - compatible
+ - reg
+ - vddc-supply
+ - vddmipi-supply
+ - vddio-supply
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ i2c1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dsi_bridge: dsi-bridge@e {
+ compatible = "toshiba,tc358768";
+ reg = <0xe>;
+
+ clocks = <&tc358768_refclk>;
+ clock-names = "refclk";
+
+ reset-gpios = <&pcf_display_board 0 GPIO_ACTIVE_LOW>;
+
+ vddc-supply = <&v1_2d>;
+ vddmipi-supply = <&v1_2d>;
+ vddio-supply = <&v3_3d>;
+
+ dsi_bridge_ports: ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ rgb_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ data-lines = <24>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dsi_out: endpoint {
+ remote-endpoint = <&lcd_in>;
+ };
+ };
+ };
+ };
+ };
+
diff --git a/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml b/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
new file mode 100644
index 000000000000..66e93e563653
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
@@ -0,0 +1,73 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/ilitek,ili9486.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Ilitek ILI9486 display panels device tree bindings
+
+maintainers:
+ - Kamlesh Gurudasani <[email protected]>
+
+description:
+ This binding is for display panels using an Ilitek ILI9486 controller in SPI
+ mode.
+
+allOf:
+ - $ref: panel/panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - enum:
+ # Waveshare 3.5" 320x480 Color TFT LCD
+ - waveshare,rpi-lcd-35
+ # Ozzmaker 3.5" 320x480 Color TFT LCD
+ - ozzmaker,piscreen
+ - const: ilitek,ili9486
+
+ spi-max-frequency:
+ maximum: 32000000
+
+ dc-gpios:
+ maxItems: 1
+ description: Display data/command selection (D/CX)
+
+ backlight: true
+ reg: true
+ reset-gpios: true
+ rotation: true
+
+required:
+ - compatible
+ - reg
+ - dc-gpios
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ backlight: backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpio 22 GPIO_ACTIVE_HIGH>;
+ };
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ display@0 {
+ compatible = "waveshare,rpi-lcd-35", "ilitek,ili9486";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio0 24 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio0 25 GPIO_ACTIVE_HIGH>;
+ rotation = <180>;
+ backlight = <&backlight>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
new file mode 100644
index 000000000000..93878c2cd370
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml
@@ -0,0 +1,69 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/advantech,idk-1110wr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Advantech IDK-1110WR 10.1" WSVGA LVDS Display Panel
+
+maintainers:
+ - Lad Prabhakar <[email protected]>
+ - Thierry Reding <[email protected]>
+
+allOf:
+ - $ref: lvds.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: advantech,idk-1110wr
+ - {} # panel-lvds, but not listed here to avoid false select
+
+ data-mapping:
+ const: jeida-24
+
+ width-mm:
+ const: 223
+
+ height-mm:
+ const: 125
+
+ panel-timing: true
+ port: true
+
+additionalProperties: false
+
+required:
+ - compatible
+
+examples:
+ - |+
+ panel {
+ compatible = "advantech,idk-1110wr", "panel-lvds";
+
+ width-mm = <223>;
+ height-mm = <125>;
+
+ data-mapping = "jeida-24";
+
+ panel-timing {
+ /* 1024x600 @60Hz */
+ clock-frequency = <51200000>;
+ hactive = <1024>;
+ vactive = <600>;
+ hsync-len = <240>;
+ hfront-porch = <40>;
+ hback-porch = <40>;
+ vsync-len = <10>;
+ vfront-porch = <15>;
+ vback-porch = <10>;
+ };
+
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&lvds_encoder>;
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
new file mode 100644
index 000000000000..6b7fddc80c41
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-2121wr.yaml
@@ -0,0 +1,122 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/advantech,idk-2121wr.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Advantech IDK-2121WR 21.5" Full-HD dual-LVDS panel
+
+maintainers:
+ - Lad Prabhakar <[email protected]>
+ - Thierry Reding <[email protected]>
+
+description: |
+ The IDK-2121WR from Advantech is a Full-HD dual-LVDS panel.
+ A dual-LVDS interface is a dual-link connection with even pixels traveling
+ on one link, and with odd pixels traveling on the other link.
+
+ The panel expects odd pixels on the first port, and even pixels on the
+ second port, therefore the ports must be marked accordingly (with either
+ dual-lvds-odd-pixels or dual-lvds-even-pixels).
+
+properties:
+ compatible:
+ items:
+ - const: advantech,idk-2121wr
+ - {} # panel-lvds, but not listed here to avoid false select
+
+ width-mm:
+ const: 476
+
+ height-mm:
+ const: 268
+
+ data-mapping:
+ const: vesa-24
+
+ panel-timing: true
+
+ ports:
+ type: object
+ properties:
+ port@0:
+ type: object
+ description: The sink for odd pixels.
+ properties:
+ reg:
+ const: 0
+
+ dual-lvds-odd-pixels: true
+
+ required:
+ - reg
+ - dual-lvds-odd-pixels
+
+ port@1:
+ type: object
+ description: The sink for even pixels.
+ properties:
+ reg:
+ const: 1
+
+ dual-lvds-even-pixels: true
+
+ required:
+ - reg
+ - dual-lvds-even-pixels
+
+additionalProperties: false
+
+required:
+ - compatible
+ - width-mm
+ - height-mm
+ - data-mapping
+ - panel-timing
+ - ports
+
+examples:
+ - |+
+ panel-lvds {
+ compatible = "advantech,idk-2121wr", "panel-lvds";
+
+ width-mm = <476>;
+ height-mm = <268>;
+
+ data-mapping = "vesa-24";
+
+ panel-timing {
+ clock-frequency = <148500000>;
+ hactive = <1920>;
+ vactive = <1080>;
+ hsync-len = <44>;
+ hfront-porch = <88>;
+ hback-porch = <148>;
+ vfront-porch = <4>;
+ vback-porch = <36>;
+ vsync-len = <5>;
+ };
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ reg = <0>;
+ dual-lvds-odd-pixels;
+ panel_in0: endpoint {
+ remote-endpoint = <&lvds0_out>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ dual-lvds-even-pixels;
+ panel_in1: endpoint {
+ remote-endpoint = <&lvds1_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt b/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt
deleted file mode 100644
index bae0e2b51467..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b080uan01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 8.0" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101ean01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt b/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt
deleted file mode 100644
index 72e088a4fb3a..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101aw03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101aw03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt b/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt
deleted file mode 100644
index 3590b0741619..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101ean01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101ean01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt
deleted file mode 100644
index 889d511d66c9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b101xtn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b101xtn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt b/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt
deleted file mode 100644
index 690d0a568ef3..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b116xw03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
-
-Required properties:
-- compatible: should be "auo,b116xw03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt
deleted file mode 100644
index 302226b5bb55..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b133htn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
-
-Required properties:
-- compatible: should be "auo,b133htn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt b/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt
deleted file mode 100644
index 7443b7c76769..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,b133xtn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,b133xtn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
deleted file mode 100644
index 49e4105378f6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g070vvn01.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-AU Optronics Corporation 7.0" FHD (800 x 480) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g070vvn01"
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: single regulator to provide the supply voltage
-
-Required nodes:
-- port: Parallel port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX6Q based board
-
- lcd_panel: lcd-panel {
- compatible = "auo,g070vvn01";
- backlight = <&backlight_lcd>;
- power-supply = <&reg_display>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt b/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
deleted file mode 100644
index bc6a0c858e23..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g101evn010.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g101evn010"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt b/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
deleted file mode 100644
index 85626edf63e5..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g104sn02.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g104sn02"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
deleted file mode 100644
index 3afc76747824..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g133han01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g133han01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt b/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
deleted file mode 100644
index ed657c2141d4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,g185han01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,g185han01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt b/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt
deleted file mode 100644
index 59bb6cd8aa75..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,p320hvn03.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,p320hvn03"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt b/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
deleted file mode 100644
index cbd9da3f03b1..000000000000
--- a/Documentation/devicetree/bindings/display/panel/auo,t215hvn01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
-
-Required properties:
-- compatible: should be "auo,t215hvn01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt b/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt
deleted file mode 100644
index b6f2f3e8f44e..000000000000
--- a/Documentation/devicetree/bindings/display/panel/avic,tm070ddh03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "avic,tm070ddh03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt b/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
deleted file mode 100644
index 55183d360032..000000000000
--- a/Documentation/devicetree/bindings/display/panel/boe,hv070wsa-100.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,hv070wsa-100"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-- enable-gpios: GPIO pin to enable and disable panel (active high)
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-The device node can contain one 'port' child node with one child
-'endpoint' node, according to the bindings defined in [1]. This
-node should describe panel's video bus.
-
-[1]: Documentation/devicetree/bindings/media/video-interfaces.txt
-
-Example:
-
- panel: panel {
- compatible = "boe,hv070wsa-100";
- power-supply = <&vcc_3v3_reg>;
- enable-gpios = <&gpd1 3 GPIO_ACTIVE_HIGH>;
- port {
- panel_ep: endpoint {
- remote-endpoint = <&bridge_out_ep>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt b/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
deleted file mode 100644
index b258d6a91ec6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/boe,nv101wxmn51.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,nv101wxmn51"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt b/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
deleted file mode 100644
index 50be5e2438b2..000000000000
--- a/Documentation/devicetree/bindings/display/panel/boe,tv080wum-nl0.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Boe Corporation 8.0" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "boe,tv080wum-nl0"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
new file mode 100644
index 000000000000..740213459134
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/boe,tv101wum-nl6.yaml
@@ -0,0 +1,80 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/boe,tv101wum-nl6.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: BOE TV101WUM-NL6 DSI Display Panel
+
+maintainers:
+ - Thierry Reding <[email protected]>
+ - Sam Ravnborg <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ enum:
+ # BOE TV101WUM-NL6 10.1" WUXGA TFT LCD panel
+ - boe,tv101wum-nl6
+ # AUO KD101N80-45NA 10.1" WUXGA TFT LCD panel
+ - auo,kd101n80-45na
+ # BOE TV101WUM-N53 10.1" WUXGA TFT LCD panel
+ - boe,tv101wum-n53
+ # AUO B101UAN08.3 10.1" WUXGA TFT LCD panel
+ - auo,b101uan08.3
+
+ reg:
+ description: the virtual channel number of a DSI peripheral
+
+ enable-gpios:
+ description: a GPIO spec for the enable pin
+
+ pp1800-supply:
+ description: core voltage supply
+
+ avdd-supply:
+ description: phandle of the regulator that provides positive voltage
+
+ avee-supply:
+ description: phandle of the regulator that provides negative voltage
+
+ backlight:
+ description: phandle of the backlight device attached to the panel
+
+ port: true
+
+required:
+ - compatible
+ - reg
+ - enable-gpios
+ - pp1800-supply
+ - avdd-supply
+ - avee-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "boe,tv101wum-nl6";
+ reg = <0>;
+ enable-gpios = <&pio 45 0>;
+ avdd-supply = <&ppvarn_lcd>;
+ avee-supply = <&ppvarp_lcd>;
+ pp1800-supply = <&pp1800_lcd>;
+ backlight = <&backlight_lcd0>;
+ status = "okay";
+ port {
+ panel_in: endpoint {
+ remote-endpoint = <&dsi_out>;
+ };
+ };
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
deleted file mode 100644
index 057f7f3f6dbe..000000000000
--- a/Documentation/devicetree/bindings/display/panel/cdtech,s043wq26h-ct7.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "cdtech,s043wq26h-ct7"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt b/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
deleted file mode 100644
index 505615dfa0df..000000000000
--- a/Documentation/devicetree/bindings/display/panel/cdtech,s070wv95-ct16.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
-
-Required properties:
-- compatible: should be "cdtech,s070wv95-ct16"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
deleted file mode 100644
index dd22685d2adc..000000000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa070wp03xg.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa070wp03xg"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt
deleted file mode 100644
index f24614e4d5ec..000000000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wa01a.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa101wa01a"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt b/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt
deleted file mode 100644
index 0ab2c05a4c22..000000000000
--- a/Documentation/devicetree/bindings/display/panel/chunghwa,claa101wb03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "chunghwa,claa101wb03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt b/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
deleted file mode 100644
index 897085ee3cd4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/dataimage,scf0700c48ggu18.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
-
-Required properties:
-- compatible: should be "dataimage,scf0700c48ggu18"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timing.txt b/Documentation/devicetree/bindings/display/panel/display-timing.txt
index 78222ced1874..7f55ad4a40c4 100644
--- a/Documentation/devicetree/bindings/display/panel/display-timing.txt
+++ b/Documentation/devicetree/bindings/display/panel/display-timing.txt
@@ -1,123 +1 @@
-display-timing bindings
-=======================
-
-display-timings node
---------------------
-
-required properties:
- - none
-
-optional properties:
- - native-mode: The native mode for the display, in case multiple modes are
- provided. When omitted, assume the first node is the native.
-
-timing subnode
---------------
-
-required properties:
- - hactive, vactive: display resolution
- - hfront-porch, hback-porch, hsync-len: horizontal display timing parameters
- in pixels
- vfront-porch, vback-porch, vsync-len: vertical display timing parameters in
- lines
- - clock-frequency: display clock in Hz
-
-optional properties:
- - hsync-active: hsync pulse is active low/high/ignored
- - vsync-active: vsync pulse is active low/high/ignored
- - de-active: data-enable pulse is active low/high/ignored
- - pixelclk-active: with
- - active high = drive pixel data on rising edge/
- sample data on falling edge
- - active low = drive pixel data on falling edge/
- sample data on rising edge
- - ignored = ignored
- - syncclk-active: with
- - active high = drive sync on rising edge/
- sample sync on falling edge of pixel
- clock
- - active low = drive sync on falling edge/
- sample sync on rising edge of pixel
- clock
- - omitted = same configuration as pixelclk-active
- - interlaced (bool): boolean to enable interlaced mode
- - doublescan (bool): boolean to enable doublescan mode
- - doubleclk (bool): boolean to enable doubleclock mode
-
-All the optional properties that are not bool follow the following logic:
- <1>: high active
- <0>: low active
- omitted: not used on hardware
-
-There are different ways of describing the capabilities of a display. The
-devicetree representation corresponds to the one commonly found in datasheets
-for displays. If a display supports multiple signal timings, the native-mode
-can be specified.
-
-The parameters are defined as:
-
- +----------+-------------------------------------+----------+-------+
- | | ^ | | |
- | | |vback_porch | | |
- | | v | | |
- +----------#######################################----------+-------+
- | # ^ # | |
- | # | # | |
- | hback # | # hfront | hsync |
- | porch # | hactive # porch | len |
- |<-------->#<-------+--------------------------->#<-------->|<----->|
- | # | # | |
- | # |vactive # | |
- | # | # | |
- | # v # | |
- +----------#######################################----------+-------+
- | | ^ | | |
- | | |vfront_porch | | |
- | | v | | |
- +----------+-------------------------------------+----------+-------+
- | | ^ | | |
- | | |vsync_len | | |
- | | v | | |
- +----------+-------------------------------------+----------+-------+
-
-Note: In addition to being used as subnode(s) of display-timings, the timing
- subnode may also be used on its own. This is appropriate if only one mode
- need be conveyed. In this case, the node should be named 'panel-timing'.
-
-
-Example:
-
- display-timings {
- native-mode = <&timing0>;
- timing0: 1080p24 {
- /* 1920x1080p24 */
- clock-frequency = <52000000>;
- hactive = <1920>;
- vactive = <1080>;
- hfront-porch = <25>;
- hback-porch = <25>;
- hsync-len = <25>;
- vback-porch = <2>;
- vfront-porch = <2>;
- vsync-len = <2>;
- hsync-active = <1>;
- };
- };
-
-Every required property also supports the use of ranges, so the commonly used
-datasheet description with minimum, typical and maximum values can be used.
-
-Example:
-
- timing1: timing {
- /* 1920x1080p24 */
- clock-frequency = <148500000>;
- hactive = <1920>;
- vactive = <1080>;
- hsync-len = <0 44 60>;
- hfront-porch = <80 88 95>;
- hback-porch = <100 148 160>;
- vfront-porch = <0 4 6>;
- vback-porch = <0 36 50>;
- vsync-len = <0 5 6>;
- };
+See display-timings.yaml in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/display-timings.yaml b/Documentation/devicetree/bindings/display/panel/display-timings.yaml
new file mode 100644
index 000000000000..c8c0c9cb0492
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/display-timings.yaml
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/display-timings.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: display timing bindings
+
+maintainers:
+ - Thierry Reding <[email protected]>
+ - Laurent Pinchart <[email protected]>
+ - Sam Ravnborg <[email protected]>
+
+description: |
+ A display panel may support several display timings, with different
+ resolutions. The display-timings node makes it possible to list the
+ supported timings and to mark the timing that is native for the display.
+
+properties:
+ $nodename:
+ const: display-timings
+
+ native-mode:
+ $ref: /schemas/types.yaml#/definitions/phandle
+ description: |
+ The default display timing is the one specified as native-mode.
+ If no native-mode is specified then the first node is assumed to be
+ the native mode.
+
+patternProperties:
+ "^timing":
+ type: object
+ allOf:
+ - $ref: panel-timing.yaml#
+
+additionalProperties: false
+
+examples:
+ - |+
+
+ /*
+ * Example that specifies panel timing using minimum, typical,
+ * maximum values as commonly used in datasheet description.
+ * timing1 is the native-mode.
+ */
+ display-timings {
+ native-mode = <&timing1>;
+ timing0 {
+ /* 1920x1080p24 */
+ clock-frequency = <148500000>;
+ hactive = <1920>;
+ vactive = <1080>;
+ hsync-len = <0 44 60>;
+ hfront-porch = <80 88 95>;
+ hback-porch = <100 148 160>;
+ vfront-porch = <0 4 6>;
+ vback-porch = <0 36 50>;
+ vsync-len = <0 5 6>;
+ };
+ timing1 {
+ /* 1920x1080p24 */
+ clock-frequency = <52000000>;
+ hactive = <1920>;
+ vactive = <1080>;
+ hfront-porch = <25>;
+ hback-porch = <25>;
+ hsync-len = <0 25 25>;
+ vback-porch = <2>;
+ vfront-porch = <2>;
+ vsync-len = <2>;
+ hsync-active = <1>;
+ pixelclk-active = <1>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt b/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
deleted file mode 100644
index fbf5dcd15661..000000000000
--- a/Documentation/devicetree/bindings/display/panel/dlc,dlc1010gig.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
-
-Required properties:
-- compatible: should be "dlc,dlc1010gig"
-- power-supply: See simple-panel.txt
-
-Optional properties:
-- enable-gpios: See simple-panel.txt
-- backlight: See simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt b/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
deleted file mode 100644
index b7ac1c725f97..000000000000
--- a/Documentation/devicetree/bindings/display/panel/edt,et-series.txt
+++ /dev/null
@@ -1,55 +0,0 @@
-Emerging Display Technology Corp. Displays
-==========================================
-
-
-Display bindings for EDT Display Technology Corp. Displays which are
-compatible with the simple-panel binding, which is specified in
-simple-panel.txt
-
-3,5" QVGA TFT Panels
---------------------
-+-----------------+---------------------+-------------------------------------+
-| Identifier | compatbile | description |
-+=================+=====================+=====================================+
-| ET035012DM6 | edt,et035012dm6 | 3.5" QVGA TFT LCD panel |
-+-----------------+---------------------+-------------------------------------+
-
-4,3" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier | compatbile | description |
-+=================+=====================+=====================================+
-| ETM0430G0DH6 | edt,etm0430g0dh6 | 480x272 TFT Display |
-+-----------------+---------------------+-------------------------------------+
-
-5,7" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier | compatbile | description |
-+=================+=====================+=====================================+
-| ET057090DHU | edt,et057090dhu | 5.7" VGA TFT LCD panel |
-+-----------------+---------------------+-------------------------------------+
-
-
-7,0" WVGA TFT Panels
---------------------
-
-+-----------------+---------------------+-------------------------------------+
-| Identifier | compatbile | description |
-+=================+=====================+=====================================+
-| ETM0700G0DH6 | edt,etm070080dh6 | WVGA TFT Display with capacitive |
-| | edt,etm0700g0dh6 | Touchscreen |
-+-----------------+---------------------+-------------------------------------+
-| ETM0700G0BDH6 | edt,etm070080bdh6 | Same as ETM0700G0DH6 but with |
-| | | inverted pixel clock. |
-+-----------------+---------------------+-------------------------------------+
-| ETM0700G0EDH6 | edt,etm070080edh6 | Same display as the ETM0700G0BDH6, |
-| | | but with changed Hardware for the |
-| | | backlight and the touch interface |
-+-----------------+---------------------+-------------------------------------+
-| ET070080DH6 | edt,etm070080dh6 | Same timings as the ETM0700G0DH6, |
-| | | but with resistive touch. |
-+-----------------+---------------------+-------------------------------------+
-
diff --git a/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
new file mode 100644
index 000000000000..aa761f697b7a
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/elida,kd35t133.yaml
@@ -0,0 +1,49 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/elida,kd35t133.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Elida KD35T133 3.5in 320x480 DSI panel
+
+maintainers:
+ - Heiko Stuebner <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: elida,kd35t133
+ reg: true
+ backlight: true
+ reset-gpios: true
+ iovcc-supply:
+ description: regulator that supplies the iovcc voltage
+ vdd-supply:
+ description: regulator that supplies the vdd voltage
+
+required:
+ - compatible
+ - reg
+ - backlight
+ - iovcc-supply
+ - vdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "elida,kd35t133";
+ reg = <0>;
+ backlight = <&backlight>;
+ iovcc-supply = <&vcc_1v8>;
+ vdd-supply = <&vcc3v3_lcd>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt b/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
deleted file mode 100644
index 82d22e191ac3..000000000000
--- a/Documentation/devicetree/bindings/display/panel/evervision,vgg804821.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
-
-Required properties:
-- compatible: should be "evervision,vgg804821"
-- power-supply: See simple-panel.txt
-
-Optional properties:
-- backlight: See simple-panel.txt
-- enable-gpios: See simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
new file mode 100644
index 000000000000..927f1eea18d2
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
@@ -0,0 +1,55 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/feixin,k101-im2ba02.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Feixin K101 IM2BA02 10.1" MIPI-DSI LCD panel
+
+maintainers:
+ - Icenowy Zheng <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: feixin,k101-im2ba02
+ reg: true
+ backlight: true
+ reset-gpios: true
+ avdd-supply:
+ description: regulator that supplies the AVDD voltage
+ dvdd-supply:
+ description: regulator that supplies the DVDD voltage
+ cvdd-supply:
+ description: regulator that supplies the CVDD voltage
+
+required:
+ - compatible
+ - reg
+ - avdd-supply
+ - dvdd-supply
+ - cvdd-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "feixin,k101-im2ba02";
+ reg = <0>;
+ avdd-supply = <&reg_dc1sw>;
+ dvdd-supply = <&reg_dc1sw>;
+ cvdd-supply = <&reg_ldo_io1>;
+ reset-gpios = <&pio 3 24 GPIO_ACTIVE_HIGH>;
+ backlight = <&backlight>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt b/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt
deleted file mode 100644
index b47f9d87bc19..000000000000
--- a/Documentation/devicetree/bindings/display/panel/foxlink,fl500wvr00-a0t.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Foxlink Group 5" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "foxlink,fl500wvr00-a0t"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt b/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
deleted file mode 100644
index 6c9156fc3478..000000000000
--- a/Documentation/devicetree/bindings/display/panel/friendlyarm,hd702e.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-FriendlyELEC HD702E 800x1280 LCD panel
-
-The HD702E is a FriendlyELEC-developed eDP LCD panel with 800x1280
-resolution. It has a built-in Goodix GT9271 capacitive touchscreen
-with a backlight adjustable via PWM.
-
-Required properties:
-- compatible: should be "friendlyarm,hd702e"
-- power-supply: regulator to provide the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LCD panel input.
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Example:
-
- panel {
-		compatible = "friendlyarm,hd702e", "simple-panel";
- backlight = <&backlight>;
- power-supply = <&vcc3v3_sys>;
-
- port {
- panel_in_edp: endpoint {
- remote-endpoint = <&edp_out_panel>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt b/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt
deleted file mode 100644
index 24b0b624434b..000000000000
--- a/Documentation/devicetree/bindings/display/panel/giantplus,gpg482739qs5.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "giantplus,gpg48273qs5"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt b/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt
deleted file mode 100644
index 7da1d5c038ff..000000000000
--- a/Documentation/devicetree/bindings/display/panel/hannstar,hsd070pww1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "hannstar,hsd070pww1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt b/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt
deleted file mode 100644
index 8270319a99de..000000000000
--- a/Documentation/devicetree/bindings/display/panel/hannstar,hsd100pxn1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
-
-Required properties:
-- compatible: should be "hannstar,hsd100pxn1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt b/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt
deleted file mode 100644
index 04caaae19af6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/hit,tx23d38vm0caa.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
-
-Required properties:
-- compatible: should be "hit,tx23d38vm0caa"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt b/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt
deleted file mode 100644
index 4104226b61bc..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,at043tn24.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux AT043TN24 4.3" WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,at043tn24"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt b/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
deleted file mode 100644
index 3e10cd782491..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,at070tn92.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux AT070TN92 7.0" WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,at070tn92"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
deleted file mode 100644
index 7c234cf68e11..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g070y2-l01.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g070y2-l01"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
deleted file mode 100644
index 9e7590465227..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g101ice-l01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
-
-Required properties:
-- compatible: should be "innolux,g101ice-l01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt
deleted file mode 100644
index 2743b07cd2f2..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g121i1-l01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g121i1-l01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt b/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
deleted file mode 100644
index 649744620ae1..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,g121x1-l03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,g121x1-l03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt b/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt
deleted file mode 100644
index 081bb939ed31..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,n116bge.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,n116bge"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt b/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt
deleted file mode 100644
index 7825844aafdf..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,n156bge-l21.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-InnoLux 15.6" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,n156bge-l21"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt b/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt
deleted file mode 100644
index 824f87f1526d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/innolux,zj070na-01p.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
-
-Required properties:
-- compatible: should be "innolux,zj070na-01p"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
deleted file mode 100644
index be7ac666807b..000000000000
--- a/Documentation/devicetree/bindings/display/panel/koe,tx14d24vm1bpa.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
-
-Required properties:
-- compatible: should be "koe,tx14d24vm1bpa"
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: single regulator to provide the supply voltage
-
-Required nodes:
-- port: Parallel port mapping to connect this display
-
-This panel needs a single power-supply voltage. Its backlight is controlled
-via a PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to an i.MX53-based board:
-
- lcd_panel: lcd-panel {
- compatible = "koe,tx14d24vm1bpa";
- backlight = <&backlight_lcd>;
- power-supply = <&reg_3v3>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
-
-Then one needs to extend the dispX node:
-
- lcd_display: disp1 {
-
- port@1 {
- reg = <1>;
-
- lcd_display_out: endpoint {
- remote-endpoint = <&lcd_panel_in>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt b/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
deleted file mode 100644
index 6a036ede3e28..000000000000
--- a/Documentation/devicetree/bindings/display/panel/koe,tx31d200vm0baa.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Kaohsiung Opto-Electronics Inc. TX31D200VM0BAA 12.3" HSXGA LVDS panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "koe,tx31d200vm0baa"
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LVDS panel input.
-
-Example:
- panel {
- compatible = "koe,tx31d200vm0baa";
- backlight = <&backlight_lvds>;
-
- port {
- panel_in: endpoint {
- remote-endpoint = <&lvds0_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt b/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
deleted file mode 100644
index a8e940fe731e..000000000000
--- a/Documentation/devicetree/bindings/display/panel/kyo,tcg121xglp.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "kyo,tcg121xglp"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
index 4ebcea7d0c63..fd931b293816 100644
--- a/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
+++ b/Documentation/devicetree/bindings/display/panel/leadtek,ltk500hd1829.yaml
@@ -34,9 +34,11 @@ additionalProperties: false
examples:
- |
- dsi@ff450000 {
+ dsi {
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0xff450000 0x1000>;
+
panel@0 {
compatible = "leadtek,ltk500hd1829";
reg = <0>;
diff --git a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt b/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
deleted file mode 100644
index 74ee7ea6b493..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lemaker,bl035-rgb-002.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "lemaker,bl035-rgb-002"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt b/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt
deleted file mode 100644
index a7588e5259cf..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lb070wv8.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 7" (800x480 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lb070wv8"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt b/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
deleted file mode 100644
index b9877acad012..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp079qx1-sp0v.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp079qx1-sp0v"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
deleted file mode 100644
index 42141516f078..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp097qx1-spa1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 9.7" (2048x1536 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp097qx1-spa1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt b/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
deleted file mode 100644
index 8c5de692c55c..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp120up1.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 12.0" (1920x1280 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp120up1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt b/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt
deleted file mode 100644
index 9f262e0c5a2e..000000000000
--- a/Documentation/devicetree/bindings/display/panel/lg,lp129qe.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-LG 12.9" (2560x1700 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "lg,lp129qe"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
deleted file mode 100644
index 7d8f6eeef6d9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa070mc01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Mitsubishi "AA070MC01 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "mitsubishi,aa070mc01-ca1"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt b/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt
deleted file mode 100644
index 71cbc49ecfab..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nec,nl12880b20-05.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
-
-Required properties:
-- compatible: should be "nec,nl12880bc20-05"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt b/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
deleted file mode 100644
index 8e1914d1edb8..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-NEC LCD Technologies, Ltd. WQVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "nec,nl4827hc19-05b"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt b/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
deleted file mode 100644
index c6d06b5eab51..000000000000
--- a/Documentation/devicetree/bindings/display/panel/netron-dy,e231732.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Netron-DY E231732 7.0" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "netron-dy,e231732"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt b/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
deleted file mode 100644
index e78292b1a131..000000000000
--- a/Documentation/devicetree/bindings/display/panel/newhaven,nhd-4.3-480272ef-atxl.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Newhaven Display International 480 x 272 TFT LCD panel
-
-Required properties:
-- compatible: should be "newhaven,nhd-4.3-480272ef-atxl"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt b/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt
deleted file mode 100644
index 1a639fd8778d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nlt,nl192108ac18-02d.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
-
-Required properties:
-- compatible: should be "nlt,nl192108ac18-02d"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
new file mode 100644
index 000000000000..73d2ff3baaff
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/novatek,nt35510.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Novatek NT35510-based display panels
+
+maintainers:
+ - Linus Walleij <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ items:
+ - const: hydis,hva40wv1
+ - const: novatek,nt35510
+    description: The first compatible indicates the specific panel, which
+      in turn uses the NT35510 panel driver. The compatible string
+      determines how the NT35510 panel driver shall be configured to
+      work with the indicated panel. The novatek,nt35510 compatible
+      shall always be provided as a fallback.
+ reg: true
+ reset-gpios: true
+ vdd-supply:
+ description: regulator that supplies the vdd voltage
+ vddi-supply:
+ description: regulator that supplies the vddi voltage
+ backlight: true
+
+required:
+ - compatible
+ - reg
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "hydis,hva40wv1", "novatek,nt35510";
+ reg = <0>;
+ vdd-supply = <&ab8500_ldo_aux4_reg>;
+ vddi-supply = <&ab8500_ldo_aux6_reg>;
+ reset-gpios = <&gpio4 11 GPIO_ACTIVE_LOW>;
+ backlight = <&gpio_bl>;
+ };
+ };
+
+...
diff --git a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt b/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
deleted file mode 100644
index 17bcd017c678..000000000000
--- a/Documentation/devicetree/bindings/display/panel/nvd,9128.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
-
-Required properties:
-- compatible: should be "nvd,9128"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt b/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt
deleted file mode 100644
index ddf8e211d382..000000000000
--- a/Documentation/devicetree/bindings/display/panel/okaya,rs800480t-7x0gp.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
-
-Required properties:
-- compatible: should be "okaya,rs800480t-7x0gp"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt b/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
deleted file mode 100644
index 74540a090669..000000000000
--- a/Documentation/devicetree/bindings/display/panel/olimex,lcd-olinuxino-43-ts.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Olimex 4.3" TFT LCD panel
-
-Required properties:
-- compatible: should be "olimex,lcd-olinuxino-43-ts"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt b/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
deleted file mode 100644
index 3d8a5e029242..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ontat,yx700wv03.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-On Tat Industrial Company 7" DPI TFT panel.
-
-Required properties:
-- compatible: should be "ontat,yx700wv03"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
deleted file mode 100644
index 203b03eefb68..000000000000
--- a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
-
-The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
-a MIPI-DSI video interface. Its backlight is managed through the DSI link.
-
-Required properties:
- - compatible: "orisetech,otm8009a"
- - reg: the virtual channel number of a DSI peripheral
-
-Optional properties:
- - reset-gpios: a GPIO spec for the reset pin (active low).
- - power-supply: phandle of the regulator that provides the supply voltage.
-
-Example:
-&dsi {
- ...
- panel@0 {
- compatible = "orisetech,otm8009a";
- reg = <0>;
- reset-gpios = <&gpioh 7 GPIO_ACTIVE_LOW>;
- power-supply = <&v1v8>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
new file mode 100644
index 000000000000..4b6dda6dbc0f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/orisetech,otm8009a.yaml
@@ -0,0 +1,53 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/orisetech,otm8009a.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Orise Tech OTM8009A 3.97" 480x800 TFT LCD panel (MIPI-DSI video mode)
+
+maintainers:
+ - Philippe CORNU <[email protected]>
+
+description: |
+ The Orise Tech OTM8009A is a 3.97" 480x800 TFT LCD panel connected using
+ a MIPI-DSI video interface. Its backlight is managed through the DSI link.
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+
+ compatible:
+ const: orisetech,otm8009a
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ enable-gpios: true
+ port: true
+ power-supply: true
+
+ reset-gpios:
+ maxItems: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - reg
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "orisetech,otm8009a";
+ reg = <0>;
+ reset-gpios = <&gpiof 15 0>;
+ power-supply = <&v1v8>;
+ };
+ };
+...
+
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt
deleted file mode 100644
index c16907c02f80..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m05dtc.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com37h3m05dtc"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt
deleted file mode 100644
index 06a73c3f46b5..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com37h3m99dtc.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OrtusTech COM37H3M99DTC Blanview 3.7" VGA portrait TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com37h3m99dtc"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt b/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt
deleted file mode 100644
index de19e9398618..000000000000
--- a/Documentation/devicetree/bindings/display/panel/ortustech,com43h4m85ulc.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
-
-Required properties:
-- compatible: should be "ortustech,com43h4m85ulc"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
deleted file mode 100644
index e57883ccdf2f..000000000000
--- a/Documentation/devicetree/bindings/display/panel/osddisplays,osd070t1718-19ts.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
-
-Required properties:
-- compatible: shall be "osddisplays,osd070t1718-19ts"
-- power-supply: see simple-panel.txt
-
-Optional properties:
-- backlight: see simple-panel.txt
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory. No other simple-panel properties than
-the ones specified herein are valid.
diff --git a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt b/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
deleted file mode 100644
index 85c0b2cacfda..000000000000
--- a/Documentation/devicetree/bindings/display/panel/osddisplays,osd101t2045-53ts.txt
+++ /dev/null
@@ -1,11 +0,0 @@
-One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
-
-Required properties:
-- compatible: should be "osddisplays,osd101t2045-53ts"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt
deleted file mode 100644
index d328b0341bf4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f004b00.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Panasonic Corporation 10.1" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "panasonic,vvx10f004b00"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt b/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
deleted file mode 100644
index 37dedf6a6702..000000000000
--- a/Documentation/devicetree/bindings/display/panel/panasonic,vvx10f034n00.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Panasonic 10" WUXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "panasonic,vvx10f034n00"
-- reg: DSI virtual channel of the peripheral
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
- mdss_dsi@fd922800 {
- panel@0 {
- compatible = "panasonic,vvx10f034n00";
- reg = <0>;
- power-supply = <&vreg_vsp>;
- backlight = <&lp8566_wled>;
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/panel-common.yaml b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
index ef8d8cdfcede..ed051ba12084 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-common.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-common.yaml
@@ -54,13 +54,20 @@ properties:
# Display Timings
panel-timing:
- type: object
description:
Most display panels are restricted to a single resolution and
require specific display timings. The panel-timing subnode expresses those
- timings as specified in the timing subnode section of the display timing
- bindings defined in
- Documentation/devicetree/bindings/display/panel/display-timing.txt.
+ timings.
+ allOf:
+ - $ref: panel-timing.yaml#
+
+ display-timings:
+ description:
+      Some display panels support several resolutions with different timings.
+      The display-timings binding supports specifying several timings and
+      optionally specifying which one is the native mode.
+ allOf:
+ - $ref: display-timings.yaml#
# Connectivity
port:
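
For illustration, a minimal sketch of a panel node using the new
display-timings property to describe two supported modes and mark the
native one (the compatible string, regulator phandle and all timing
values below are hypothetical placeholders):

	panel {
		compatible = "vendor,dual-mode-panel";	/* hypothetical */
		power-supply = <&panel_reg>;		/* hypothetical */

		display-timings {
			native-mode = <&timing0>;

			/* native 800x480 mode */
			timing0: timing-800x480 {
				clock-frequency = <33300000>;
				hactive = <800>;
				vactive = <480>;
				hfront-porch = <40>;
				hback-porch = <88>;
				hsync-len = <128>;
				vfront-porch = <13>;
				vback-porch = <32>;
				vsync-len = <3>;
			};

			/* alternate 640x480 mode */
			timing1: timing-640x480 {
				clock-frequency = <25200000>;
				hactive = <640>;
				vactive = <480>;
				hfront-porch = <16>;
				hback-porch = <48>;
				hsync-len = <96>;
				vfront-porch = <10>;
				vback-porch = <33>;
				vsync-len = <2>;
			};
		};
	};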
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt b/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
deleted file mode 100644
index 6b203bc4d932..000000000000
--- a/Documentation/devicetree/bindings/display/panel/panel-dpi.txt
+++ /dev/null
@@ -1,50 +0,0 @@
-Generic MIPI DPI Panel
-======================
-
-Required properties:
-- compatible: "panel-dpi"
-
-Optional properties:
-- label: a symbolic name for the panel
-- enable-gpios: panel enable gpio
-- reset-gpios: GPIO to control the RESET pin
-- vcc-supply: phandle of regulator that will be used to enable power to the display
-- backlight: phandle of the backlight device
-
-Required nodes:
-- "panel-timing" containing video timings
- (Documentation/devicetree/bindings/display/panel/display-timing.txt)
-- Video port for DPI input
-
-Example
--------
-
-lcd0: display@0 {
- compatible = "samsung,lte430wq-f0c", "panel-dpi";
- label = "lcd";
-
- backlight = <&backlight>;
-
- port {
- lcd_in: endpoint {
- remote-endpoint = <&dpi_out>;
- };
- };
-
- panel-timing {
- clock-frequency = <9200000>;
- hactive = <480>;
- vactive = <272>;
- hfront-porch = <8>;
- hback-porch = <4>;
- hsync-len = <41>;
- vback-porch = <2>;
- vfront-porch = <4>;
- vsync-len = <10>;
-
- hsync-active = <0>;
- vsync-active = <0>;
- de-active = <1>;
- pixelclk-active = <1>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml b/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml
new file mode 100644
index 000000000000..f63870384c00
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-dpi.yaml
@@ -0,0 +1,81 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-dpi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Generic MIPI DPI Panel
+
+maintainers:
+ - Sam Ravnborg <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ description:
+      Shall contain a panel-specific compatible and "panel-dpi"
+ in that order.
+ items:
+ - {}
+ - const: panel-dpi
+
+ data-mapping:
+ enum:
+ - rgb24
+ - rgb565
+ - bgr666
+ description: |
+      Describes the media format, i.e. how the display panel is
+      connected to the display interface.
+
+ backlight: true
+ enable-gpios: true
+ height-mm: true
+ label: true
+ panel-timing: true
+ port: true
+ power-supply: true
+ reset-gpios: true
+ width-mm: true
+
+required:
+ - panel-timing
+ - power-supply
+
+additionalProperties: false
+
+examples:
+ - |
+ panel {
+ compatible = "osddisplays,osd057T0559-34ts", "panel-dpi";
+ label = "osddisplay";
+ power-supply = <&vcc_supply>;
+ data-mapping = "rgb565";
+ backlight = <&backlight>;
+
+ port {
+ lcd_in: endpoint {
+ remote-endpoint = <&dpi_out>;
+ };
+ };
+ panel-timing {
+ clock-frequency = <9200000>;
+ hactive = <800>;
+ vactive = <480>;
+ hfront-porch = <8>;
+ hback-porch = <4>;
+ hsync-len = <41>;
+ vback-porch = <2>;
+ vfront-porch = <4>;
+ vsync-len = <10>;
+
+ hsync-active = <0>;
+ vsync-active = <0>;
+ de-active = <1>;
+ pixelclk-active = <1>;
+ };
+ };
+
+...
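
As a usage sketch of the schema above, showing the required compatible
ordering (panel-specific compatible first, then "panel-dpi") and the new
data-mapping property; the vendor compatible, phandles and timing values
are hypothetical placeholders:

	panel {
		compatible = "vendor,custom-dpi-panel", "panel-dpi";
		power-supply = <&panel_reg>;
		backlight = <&backlight>;
		data-mapping = "bgr666";

		panel-timing {
			clock-frequency = <33300000>;
			hactive = <800>;
			vactive = <480>;
			hfront-porch = <40>;
			hback-porch = <88>;
			hsync-len = <128>;
			vfront-porch = <13>;
			vback-porch = <32>;
			vsync-len = <3>;
		};
	};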
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
new file mode 100644
index 000000000000..b2e8742fd6af
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple-dsi.yaml
@@ -0,0 +1,67 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-simple-dsi.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Simple DSI panels with a single power-supply
+
+maintainers:
+ - Thierry Reding <[email protected]>
+ - Sam Ravnborg <[email protected]>
+
+description: |
+ This binding file is a collection of the DSI panels that
+  require only a single power-supply.
+  Optionally, a backlight and an enable GPIO can be present.
+ The panel may use an OF graph binding for the association to the display,
+ or it may be a direct child node of the display.
+
+  If the panel is more advanced, a dedicated binding file is required.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+
+ compatible:
+ enum:
+      # Compatibles must be listed in alphabetical order, sorted by compatible string.
+ # The description in the comment is mandatory for each compatible.
+
+ # Panasonic 10" WUXGA TFT LCD panel
+ - panasonic,vvx10f034n00
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ backlight: true
+ enable-gpios: true
+ port: true
+ power-supply: true
+
+additionalProperties: false
+
+required:
+ - compatible
+ - power-supply
+ - reg
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "panasonic,vvx10f034n00";
+ reg = <0>;
+ power-supply = <&vcc_lcd_reg>;
+
+ port {
+ panel: endpoint {
+ remote-endpoint = <&ltdc_out>;
+ };
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
index 8fe60ee2531c..393ffc6acbba 100644
--- a/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
+++ b/Documentation/devicetree/bindings/display/panel/panel-simple.yaml
@@ -33,16 +33,225 @@ properties:
- ampire,am-480272h3tmqw-t01h
# Ampire AM-800480R3TMQW-A1H 7.0" WVGA TFT LCD panel
- ampire,am800480r3tmqwa1h
+ # AU Optronics Corporation 8.0" WUXGA TFT LCD panel
+ - auo,b080uan01
+ # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+ - auo,b101aw03
+ # AU Optronics Corporation 10.1" WSVGA TFT LCD panel
+ - auo,b101ean01
+ # AU Optronics Corporation 10.1" WXGA TFT LCD panel
+ - auo,b101xtn01
# AUO B116XAK01 eDP TFT LCD panel
- auo,b116xa01
+ # AU Optronics Corporation 11.6" HD (1366x768) color TFT-LCD panel
+ - auo,b116xw03
+ # AU Optronics Corporation 13.3" FHD (1920x1080) color TFT-LCD panel
+ - auo,b133htn01
+ # AU Optronics Corporation 13.3" WXGA (1366x768) TFT LCD panel
+ - auo,b133xtn01
+      # AU Optronics Corporation 7.0" WVGA (800x480) TFT LCD panel
+ - auo,g070vvn01
+ # AU Optronics Corporation 10.1" (1280x800) color TFT LCD panel
+ - auo,g101evn010
+ # AU Optronics Corporation 10.4" (800x600) color TFT LCD panel
+ - auo,g104sn02
+ # AU Optronics Corporation 13.3" FHD (1920x1080) TFT LCD panel
+ - auo,g133han01
+ # AU Optronics Corporation 18.5" FHD (1920x1080) TFT LCD panel
+ - auo,g185han01
+ # AU Optronics Corporation 31.5" FHD (1920x1080) TFT LCD panel
+ - auo,p320hvn03
+ # AU Optronics Corporation 21.5" FHD (1920x1080) color TFT LCD panel
+ - auo,t215hvn01
+ # Shanghai AVIC Optoelectronics 7" 1024x600 color TFT-LCD panel
+ - avic,tm070ddh03
+ # BOE HV070WSA-100 7.01" WSVGA TFT LCD panel
+ - boe,hv070wsa-100
+ # BOE OPTOELECTRONICS TECHNOLOGY 10.1" WXGA TFT LCD panel
+ - boe,nv101wxmn51
# BOE NV140FHM-N49 14.0" FHD a-Si FT panel
- boe,nv140fhmn49
+      # BOE Corporation 8.0" WUXGA TFT LCD panel
+ - boe,tv080wum-nl0
+ # CDTech(H.K.) Electronics Limited 4.3" 480x272 color TFT-LCD panel
+ - cdtech,s043wq26h-ct7
+ # CDTech(H.K.) Electronics Limited 7" 800x480 color TFT-LCD panel
+ - cdtech,s070wv95-ct16
+ # Chunghwa Picture Tubes Ltd. 7" WXGA TFT LCD panel
+ - chunghwa,claa070wp03xg
+ # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+ - chunghwa,claa101wa01a
+ # Chunghwa Picture Tubes Ltd. 10.1" WXGA TFT LCD panel
+ - chunghwa,claa101wb03
+ # DataImage, Inc. 7" WVGA (800x480) TFT LCD panel with 24-bit parallel interface.
+ - dataimage,scf0700c48ggu18
+ # DLC Display Co. DLC1010GIG 10.1" WXGA TFT LCD Panel
+ - dlc,dlc1010gig
+ # Emerging Display Technology Corp. 3.5" QVGA TFT LCD panel
+ - edt,et035012dm6
+ # Emerging Display Technology Corp. 480x272 TFT Display with capacitive touch
+ - edt,etm043080dh6gp
+ # Emerging Display Technology Corp. 480x272 TFT Display
+ - edt,etm0430g0dh6
+ # Emerging Display Technology Corp. 5.7" VGA TFT LCD panel
+ - edt,et057090dhu
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ - edt,etm070080dh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ - edt,etm0700g0dh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ # Same as ETM0700G0DH6 but with inverted pixel clock.
+ - edt,etm070080bdh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ # Same display as the ETM0700G0BDH6, but with changed hardware for the
+ # backlight and the touch interface.
+ - edt,etm070080edh6
+ # Emerging Display Technology Corp. WVGA TFT Display with capacitive touch
+ # Same timings as the ETM0700G0DH6, but with resistive touch.
+ - edt,etm070080dh6
+ # Evervision Electronics Co. Ltd. VGG804821 5.0" WVGA TFT LCD Panel
+ - evervision,vgg804821
+ # Foxlink Group 5" WVGA TFT LCD panel
+ - foxlink,fl500wvr00-a0t
+ # Frida FRD350H54004 3.5" QVGA TFT LCD panel
+ - frida,frd350h54004
+ # FriendlyELEC HD702E 800x1280 LCD panel
+ - friendlyarm,hd702e
+ # GiantPlus GPG48273QS5 4.3" (480x272) WQVGA TFT LCD panel
+ - giantplus,gpg48273qs5
# GiantPlus GPM940B0 3.0" QVGA TFT LCD panel
- giantplus,gpm940b0
+ # HannStar Display Corp. HSD070PWW1 7.0" WXGA TFT LCD panel
+ - hannstar,hsd070pww1
+ # HannStar Display Corp. HSD100PXN1 10.1" XGA LVDS panel
+ - hannstar,hsd100pxn1
+ # Hitachi Ltd. Corporation 9" WVGA (800x480) TFT LCD panel
+ - hit,tx23d38vm0caa
+ # Innolux AT043TN24 4.3" WQVGA TFT LCD panel
+ - innolux,at043tn24
+ # Innolux AT070TN92 7.0" WQVGA TFT LCD panel
+ - innolux,at070tn92
+ # Innolux G070Y2-L01 7" WVGA (800x480) TFT LCD panel
+ - innolux,g070y2-l01
+ # Innolux Corporation 10.1" G101ICE-L01 WXGA (1280x800) LVDS panel
+ - innolux,g101ice-l01
+ # Innolux Corporation 12.1" WXGA (1280x800) TFT LCD panel
+ - innolux,g121i1-l01
+ # Innolux Corporation 12.1" G121X1-L03 XGA (1024x768) TFT LCD panel
+ - innolux,g121x1-l03
+ # Innolux Corporation 11.6" WXGA (1366x768) TFT LCD panel
+ - innolux,n116bge
+ # InnoLux 15.6" WXGA TFT LCD panel
+ - innolux,n156bge-l21
+ # Innolux Corporation 7.0" WSVGA (1024x600) TFT LCD panel
+ - innolux,zj070na-01p
+ # Kaohsiung Opto-Electronics Inc. 5.7" QVGA (320 x 240) TFT LCD panel
+ - koe,tx14d24vm1bpa
+      # Kaohsiung Opto-Electronics Inc. TX31D200VM0BAA 12.3" HSXGA LVDS panel
+ - koe,tx31d200vm0baa
+ # Kyocera Corporation 12.1" XGA (1024x768) TFT LCD panel
+ - kyo,tcg121xglp
+ # LeMaker BL035-RGB-002 3.5" QVGA TFT LCD panel
+ - lemaker,bl035-rgb-002
+ # LG 7" (800x480 pixels) TFT LCD panel
+ - lg,lb070wv8
+ # LG LP079QX1-SP0V 7.9" (1536x2048 pixels) TFT LCD panel
+ - lg,lp079qx1-sp0v
+ # LG 9.7" (2048x1536 pixels) TFT LCD panel
+ - lg,lp097qx1-spa1
+ # LG 12.0" (1920x1280 pixels) TFT LCD panel
+ - lg,lp120up1
+ # LG 12.9" (2560x1700 pixels) TFT LCD panel
+ - lg,lp129qe
+ # Logic Technologies LT161010-2NHC 7" WVGA TFT Cap Touch Module
+ - logictechno,lt161010-2nhc
+ # Logic Technologies LT161010-2NHR 7" WVGA TFT Resistive Touch Module
+ - logictechno,lt161010-2nhr
+ # Logic Technologies LT170410-2WHC 10.1" 1280x800 IPS TFT Cap Touch Mod.
+ - logictechno,lt170410-2whc
+      # Mitsubishi AA070MC01 7.0" WVGA TFT LCD panel
+ - mitsubishi,aa070mc01-ca1
+ # NEC LCD Technologies, Ltd. 12.1" WXGA (1280x800) LVDS TFT LCD panel
+ - nec,nl12880bc20-05
+      # NEC LCD Technologies, Ltd. WQVGA TFT LCD panel
+ - nec,nl4827hc19-05b
+ # Netron-DY E231732 7.0" WSVGA TFT LCD panel
+ - netron-dy,e231732
+ # NewEast Optoelectronics CO., LTD WJFH116008A eDP TFT LCD panel
+ - neweast,wjfh116008a
+ # Newhaven Display International 480 x 272 TFT LCD panel
+ - newhaven,nhd-4.3-480272ef-atxl
+ # NLT Technologies, Ltd. 15.6" FHD (1920x1080) LVDS TFT LCD panel
+ - nlt,nl192108ac18-02d
+ # New Vision Display 7.0" 800 RGB x 480 TFT LCD panel
+ - nvd,9128
+ # OKAYA Electric America, Inc. RS800480T-7X0GP 7" WVGA LCD panel
+ - okaya,rs800480t-7x0gp
+ # Olimex 4.3" TFT LCD panel
+ - olimex,lcd-olinuxino-43-ts
+ # On Tat Industrial Company 7" DPI TFT panel.
+ - ontat,yx700wv03
+ # OrtusTech COM37H3M05DTC Blanview 3.7" VGA portrait TFT-LCD panel
+ - ortustech,com37h3m05dtc
+ # OrtusTech COM37H3M99DTC Blanview 3.7" VGA portrait TFT-LCD panel
+ - ortustech,com37h3m99dtc
+ # OrtusTech COM43H4M85ULC Blanview 3.7" TFT-LCD panel
+ - ortustech,com43h4m85ulc
+ # OSD Displays OSD070T1718-19TS 7" WVGA TFT LCD panel
+ - osddisplays,osd070t1718-19ts
+ # One Stop Displays OSD101T2045-53TS 10.1" 1920x1200 panel
+ - osddisplays,osd101t2045-53ts
+      # QiaoDian XianShi Corporation 4.3" TFT LCD panel
+ - qiaodian,qd43003c0-40
+ # Rocktech Displays Ltd. RK101II01D-CT 10.1" TFT 1280x800
+ - rocktech,rk101ii01d-ct
+ # Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
+ - rocktech,rk070er9427
+ # Samsung 12.2" (2560x1600 pixels) TFT LCD panel
+ - samsung,lsn122dl01-c01
+ # Samsung Electronics 10.1" WSVGA TFT LCD panel
+ - samsung,ltn101nt05
+ # Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
+ - samsung,ltn140at29-301
# Satoz SAT050AT40H12R2 5.0" WVGA TFT LCD panel
- satoz,sat050at40h12r2
+ # Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
+ - sharp,lq035q7db03
+ # Sharp LQ070Y3DG3B 7.0" WVGA landscape TFT LCD panel
+ - sharp,lq070y3dg3b
+ # Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
+ - sharp,lq101k1ly04
+ # Sharp 12.3" (2400x1600 pixels) TFT LCD panel
+ - sharp,lq123p1jx31
# Sharp LS020B1DD01D 2.0" HQVGA TFT LCD panel
- sharp,ls020b1dd01d
+ # Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
+ - shelly,sca07010-bfn-lnn
+ # Starry 12.2" (1920x1200 pixels) TFT LCD panel
+ - starry,kr122ea0sra
+ # Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
+ - tianma,tm070jdhg30
+ # Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
+ - tianma,tm070rvhg71
+ # Toshiba 8.9" WXGA (1280x768) TFT LCD panel
+ - toshiba,lt089ac29000
+ # TPK U.S.A. LLC Fusion 7" 800 x 480 (WVGA) LCD panel with capacitive touch
+ - tpk,f07a-0102
+ # TPK U.S.A. LLC Fusion 10.1" 1024 x 600 (WSVGA) LCD panel with capacitive touch
+ - tpk,f10a-0102
+ # United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
+ # Supported are LVDS versions (-11T, -19T) and parallel ones
+ # (-T, -1T, -7T, -20T).
+ - urt,umsh-8596md-t
+ - urt,umsh-8596md-1t
+ - urt,umsh-8596md-7t
+ - urt,umsh-8596md-11t
+ - urt,umsh-8596md-19t
+ - urt,umsh-8596md-20t
+ # VXT 800x480 color TFT LCD panel
+ - vxt,vl050-8048nt-c01
+ # Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
+ - winstar,wf35ltiacd
backlight: true
enable-gpios: true
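
A hedged sketch of how one of the compatibles migrated into this list is
instantiated; simple-panel nodes all follow the same pattern, and the
regulator, backlight and GPIO phandles below are hypothetical
placeholders (GPIO_ACTIVE_HIGH comes from dt-bindings/gpio/gpio.h):

	panel {
		compatible = "edt,etm0430g0dh6";
		power-supply = <&vcc_3v3>;
		backlight = <&backlight>;
		enable-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;

		port {
			panel_in: endpoint {
				remote-endpoint = <&lcdc_out>;
			};
		};
	};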
diff --git a/Documentation/devicetree/bindings/display/panel/panel-timing.yaml b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
new file mode 100644
index 000000000000..bd558ad7891f
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/panel-timing.yaml
@@ -0,0 +1,227 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/panel-timing.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: panel timing bindings
+
+maintainers:
+ - Thierry Reding <[email protected]>
+ - Sam Ravnborg <[email protected]>
+
+description: |
+ There are different ways of describing the timing data of a panel. The
+ devicetree representation corresponds to the one commonly found in datasheets
+ for panels.
+
+ The parameters are defined as seen in the following illustration.
+
+ +----------+-------------------------------------+----------+-------+
+ | | ^ | | |
+ | | |vback_porch | | |
+ | | v | | |
+ +----------#######################################----------+-------+
+ | # ^ # | |
+ | # | # | |
+ | hback # | # hfront | hsync |
+ | porch # | hactive # porch | len |
+ |<-------->#<-------+--------------------------->#<-------->|<----->|
+ | # | # | |
+ | # |vactive # | |
+ | # | # | |
+ | # v # | |
+ +----------#######################################----------+-------+
+ | | ^ | | |
+ | | |vfront_porch | | |
+ | | v | | |
+ +----------+-------------------------------------+----------+-------+
+ | | ^ | | |
+ | | |vsync_len | | |
+ | | v | | |
+ +----------+-------------------------------------+----------+-------+
+
+
+  The following shows the panel timings with time on the x-axis.
+ This matches the timing diagrams often found in data sheets.
+
+ Active Front Sync Back
+ Region Porch Porch
+ <-----------------------><----------------><-------------><-------------->
+ //////////////////////|
+ ////////////////////// |
+ ////////////////////// |.................. ................
+ _______________
+
+ Timing can be specified either as a typical value or as a tuple
+ of min, typ, max values.
+
+properties:
+
+ clock-frequency:
+ description: Panel clock in Hz
+
+ hactive:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Horizontal panel resolution in pixels
+
+ vactive:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description: Vertical panel resolution in pixels
+
+ hfront-porch:
+ description: Horizontal front porch panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of pixels
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
+
+ hback-porch:
+ description: Horizontal back porch timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of pixels
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
+
+ hsync-len:
+ description: Horizontal sync length panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of pixels
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of pixels
+
+ vfront-porch:
+ description: Vertical front porch panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of lines
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
+
+ vback-porch:
+ description: Vertical back porch panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of lines
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
+
+ vsync-len:
+ description: Vertical sync length panel timing
+ oneOf:
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32
+ - maxItems: 1
+ items:
+ description: typical number of lines
+ - allOf:
+ - $ref: /schemas/types.yaml#/definitions/uint32-array
+ - minItems: 3
+ maxItems: 3
+ items:
+ description: min, typ, max number of lines
+
+ hsync-active:
+ description: |
+ Horizontal sync pulse.
+ 0 selects active low, 1 selects active high.
+ If omitted then it is not used by the hardware
+ enum: [0, 1]
+
+ vsync-active:
+ description: |
+ Vertical sync pulse.
+ 0 selects active low, 1 selects active high.
+ If omitted then it is not used by the hardware
+ enum: [0, 1]
+
+ de-active:
+ description: |
+ Data enable.
+ 0 selects active low, 1 selects active high.
+ If omitted then it is not used by the hardware
+ enum: [0, 1]
+
+ pixelclk-active:
+ description: |
+ Data driving on rising or falling edge.
+ Use 0 to drive pixel data on falling edge and
+ sample data on rising edge.
+ Use 1 to drive pixel data on rising edge and
+ sample data on falling edge
+ enum: [0, 1]
+
+ syncclk-active:
+ description: |
+ Drive sync on rising or sample sync on falling edge.
+ If not specified then the setup is as specified by pixelclk-active.
+ Use 0 to drive sync on falling edge and
+ sample sync on rising edge of pixel clock.
+ Use 1 to drive sync on rising edge and
+ sample sync on falling edge of pixel clock
+ enum: [0, 1]
+
+ interlaced:
+ type: boolean
+ description: Enable interlaced mode
+
+ doublescan:
+ type: boolean
+ description: Enable double scan mode
+
+ doubleclk:
+ type: boolean
+ description: Enable double clock mode
+
+required:
+ - clock-frequency
+ - hactive
+ - vactive
+ - hfront-porch
+ - hback-porch
+ - hsync-len
+ - vfront-porch
+ - vback-porch
+ - vsync-len
+
+additionalProperties: false
+
+...
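
Since this schema ships without an examples section, a minimal sketch of
a panel-timing node showing both accepted forms, a bare typical value
and a <min typ max> triplet (all values are hypothetical placeholders):

	panel-timing {
		clock-frequency = <9200000>;
		hactive = <800>;
		vactive = <480>;
		hfront-porch = <2 8 10>;	/* min typ max, in pixels */
		hback-porch = <2 4 10>;		/* min typ max, in pixels */
		hsync-len = <41>;		/* typical value only */
		vfront-porch = <2 4 10>;	/* min typ max, in lines */
		vback-porch = <2 2 10>;		/* min typ max, in lines */
		vsync-len = <10>;		/* typical value only */

		hsync-active = <0>;		/* hsync active low */
		vsync-active = <0>;		/* vsync active low */
		de-active = <1>;		/* data enable active high */
		pixelclk-active = <1>;		/* drive data on rising edge */
	};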
diff --git a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt b/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
deleted file mode 100644
index 0fbdab89ac3d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/qiaodian,qd43003c0-40.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-QiaoDian XianShi Corporation 4.3" TFT LCD panel
-
-Required properties:
-- compatible: should be "qiaodian,qd43003c0-40"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
deleted file mode 100644
index cbb79ef3bfc9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
-
-The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
-panel connected using a MIPI-DSI video interface.
-
-Required properties:
- - compatible: "raydium,rm68200"
- - reg: the virtual channel number of a DSI peripheral
-
-Optional properties:
- - reset-gpios: a GPIO spec for the reset pin (active low).
- - power-supply: phandle of the regulator that provides the supply voltage.
- - backlight: phandle of the backlight device attached to the panel.
-
-Example:
-&dsi {
- ...
- panel@0 {
- compatible = "raydium,rm68200";
- reg = <0>;
- reset-gpios = <&gpiof 15 GPIO_ACTIVE_LOW>;
- power-supply = <&v1v8>;
- backlight = <&pwm_backlight>;
- };
-};
diff --git a/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
new file mode 100644
index 000000000000..a35ba16fc000
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/raydium,rm68200.yaml
@@ -0,0 +1,56 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/raydium,rm68200.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Raydium Semiconductor Corporation RM68200 5.5" 720p MIPI-DSI TFT LCD panel
+
+maintainers:
+ - Philippe CORNU <[email protected]>
+
+description: |
+ The Raydium Semiconductor Corporation RM68200 is a 5.5" 720x1280 TFT LCD
+ panel connected using a MIPI-DSI video interface.
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+
+ compatible:
+ const: raydium,rm68200
+
+ reg:
+ maxItems: 1
+ description: DSI virtual channel
+
+ backlight: true
+ enable-gpios: true
+ port: true
+ power-supply: true
+
+ reset-gpios:
+ maxItems: 1
+
+additionalProperties: false
+
+required:
+ - compatible
+ - power-supply
+ - reg
+
+examples:
+ - |
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ compatible = "raydium,rm68200";
+ reg = <0>;
+ reset-gpios = <&gpiof 15 0>;
+ power-supply = <&v1v8>;
+ backlight = <&pwm_backlight>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt b/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
deleted file mode 100644
index eb1fb9f8d1f4..000000000000
--- a/Documentation/devicetree/bindings/display/panel/rocktech,rk070er9427.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Rocktech Display Ltd. RK070ER9427 800(RGB)x480 TFT LCD panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Required properties:
-- compatible: should be "rocktech,rk070er9427"
-
-Optional properties:
-- backlight: phandle of the backlight device attached to the panel
-
-Optional nodes:
-- Video port for LCD panel input.
-
-Example:
- panel {
- compatible = "rocktech,rk070er9427";
- backlight = <&backlight_lcd>;
-
- port {
- lcd_panel_in: endpoint {
- remote-endpoint = <&lcd_display_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt b/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
deleted file mode 100644
index dba298b43b24..000000000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,lsn122dl01-c01.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung 12.2" (2560x1600 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,lsn122dl01-c01"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt b/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt
deleted file mode 100644
index ef522c6bb85f..000000000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,ltn101nt05.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung Electronics 10.1" WSVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,ltn101nt05"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt b/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt
deleted file mode 100644
index e7f969d891cc..000000000000
--- a/Documentation/devicetree/bindings/display/panel/samsung,ltn140at29-301.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Samsung Electronics 14" WXGA (1366x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "samsung,ltn140at29-301"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
new file mode 100644
index 000000000000..7a685d0428b3
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/panel/samsung,s6e88a0-ams452ef01.yaml
@@ -0,0 +1,50 @@
+# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/panel/samsung,s6e88a0-ams452ef01.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Samsung AMS452EF01 AMOLED panel with S6E88A0 video mode DSI controller
+
+maintainers:
+ - Michael Srba <[email protected]>
+
+allOf:
+ - $ref: panel-common.yaml#
+
+properties:
+ compatible:
+ const: samsung,s6e88a0-ams452ef01
+ reg: true
+ reset-gpios: true
+ vdd3-supply:
+ description: core voltage supply
+ vci-supply:
+ description: voltage supply for analog circuits
+
+required:
+ - compatible
+ - reg
+ - vdd3-supply
+ - vci-supply
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ dsi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ panel@0 {
+ reg = <0>;
+
+ compatible = "samsung,s6e88a0-ams452ef01";
+
+ vdd3-supply = <&pm8916_l17>;
+ vci-supply = <&reg_vlcd_vci>;
+ reset-gpios = <&msmgpio 25 GPIO_ACTIVE_HIGH>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
deleted file mode 100644
index 0753f6967279..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq035q7db03.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp LQ035Q7DB03 3.5" QVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq035q7db03"
-- power-supply: phandle of the regulator that provides the supply voltage
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt
deleted file mode 100644
index 95534b55ee5f..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq070y3dg3b.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-Sharp LQ070Y3DG3B 7.0" WVGA landscape TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq070y3dg3b"
-
-Optional properties:
-- enable-gpios: GPIO pin to enable or disable the panel
-- backlight: phandle of the backlight device attached to the panel
-- power-supply: phandle of the regulator that provides the supply voltage
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
deleted file mode 100644
index 4aff25b8dfe6..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq101k1ly04.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Sharp Display Corp. LQ101K1LY04 10.07" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq101k1ly04"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt b/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
deleted file mode 100644
index bcb0e8a29f71..000000000000
--- a/Documentation/devicetree/bindings/display/panel/sharp,lq123p1jx31.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Sharp 12.3" (2400x1600 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "sharp,lq123p1jx31"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt b/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt
deleted file mode 100644
index fc1ea9e26c94..000000000000
--- a/Documentation/devicetree/bindings/display/panel/shelly,sca07010-bfn-lnn.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Shelly SCA07010-BFN-LNN 7.0" WVGA TFT LCD panel
-
-Required properties:
-- compatible: should be "shelly,sca07010-bfn-lnn"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt b/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
deleted file mode 100644
index 1e87fe6078af..000000000000
--- a/Documentation/devicetree/bindings/display/panel/starry,kr122ea0sra.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Starry 12.2" (1920x1200 pixels) TFT LCD panel
-
-Required properties:
-- compatible: should be "starry,kr122ea0sra"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
deleted file mode 100644
index eb9501a82e25..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tianma,tm070jdhg30.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-Tianma Micro-electronics TM070JDHG30 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "tianma,tm070jdhg30"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt b/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
deleted file mode 100644
index b25261e63a6d..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tianma,tm070rvhg71.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Tianma Micro-electronics TM070RVHG71 7.0" WXGA TFT LCD panel
-
-Required properties:
-- compatible: should be "tianma,tm070rvhg71"
-- power-supply: single regulator to provide the supply voltage
-- backlight: phandle of the backlight device attached to the panel
-
-Required nodes:
-- port: LVDS port mapping to connect this display
-
-This panel needs single power supply voltage. Its backlight is conntrolled
-via PWM signal.
-
-Example:
---------
-
-Example device-tree definition when connected to iMX6Q based board
-
- panel: panel-lvds0 {
- compatible = "tianma,tm070rvhg71";
- backlight = <&backlight_lvds>;
- power-supply = <&reg_lvds>;
-
- port {
- panel_in_lvds0: endpoint {
- remote-endpoint = <&lvds0_out>;
- };
- };
- };
diff --git a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt b/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
deleted file mode 100644
index 89826116628c..000000000000
--- a/Documentation/devicetree/bindings/display/panel/toshiba,lt089ac29000.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-Toshiba 8.9" WXGA (1280x768) TFT LCD panel
-
-Required properties:
-- compatible: should be "toshiba,lt089ac29000"
-- power-supply: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
deleted file mode 100644
index a2613b9675df..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tpk,f07a-0102.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TPK U.S.A. LLC Fusion 7" integrated projected capacitive touch display with,
-800 x 480 (WVGA) LCD panel.
-
-Required properties:
-- compatible: should be "tpk,f07a-0102"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt b/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
deleted file mode 100644
index b9d051196ba9..000000000000
--- a/Documentation/devicetree/bindings/display/panel/tpk,f10a-0102.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-TPK U.S.A. LLC Fusion 10.1" integrated projected capacitive touch display with,
-1024 x 600 (WSVGA) LCD panel.
-
-Required properties:
-- compatible: should be "tpk,f10a-0102"
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt b/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
deleted file mode 100644
index 088a6cea5015..000000000000
--- a/Documentation/devicetree/bindings/display/panel/urt,umsh-8596md.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-United Radiant Technology UMSH-8596MD-xT 7.0" WVGA TFT LCD panel
-
-Supported are LVDS versions (-11T, -19T) and parallel ones
-(-T, -1T, -7T, -20T).
-
-Required properties:
-- compatible: should be one of:
- "urt,umsh-8596md-t",
- "urt,umsh-8596md-1t",
- "urt,umsh-8596md-7t",
- "urt,umsh-8596md-11t",
- "urt,umsh-8596md-19t",
- "urt,umsh-8596md-20t".
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt b/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
deleted file mode 100644
index b42bf06bbd99..000000000000
--- a/Documentation/devicetree/bindings/display/panel/vl050_8048nt_c01.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-VXT 800x480 color TFT LCD panel
-
-Required properties:
-- compatible: should be "vxt,vl050-8048nt-c01"
-- power-supply: as specified in the base binding
-
-Optional properties:
-- backlight: as specified in the base binding
-- enable-gpios: as specified in the base binding
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
diff --git a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt b/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
deleted file mode 100644
index 2a7e6e3ba64c..000000000000
--- a/Documentation/devicetree/bindings/display/panel/winstar,wf35ltiacd.txt
+++ /dev/null
@@ -1,48 +0,0 @@
-Winstar Display Corporation 3.5" QVGA (320x240) TFT LCD panel
-
-Required properties:
-- compatible: should be "winstar,wf35ltiacd"
-- power-supply: regulator to provide the VCC supply voltage (3.3 volts)
-
-This binding is compatible with the simple-panel binding, which is specified
-in simple-panel.txt in this directory.
-
-Example:
- backlight: backlight {
- compatible = "pwm-backlight";
- pwms = <&hlcdc_pwm 0 50000 PWM_POLARITY_INVERTED>;
- brightness-levels = <0 31 63 95 127 159 191 223 255>;
- default-brightness-level = <191>;
- power-supply = <&bl_reg>;
- };
-
- bl_reg: backlight_regulator {
- compatible = "regulator-fixed";
- regulator-name = "backlight-power-supply";
- regulator-min-microvolt = <5000000>;
- regulator-max-microvolt = <5000000>;
- };
-
- panel: panel {
- compatible = "winstar,wf35ltiacd", "simple-panel";
- backlight = <&backlight>;
- power-supply = <&panel_reg>;
- #address-cells = <1>;
- #size-cells = <0>;
-
- port {
- #address-cells = <1>;
- #size-cells = <0>;
-
- panel_input: endpoint {
- remote-endpoint = <&hlcdc_panel_output>;
- };
- };
- };
-
- panel_reg: panel_regulator {
- compatible = "regulator-fixed";
- regulator-name = "panel-power-supply";
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- };
diff --git a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
index 186e5e1c8fa3..d9fdb58e06b4 100644
--- a/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
+++ b/Documentation/devicetree/bindings/display/panel/xinpeng,xpp055c272.yaml
@@ -34,9 +34,11 @@ additionalProperties: false
examples:
- |
- dsi@ff450000 {
+ dsi {
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0xff450000 0x1000>;
+
panel@0 {
compatible = "xinpeng,xpp055c272";
reg = <0>;
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt
deleted file mode 100644
index 5707af89319d..000000000000
--- a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-Rockchip DRM master device
-================================
-
-The Rockchip DRM master device is a virtual device needed to list all
-vop devices or other display interface nodes that comprise the
-graphics subsystem.
-
-Required properties:
-- compatible: Should be "rockchip,display-subsystem"
-- ports: Should contain a list of phandles pointing to display interface port
- of vop devices. vop definitions as defined in
- Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
-
-example:
-
-display-subsystem {
- compatible = "rockchip,display-subsystem";
- ports = <&vopl_out>, <&vopb_out>;
-};
diff --git a/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
new file mode 100644
index 000000000000..ec8ae742d4da
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/rockchip/rockchip-drm.yaml
@@ -0,0 +1,40 @@
+# SPDX-License-Identifier: (GPL-2.0-only)
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/rockchip/rockchip-drm.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Rockchip DRM master device
+
+maintainers:
+ - Sandy Huang <[email protected]>
+ - Heiko Stuebner <[email protected]>
+
+description: |
+ The Rockchip DRM master device is a virtual device needed to list all
+ vop devices or other display interface nodes that comprise the
+ graphics subsystem.
+
+properties:
+ compatible:
+ const: rockchip,display-subsystem
+
+ ports:
+ $ref: /schemas/types.yaml#/definitions/phandle-array
+ description: |
+ Should contain a list of phandles pointing to the display interface
+ port of vop devices. vop definitions are as described in
+ Documentation/devicetree/bindings/display/rockchip/rockchip-vop.txt
+
+required:
+ - compatible
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ display-subsystem {
+ compatible = "rockchip,display-subsystem";
+ ports = <&vopl_out>, <&vopb_out>;
+ };
diff --git a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
index 678776b6012a..1db608c9eef5 100644
--- a/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
+++ b/Documentation/devicetree/bindings/display/simple-framebuffer.yaml
@@ -174,10 +174,6 @@ examples:
};
};
- soc@1c00000 {
- lcdc0: lcdc@1c0c000 {
- compatible = "allwinner,sun4i-a10-lcdc";
- };
- };
+ lcdc0: lcdc { };
...
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt b/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
deleted file mode 100644
index cd5c7186890a..000000000000
--- a/Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+++ /dev/null
@@ -1,35 +0,0 @@
-Sitronix ST7735R display panels
-
-This binding is for display panels using a Sitronix ST7735R controller in SPI
-mode.
-
-Required properties:
-- compatible: "jianda,jd-t18003-t01", "sitronix,st7735r"
-- dc-gpios: Display data/command selection (D/CX)
-- reset-gpios: Reset signal (RSTX)
-
-The node for this driver must be a child node of a SPI controller, hence
-all mandatory properties described in ../spi/spi-bus.txt must be specified.
-
-Optional properties:
-- rotation: panel rotation in degrees counter clockwise (0,90,180,270)
-- backlight: phandle of the backlight device attached to the panel
-
-Example:
-
- backlight: backlight {
- compatible = "gpio-backlight";
- gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
- };
-
- ...
-
- display@0{
- compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
- reg = <0>;
- spi-max-frequency = <32000000>;
- dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
- reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
- rotation = <270>;
- backlight = &backlight;
- };
diff --git a/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
new file mode 100644
index 000000000000..0cebaaefda03
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
@@ -0,0 +1,78 @@
+# SPDX-License-Identifier: GPL-2.0-only
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/display/sitronix,st7735r.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: Sitronix ST7735R Display Panels Device Tree Bindings
+
+maintainers:
+ - David Lechner <[email protected]>
+
+description:
+ This binding is for display panels using a Sitronix ST7715R or ST7735R
+ controller in SPI mode.
+
+allOf:
+ - $ref: panel/panel-common.yaml#
+
+properties:
+ compatible:
+ oneOf:
+ - description:
+ Adafruit 1.8" 160x128 Color TFT LCD (Product ID 358 or 618)
+ items:
+ - enum:
+ - jianda,jd-t18003-t01
+ - const: sitronix,st7735r
+ - description:
+ Okaya 1.44" 128x128 Color TFT LCD (e.g. Renesas YRSK-LCD-PMOD)
+ items:
+ - enum:
+ - okaya,rh128128t
+ - const: sitronix,st7715r
+
+ spi-max-frequency:
+ maximum: 32000000
+
+ dc-gpios:
+ maxItems: 1
+ description: Display data/command selection (D/CX)
+
+ backlight: true
+ reg: true
+ reset-gpios: true
+ rotation: true
+
+required:
+ - compatible
+ - reg
+ - dc-gpios
+ - reset-gpios
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/gpio/gpio.h>
+
+ backlight: backlight {
+ compatible = "gpio-backlight";
+ gpios = <&gpio 44 GPIO_ACTIVE_HIGH>;
+ };
+
+ spi {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ display@0 {
+ compatible = "jianda,jd-t18003-t01", "sitronix,st7735r";
+ reg = <0>;
+ spi-max-frequency = <32000000>;
+ dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
+ reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
+ rotation = <270>;
+ };
+ };
+
+...
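The second compatible branch above (the ST7715R-based Okaya module) wires up the same way; a minimal sketch under the same SPI bus, with the GPIO numbers assumed for illustration:

    display@0 {
            compatible = "okaya,rh128128t", "sitronix,st7715r";
            reg = <0>;
            spi-max-frequency = <32000000>;
            dc-gpios = <&gpio 43 GPIO_ACTIVE_HIGH>;
            reset-gpios = <&gpio 80 GPIO_ACTIVE_HIGH>;
            backlight = <&backlight>;
    };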
diff --git a/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
new file mode 100644
index 000000000000..cac61a998203
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
@@ -0,0 +1,152 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,am65x-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments AM65x Display Subsystem
+
+maintainers:
+ - Jyri Sarha <[email protected]>
+ - Tomi Valkeinen <[email protected]>
+
+description: |
+ The AM65x TI Keystone Display SubSystem has two output ports and
+ two video planes. The first video port supports OLDI and the second
+ supports DPI format. The first plane is a full video plane with all
+ features and the second is a "lite plane" without scaling support.
+
+properties:
+ compatible:
+ const: ti,am65x-dss
+
+ reg:
+ description:
+ Addresses of each DSS memory region described in the SoC's TRM.
+ items:
+ - description: common DSS register area
+ - description: VIDL1 light video plane
+ - description: VID video plane
+ - description: OVR1 overlay manager for vp1
+ - description: OVR2 overlay manager for vp2
+ - description: VP1 video port 1
+ - description: VP2 video port 2
+
+ reg-names:
+ items:
+ - const: common
+ - const: vidl1
+ - const: vid
+ - const: ovr1
+ - const: ovr2
+ - const: vp1
+ - const: vp2
+
+ clocks:
+ items:
+ - description: fck DSS functional clock
+ - description: vp1 Video Port 1 pixel clock
+ - description: vp2 Video Port 2 pixel clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: vp1
+ - const: vp2
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ ports:
+ type: object
+ description:
+ Ports as described in Documentation/devicetree/bindings/graph.txt
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ description:
+ The DSS OLDI output port node from video port 1
+
+ port@1:
+ type: object
+ description:
+ The DSS DPI output port node from video port 2
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ ti,am65x-oldi-io-ctrl:
+ allOf:
+ - $ref: "/schemas/types.yaml#/definitions/phandle-array"
+ - maxItems: 1
+ description:
+ phandle to syscon device node mapping OLDI IO_CTRL registers.
+ The mapped range should cover the OLDI_DAT0_IO_CTRL register and the
+ following OLDI_DAT1_IO_CTRL, OLDI_DAT2_IO_CTRL, OLDI_DAT3_IO_CTRL,
+ and OLDI_CLK_IO_CTRL registers. This property is needed for the OLDI
+ interface to work.
+
+ max-memory-bandwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input memory (from main memory to dispc) bandwidth limit in
+ bytes per second
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ dss: dss@04a00000 {
+ compatible = "ti,am65x-dss";
+ reg = <0x0 0x04a00000 0x0 0x1000>, /* common */
+ <0x0 0x04a02000 0x0 0x1000>, /* vidl1 */
+ <0x0 0x04a06000 0x0 0x1000>, /* vid */
+ <0x0 0x04a07000 0x0 0x1000>, /* ovr1 */
+ <0x0 0x04a08000 0x0 0x1000>, /* ovr2 */
+ <0x0 0x04a0a000 0x0 0x1000>, /* vp1 */
+ <0x0 0x04a0b000 0x0 0x1000>; /* vp2 */
+ reg-names = "common", "vidl1", "vid",
+ "ovr1", "ovr2", "vp1", "vp2";
+ ti,am65x-oldi-io-ctrl = <&dss_oldi_io_ctrl>;
+ power-domains = <&k3_pds 67 TI_SCI_PD_EXCLUSIVE>;
+ clocks = <&k3_clks 67 1>,
+ <&k3_clks 216 1>,
+ <&k3_clks 67 2>;
+ clock-names = "fck", "vp1", "vp2";
+ interrupts = <GIC_SPI 166 IRQ_TYPE_EDGE_RISING>;
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+ oldi_out0: endpoint {
+ remote-endpoint = <&lcd_in0>;
+ };
+ };
+ };
+ };
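The &dss_oldi_io_ctrl phandle in the example above would resolve to a syscon node mapping the five OLDI IO_CTRL registers; a sketch, with the register offset assumed rather than taken from the TRM:

    dss_oldi_io_ctrl: syscon@41e0 {
            compatible = "syscon";
            reg = <0x41e0 0x14>; /* OLDI_DAT0..DAT3 plus OLDI_CLK IO_CTRL */
    };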
diff --git a/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
new file mode 100644
index 000000000000..ade9b2f513f5
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
@@ -0,0 +1,208 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,j721e-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments J721E Display Subsystem
+
+maintainers:
+ - Jyri Sarha <[email protected]>
+ - Tomi Valkeinen <[email protected]>
+
+description: |
+ The J721E TI Keystone Display SubSystem with four output ports and
+ four video planes. There is two full video planes and two "lite
+ planes" without scaling support. The video ports can be connected to
+ the SoC's DPI pins or to integrated display bridges on the SoC.
+
+properties:
+ compatible:
+ const: ti,j721e-dss
+
+ reg:
+ items:
+ - description: common_m DSS Master common
+ - description: common_s0 DSS Shared common 0
+ - description: common_s1 DSS Shared common 1
+ - description: common_s2 DSS Shared common 2
+ - description: VIDL1 light video plane 1
+ - description: VIDL2 light video plane 2
+ - description: VID1 video plane 1
+ - description: VID2 video plane 2
+ - description: OVR1 overlay manager for vp1
+ - description: OVR2 overlay manager for vp2
+ - description: OVR3 overlay manager for vp3
+ - description: OVR4 overlay manager for vp4
+ - description: VP1 video port 1
+ - description: VP2 video port 2
+ - description: VP3 video port 3
+ - description: VP4 video port 4
+ - description: WB Write Back
+
+ reg-names:
+ items:
+ - const: common_m
+ - const: common_s0
+ - const: common_s1
+ - const: common_s2
+ - const: vidl1
+ - const: vidl2
+ - const: vid1
+ - const: vid2
+ - const: ovr1
+ - const: ovr2
+ - const: ovr3
+ - const: ovr4
+ - const: vp1
+ - const: vp2
+ - const: vp3
+ - const: vp4
+ - const: wb
+
+ clocks:
+ items:
+ - description: fck DSS functional clock
+ - description: vp1 Video Port 1 pixel clock
+ - description: vp2 Video Port 2 pixel clock
+ - description: vp3 Video Port 3 pixel clock
+ - description: vp4 Video Port 4 pixel clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: vp1
+ - const: vp2
+ - const: vp3
+ - const: vp4
+
+ interrupts:
+ items:
+ - description: common_m DSS Master common
+ - description: common_s0 DSS Shared common 0
+ - description: common_s1 DSS Shared common 1
+ - description: common_s2 DSS Shared common 2
+
+ interrupt-names:
+ items:
+ - const: common_m
+ - const: common_s0
+ - const: common_s1
+ - const: common_s2
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ ports:
+ type: object
+ description:
+ Ports as described in Documentation/devicetree/bindings/graph.txt
+ properties:
+ "#address-cells":
+ const: 1
+
+ "#size-cells":
+ const: 0
+
+ port@0:
+ type: object
+ description:
+ The output port node from video port 1
+
+ port@1:
+ type: object
+ description:
+ The output port node from video port 2
+
+ port@2:
+ type: object
+ description:
+ The output port node from video port 3
+
+ port@3:
+ type: object
+ description:
+ The output port node from video port 4
+
+ required:
+ - "#address-cells"
+ - "#size-cells"
+
+ max-memory-bandwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input memory (from main memory to dispc) bandwidth limit in
+ bytes per second
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - interrupt-names
+ - ports
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+ #include <dt-bindings/soc/ti,sci_pm_domain.h>
+
+ dss: dss@04a00000 {
+ compatible = "ti,j721e-dss";
+ reg = <0x00 0x04a00000 0x00 0x10000>, /* common_m */
+ <0x00 0x04a10000 0x00 0x10000>, /* common_s0 */
+ <0x00 0x04b00000 0x00 0x10000>, /* common_s1 */
+ <0x00 0x04b10000 0x00 0x10000>, /* common_s2 */
+ <0x00 0x04a20000 0x00 0x10000>, /* vidl1 */
+ <0x00 0x04a30000 0x00 0x10000>, /* vidl2 */
+ <0x00 0x04a50000 0x00 0x10000>, /* vid1 */
+ <0x00 0x04a60000 0x00 0x10000>, /* vid2 */
+ <0x00 0x04a70000 0x00 0x10000>, /* ovr1 */
+ <0x00 0x04a90000 0x00 0x10000>, /* ovr2 */
+ <0x00 0x04ab0000 0x00 0x10000>, /* ovr3 */
+ <0x00 0x04ad0000 0x00 0x10000>, /* ovr4 */
+ <0x00 0x04a80000 0x00 0x10000>, /* vp1 */
+ <0x00 0x04aa0000 0x00 0x10000>, /* vp2 */
+ <0x00 0x04ac0000 0x00 0x10000>, /* vp3 */
+ <0x00 0x04ae0000 0x00 0x10000>, /* vp4 */
+ <0x00 0x04af0000 0x00 0x10000>; /* wb */
+ reg-names = "common_m", "common_s0",
+ "common_s1", "common_s2",
+ "vidl1", "vidl2","vid1","vid2",
+ "ovr1", "ovr2", "ovr3", "ovr4",
+ "vp1", "vp2", "vp3", "vp4",
+ "wb";
+ clocks = <&k3_clks 152 0>,
+ <&k3_clks 152 1>,
+ <&k3_clks 152 4>,
+ <&k3_clks 152 9>,
+ <&k3_clks 152 13>;
+ clock-names = "fck", "vp1", "vp2", "vp3", "vp4";
+ power-domains = <&k3_pds 152 TI_SCI_PD_EXCLUSIVE>;
+ interrupts = <GIC_SPI 602 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 603 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 604 IRQ_TYPE_LEVEL_HIGH>,
+ <GIC_SPI 605 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "common_m",
+ "common_s0",
+ "common_s1",
+ "common_s2";
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ port@0 {
+ reg = <0>;
+
+ dpi_out_0: endpoint {
+ remote-endpoint = <&dp_bridge_input>;
+ };
+ };
+ };
+ };
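The remaining video ports follow the same pattern inside the ports node; a hypothetical port@1 feeding a DPI panel (the &dpi_panel_in endpoint is illustrative) would read:

    port@1 {
            reg = <1>;

            dpi_out_1: endpoint {
                    remote-endpoint = <&dpi_panel_in>;
            };
    };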
diff --git a/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
new file mode 100644
index 000000000000..385bd060ccf9
--- /dev/null
+++ b/Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
@@ -0,0 +1,106 @@
+# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause)
+# Copyright 2019 Texas Instruments Incorporated
+%YAML 1.2
+---
+$id: "http://devicetree.org/schemas/display/ti/ti,k2g-dss.yaml#"
+$schema: "http://devicetree.org/meta-schemas/core.yaml#"
+
+title: Texas Instruments K2G Display Subsystem
+
+maintainers:
+ - Jyri Sarha <[email protected]>
+ - Tomi Valkeinen <[email protected]>
+
+description: |
+ The K2G DSS is an ultra-light version of the TI Keystone Display
+ SubSystem. It has only one output port and one video plane. The
+ output is DPI.
+
+properties:
+ compatible:
+ const: ti,k2g-dss
+
+ reg:
+ items:
+ - description: cfg DSS top level
+ - description: common DISPC common
+ - description: VID1 video plane 1
+ - description: OVR1 overlay manager for vp1
+ - description: VP1 video port 1
+
+ reg-names:
+ items:
+ - const: cfg
+ - const: common
+ - const: vid1
+ - const: ovr1
+ - const: vp1
+
+ clocks:
+ items:
+ - description: fck DSS functional clock
+ - description: vp1 Video Port 1 pixel clock
+
+ clock-names:
+ items:
+ - const: fck
+ - const: vp1
+
+ interrupts:
+ maxItems: 1
+
+ power-domains:
+ maxItems: 1
+ description: phandle to the associated power domain
+
+ port:
+ type: object
+ description:
+ Port as described in Documentation/devicetree/bindings/graph.txt.
+ The DSS DPI output port node
+
+ max-memory-bandwidth:
+ $ref: /schemas/types.yaml#/definitions/uint32
+ description:
+ Input memory (from main memory to dispc) bandwidth limit in
+ bytes per second
+
+required:
+ - compatible
+ - reg
+ - reg-names
+ - clocks
+ - clock-names
+ - interrupts
+ - port
+
+additionalProperties: false
+
+examples:
+ - |
+ #include <dt-bindings/interrupt-controller/arm-gic.h>
+ #include <dt-bindings/interrupt-controller/irq.h>
+
+ dss: dss@02540000 {
+ compatible = "ti,k2g-dss";
+ reg = <0x02540000 0x400>,
+ <0x02550000 0x1000>,
+ <0x02557000 0x1000>,
+ <0x0255a800 0x100>,
+ <0x0255ac00 0x100>;
+ reg-names = "cfg", "common", "vid1", "ovr1", "vp1";
+ clocks = <&k2g_clks 0x2 0>,
+ <&k2g_clks 0x2 1>;
+ clock-names = "fck", "vp1";
+ interrupts = <GIC_SPI 247 IRQ_TYPE_EDGE_RISING>;
+
+ power-domains = <&k2g_pds 0x2>;
+
+ max-memory-bandwidth = <230000000>;
+
+ port {
+ dpi_out: endpoint {
+ remote-endpoint = <&sii9022_in>;
+ };
+ };
+ };
diff --git a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
index 7bf1bb444812..aac617acb64f 100644
--- a/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
+++ b/Documentation/devicetree/bindings/display/tilcdc/tilcdc.txt
@@ -37,7 +37,7 @@ Optional nodes:
supports a single port with a single endpoint.
- See also Documentation/devicetree/bindings/display/tilcdc/panel.txt and
- Documentation/devicetree/bindings/display/tilcdc/tfp410.txt for connecting
+ Documentation/devicetree/bindings/display/bridge/ti,tfp410.txt for connecting
tfp410 DVI encoder or lcd panel to lcdc
[1] There is an errata about AM335x color wiring. For 16-bit color mode
diff --git a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
index 8b5c346f23f6..34780d7535b8 100644
--- a/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
+++ b/Documentation/devicetree/bindings/dma/ti/k3-udma.yaml
@@ -143,7 +143,7 @@ examples:
#size-cells = <2>;
dma-coherent;
dma-ranges;
- ranges;
+ ranges = <0x0 0x30800000 0x0 0x30800000 0x0 0x05000000>;
ti,sci-dev-id = <118>;
@@ -169,16 +169,4 @@ examples:
ti,sci-rm-range-rflow = <0x6>; /* GP RFLOW */
};
};
-
- mcasp0: mcasp@02B00000 {
- dmas = <&main_udmap 0xc400>, <&main_udmap 0x4400>;
- dma-names = "tx", "rx";
- };
-
- crypto: crypto@4E00000 {
- compatible = "ti,sa2ul-crypto";
-
- dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>;
- dma-names = "tx", "rx1", "rx2";
- };
};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
index 4ea6a8789699..e8b99adcb1bd 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-bifrost.yaml
@@ -84,31 +84,31 @@ examples:
gpu_opp_table: opp_table0 {
compatible = "operating-points-v2";
- opp@533000000 {
+ opp-533000000 {
opp-hz = /bits/ 64 <533000000>;
opp-microvolt = <1250000>;
};
- opp@450000000 {
+ opp-450000000 {
opp-hz = /bits/ 64 <450000000>;
opp-microvolt = <1150000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1125000>;
};
- opp@350000000 {
+ opp-350000000 {
opp-hz = /bits/ 64 <350000000>;
opp-microvolt = <1075000>;
};
- opp@266000000 {
+ opp-266000000 {
opp-hz = /bits/ 64 <266000000>;
opp-microvolt = <1025000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <925000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <912500>;
};
diff --git a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
index 36f59b3ade71..8d966f3ff3db 100644
--- a/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
+++ b/Documentation/devicetree/bindings/gpu/arm,mali-midgard.yaml
@@ -138,31 +138,31 @@ examples:
gpu_opp_table: opp_table0 {
compatible = "operating-points-v2";
- opp@533000000 {
+ opp-533000000 {
opp-hz = /bits/ 64 <533000000>;
opp-microvolt = <1250000>;
};
- opp@450000000 {
+ opp-450000000 {
opp-hz = /bits/ 64 <450000000>;
opp-microvolt = <1150000>;
};
- opp@400000000 {
+ opp-400000000 {
opp-hz = /bits/ 64 <400000000>;
opp-microvolt = <1125000>;
};
- opp@350000000 {
+ opp-350000000 {
opp-hz = /bits/ 64 <350000000>;
opp-microvolt = <1075000>;
};
- opp@266000000 {
+ opp-266000000 {
opp-hz = /bits/ 64 <266000000>;
opp-microvolt = <1025000>;
};
- opp@160000000 {
+ opp-160000000 {
opp-hz = /bits/ 64 <160000000>;
opp-microvolt = <925000>;
};
- opp@100000000 {
+ opp-100000000 {
opp-hz = /bits/ 64 <100000000>;
opp-microvolt = <912500>;
};
diff --git a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
index f46de17c0878..cc3c8ea6a894 100644
--- a/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
+++ b/Documentation/devicetree/bindings/iio/adc/samsung,exynos-adc.yaml
@@ -123,7 +123,7 @@ examples:
samsung,syscon-phandle = <&pmu_system_controller>;
/* NTC thermistor is a hwmon device */
- ncp15wb473@0 {
+ ncp15wb473 {
compatible = "murata,ncp15wb473";
pullup-uv = <1800000>;
pullup-ohm = <47000>;
diff --git a/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
index dc194b2c151a..cdcaa3f52d25 100644
--- a/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
+++ b/Documentation/devicetree/bindings/input/ilitek,ili2xxx.txt
@@ -1,9 +1,10 @@
-Ilitek ILI210x/ILI2117/ILI251x touchscreen controller
+Ilitek ILI210x/ILI2117/ILI2120/ILI251x touchscreen controller
Required properties:
- compatible:
ilitek,ili210x for ILI210x
ilitek,ili2117 for ILI2117
+ ilitek,ili2120 for ILI2120
ilitek,ili251x for ILI251x
- reg: The I2C address of the device
diff --git a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
index d7c3262b2494..c99ed3934d7e 100644
--- a/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
+++ b/Documentation/devicetree/bindings/input/touchscreen/goodix.yaml
@@ -62,7 +62,7 @@ required:
examples:
- |
- i2c@00000000 {
+ i2c {
#address-cells = <1>;
#size-cells = <0>;
gt928@5d {
diff --git a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
index c864a46cddcf..f5021214edec 100644
--- a/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
+++ b/Documentation/devicetree/bindings/input/twl4030-pwrbutton.txt
@@ -1,7 +1,7 @@
Texas Instruments TWL family (twl4030) pwrbutton module
This module is part of the TWL4030. For more details about the whole
-chip see Documentation/devicetree/bindings/mfd/twl-familly.txt.
+chip see Documentation/devicetree/bindings/mfd/twl-family.txt.
This module provides a simple power button event via an Interrupt.
diff --git a/Documentation/devicetree/bindings/leds/common.yaml b/Documentation/devicetree/bindings/leds/common.yaml
index d97d099b87e5..c60b994fe116 100644
--- a/Documentation/devicetree/bindings/leds/common.yaml
+++ b/Documentation/devicetree/bindings/leds/common.yaml
@@ -85,7 +85,7 @@ properties:
# LED will act as a back-light, controlled by the framebuffer system
- backlight
# LED will turn on (but for leds-gpio see "default-state" property in
- # Documentation/devicetree/bindings/leds/leds-gpio.txt)
+ # Documentation/devicetree/bindings/leds/leds-gpio.yaml)
- default-on
# LED "double" flashes at a load average based rate
- heartbeat
diff --git a/Documentation/devicetree/bindings/leds/register-bit-led.txt b/Documentation/devicetree/bindings/leds/register-bit-led.txt
index cf1ea403ba7a..c7af6f70a97b 100644
--- a/Documentation/devicetree/bindings/leds/register-bit-led.txt
+++ b/Documentation/devicetree/bindings/leds/register-bit-led.txt
@@ -5,7 +5,7 @@ where single bits in a certain register can turn on/off a
single LED. The register bit LEDs appear as children to the
syscon device, with the proper compatible string. For the
syscon bindings see:
-Documentation/devicetree/bindings/mfd/syscon.txt
+Documentation/devicetree/bindings/mfd/syscon.yaml
Each LED is represented as a sub-node of the syscon device. Each
node's name represents the name of the corresponding LED.
diff --git a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
index 9af873b43acd..8453ee340b9f 100644
--- a/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
+++ b/Documentation/devicetree/bindings/media/allwinner,sun4i-a10-csi.yaml
@@ -33,24 +33,40 @@ properties:
maxItems: 1
clocks:
- minItems: 2
- maxItems: 3
- items:
- - description: The CSI interface clock
- - description: The CSI ISP clock
- - description: The CSI DRAM clock
+ oneOf:
+ - items:
+ - description: The CSI interface clock
+ - description: The CSI DRAM clock
+
+ - items:
+ - description: The CSI interface clock
+ - description: The CSI ISP clock
+ - description: The CSI DRAM clock
clock-names:
- minItems: 2
- maxItems: 3
- items:
- - const: bus
- - const: isp
- - const: ram
+ oneOf:
+ - items:
+ - const: bus
+ - const: ram
+
+ - items:
+ - const: bus
+ - const: isp
+ - const: ram
resets:
maxItems: 1
+ # FIXME: This should be made required eventually once every SoC
+ # has the MBUS declared.
+ interconnects:
+ maxItems: 1
+
+ # FIXME: This should be made required eventually once every SoC
+ # has the MBUS declared.
+ interconnect-names:
+ const: dma-mem
+
# See ./video-interfaces.txt for details
port:
type: object
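With the two-entry clock branch added above, a CSI node on an SoC without the ISP clock would list only the bus and DRAM clocks; a sketch, with the compatible string, clock and reset specifiers as placeholders:

    csi0: csi@1c09000 {
            compatible = "allwinner,sun7i-a20-csi0";
            reg = <0x01c09000 0x1000>;
            interrupts = <GIC_SPI 42 IRQ_TYPE_LEVEL_HIGH>;
            clocks = <&ccu CLK_BUS_CSI0>, <&ccu CLK_DRAM_CSI0>;
            clock-names = "bus", "ram"; /* no "isp" entry on this variant */
            resets = <&ccu RST_BUS_CSI0>;
    };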
diff --git a/Documentation/devicetree/bindings/media/ti,cal.yaml b/Documentation/devicetree/bindings/media/ti,cal.yaml
index 1ea784179536..5e066629287d 100644
--- a/Documentation/devicetree/bindings/media/ti,cal.yaml
+++ b/Documentation/devicetree/bindings/media/ti,cal.yaml
@@ -177,7 +177,7 @@ examples:
};
};
- i2c5: i2c@4807c000 {
+ i2c {
clock-frequency = <400000>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
index dd1843489ad1..3e0a8a92d652 100644
--- a/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
+++ b/Documentation/devicetree/bindings/memory-controllers/nvidia,tegra124-emc.yaml
@@ -347,6 +347,7 @@ examples:
interrupts = <GIC_SPI 77 IRQ_TYPE_LEVEL_HIGH>;
#iommu-cells = <1>;
+ #reset-cells = <1>;
};
external-memory-controller@7001b000 {
@@ -363,20 +364,23 @@ examples:
timing-0 {
clock-frequency = <12750000>;
- nvidia,emc-zcal-cnt-long = <0x00000042>;
- nvidia,emc-auto-cal-interval = <0x001fffff>;
- nvidia,emc-ctt-term-ctrl = <0x00000802>;
- nvidia,emc-cfg = <0x73240000>;
- nvidia,emc-cfg-2 = <0x000008c5>;
- nvidia,emc-sel-dpd-ctrl = <0x00040128>;
- nvidia,emc-bgbias-ctl0 = <0x00000008>;
nvidia,emc-auto-cal-config = <0xa1430000>;
nvidia,emc-auto-cal-config2 = <0x00000000>;
nvidia,emc-auto-cal-config3 = <0x00000000>;
- nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-auto-cal-interval = <0x001fffff>;
+ nvidia,emc-bgbias-ctl0 = <0x00000008>;
+ nvidia,emc-cfg = <0x73240000>;
+ nvidia,emc-cfg-2 = <0x000008c5>;
+ nvidia,emc-ctt-term-ctrl = <0x00000802>;
nvidia,emc-mode-1 = <0x80100003>;
nvidia,emc-mode-2 = <0x80200008>;
nvidia,emc-mode-4 = <0x00000000>;
+ nvidia,emc-mode-reset = <0x80001221>;
+ nvidia,emc-mrs-wait-cnt = <0x000e000e>;
+ nvidia,emc-sel-dpd-ctrl = <0x00040128>;
+ nvidia,emc-xm2dqspadctrl2 = <0x0130b118>;
+ nvidia,emc-zcal-cnt-long = <0x00000042>;
+ nvidia,emc-zcal-interval = <0x00000000>;
nvidia,emc-configuration = <
0x00000000 /* EMC_RC */
diff --git a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
index 44d71469c914..63f674ffeb4f 100644
--- a/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
+++ b/Documentation/devicetree/bindings/memory-controllers/ti/emif.txt
@@ -32,7 +32,7 @@ Required only for "ti,emif-am3352" and "ti,emif-am4372":
- sram : Phandles for generic sram driver nodes,
first should be type 'protect-exec' for the driver to use to copy
and run PM functions, second should be regular pool to be used for
- data region for code. See Documentation/devicetree/bindings/sram/sram.txt
+ data region for code. See Documentation/devicetree/bindings/sram/sram.yaml
for more details.
Optional properties:
diff --git a/Documentation/devicetree/bindings/mfd/max77650.yaml b/Documentation/devicetree/bindings/mfd/max77650.yaml
index 4a70f875a6eb..480385789394 100644
--- a/Documentation/devicetree/bindings/mfd/max77650.yaml
+++ b/Documentation/devicetree/bindings/mfd/max77650.yaml
@@ -97,14 +97,14 @@ examples:
regulators {
compatible = "maxim,max77650-regulator";
- max77650_ldo: regulator@0 {
+ max77650_ldo: regulator-ldo {
regulator-compatible = "ldo";
regulator-name = "max77650-ldo";
regulator-min-microvolt = <1350000>;
regulator-max-microvolt = <2937500>;
};
- max77650_sbb0: regulator@1 {
+ max77650_sbb0: regulator-sbb0 {
regulator-compatible = "sbb0";
regulator-name = "max77650-sbb0";
regulator-min-microvolt = <800000>;
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
index 4f62143afd24..a5ced46bbde9 100644
--- a/Documentation/devicetree/bindings/mfd/tps65910.txt
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -26,8 +26,8 @@ Required properties:
ldo6, ldo7, ldo8
- xxx-supply: Input voltage supply regulator.
- These entries are require if regulators are enabled for a device. Missing of these
- properties can cause the regulator registration fails.
+ These entries are required if regulators are enabled for a device. Missing these
+ properties can cause the regulator registration to fail.
If some of input supply is powered through battery or always-on supply then
also it is require to have these parameters with proper node handle of always
on power supply.
diff --git a/Documentation/devicetree/bindings/mfd/twl-familly.txt b/Documentation/devicetree/bindings/mfd/twl-family.txt
index 56f244b5d8a4..56f244b5d8a4 100644
--- a/Documentation/devicetree/bindings/mfd/twl-familly.txt
+++ b/Documentation/devicetree/bindings/mfd/twl-family.txt
diff --git a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
index 088eff9ddb78..e0f901edc063 100644
--- a/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
+++ b/Documentation/devicetree/bindings/mfd/zii,rave-sp.txt
@@ -20,7 +20,7 @@ RAVE SP consists of the following sub-devices:
Device Description
------ -----------
rave-sp-wdt : Watchdog
-rave-sp-nvmem : Interface to onborad EEPROM
+rave-sp-nvmem : Interface to onboard EEPROM
rave-sp-backlight : Display backlight
rave-sp-hwmon : Interface to onboard hardware sensors
rave-sp-leds : Interface to onboard LEDs
diff --git a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
index bb7e896cb644..9134e9bcca56 100644
--- a/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
+++ b/Documentation/devicetree/bindings/misc/fsl,qoriq-mc.txt
@@ -26,7 +26,7 @@ For generic IOMMU bindings, see
Documentation/devicetree/bindings/iommu/iommu.txt.
For arm-smmu binding, see:
-Documentation/devicetree/bindings/iommu/arm,smmu.txt.
+Documentation/devicetree/bindings/iommu/arm,smmu.yaml.
Required properties:
diff --git a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
index 3c0df4016a12..8fded83c519a 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
+++ b/Documentation/devicetree/bindings/mmc/mmc-controller.yaml
@@ -370,6 +370,7 @@ examples:
mmc3: mmc@1c12000 {
#address-cells = <1>;
#size-cells = <0>;
+ reg = <0x1c12000 0x200>;
pinctrl-names = "default";
pinctrl-0 = <&mmc3_pins_a>;
vmmc-supply = <&reg_vmmc3>;
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index 19f5508a7569..4a9145ef15d6 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -124,7 +124,7 @@ not every application needs SDIO irq, e.g. MMC cards.
pinctrl-1 = <&mmc1_idle>;
pinctrl-2 = <&mmc1_sleep>;
...
- interrupts-extended = <&intc 64 &gpio2 28 GPIO_ACTIVE_LOW>;
+ interrupts-extended = <&intc 64 &gpio2 28 IRQ_TYPE_LEVEL_LOW>;
};
mmc1_idle : pinmux_cirq_pin {
diff --git a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
index f3893c4d3c6a..d2eada5044b2 100644
--- a/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
+++ b/Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt
@@ -27,7 +27,7 @@ Required properties of NAND chips:
- reg: shall contain the native Chip Select ids from 0 to max supported by
the cadence nand flash controller
-See Documentation/devicetree/bindings/mtd/nand.txt for more details on
+See Documentation/devicetree/bindings/mtd/nand-controller.yaml for more details on
generic bindings.
Example:
diff --git a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
index 48a7f916c5e4..88b57b0ca1f4 100644
--- a/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
+++ b/Documentation/devicetree/bindings/net/brcm,bcm7445-switch-v4.0.txt
@@ -45,7 +45,7 @@ Optional properties:
switch queue
- resets: a single phandle and reset identifier pair. See
- Documentation/devicetree/binding/reset/reset.txt for details.
+ Documentation/devicetree/bindings/reset/reset.txt for details.
- reset-names: If the "reset" property is specified, this property should have
the value "switch" to denote the switch reset line.
diff --git a/Documentation/devicetree/bindings/net/mdio.yaml b/Documentation/devicetree/bindings/net/mdio.yaml
index 5d08d2ffd4eb..50c3397a82bc 100644
--- a/Documentation/devicetree/bindings/net/mdio.yaml
+++ b/Documentation/devicetree/bindings/net/mdio.yaml
@@ -56,7 +56,6 @@ patternProperties:
examples:
- |
davinci_mdio: mdio@5c030000 {
- compatible = "ti,davinci_mdio";
reg = <0x5c030000 0x1000>;
#address-cells = <1>;
#size-cells = <0>;
diff --git a/Documentation/devicetree/bindings/nvmem/nvmem.yaml b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
index b43c6c65294e..65980224d550 100644
--- a/Documentation/devicetree/bindings/nvmem/nvmem.yaml
+++ b/Documentation/devicetree/bindings/nvmem/nvmem.yaml
@@ -76,6 +76,8 @@ examples:
qfprom: eeprom@700000 {
#address-cells = <1>;
#size-cells = <1>;
+ reg = <0x00700000 0x100000>;
+
wp-gpios = <&gpio1 3 GPIO_ACTIVE_HIGH>;
/* ... */
diff --git a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
index 020ef9e4c411..94ac23687b7e 100644
--- a/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
+++ b/Documentation/devicetree/bindings/phy/allwinner,sun4i-a10-usb-phy.yaml
@@ -86,7 +86,7 @@ examples:
#include <dt-bindings/clock/sun4i-a10-ccu.h>
#include <dt-bindings/reset/sun4i-a10-ccu.h>
- usbphy: phy@01c13400 {
+ usbphy: phy@1c13400 {
#phy-cells = <1>;
compatible = "allwinner,sun4i-a10-usb-phy";
reg = <0x01c13400 0x10>, <0x01c14800 0x4>, <0x01c1c800 0x4>;
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
index bb690e20c368..135c7dfbc180 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2400-pinctrl.yaml
@@ -17,7 +17,7 @@ description: |+
"aspeed,ast2400-scu", "syscon", "simple-mfd"
Refer to the the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
index f7f5d57f2c9a..824f7fd1d51b 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2500-pinctrl.yaml
@@ -18,7 +18,7 @@ description: |+
"aspeed,g5-scu", "syscon", "simple-mfd"
Refer to the the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
index 3749fa233e87..ac8d1c30a8ed 100644
--- a/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/aspeed,ast2600-pinctrl.yaml
@@ -17,7 +17,7 @@ description: |+
"aspeed,ast2600-scu", "syscon", "simple-mfd"
Refer to the the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
index 754ea7ab040a..ef4de32cb17c 100644
--- a/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
+++ b/Documentation/devicetree/bindings/pinctrl/st,stm32-pinctrl.yaml
@@ -248,7 +248,7 @@ examples:
};
//Example 3 pin groups
- pinctrl@60020000 {
+ pinctrl {
usart1_pins_a: usart1-0 {
pins1 {
pinmux = <STM32_PINMUX('A', 9, AF7)>;
diff --git a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
index aab70e8b681e..d3098c924b25 100644
--- a/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
+++ b/Documentation/devicetree/bindings/power/amlogic,meson-ee-pwrc.yaml
@@ -18,7 +18,7 @@ description: |+
"amlogic,meson-gx-hhi-sysctrl", "simple-mfd", "syscon"
Refer to the the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.txt b/Documentation/devicetree/bindings/power/domain-idle-state.txt
deleted file mode 100644
index eefc7ed22ca2..000000000000
--- a/Documentation/devicetree/bindings/power/domain-idle-state.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-PM Domain Idle State Node:
-
-A domain idle state node represents the state parameters that will be used to
-select the state when there are no active components in the domain.
-
-The state node has the following parameters -
-
-- compatible:
- Usage: Required
- Value type: <string>
- Definition: Must be "domain-idle-state".
-
-- entry-latency-us
- Usage: Required
- Value type: <prop-encoded-array>
- Definition: u32 value representing worst case latency in
- microseconds required to enter the idle state.
- The exit-latency-us duration may be guaranteed
- only after entry-latency-us has passed.
-
-- exit-latency-us
- Usage: Required
- Value type: <prop-encoded-array>
- Definition: u32 value representing worst case latency
- in microseconds required to exit the idle state.
-
-- min-residency-us
- Usage: Required
- Value type: <prop-encoded-array>
- Definition: u32 value representing minimum residency duration
- in microseconds after which the idle state will yield
- power benefits after overcoming the overhead in entering
-i the idle state.
diff --git a/Documentation/devicetree/bindings/power/domain-idle-state.yaml b/Documentation/devicetree/bindings/power/domain-idle-state.yaml
new file mode 100644
index 000000000000..dfba1af9abe5
--- /dev/null
+++ b/Documentation/devicetree/bindings/power/domain-idle-state.yaml
@@ -0,0 +1,64 @@
+# SPDX-License-Identifier: GPL-2.0
+%YAML 1.2
+---
+$id: http://devicetree.org/schemas/power/domain-idle-state.yaml#
+$schema: http://devicetree.org/meta-schemas/core.yaml#
+
+title: PM Domain Idle States binding description
+
+maintainers:
+ - Ulf Hansson <[email protected]>
+
+description:
+ A domain idle state node represents the state parameters that will be used to
+ select the state when there are no active components in the PM domain.
+
+properties:
+ $nodename:
+ const: domain-idle-states
+
+patternProperties:
+ "^(cpu|cluster|domain)-":
+ type: object
+ description:
+ Each state node represents a domain idle state description.
+
+ properties:
+ compatible:
+ const: domain-idle-state
+
+ entry-latency-us:
+ description:
+ The worst case latency in microseconds required to enter the idle
+ state. Note that the exit-latency-us duration may be guaranteed only
+ after the entry-latency-us has passed.
+
+ exit-latency-us:
+ description:
+ The worst case latency in microseconds required to exit the idle
+ state.
+
+ min-residency-us:
+ description:
+ The minimum residency duration in microseconds after which the idle
+ state will yield power benefits, after overcoming the overhead while
+ entering the idle state.
+
+ required:
+ - compatible
+ - entry-latency-us
+ - exit-latency-us
+ - min-residency-us
+
+examples:
+ - |
+
+ domain-idle-states {
+ domain_retention: domain-retention {
+ compatible = "domain-idle-state";
+ entry-latency-us = <20>;
+ exit-latency-us = <40>;
+ min-residency-us = <80>;
+ };
+ };
+...
diff --git a/Documentation/devicetree/bindings/power/power-domain.yaml b/Documentation/devicetree/bindings/power/power-domain.yaml
index 455b573293ae..6047aacd7766 100644
--- a/Documentation/devicetree/bindings/power/power-domain.yaml
+++ b/Documentation/devicetree/bindings/power/power-domain.yaml
@@ -25,22 +25,20 @@ description: |+
properties:
$nodename:
- pattern: "^(power-controller|power-domain)(@.*)?$"
+ pattern: "^(power-controller|power-domain)([@-].*)?$"
domain-idle-states:
$ref: /schemas/types.yaml#/definitions/phandle-array
- description:
- A phandle of an idle-state that shall be soaked into a generic domain
- power state. The idle state definitions are compatible with
- domain-idle-state specified in
- Documentation/devicetree/bindings/power/domain-idle-state.txt
- phandles that are not compatible with domain-idle-state will be ignored.
- The domain-idle-state property reflects the idle state of this PM domain
- and not the idle states of the devices or sub-domains in the PM domain.
- Devices and sub-domains have their own idle-states independent
- of the parent domain's idle states. In the absence of this property,
- the domain would be considered as capable of being powered-on
- or powered-off.
+ description: |
+ Phandles of idle states that define the available states for the
+ power-domain provider. The idle state definitions are compatible with the
+ domain-idle-state bindings, specified in ./domain-idle-state.yaml.
+
+ Note that the domain-idle-state property reflects the idle states of this
+ PM domain and not the idle states of the devices or sub-domains in the PM
+ domain. Devices and sub-domains have their own idle states independent of
+ the parent domain's idle states. In the absence of this property, the
+ domain would be considered as capable of being powered-on or powered-off.
operating-points-v2:
$ref: /schemas/types.yaml#/definitions/phandle-array
diff --git a/Documentation/devicetree/bindings/power/power_domain.txt b/Documentation/devicetree/bindings/power/power_domain.txt
index 5b09b2deb483..08497ef26c7a 100644
--- a/Documentation/devicetree/bindings/power/power_domain.txt
+++ b/Documentation/devicetree/bindings/power/power_domain.txt
@@ -109,4 +109,4 @@ Example:
required-opps = <&domain1_opp_1>;
};
-[1]. Documentation/devicetree/bindings/power/domain-idle-state.txt
+[1]. Documentation/devicetree/bindings/power/domain-idle-state.yaml
diff --git a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
index f5cdac8b2847..8b005192f6e8 100644
--- a/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qcom,spmi-regulator.txt
@@ -161,7 +161,7 @@ The regulator node houses sub-nodes for each regulator within the device. Each
sub-node is identified using the node's name, with valid values listed for each
of the PMICs below.
-pm8005:
+pm8004:
s2, s5
pm8005:
diff --git a/Documentation/devicetree/bindings/regulator/regulator.yaml b/Documentation/devicetree/bindings/regulator/regulator.yaml
index 92ff2e8ad572..91a39a33000b 100644
--- a/Documentation/devicetree/bindings/regulator/regulator.yaml
+++ b/Documentation/devicetree/bindings/regulator/regulator.yaml
@@ -191,7 +191,7 @@ patternProperties:
examples:
- |
- xyzreg: regulator@0 {
+ xyzreg: regulator {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <2500000>;
regulator-always-on;
diff --git a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
index 246dea8a2ec9..8ac437282659 100644
--- a/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
+++ b/Documentation/devicetree/bindings/reset/intel,rcu-gw.yaml
@@ -23,7 +23,11 @@ properties:
description: Global reset register offset and bit offset.
allOf:
- $ref: /schemas/types.yaml#/definitions/uint32-array
- - maxItems: 2
+ items:
+ - description: Register offset
+ - description: Register bit offset
+ minimum: 0
+ maximum: 31
"#reset-cells":
minimum: 2
diff --git a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt
index b4edaf7c7ff3..2880d5dda95e 100644
--- a/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt
+++ b/Documentation/devicetree/bindings/reset/st,stm32mp1-rcc.txt
@@ -3,4 +3,4 @@ STMicroelectronics STM32MP1 Peripheral Reset Controller
The RCC IP is both a reset and a clock controller.
-Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.txt
+Please see Documentation/devicetree/bindings/clock/st,stm32mp1-rcc.yaml
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
index 944743dd9212..c42b91e525fa 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-sai.txt
@@ -36,7 +36,7 @@ SAI subnodes required properties:
- clock-names: Must contain "sai_ck".
Must also contain "MCLK", if SAI shares a master clock,
with a SAI set as MCLK clock provider.
- - dmas: see Documentation/devicetree/bindings/dma/stm32-dma.txt
+ - dmas: see Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
- dma-names: identifier string for each DMA request line
"tx": if sai sub-block is configured as playback DAI
"rx": if sai sub-block is configured as capture DAI
diff --git a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
index 33826f2459fa..ca9101777c44 100644
--- a/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
+++ b/Documentation/devicetree/bindings/sound/st,stm32-spdifrx.txt
@@ -10,7 +10,7 @@ Required properties:
- clock-names: must contain "kclk"
- interrupts: cpu DAI interrupt line
- dmas: DMA specifiers for audio data DMA and iec control flow DMA
- See STM32 DMA bindings, Documentation/devicetree/bindings/dma/stm32-dma.txt
+ See STM32 DMA bindings, Documentation/devicetree/bindings/dma/st,stm32-dma.yaml
- dma-names: two dmas have to be defined, "rx" and "rx-ctrl"
Optional properties:
diff --git a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
index f0d979664f07..e49ecbf715ba 100644
--- a/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
+++ b/Documentation/devicetree/bindings/spi/st,stm32-spi.yaml
@@ -49,7 +49,7 @@ properties:
dmas:
description: |
DMA specifiers for tx and rx dma. DMA fifo mode must be used. See
- the STM32 DMA bindings Documentation/devicetree/bindings/dma/stm32-dma.txt.
+ the STM32 DMA bindings Documentation/devicetree/bindings/dma/st,stm32-dma.yaml.
items:
- description: rx DMA channel
- description: tx DMA channel
diff --git a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
index 80bac7a182d5..4b5509436588 100644
--- a/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
+++ b/Documentation/devicetree/bindings/sram/allwinner,sun4i-a10-system-control.yaml
@@ -125,7 +125,7 @@ examples:
#size-cells = <1>;
ranges;
- sram_a: sram@00000000 {
+ sram_a: sram@0 {
compatible = "mmio-sram";
reg = <0x00000000 0xc000>;
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
index d9fdf4809a49..f3e68ed03abf 100644
--- a/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
+++ b/Documentation/devicetree/bindings/thermal/brcm,avs-ro-thermal.yaml
@@ -17,7 +17,7 @@ description: |+
"brcm,bcm2711-avs-monitor", "syscon", "simple-mfd"
Refer to the bindings described in
- Documentation/devicetree/bindings/mfd/syscon.txt
+ Documentation/devicetree/bindings/mfd/syscon.yaml
properties:
compatible:
diff --git a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
index 23e989e09766..d918cee100ac 100644
--- a/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
+++ b/Documentation/devicetree/bindings/timer/allwinner,sun4i-a10-timer.yaml
@@ -87,7 +87,7 @@ additionalProperties: false
examples:
- |
- timer {
+ timer@1c20c00 {
compatible = "allwinner,sun4i-a10-timer";
reg = <0x01c20c00 0x400>;
interrupts = <22>,
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.yaml b/Documentation/devicetree/bindings/vendor-prefixes.yaml
index 9e67944bec9c..a2da166df1bc 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.yaml
+++ b/Documentation/devicetree/bindings/vendor-prefixes.yaml
@@ -337,12 +337,16 @@ patternProperties:
description: Fastrax Oy
"^fcs,.*":
description: Fairchild Semiconductor
+ "^feixin,.*":
+ description: Shenzhen Feixin Photoelectic Co., Ltd
"^feiyang,.*":
description: Shenzhen Fly Young Technology Co.,LTD.
"^firefly,.*":
description: Firefly
"^focaltech,.*":
description: FocalTech Systems Co.,Ltd
+ "^frida,.*":
+ description: Shenzhen Frida LCD Co., Ltd.
"^friendlyarm,.*":
description: Guangzhou FriendlyARM Computer Tech Co., Ltd
"^fsl,.*":
@@ -421,6 +425,8 @@ patternProperties:
description: Shenzhen Hugsun Technology Co. Ltd.
"^hwacom,.*":
description: HwaCom Systems Inc.
+ "^hydis,.*":
+ description: Hydis Technologies
"^hyundai,.*":
description: Hyundai Technology
"^i2se,.*":
@@ -553,6 +559,8 @@ patternProperties:
description: Linear Technology Corporation
"^logicpd,.*":
description: Logic PD, Inc.
+ "^logictechno,.*":
+ description: Logic Technologies Limited
"^longcheer,.*":
description: Longcheer Technology (Shanghai) Co., Ltd.
"^loongson,.*":
@@ -659,6 +667,8 @@ patternProperties:
description: Netron DY
"^netxeon,.*":
description: Shenzhen Netxeon Technology CO., LTD
+ "^neweast,.*":
+ description: Guangdong Neweast Optoelectronics CO., LTD
"^nexbox,.*":
description: Nexbox
"^nextthing,.*":
diff --git a/Documentation/driver-api/dmaengine/client.rst b/Documentation/driver-api/dmaengine/client.rst
index e5953e7e4bf4..2104830a99ae 100644
--- a/Documentation/driver-api/dmaengine/client.rst
+++ b/Documentation/driver-api/dmaengine/client.rst
@@ -151,8 +151,8 @@ The details of these operations are:
Note that callbacks will always be invoked from the DMA
engines tasklet, never from interrupt context.
-Optional: per descriptor metadata
----------------------------------
+ **Optional: per descriptor metadata**
+
DMAengine provides two ways for metadata support.
DESC_METADATA_CLIENT
@@ -199,12 +199,15 @@ Optional: per descriptor metadata
DESC_METADATA_CLIENT
- DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
construct the metadata in the client's buffer
2. use dmaengine_desc_attach_metadata() to attach the buffer to the
descriptor
3. submit the transfer
+
- DMA_DEV_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
2. use dmaengine_desc_attach_metadata() to attach the buffer to the
descriptor
@@ -215,6 +218,7 @@ Optional: per descriptor metadata
DESC_METADATA_ENGINE
- DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the
engine's metadata area
@@ -222,7 +226,9 @@ Optional: per descriptor metadata
4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the
amount of data the client has placed into the metadata buffer
5. submit the transfer
+
- DMA_DEV_TO_MEM:
+
1. prepare the descriptor (dmaengine_prep_*)
2. submit the transfer
3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get
@@ -278,8 +284,8 @@ Optional: per descriptor metadata
void dma_async_issue_pending(struct dma_chan *chan);
-Further APIs:
--------------
+Further APIs
+------------
1. Terminate APIs
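For illustration, a minimal sketch of the DESC_METADATA_CLIENT flow for
DMA_MEM_TO_DEV described above; the channel (chan), the mapped buffer
(buf_dma, len) and the metadata buffer (metadata, metadata_len) are assumed
to be set up elsewhere, and error handling is elided::

    struct dma_async_tx_descriptor *desc;
    dma_cookie_t cookie;
    int ret;

    /* 1. prepare the descriptor */
    desc = dmaengine_prep_slave_single(chan, buf_dma, len,
                                       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);

    /* 2. attach the client's metadata buffer to the descriptor */
    ret = dmaengine_desc_attach_metadata(desc, metadata, metadata_len);

    /* 3. submit the transfer and kick the engine */
    cookie = dmaengine_submit(desc);
    dma_async_issue_pending(chan);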
diff --git a/Documentation/driver-api/ipmb.rst b/Documentation/driver-api/ipmb.rst
index 3ec3baed84c4..209c49e05116 100644
--- a/Documentation/driver-api/ipmb.rst
+++ b/Documentation/driver-api/ipmb.rst
@@ -71,9 +71,13 @@ b) Example for device tree::
ipmb@10 {
compatible = "ipmb-dev";
reg = <0x10>;
+ i2c-protocol;
};
};
+If data transmission is to be done using raw I2C block transfers instead of
+SMBus, then "i2c-protocol" needs to be defined as above.
+
2) Manually from Linux::
modprobe ipmb-dev-int
diff --git a/Documentation/filesystems/debugfs.txt b/Documentation/filesystems/debugfs.txt
index dc497b96fa4f..55336a47a110 100644
--- a/Documentation/filesystems/debugfs.txt
+++ b/Documentation/filesystems/debugfs.txt
@@ -164,9 +164,9 @@ file.
void __iomem *base;
};
- struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+ debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
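For illustration, a hedged sketch of how a driver might populate the
structures above and register them with the (now void-returning) helper;
the register names, offsets and the ioremap()ed base are hypothetical::

    static const struct debugfs_reg32 hw_regs[] = {
            { .name = "ctrl",   .offset = 0x00 },
            { .name = "status", .offset = 0x04 },
    };

    static struct debugfs_regset32 hw_regset;

    /* in probe, after base = ioremap(...) */
    hw_regset.regs = hw_regs;
    hw_regset.nregs = ARRAY_SIZE(hw_regs);
    hw_regset.base = base;
    debugfs_create_regset32("registers", 0444, parent, &hw_regset);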
diff --git a/Documentation/filesystems/zonefs.txt b/Documentation/filesystems/zonefs.txt
index 935bf22031ca..d54fa98ac158 100644
--- a/Documentation/filesystems/zonefs.txt
+++ b/Documentation/filesystems/zonefs.txt
@@ -134,7 +134,7 @@ Sequential zone files can only be written sequentially, starting from the file
end, that is, write operations can only be append writes. Zonefs makes no
attempt at accepting random writes and will fail any write request that has a
start offset not corresponding to the end of the file, or to the end of the last
-write issued and still in-flight (for asynchrnous I/O operations).
+write issued and still in-flight (for asynchronous I/O operations).
Since dirty page writeback by the page cache does not guarantee a sequential
write pattern, zonefs prevents buffered writes and writeable shared mappings
@@ -142,7 +142,7 @@ on sequential files. Only direct I/O writes are accepted for these files.
zonefs relies on the sequential delivery of write I/O requests to the device
implemented by the block layer elevator. An elevator implementing the sequential
write feature for zoned block device (ELEVATOR_F_ZBD_SEQ_WRITE elevator feature)
-must be used. This type of elevator (e.g. mq-deadline) is the set by default
+must be used. This type of elevator (e.g. mq-deadline) is set by default
for zoned block devices on device initialization.
There are no restrictions on the type of I/O used for read operations in
@@ -196,7 +196,7 @@ additional conditions that result in I/O errors.
may still happen in the case of a partial failure of a very large direct I/O
operation split into multiple BIOs/requests or asynchronous I/O operations.
If one of the write request within the set of sequential write requests
- issued to the device fails, all write requests after queued after it will
+ issued to the device fails, all write requests queued after it will
become unaligned and fail.
* Delayed write errors: similarly to regular block devices, if the device side
@@ -207,7 +207,7 @@ additional conditions that result in I/O errors.
causing all data to be dropped after the sector that caused the error.
All I/O errors detected by zonefs are notified to the user with an error code
-return for the system call that trigered or detected the error. The recovery
+return for the system call that triggered or detected the error. The recovery
actions taken by zonefs in response to I/O errors depend on the I/O type (read
vs write) and on the reason for the error (bad sector, unaligned writes or zone
condition change).
@@ -222,7 +222,7 @@ condition change).
* A zone condition change to read-only or offline also always triggers zonefs
I/O error recovery.
-Zonefs minimal I/O error recovery may change a file size and a file access
+Zonefs minimal I/O error recovery may change a file size and file access
permissions.
* File size changes:
@@ -237,7 +237,7 @@ permissions.
A file size may also be reduced to reflect a delayed write error detected on
fsync(): in this case, the amount of data effectively written in the zone may
be less than originally indicated by the file inode size. After such I/O
- error, zonefs always fixes a file inode size to reflect the amount of data
+ error, zonefs always fixes the file inode size to reflect the amount of data
persistently stored in the file zone.
* Access permission changes:
@@ -281,11 +281,11 @@ Further notes:
permissions to read-only applies to all files. The file system is remounted
read-only.
* Access permission and file size changes due to the device transitioning zones
- to the offline condition are permanent. Remounting or reformating the device
+ to the offline condition are permanent. Remounting or reformatting the device
with mkfs.zonefs (mkzonefs) will not change back offline zone files to a good
state.
* File access permission changes to read-only due to the device transitioning
- zones to the read-only condition are permanent. Remounting or reformating
+ zones to the read-only condition are permanent. Remounting or reformatting
the device will not re-enable file write access.
* File access permission changes implied by the remount-ro, zone-ro and
zone-offline mount options are temporary for zones in a good condition.
@@ -301,13 +301,13 @@ Mount options
zonefs define the "errors=<behavior>" mount option to allow the user to specify
zonefs behavior in response to I/O errors, inode size inconsistencies or zone
-condition chages. The defined behaviors are as follow:
+condition changes. The defined behaviors are as follows:
* remount-ro (default)
* zone-ro
* zone-offline
* repair
-The I/O error actions defined for each behavior is detailed in the previous
+The I/O error actions defined for each behavior are detailed in the previous
section.
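For illustration, a minimal userspace sketch of an append write to a
sequential zone file as described above; the mount path is hypothetical,
and a 4096-byte alignment is assumed to satisfy the direct I/O constraints::

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
            int fd = open("/mnt/zonefs/seq/0", O_WRONLY | O_DIRECT);
            struct stat st;
            void *buf;

            fstat(fd, &st);                 /* st_size is the zone write pointer */
            posix_memalign(&buf, 4096, 4096);
            memset(buf, 0, 4096);
            /* writes must start at the end of the file */
            return pwrite(fd, buf, 4096, st.st_size) == 4096 ? 0 : 1;
    }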
Zonefs User Space Tools
diff --git a/Documentation/gpu/drm-kms-helpers.rst b/Documentation/gpu/drm-kms-helpers.rst
index 9668a7fe2408..ee730457bf4e 100644
--- a/Documentation/gpu/drm-kms-helpers.rst
+++ b/Documentation/gpu/drm-kms-helpers.rst
@@ -139,11 +139,17 @@ Overview
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
:doc: overview
-Default bridge callback sequence
---------------------------------
+Bridge Operations
+-----------------
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
- :doc: bridge callbacks
+ :doc: bridge operations
+
+Bridge Connector Helper
+-----------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+ :doc: overview
Bridge Helper Reference
@@ -155,6 +161,12 @@ Bridge Helper Reference
.. kernel-doc:: drivers/gpu/drm/drm_bridge.c
:export:
+Bridge Connector Helper Reference
+---------------------------------
+
+.. kernel-doc:: drivers/gpu/drm/drm_bridge_connector.c
+ :export:
+
Panel-Bridge Helper Reference
-----------------------------
diff --git a/Documentation/gpu/i915.rst b/Documentation/gpu/i915.rst
index e539c42a3e78..f6d363b6756e 100644
--- a/Documentation/gpu/i915.rst
+++ b/Documentation/gpu/i915.rst
@@ -207,10 +207,10 @@ DPIO
CSR firmware support for DMC
----------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c
:doc: csr support for dmc
-.. kernel-doc:: drivers/gpu/drm/i915/intel_csr.c
+.. kernel-doc:: drivers/gpu/drm/i915/display/intel_csr.c
:internal:
Video BIOS Table (VBT)
@@ -332,7 +332,7 @@ This process is dubbed relocation.
GEM BO Management Implementation Details
----------------------------------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_vma.h
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
:doc: Virtual Memory Address
Buffer Object Eviction
@@ -382,7 +382,7 @@ Logical Rings, Logical Ring Contexts and Execlists
Global GTT views
----------------
-.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
+.. kernel-doc:: drivers/gpu/drm/i915/i915_vma_types.h
:doc: Global GTT views
.. kernel-doc:: drivers/gpu/drm/i915/i915_gem_gtt.c
diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst
index bc869b23fc39..439656f55c5d 100644
--- a/Documentation/gpu/todo.rst
+++ b/Documentation/gpu/todo.rst
@@ -72,6 +72,28 @@ Contact: Ville Syrjälä, Daniel Vetter, driver maintainers
Level: Advanced
+Improve plane atomic_check helpers
+----------------------------------
+
+Aside from the clipped coordinates right above, there are a few suboptimal things
+with the current helpers:
+
+- drm_plane_helper_funcs->atomic_check gets called for enabled or disabled
+ planes. At best this seems to confuse drivers, at worst it means they blow up
+ when the plane is disabled without the CRTC. The only special handling is
+ resetting values in the plane state structures, which instead should be moved
+ into the drm_plane_funcs->atomic_duplicate_state functions.
+
+- Once that's done, helpers could stop calling ->atomic_check for disabled
+ planes.
+
+- Then we could go through all the drivers and remove the more-or-less confused
+ checks for plane_state->fb and plane_state->crtc.
+
+Contact: Daniel Vetter
+
+Level: Advanced
+
Convert early atomic drivers to async commit helpers
----------------------------------------------------
@@ -337,23 +359,6 @@ Contact: Sean Paul
Level: Starter
-drm_fb_helper tasks
--------------------
-
-- drm_fb_helper_restore_fbdev_mode_unlocked() should call restore_fbdev_mode()
- not the _force variant so it can bail out if there is a master. But first
- these igt tests need to be fixed: kms_fbcon_fbt@psr and
- kms_fbcon_fbt@psr-suspend.
-
-- The max connector argument for drm_fb_helper_init() isn't used anymore and
- can be removed.
-
-- The helper doesn't keep an array of connectors anymore so these can be
- removed: drm_fb_helper_single_add_all_connectors(),
- drm_fb_helper_add_one_connector() and drm_fb_helper_remove_one_connector().
-
-Level: Intermediate
-
connector register/unregister fixes
-----------------------------------
@@ -385,6 +390,20 @@ Contact: Daniel Vetter
Level: Intermediate
+Replace drm_detect_hdmi_monitor() with drm_display_info.is_hdmi
+---------------------------------------------------------------
+
+Once EDID is parsed, the monitor HDMI support information is available through
+drm_display_info.is_hdmi. Many drivers still call drm_detect_hdmi_monitor() to
+retrieve the same information, which is less efficient.
+
+Audit each individual driver calling drm_detect_hdmi_monitor() and switch to
+drm_display_info.is_hdmi if applicable.
+
+Contact: Laurent Pinchart, respective driver maintainers
+
+Level: Intermediate
+
Core refactorings
=================
diff --git a/Documentation/hwmon/adm1177.rst b/Documentation/hwmon/adm1177.rst
index c81e0b4abd28..471be1e98d6f 100644
--- a/Documentation/hwmon/adm1177.rst
+++ b/Documentation/hwmon/adm1177.rst
@@ -20,8 +20,7 @@ Usage Notes
-----------
This driver does not auto-detect devices. You will have to instantiate the
-devices explicitly. Please see Documentation/i2c/instantiating-devices for
-details.
+devices explicitly. Please see :doc:`/i2c/instantiating-devices` for details.
Sysfs entries
diff --git a/Documentation/hwmon/xdpe12284.rst b/Documentation/hwmon/xdpe12284.rst
index 6b7ae98cc536..67d1f87808e5 100644
--- a/Documentation/hwmon/xdpe12284.rst
+++ b/Documentation/hwmon/xdpe12284.rst
@@ -24,6 +24,7 @@ This driver implements support for Infineon Multi-phase XDPE122 family
dual loop voltage regulators.
The family includes XDPE12284 and XDPE12254 devices.
The devices from this family are compliant with:
+
- Intel VR13 and VR13HC rev 1.3, IMVP8 rev 1.2 and IMPVP9 rev 1.3 DC-DC
converter specification.
- Intel SVID rev 1.9. protocol.
diff --git a/Documentation/kbuild/makefiles.rst b/Documentation/kbuild/makefiles.rst
index 0e0eb2c8da7d..6bc126a14b3d 100644
--- a/Documentation/kbuild/makefiles.rst
+++ b/Documentation/kbuild/makefiles.rst
@@ -765,7 +765,7 @@ is not sufficient this sometimes needs to be explicit.
Example::
#arch/x86/boot/Makefile
- subdir- := compressed/
+ subdir- := compressed
The above assignment instructs kbuild to descend down in the
directory compressed/ when "make clean" is executed.
@@ -1379,9 +1379,6 @@ See subsequent chapter for the syntax of the Kbuild file.
in arch/$(ARCH)/include/(uapi/)/asm, Kbuild will automatically generate
a wrapper of the asm-generic one.
- The convention is to list one subdir per line and
- preferably in alphabetic order.
-
8 Kbuild Variables
==================
diff --git a/Documentation/networking/phy.rst b/Documentation/networking/phy.rst
index 1e4735cc0553..256106054c8c 100644
--- a/Documentation/networking/phy.rst
+++ b/Documentation/networking/phy.rst
@@ -487,8 +487,9 @@ phy_register_fixup_for_id()::
The stubs set one of the two matching criteria, and set the other one to
match anything.
-When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module,
-unregister fixup and free allocate memory are required.
+When phy_register_fixup() or \*_for_uid()/\*_for_id() is called at module load
+time, the module needs to unregister the fixup and free allocated memory when
+it's unloaded.
Call one of the following functions before unloading the module::
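As an illustrative sketch (the PHY ID and mask are hypothetical) of pairing
registration at module load with unregistration at unload::

    static int example_phy_fixup(struct phy_device *phydev)
    {
            /* adjust phydev state as needed */
            return 0;
    }

    static int __init example_init(void)
    {
            return phy_register_fixup_for_uid(0x00112233, 0xffffffff,
                                              example_phy_fixup);
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
            phy_unregister_fixup_for_uid(0x00112233, 0xffffffff);
    }
    module_exit(example_exit);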
diff --git a/Documentation/power/index.rst b/Documentation/power/index.rst
index 002e42745263..ced8a8007434 100644
--- a/Documentation/power/index.rst
+++ b/Documentation/power/index.rst
@@ -13,7 +13,6 @@ Power Management
drivers-testing
energy-model
freezing-of-tasks
- interface
opp
pci
pm_qos_interface
diff --git a/Documentation/process/embargoed-hardware-issues.rst b/Documentation/process/embargoed-hardware-issues.rst
index 33edae654599..a19d084f9b2c 100644
--- a/Documentation/process/embargoed-hardware-issues.rst
+++ b/Documentation/process/embargoed-hardware-issues.rst
@@ -244,23 +244,23 @@ disclosure of a particular issue, unless requested by a response team or by
an involved disclosed party. The current ambassadors list:
============= ========================================================
- ARM
+ ARM Grant Likely <[email protected]>
AMD Tom Lendacky <[email protected]>
IBM
Intel Tony Luck <[email protected]>
Qualcomm Trilok Soni <[email protected]>
- Microsoft Sasha Levin <[email protected]>
+ Microsoft James Morris <[email protected]>
VMware
Xen Andrew Cooper <[email protected]>
- Canonical Tyler Hicks <[email protected]>
+ Canonical John Johansen <[email protected]>
Debian Ben Hutchings <[email protected]>
Oracle Konrad Rzeszutek Wilk <[email protected]>
Red Hat Josh Poimboeuf <[email protected]>
SUSE Jiri Kosina <[email protected]>
- Amazon Peter Bowen <[email protected]>
+ Amazon
Google Kees Cook <[email protected]>
============= ========================================================
diff --git a/Documentation/sphinx/parallel-wrapper.sh b/Documentation/sphinx/parallel-wrapper.sh
index 7daf5133bdd3..e54c44ce117d 100644
--- a/Documentation/sphinx/parallel-wrapper.sh
+++ b/Documentation/sphinx/parallel-wrapper.sh
@@ -30,4 +30,4 @@ if [ -n "$parallel" ] ; then
parallel="-j$parallel"
fi
-exec "$sphinx" "$parallel" "$@"
+exec "$sphinx" $parallel "$@"
diff --git a/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst b/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst
index b93f1af68261..88273ebe7823 100644
--- a/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst
+++ b/Documentation/translations/zh_CN/process/embargoed-hardware-issues.rst
@@ -183,7 +183,7 @@ CVE分配
VMware
Xen Andrew Cooper <[email protected]>
- Canonical Tyler Hicks <[email protected]>
+ Canonical John Johansen <[email protected]>
Debian Ben Hutchings <[email protected]>
Oracle Konrad Rzeszutek Wilk <[email protected]>
Red Hat Josh Poimboeuf <[email protected]>
diff --git a/Documentation/virtual/guest-halt-polling.txt b/Documentation/virt/guest-halt-polling.rst
index b3a2a294532d..b4e747942417 100644
--- a/Documentation/virtual/guest-halt-polling.txt
+++ b/Documentation/virt/guest-halt-polling.rst
@@ -1,9 +1,11 @@
+==================
Guest halt polling
==================
The cpuidle_haltpoll driver, with the haltpoll governor, allows
the guest vcpus to poll for a specified amount of time before
halting.
+
This provides the following benefits to host side polling:
1) The POLL flag is set while polling is performed, which allows
@@ -29,18 +31,21 @@ Module Parameters
The haltpoll governor has 5 tunable module parameters:
1) guest_halt_poll_ns:
+
Maximum amount of time, in nanoseconds, that polling is
performed before halting.
Default: 200000
2) guest_halt_poll_shrink:
+
Division factor used to shrink per-cpu guest_halt_poll_ns when
wakeup event occurs after the global guest_halt_poll_ns.
Default: 2
3) guest_halt_poll_grow:
+
Multiplication factor used to grow per-cpu guest_halt_poll_ns
when event occurs after per-cpu guest_halt_poll_ns
but before global guest_halt_poll_ns.
@@ -48,6 +53,7 @@ but before global guest_halt_poll_ns.
Default: 2
4) guest_halt_poll_grow_start:
+
The per-cpu guest_halt_poll_ns eventually reaches zero
in case of an idle system. This value sets the initial
per-cpu guest_halt_poll_ns when growing. This can
@@ -66,7 +72,7 @@ high once achieves global guest_halt_poll_ns value).
Default: Y
-The module parameters can be set from the debugfs files in:
+The module parameters can be set from the debugfs files in::
/sys/module/haltpoll/parameters/
@@ -74,5 +80,5 @@ Further Notes
=============
- Care should be taken when setting the guest_halt_poll_ns parameter as a
-large value has the potential to drive the cpu usage to 100% on a machine which
-would be almost entirely idle otherwise.
+ large value has the potential to drive the cpu usage to 100% on a machine
+ which would be almost entirely idle otherwise.
diff --git a/Documentation/virt/index.rst b/Documentation/virt/index.rst
index 062ffb527043..de1ab81df958 100644
--- a/Documentation/virt/index.rst
+++ b/Documentation/virt/index.rst
@@ -8,7 +8,9 @@ Linux Virtualization Support
:maxdepth: 2
kvm/index
+ uml/user_mode_linux
paravirt_ops
+ guest-halt-polling
.. only:: html and subproject
diff --git a/Documentation/virt/kvm/api.txt b/Documentation/virt/kvm/api.rst
index c6e1ce5d40de..ebd383fba939 100644
--- a/Documentation/virt/kvm/api.txt
+++ b/Documentation/virt/kvm/api.rst
@@ -1,8 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================================================================
The Definitive KVM (Kernel-based Virtual Machine) API Documentation
===================================================================
1. General description
-----------------------
+======================
The kvm API is a set of ioctls that are issued to control various aspects
of a virtual machine. The ioctls belong to the following classes:
@@ -33,7 +36,7 @@ of a virtual machine. The ioctls belong to the following classes:
was used to create the VM.
2. File descriptors
--------------------
+===================
The kvm API is centered around file descriptors. An initial
open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
@@ -70,7 +73,7 @@ the VM is shut down.
3. Extensions
--------------
+=============
As of Linux 2.6.22, the KVM ABI has been stabilized: no backward
incompatible changes are allowed. However, there is an extension
@@ -84,13 +87,14 @@ set of ioctls is available for application use.
4. API description
-------------------
+==================
This section describes ioctls that can be used to control kvm guests.
For each ioctl, the following information is provided along with a
description:
- Capability: which KVM extension provides this ioctl. Can be 'basic',
+ Capability:
+ which KVM extension provides this ioctl. Can be 'basic',
which means that it will be provided by any kernel that supports
API version 12 (see section 4.1), a KVM_CAP_xyz constant, which
means availability needs to be checked with KVM_CHECK_EXTENSION
@@ -99,24 +103,29 @@ description:
availability: for kernels that don't support the ioctl,
the ioctl returns -ENOTTY.
- Architectures: which instruction set architectures provide this ioctl.
+ Architectures:
+ which instruction set architectures provide this ioctl.
x86 includes both i386 and x86_64.
- Type: system, vm, or vcpu.
+ Type:
+ system, vm, or vcpu.
- Parameters: what parameters are accepted by the ioctl.
+ Parameters:
+ what parameters are accepted by the ioctl.
- Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
+ Returns:
+ the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
4.1 KVM_GET_API_VERSION
+-----------------------
-Capability: basic
-Architectures: all
-Type: system ioctl
-Parameters: none
-Returns: the constant KVM_API_VERSION (=12)
+:Capability: basic
+:Architectures: all
+:Type: system ioctl
+:Parameters: none
+:Returns: the constant KVM_API_VERSION (=12)
This identifies the API version as the stable kvm API. It is not
expected that this number will change. However, Linux 2.6.20 and
@@ -127,12 +136,13 @@ described as 'basic' will be available.
4.2 KVM_CREATE_VM
+-----------------
-Capability: basic
-Architectures: all
-Type: system ioctl
-Parameters: machine type identifier (KVM_VM_*)
-Returns: a VM fd that can be used to control the new virtual machine.
+:Capability: basic
+:Architectures: all
+:Type: system ioctl
+:Parameters: machine type identifier (KVM_VM_*)
+:Returns: a VM fd that can be used to control the new virtual machine.
The new VM has no virtual cpus and no memory.
You probably want to use 0 as machine type.
@@ -155,17 +165,17 @@ identifier, where IPA_Bits is the maximum width of any physical
address used by the VM. The IPA_Bits is encoded in bits[7-0] of the
machine type identifier.
-e.g, to configure a guest to use 48bit physical address size :
+e.g., to configure a guest to use a 48-bit physical address size::
vm_fd = ioctl(dev_fd, KVM_CREATE_VM, KVM_VM_TYPE_ARM_IPA_SIZE(48));
-The requested size (IPA_Bits) must be :
- 0 - Implies default size, 40bits (for backward compatibility)
+The requested size (IPA_Bits) must be:
- or
-
- N - Implies N bits, where N is a positive integer such that,
+ == =========================================================
+ 0 Implies default size, 40bits (for backward compatibility)
+ N Implies N bits, where N is a positive integer such that,
32 <= N <= Host_IPA_Limit
+ == =========================================================
Host_IPA_Limit is the maximum possible value for IPA_Bits on the host and
is dependent on the CPU capability and the kernel configuration. The limit can
@@ -179,21 +189,28 @@ host physical address translations).
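For illustration, a minimal userspace sketch of the sequence described so
far (open the kvm subsystem, check the API version, create a VM); error
handling is elided::

    #include <fcntl.h>
    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    int dev_fd = open("/dev/kvm", O_RDWR);
    int ver = ioctl(dev_fd, KVM_GET_API_VERSION, 0);  /* expect 12 */
    int vm_fd = ioctl(dev_fd, KVM_CREATE_VM, 0);      /* machine type 0 */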
4.3 KVM_GET_MSR_INDEX_LIST, KVM_GET_MSR_FEATURE_INDEX_LIST
+----------------------------------------------------------
+
+:Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
+:Architectures: x86
+:Type: system ioctl
+:Parameters: struct kvm_msr_list (in/out)
+:Returns: 0 on success; -1 on error
-Capability: basic, KVM_CAP_GET_MSR_FEATURES for KVM_GET_MSR_FEATURE_INDEX_LIST
-Architectures: x86
-Type: system ioctl
-Parameters: struct kvm_msr_list (in/out)
-Returns: 0 on success; -1 on error
Errors:
- EFAULT: the msr index list cannot be read from or written to
- E2BIG: the msr index list is to be to fit in the array specified by
+
+ ====== ============================================================
+ EFAULT the msr index list cannot be read from or written to
+ E2BIG the msr index list is too big to fit in the array specified by
the user.
+ ====== ============================================================
-struct kvm_msr_list {
+::
+
+ struct kvm_msr_list {
__u32 nmsrs; /* number of msrs in entries */
__u32 indices[0];
-};
+ };
The user fills in the size of the indices array in nmsrs, and in return
kvm adjusts nmsrs to reflect the actual number of msrs and fills in the
@@ -214,12 +231,13 @@ otherwise.
4.4 KVM_CHECK_EXTENSION
+-----------------------
-Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl
-Architectures: all
-Type: system ioctl, vm ioctl
-Parameters: extension identifier (KVM_CAP_*)
-Returns: 0 if unsupported; 1 (or some other positive integer) if supported
+:Capability: basic, KVM_CAP_CHECK_EXTENSION_VM for vm ioctl
+:Architectures: all
+:Type: system ioctl, vm ioctl
+:Parameters: extension identifier (KVM_CAP_*)
+:Returns: 0 if unsupported; 1 (or some other positive integer) if supported
The API allows the application to query about extensions to the core
kvm API. Userspace passes an extension identifier (an integer) and
@@ -232,12 +250,13 @@ It is thus encouraged to use the vm ioctl to query for capabilities (available
with KVM_CAP_CHECK_EXTENSION_VM on the vm fd)
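A short sketch of the query pattern, using the vm_fd from the earlier
sketch and an arbitrary capability constant::

    /* 0 if unsupported, 1 (or another positive integer) if supported */
    int has_irqchip = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_IRQCHIP);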
4.5 KVM_GET_VCPU_MMAP_SIZE
+--------------------------
-Capability: basic
-Architectures: all
-Type: system ioctl
-Parameters: none
-Returns: size of vcpu mmap area, in bytes
+:Capability: basic
+:Architectures: all
+:Type: system ioctl
+:Parameters: none
+:Returns: size of vcpu mmap area, in bytes
The KVM_RUN ioctl (cf.) communicates with userspace via a shared
memory region. This ioctl returns the size of that region. See the
@@ -245,23 +264,25 @@ KVM_RUN documentation for details.
4.6 KVM_SET_MEMORY_REGION
+-------------------------
-Capability: basic
-Architectures: all
-Type: vm ioctl
-Parameters: struct kvm_memory_region (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_memory_region (in)
+:Returns: 0 on success, -1 on error
This ioctl is obsolete and has been removed.
4.7 KVM_CREATE_VCPU
+-------------------
-Capability: basic
-Architectures: all
-Type: vm ioctl
-Parameters: vcpu id (apic id on x86)
-Returns: vcpu fd on success, -1 on error
+:Capability: basic
+:Architectures: all
+:Type: vm ioctl
+:Parameters: vcpu id (apic id on x86)
+:Returns: vcpu fd on success, -1 on error
This API adds a vcpu to a virtual machine. No more than max_vcpus may be added.
The vcpu id is an integer in the range [0, max_vcpu_id).
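Continuing the earlier sketch, creating vcpu 0 and mapping its shared
kvm_run region (see KVM_GET_VCPU_MMAP_SIZE and KVM_RUN below); error
handling is again elided::

    #include <sys/mman.h>

    int vcpu_fd = ioctl(vm_fd, KVM_CREATE_VCPU, 0);   /* vcpu id 0 */
    long mmap_size = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
    struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
                               MAP_SHARED, vcpu_fd, 0);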
@@ -302,22 +323,25 @@ cpu's hardware control block.
4.8 KVM_GET_DIRTY_LOG (vm ioctl)
+--------------------------------
-Capability: basic
-Architectures: all
-Type: vm ioctl
-Parameters: struct kvm_dirty_log (in/out)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_dirty_log (in/out)
+:Returns: 0 on success, -1 on error
-/* for KVM_GET_DIRTY_LOG */
-struct kvm_dirty_log {
+::
+
+ /* for KVM_GET_DIRTY_LOG */
+ struct kvm_dirty_log {
__u32 slot;
__u32 padding;
union {
void __user *dirty_bitmap; /* one bit per page */
__u64 padding;
};
-};
+ };
Given a memory slot, return a bitmap containing any pages dirtied
since the last call to this ioctl. Bit 0 is the first page in the
@@ -334,25 +358,31 @@ KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is enabled. For more information,
see the description of the capability.
4.9 KVM_SET_MEMORY_ALIAS
+------------------------
-Capability: basic
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_memory_alias (in)
-Returns: 0 (success), -1 (error)
+:Capability: basic
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_memory_alias (in)
+:Returns: 0 (success), -1 (error)
This ioctl is obsolete and has been removed.
4.10 KVM_RUN
+------------
+
+:Capability: basic
+:Architectures: all
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0 on success, -1 on error
-Capability: basic
-Architectures: all
-Type: vcpu ioctl
-Parameters: none
-Returns: 0 on success, -1 on error
Errors:
- EINTR: an unmasked signal is pending
+
+ ===== =============================
+ EINTR an unmasked signal is pending
+ ===== =============================
This ioctl is used to run a guest virtual cpu. While there are no
explicit parameters, there is an implicit parameter block that can be
@@ -362,42 +392,46 @@ kvm_run' (see below).
4.11 KVM_GET_REGS
+-----------------
-Capability: basic
-Architectures: all except ARM, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_regs (out)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: all except ARM, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_regs (out)
+:Returns: 0 on success, -1 on error
Reads the general purpose registers from the vcpu.
-/* x86 */
-struct kvm_regs {
+::
+
+ /* x86 */
+ struct kvm_regs {
/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
__u64 rax, rbx, rcx, rdx;
__u64 rsi, rdi, rsp, rbp;
__u64 r8, r9, r10, r11;
__u64 r12, r13, r14, r15;
__u64 rip, rflags;
-};
+ };
-/* mips */
-struct kvm_regs {
+ /* mips */
+ struct kvm_regs {
/* out (KVM_GET_REGS) / in (KVM_SET_REGS) */
__u64 gpr[32];
__u64 hi;
__u64 lo;
__u64 pc;
-};
+ };
4.12 KVM_SET_REGS
+-----------------
-Capability: basic
-Architectures: all except ARM, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_regs (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: all except ARM, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_regs (in)
+:Returns: 0 on success, -1 on error
Writes the general purpose registers into the vcpu.
@@ -405,17 +439,20 @@ See KVM_GET_REGS for the data structure.
4.13 KVM_GET_SREGS
+------------------
-Capability: basic
-Architectures: x86, ppc
-Type: vcpu ioctl
-Parameters: struct kvm_sregs (out)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: x86, ppc
+:Type: vcpu ioctl
+:Parameters: struct kvm_sregs (out)
+:Returns: 0 on success, -1 on error
Reads special registers from the vcpu.
-/* x86 */
-struct kvm_sregs {
+::
+
+ /* x86 */
+ struct kvm_sregs {
struct kvm_segment cs, ds, es, fs, gs, ss;
struct kvm_segment tr, ldt;
struct kvm_dtable gdt, idt;
@@ -423,9 +460,9 @@ struct kvm_sregs {
__u64 efer;
__u64 apic_base;
__u64 interrupt_bitmap[(KVM_NR_INTERRUPTS + 63) / 64];
-};
+ };
-/* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */
+ /* ppc -- see arch/powerpc/include/uapi/asm/kvm.h */
interrupt_bitmap is a bitmap of pending external interrupts. At most
one bit may be set. This interrupt has been acknowledged by the APIC
@@ -433,29 +470,33 @@ but not yet injected into the cpu core.
4.14 KVM_SET_SREGS
+------------------
-Capability: basic
-Architectures: x86, ppc
-Type: vcpu ioctl
-Parameters: struct kvm_sregs (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: x86, ppc
+:Type: vcpu ioctl
+:Parameters: struct kvm_sregs (in)
+:Returns: 0 on success, -1 on error
Writes special registers into the vcpu. See KVM_GET_SREGS for the
data structures.
4.15 KVM_TRANSLATE
+------------------
-Capability: basic
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_translation (in/out)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_translation (in/out)
+:Returns: 0 on success, -1 on error
Translates a virtual address according to the vcpu's current address
translation mode.
-struct kvm_translation {
+::
+
+ struct kvm_translation {
/* in */
__u64 linear_address;
@@ -465,59 +506,68 @@ struct kvm_translation {
__u8 writeable;
__u8 usermode;
__u8 pad[5];
-};
+ };
4.16 KVM_INTERRUPT
+------------------
-Capability: basic
-Architectures: x86, ppc, mips
-Type: vcpu ioctl
-Parameters: struct kvm_interrupt (in)
-Returns: 0 on success, negative on failure.
+:Capability: basic
+:Architectures: x86, ppc, mips
+:Type: vcpu ioctl
+:Parameters: struct kvm_interrupt (in)
+:Returns: 0 on success, negative on failure.
Queues a hardware interrupt vector to be injected.
-/* for KVM_INTERRUPT */
-struct kvm_interrupt {
+::
+
+ /* for KVM_INTERRUPT */
+ struct kvm_interrupt {
/* in */
__u32 irq;
-};
+ };
X86:
+^^^^
+
+:Returns:
-Returns: 0 on success,
- -EEXIST if an interrupt is already enqueued
- -EINVAL the the irq number is invalid
- -ENXIO if the PIC is in the kernel
- -EFAULT if the pointer is invalid
+ ========= ===================================
+ 0 on success,
+ -EEXIST if an interrupt is already enqueued
+ -EINVAL the irq number is invalid
+ -ENXIO if the PIC is in the kernel
+ -EFAULT if the pointer is invalid
+ ========= ===================================
Note 'irq' is an interrupt vector, not an interrupt pin or line. This
ioctl is useful if the in-kernel PIC is not used.
PPC:
+^^^^
Queues an external interrupt to be injected. This ioctl is overloaded
with 3 different irq values:
a) KVM_INTERRUPT_SET
- This injects an edge type external interrupt into the guest once it's ready
- to receive interrupts. When injected, the interrupt is done.
+ This injects an edge type external interrupt into the guest once it's ready
+ to receive interrupts. When injected, the interrupt is done.
b) KVM_INTERRUPT_UNSET
- This unsets any pending interrupt.
+ This unsets any pending interrupt.
- Only available with KVM_CAP_PPC_UNSET_IRQ.
+ Only available with KVM_CAP_PPC_UNSET_IRQ.
c) KVM_INTERRUPT_SET_LEVEL
- This injects a level type external interrupt into the guest context. The
- interrupt stays pending until a specific ioctl with KVM_INTERRUPT_UNSET
- is triggered.
+ This injects a level type external interrupt into the guest context. The
+ interrupt stays pending until a specific ioctl with KVM_INTERRUPT_UNSET
+ is triggered.
- Only available with KVM_CAP_PPC_IRQ_LEVEL.
+ Only available with KVM_CAP_PPC_IRQ_LEVEL.
Note that any value for 'irq' other than the ones stated above is invalid
and incurs unexpected behavior.
@@ -525,6 +575,7 @@ and incurs unexpected behavior.
This is an asynchronous vcpu ioctl and can be invoked from any thread.
MIPS:
+^^^^^
Queues an external interrupt to be injected into the virtual CPU. A negative
interrupt number dequeues the interrupt.
@@ -533,24 +584,26 @@ This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.17 KVM_DEBUG_GUEST
+--------------------
-Capability: basic
-Architectures: none
-Type: vcpu ioctl
-Parameters: none)
-Returns: -1 on error
+:Capability: basic
+:Architectures: none
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: -1 on error
Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
4.18 KVM_GET_MSRS
+-----------------
-Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
-Architectures: x86
-Type: system ioctl, vcpu ioctl
-Parameters: struct kvm_msrs (in/out)
-Returns: number of msrs successfully returned;
- -1 on error
+:Capability: basic (vcpu), KVM_CAP_GET_MSR_FEATURES (system)
+:Architectures: x86
+:Type: system ioctl, vcpu ioctl
+:Parameters: struct kvm_msrs (in/out)
+:Returns: number of msrs successfully returned;
+ -1 on error
When used as a system ioctl:
Reads the values of MSR-based features that are available for the VM. This
@@ -562,18 +615,20 @@ When used as a vcpu ioctl:
Reads model-specific registers from the vcpu. Supported msr indices can
be obtained using KVM_GET_MSR_INDEX_LIST in a system ioctl.
-struct kvm_msrs {
+::
+
+ struct kvm_msrs {
__u32 nmsrs; /* number of msrs in entries */
__u32 pad;
struct kvm_msr_entry entries[0];
-};
+ };
-struct kvm_msr_entry {
+ struct kvm_msr_entry {
__u32 index;
__u32 reserved;
__u64 data;
-};
+ };
Application code should set the 'nmsrs' member (which indicates the
size of the entries array) and the 'index' member of each array entry.
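For illustration, a hedged sketch reading a single MSR (here index 0x10,
i.e. MSR_IA32_TSC) through a vcpu_fd obtained earlier; the flexible array
forces a heap allocation::

    #include <stdlib.h>

    struct kvm_msrs *msrs = calloc(1, sizeof(*msrs) +
                                      sizeof(struct kvm_msr_entry));
    msrs->nmsrs = 1;
    msrs->entries[0].index = 0x10;              /* MSR_IA32_TSC */
    int n = ioctl(vcpu_fd, KVM_GET_MSRS, msrs); /* msrs successfully read */
    __u64 tsc = msrs->entries[0].data;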
@@ -581,12 +636,13 @@ kvm will fill in the 'data' member.
4.19 KVM_SET_MSRS
+-----------------
-Capability: basic
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_msrs (in)
-Returns: number of msrs successfully set (see below), -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_msrs (in)
+:Returns: number of msrs successfully set (see below), -1 on error
Writes model-specific registers to the vcpu. See KVM_GET_MSRS for the
data structures.
@@ -602,41 +658,44 @@ MSRs that have been set successfully.
4.20 KVM_SET_CPUID
+------------------
-Capability: basic
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_cpuid (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_cpuid (in)
+:Returns: 0 on success, -1 on error
Defines the vcpu responses to the cpuid instruction. Applications
should use the KVM_SET_CPUID2 ioctl if available.
+::
-struct kvm_cpuid_entry {
+ struct kvm_cpuid_entry {
__u32 function;
__u32 eax;
__u32 ebx;
__u32 ecx;
__u32 edx;
__u32 padding;
-};
+ };
-/* for KVM_SET_CPUID */
-struct kvm_cpuid {
+ /* for KVM_SET_CPUID */
+ struct kvm_cpuid {
__u32 nent;
__u32 padding;
struct kvm_cpuid_entry entries[0];
-};
+ };
4.21 KVM_SET_SIGNAL_MASK
+------------------------
-Capability: basic
-Architectures: all
-Type: vcpu ioctl
-Parameters: struct kvm_signal_mask (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: all
+:Type: vcpu ioctl
+:Parameters: struct kvm_signal_mask (in)
+:Returns: 0 on success, -1 on error
Defines which signals are blocked during execution of KVM_RUN. This
signal mask temporarily overrides the thread's signal mask. Any
@@ -646,25 +705,30 @@ their traditional behaviour) will cause KVM_RUN to return with -EINTR.
Note the signal will only be delivered if not blocked by the original
signal mask.
-/* for KVM_SET_SIGNAL_MASK */
-struct kvm_signal_mask {
+::
+
+ /* for KVM_SET_SIGNAL_MASK */
+ struct kvm_signal_mask {
__u32 len;
__u8 sigset[0];
-};
+ };
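For illustration, a sketch that blocks everything but lets a chosen signal
interrupt KVM_RUN; the 8-byte kernel sigset length is an assumption that
holds for x86-64::

    #include <signal.h>
    #include <stdlib.h>
    #include <string.h>

    sigset_t blocked;
    sigfillset(&blocked);
    sigdelset(&blocked, SIGUSR1);   /* SIGUSR1 makes KVM_RUN return -EINTR */

    struct kvm_signal_mask *sm = malloc(sizeof(*sm) + 8);
    sm->len = 8;                    /* kernel sigset size, assumed x86-64 */
    memcpy(sm->sigset, &blocked, 8);
    ioctl(vcpu_fd, KVM_SET_SIGNAL_MASK, sm);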
4.22 KVM_GET_FPU
+----------------
-Capability: basic
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_fpu (out)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_fpu (out)
+:Returns: 0 on success, -1 on error
Reads the floating point state from the vcpu.
-/* for KVM_GET_FPU and KVM_SET_FPU */
-struct kvm_fpu {
+::
+
+ /* for KVM_GET_FPU and KVM_SET_FPU */
+ struct kvm_fpu {
__u8 fpr[8][16];
__u16 fcw;
__u16 fsw;
@@ -676,21 +740,24 @@ struct kvm_fpu {
__u8 xmm[16][16];
__u32 mxcsr;
__u32 pad2;
-};
+ };
4.23 KVM_SET_FPU
+----------------
-Capability: basic
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_fpu (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_fpu (in)
+:Returns: 0 on success, -1 on error
Writes the floating point state to the vcpu.
-/* for KVM_GET_FPU and KVM_SET_FPU */
-struct kvm_fpu {
+::
+
+ /* for KVM_GET_FPU and KVM_SET_FPU */
+ struct kvm_fpu {
__u8 fpr[8][16];
__u16 fcw;
__u16 fsw;
@@ -702,16 +769,17 @@ struct kvm_fpu {
__u8 xmm[16][16];
__u32 mxcsr;
__u32 pad2;
-};
+ };
4.24 KVM_CREATE_IRQCHIP
+-----------------------
-Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390)
-Architectures: x86, ARM, arm64, s390
-Type: vm ioctl
-Parameters: none
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQCHIP, KVM_CAP_S390_IRQCHIP (s390)
+:Architectures: x86, ARM, arm64, s390
+:Type: vm ioctl
+:Parameters: none
+:Returns: 0 on success, -1 on error
Creates an interrupt controller model in the kernel.
On x86, creates a virtual ioapic, a virtual PIC (two PICs, nested), and sets up
@@ -727,12 +795,13 @@ before KVM_CREATE_IRQCHIP can be used.
4.25 KVM_IRQ_LINE
+-----------------
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86, arm, arm64
-Type: vm ioctl
-Parameters: struct kvm_irq_level
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQCHIP
+:Architectures: x86, arm, arm64
+:Type: vm ioctl
+:Parameters: struct kvm_irq_level
+:Returns: 0 on success, -1 on error
Sets the level of a GSI input to the interrupt controller model in the kernel.
On some architectures it is required that an interrupt controller model has
@@ -756,16 +825,20 @@ of course).
ARM/arm64 can signal an interrupt either at the CPU level, or at the
in-kernel irqchip (GIC), and for in-kernel irqchip can tell the GIC to
use PPIs designated for specific cpus. The irq field is interpreted
-like this:
+like this::
 bits: | 31 ... 28 | 27 ... 24 | 23 ... 16 | 15 ... 0 |
field: | vcpu2_index | irq_type | vcpu_index | irq_id |
The irq_type field has the following values:
-- irq_type[0]: out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
-- irq_type[1]: in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
+
+- irq_type[0]:
+ out-of-kernel GIC: irq_id 0 is IRQ, irq_id 1 is FIQ
+- irq_type[1]:
+ in-kernel GIC: SPI, irq_id between 32 and 1019 (incl.)
(the vcpu_index field is ignored)
-- irq_type[2]: in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
+- irq_type[2]:
+ in-kernel GIC: PPI, irq_id between 16 and 31 (incl.)
(The irq_id field thus corresponds nicely to the IRQ ID in the ARM GIC specs)
@@ -779,27 +852,32 @@ Note that on arm/arm64, the KVM_CAP_IRQCHIP capability only conditions
injection of interrupts for the in-kernel irqchip. KVM_IRQ_LINE can always
be used for a userspace interrupt controller.
-struct kvm_irq_level {
+::
+
+ struct kvm_irq_level {
union {
__u32 irq; /* GSI */
__s32 status; /* not used for KVM_IRQ_LEVEL */
};
__u32 level; /* 0 or 1 */
-};
+ };
4.26 KVM_GET_IRQCHIP
+--------------------
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_irqchip (in/out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQCHIP
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_irqchip (in/out)
+:Returns: 0 on success, -1 on error
Reads the state of a kernel interrupt controller created with
KVM_CREATE_IRQCHIP into a buffer provided by the caller.
-struct kvm_irqchip {
+::
+
+ struct kvm_irqchip {
__u32 chip_id; /* 0 = PIC1, 1 = PIC2, 2 = IOAPIC */
__u32 pad;
union {
@@ -807,21 +885,24 @@ struct kvm_irqchip {
struct kvm_pic_state pic;
struct kvm_ioapic_state ioapic;
} chip;
-};
+ };
4.27 KVM_SET_IRQCHIP
+--------------------
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_irqchip (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQCHIP
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_irqchip (in)
+:Returns: 0 on success, -1 on error
Sets the state of a kernel interrupt controller created with
KVM_CREATE_IRQCHIP from a buffer provided by the caller.
-struct kvm_irqchip {
+::
+
+ struct kvm_irqchip {
__u32 chip_id; /* 0 = PIC1, 1 = PIC2, 2 = IOAPIC */
__u32 pad;
union {
@@ -829,16 +910,17 @@ struct kvm_irqchip {
struct kvm_pic_state pic;
struct kvm_ioapic_state ioapic;
} chip;
-};
+ };
4.28 KVM_XEN_HVM_CONFIG
+-----------------------
-Capability: KVM_CAP_XEN_HVM
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_xen_hvm_config (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_XEN_HVM
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_xen_hvm_config (in)
+:Returns: 0 on success, -1 on error
Sets the MSR that the Xen HVM guest uses to initialize its hypercall
page, and provides the starting address and size of the hypercall
@@ -846,7 +928,9 @@ blobs in userspace. When the guest writes the MSR, kvm copies one
page of a blob (32- or 64-bit, depending on the vcpu mode) to guest
memory.
-struct kvm_xen_hvm_config {
+::
+
+ struct kvm_xen_hvm_config {
__u32 flags;
__u32 msr;
__u64 blob_addr_32;
@@ -854,16 +938,17 @@ struct kvm_xen_hvm_config {
__u8 blob_size_32;
__u8 blob_size_64;
__u8 pad2[30];
-};
+ };
4.29 KVM_GET_CLOCK
+------------------
-Capability: KVM_CAP_ADJUST_CLOCK
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_clock_data (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_ADJUST_CLOCK
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_clock_data (out)
+:Returns: 0 on success, -1 on error
Gets the current timestamp of kvmclock as seen by the current guest. In
conjunction with KVM_SET_CLOCK, it is used to ensure monotonicity on scenarios
@@ -880,47 +965,56 @@ with KVM_SET_CLOCK. KVM will try to make all VCPUs follow this clock,
but the exact value read by each VCPU could differ, because the host
TSC is not stable.
-struct kvm_clock_data {
+::
+
+ struct kvm_clock_data {
__u64 clock; /* kvmclock current value */
__u32 flags;
__u32 pad[9];
-};
+ };
4.30 KVM_SET_CLOCK
+------------------
-Capability: KVM_CAP_ADJUST_CLOCK
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_clock_data (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_ADJUST_CLOCK
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_clock_data (in)
+:Returns: 0 on success, -1 on error
Sets the current timestamp of kvmclock to the value specified in its parameter.
In conjunction with KVM_GET_CLOCK, it is used to ensure monotonicity on scenarios
such as migration.
-struct kvm_clock_data {
+::
+
+ struct kvm_clock_data {
__u64 clock; /* kvmclock current value */
__u32 flags;
__u32 pad[9];
-};
+ };
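As an illustrative (not normative) sketch of how these two calls pair up
across a save/restore cycle, assuming vm_fd descriptors from KVM_CREATE_VM
and the usual <linux/kvm.h> and <sys/ioctl.h> includes::

  /* Source side: capture the current kvmclock value. */
  static int save_clock(int vm_fd, struct kvm_clock_data *data)
  {
          return ioctl(vm_fd, KVM_GET_CLOCK, data);
  }

  /* Destination side: restoring the value keeps guest time monotonic.
   * Clearing flags is an assumption: KVM_GET_CLOCK may set output flags
   * that KVM_SET_CLOCK does not accept as input. */
  static int restore_clock(int vm_fd, struct kvm_clock_data *data)
  {
          data->flags = 0;
          return ioctl(vm_fd, KVM_SET_CLOCK, data);
  }
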
4.31 KVM_GET_VCPU_EVENTS
+------------------------
-Capability: KVM_CAP_VCPU_EVENTS
-Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86, arm, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_vcpu_event (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_VCPU_EVENTS
+:Extended by: KVM_CAP_INTR_SHADOW
+:Architectures: x86, arm, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_vcpu_event (out)
+:Returns: 0 on success, -1 on error
X86:
+^^^^
Gets currently pending exceptions, interrupts, and NMIs as well as related
states of the vcpu.
-struct kvm_vcpu_events {
+::
+
+ struct kvm_vcpu_events {
struct {
__u8 injected;
__u8 nr;
@@ -951,7 +1045,7 @@ struct kvm_vcpu_events {
__u8 reserved[27];
__u8 exception_has_payload;
__u64 exception_payload;
-};
+ };
The following bits are defined in the flags field:
@@ -967,6 +1061,7 @@ The following bits are defined in the flags field:
KVM_CAP_EXCEPTION_PAYLOAD is enabled.
ARM/ARM64:
+^^^^^^^^^^
If the guest accesses a device that is being emulated by the host kernel in
such a way that a real device would generate a physical SError, KVM may make
@@ -1006,8 +1101,9 @@ It is not possible to read back a pending external abort (injected via
KVM_SET_VCPU_EVENTS or otherwise) because such an exception is always delivered
directly to the virtual CPU.
+::
-struct kvm_vcpu_events {
+ struct kvm_vcpu_events {
struct {
__u8 serror_pending;
__u8 serror_has_esr;
@@ -1017,18 +1113,20 @@ struct kvm_vcpu_events {
__u64 serror_esr;
} exception;
__u32 reserved[12];
-};
+ };
4.32 KVM_SET_VCPU_EVENTS
+------------------------
-Capability: KVM_CAP_VCPU_EVENTS
-Extended by: KVM_CAP_INTR_SHADOW
-Architectures: x86, arm, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_vcpu_event (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_VCPU_EVENTS
+:Extended by: KVM_CAP_INTR_SHADOW
+:Architectures: x86, arm, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_vcpu_event (in)
+:Returns: 0 on success, -1 on error
X86:
+^^^^
Set pending exceptions, interrupts, and NMIs as well as related states of the
vcpu.
@@ -1040,9 +1138,11 @@ from the update. These fields are nmi.pending, sipi_vector, smi.smm,
smi.pending. Keep the corresponding bits in the flags field cleared to
suppress overwriting the current in-kernel state. The bits are:
-KVM_VCPUEVENT_VALID_NMI_PENDING - transfer nmi.pending to the kernel
-KVM_VCPUEVENT_VALID_SIPI_VECTOR - transfer sipi_vector
-KVM_VCPUEVENT_VALID_SMM - transfer the smi sub-struct.
+=============================== ==================================
+KVM_VCPUEVENT_VALID_NMI_PENDING transfer nmi.pending to the kernel
+KVM_VCPUEVENT_VALID_SIPI_VECTOR transfer sipi_vector
+KVM_VCPUEVENT_VALID_SMM         transfer the smi sub-struct.
+=============================== ==================================
If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
the flags field to signal that interrupt.shadow contains a valid state and
@@ -1056,6 +1156,7 @@ exception_has_payload, exception_payload, and exception.pending fields
contain a valid state and shall be written into the VCPU.
ARM/ARM64:
+^^^^^^^^^^
User space may need to inject several types of events to the guest.
@@ -1078,31 +1179,35 @@ See KVM_GET_VCPU_EVENTS for the data structure.
4.33 KVM_GET_DEBUGREGS
+----------------------
-Capability: KVM_CAP_DEBUGREGS
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_debugregs (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_DEBUGREGS
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_debugregs (out)
+:Returns: 0 on success, -1 on error
Reads debug registers from the vcpu.
-struct kvm_debugregs {
+::
+
+ struct kvm_debugregs {
__u64 db[4];
__u64 dr6;
__u64 dr7;
__u64 flags;
__u64 reserved[9];
-};
+ };
4.34 KVM_SET_DEBUGREGS
+----------------------
-Capability: KVM_CAP_DEBUGREGS
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_debugregs (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_DEBUGREGS
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_debugregs (in)
+:Returns: 0 on success, -1 on error
Writes debug registers into the vcpu.
@@ -1111,24 +1216,27 @@ yet and must be cleared on entry.
4.35 KVM_SET_USER_MEMORY_REGION
+-------------------------------
-Capability: KVM_CAP_USER_MEMORY
-Architectures: all
-Type: vm ioctl
-Parameters: struct kvm_userspace_memory_region (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_USER_MEMORY
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_userspace_memory_region (in)
+:Returns: 0 on success, -1 on error
-struct kvm_userspace_memory_region {
+::
+
+ struct kvm_userspace_memory_region {
__u32 slot;
__u32 flags;
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
__u64 userspace_addr; /* start of the userspace allocated memory */
-};
+ };
-/* for kvm_memory_region::flags */
-#define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
-#define KVM_MEM_READONLY (1UL << 1)
+ /* for kvm_memory_region::flags */
+ #define KVM_MEM_LOG_DIRTY_PAGES (1UL << 0)
+ #define KVM_MEM_READONLY (1UL << 1)
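Putting the structure above to use, a minimal sketch (the 1 MiB size and
slot 0 are arbitrary; vm_fd is assumed to come from KVM_CREATE_VM) that
backs guest physical address 0 with anonymous memory::

  #include <sys/mman.h>

  static int add_low_memory(int vm_fd)
  {
          void *host = mmap(NULL, 0x100000, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
          struct kvm_userspace_memory_region region = {
                  .slot            = 0,
                  .flags           = 0,   /* or KVM_MEM_LOG_DIRTY_PAGES */
                  .guest_phys_addr = 0,
                  .memory_size     = 0x100000,
                  .userspace_addr  = (unsigned long)host,
          };

          if (host == MAP_FAILED)
                  return -1;
          return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
  }
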
This ioctl allows the user to create, modify or delete a guest physical
memory slot. Bits 0-15 of "slot" specify the slot id and this value
@@ -1174,12 +1282,13 @@ allocation and is deprecated.
4.36 KVM_SET_TSS_ADDR
+---------------------
-Capability: KVM_CAP_SET_TSS_ADDR
-Architectures: x86
-Type: vm ioctl
-Parameters: unsigned long tss_address (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_SET_TSS_ADDR
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: unsigned long tss_address (in)
+:Returns: 0 on success, -1 on error
This ioctl defines the physical address of a three-page region in the guest
physical address space. The region must be within the first 4GB of the
@@ -1193,21 +1302,24 @@ documentation when it pops into existence).
4.37 KVM_ENABLE_CAP
+-------------------
+
+:Capability: KVM_CAP_ENABLE_CAP
+:Architectures: mips, ppc, s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_enable_cap (in)
+:Returns: 0 on success; -1 on error
-Capability: KVM_CAP_ENABLE_CAP
-Architectures: mips, ppc, s390
-Type: vcpu ioctl
-Parameters: struct kvm_enable_cap (in)
-Returns: 0 on success; -1 on error
+:Capability: KVM_CAP_ENABLE_CAP_VM
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_enable_cap (in)
+:Returns: 0 on success; -1 on error
-Capability: KVM_CAP_ENABLE_CAP_VM
-Architectures: all
-Type: vcpu ioctl
-Parameters: struct kvm_enable_cap (in)
-Returns: 0 on success; -1 on error
+.. note::
-Not all extensions are enabled by default. Using this ioctl the application
-can enable an extension, making it available to the guest.
+ Not all extensions are enabled by default. Using this ioctl the application
+ can enable an extension, making it available to the guest.
On systems that do not support this ioctl, it always fails. On systems that
do support it, it only works for extensions that are supported for enablement.
@@ -1215,76 +1327,91 @@ do support it, it only works for extensions that are supported for enablement.
To check if a capability can be enabled, the KVM_CHECK_EXTENSION ioctl should
be used.
-struct kvm_enable_cap {
+::
+
+ struct kvm_enable_cap {
/* in */
__u32 cap;
The capability that is supposed to get enabled.
+::
+
__u32 flags;
A bitfield indicating future enhancements. Has to be 0 for now.
+::
+
__u64 args[4];
Arguments for enabling a feature. If a feature needs initial values to
function properly, this is the place to put them.
+::
+
__u8 pad[64];
-};
+ };
The vcpu ioctl should be used for vcpu-specific capabilities, the vm ioctl
for vm-wide capabilities.
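A hedged sketch of the call itself; KVM_CAP_PPC_OSI is used purely as an
example of an extension that can be enabled on a ppc vcpu, and vcpu_fd is
assumed to come from KVM_CREATE_VCPU::

  struct kvm_enable_cap cap = {
          .cap = KVM_CAP_PPC_OSI,   /* example capability; args[] unused here */
  };

  ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
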
4.38 KVM_GET_MP_STATE
+---------------------
+
+:Capability: KVM_CAP_MP_STATE
+:Architectures: x86, s390, arm, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_mp_state (out)
+:Returns: 0 on success; -1 on error
-Capability: KVM_CAP_MP_STATE
-Architectures: x86, s390, arm, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_mp_state (out)
-Returns: 0 on success; -1 on error
+::
-struct kvm_mp_state {
+ struct kvm_mp_state {
__u32 mp_state;
-};
+ };
Returns the vcpu's current "multiprocessing state" (though also valid on
uniprocessor guests).
Possible values are:
- - KVM_MP_STATE_RUNNABLE: the vcpu is currently running [x86,arm/arm64]
- - KVM_MP_STATE_UNINITIALIZED: the vcpu is an application processor (AP)
+  ========================== ===============================================
+  KVM_MP_STATE_RUNNABLE      the vcpu is currently running [x86,arm/arm64]
+  KVM_MP_STATE_UNINITIALIZED the vcpu is an application processor (AP)
                              which has not yet received an INIT signal [x86]
- - KVM_MP_STATE_INIT_RECEIVED: the vcpu has received an INIT signal, and is
+  KVM_MP_STATE_INIT_RECEIVED the vcpu has received an INIT signal, and is
                              now ready for a SIPI [x86]
- - KVM_MP_STATE_HALTED: the vcpu has executed a HLT instruction and
+  KVM_MP_STATE_HALTED        the vcpu has executed a HLT instruction and
                              is waiting for an interrupt [x86]
- - KVM_MP_STATE_SIPI_RECEIVED: the vcpu has just received a SIPI (vector
+  KVM_MP_STATE_SIPI_RECEIVED the vcpu has just received a SIPI (vector
                              accessible via KVM_GET_VCPU_EVENTS) [x86]
- - KVM_MP_STATE_STOPPED: the vcpu is stopped [s390,arm/arm64]
- - KVM_MP_STATE_CHECK_STOP: the vcpu is in a special error state [s390]
- - KVM_MP_STATE_OPERATING: the vcpu is operating (running or halted)
+  KVM_MP_STATE_STOPPED       the vcpu is stopped [s390,arm/arm64]
+  KVM_MP_STATE_CHECK_STOP    the vcpu is in a special error state [s390]
+  KVM_MP_STATE_OPERATING     the vcpu is operating (running or halted)
                              [s390]
- - KVM_MP_STATE_LOAD: the vcpu is in a special load/startup state
+  KVM_MP_STATE_LOAD          the vcpu is in a special load/startup state
                              [s390]
+  ========================== ===============================================
On x86, this ioctl is only useful after KVM_CREATE_IRQCHIP. Without an
in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
For arm/arm64:
+^^^^^^^^^^^^^^
The only states that are valid are KVM_MP_STATE_STOPPED and
KVM_MP_STATE_RUNNABLE, which reflect whether the vcpu is paused or not.
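For example, a small helper (a sketch, assuming a vcpu_fd from
KVM_CREATE_VCPU) that reports whether a vcpu is currently runnable::

  static int vcpu_is_runnable(int vcpu_fd)
  {
          struct kvm_mp_state mp;

          if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
                  return -1;
          return mp.mp_state == KVM_MP_STATE_RUNNABLE;
  }
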
4.39 KVM_SET_MP_STATE
+---------------------
-Capability: KVM_CAP_MP_STATE
-Architectures: x86, s390, arm, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_mp_state (in)
-Returns: 0 on success; -1 on error
+:Capability: KVM_CAP_MP_STATE
+:Architectures: x86, s390, arm, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_mp_state (in)
+:Returns: 0 on success; -1 on error
Sets the vcpu's current "multiprocessing state"; see KVM_GET_MP_STATE for
arguments.
@@ -1294,17 +1421,19 @@ in-kernel irqchip, the multiprocessing state must be maintained by userspace on
these architectures.
For arm/arm64:
+^^^^^^^^^^^^^^
The only states that are valid are KVM_MP_STATE_STOPPED and
KVM_MP_STATE_RUNNABLE, which reflect whether the vcpu should be paused or not.
4.40 KVM_SET_IDENTITY_MAP_ADDR
+------------------------------
-Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
-Architectures: x86
-Type: vm ioctl
-Parameters: unsigned long identity (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: unsigned long identity (in)
+:Returns: 0 on success, -1 on error
This ioctl defines the physical address of a one-page region in the guest
physical address space. The region must be within the first 4GB of the
@@ -1322,12 +1451,13 @@ documentation when it pops into existence).
Fails if any VCPU has already been created.
4.41 KVM_SET_BOOT_CPU_ID
+------------------------
-Capability: KVM_CAP_SET_BOOT_CPU_ID
-Architectures: x86
-Type: vm ioctl
-Parameters: unsigned long vcpu_id
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_SET_BOOT_CPU_ID
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: unsigned long vcpu_id
+:Returns: 0 on success, -1 on error
Define which vcpu is the Bootstrap Processor (BSP). Values are the same
as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
@@ -1335,102 +1465,119 @@ is vcpu 0.
4.42 KVM_GET_XSAVE
+------------------
-Capability: KVM_CAP_XSAVE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_xsave (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_XSAVE
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xsave (out)
+:Returns: 0 on success, -1 on error
-struct kvm_xsave {
+
+::
+
+ struct kvm_xsave {
__u32 region[1024];
-};
+ };
This ioctl copies the current vcpu's xsave struct to userspace.
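A sketch of the usual round trip, together with KVM_SET_XSAVE from the next
section (vcpu_fd is an assumed vcpu file descriptor)::

  struct kvm_xsave xsave;

  ioctl(vcpu_fd, KVM_GET_XSAVE, &xsave);   /* snapshot the FPU/xsave area */
  /* ... save, migrate, or edit the region buffer here ... */
  ioctl(vcpu_fd, KVM_SET_XSAVE, &xsave);   /* write it back (see 4.43) */
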
4.43 KVM_SET_XSAVE
+------------------
+
+:Capability: KVM_CAP_XSAVE
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xsave (in)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_XSAVE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_xsave (in)
-Returns: 0 on success, -1 on error
+::
-struct kvm_xsave {
+
+ struct kvm_xsave {
__u32 region[1024];
-};
+ };
This ioctl copies userspace's xsave struct to the kernel.
4.44 KVM_GET_XCRS
+-----------------
+
+:Capability: KVM_CAP_XCRS
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xcrs (out)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_XCRS
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_xcrs (out)
-Returns: 0 on success, -1 on error
+::
-struct kvm_xcr {
+ struct kvm_xcr {
__u32 xcr;
__u32 reserved;
__u64 value;
-};
+ };
-struct kvm_xcrs {
+ struct kvm_xcrs {
__u32 nr_xcrs;
__u32 flags;
struct kvm_xcr xcrs[KVM_MAX_XCRS];
__u64 padding[16];
-};
+ };
This ioctl copies the current vcpu's xcrs to userspace.
4.45 KVM_SET_XCRS
+-----------------
+
+:Capability: KVM_CAP_XCRS
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_xcrs (in)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_XCRS
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_xcrs (in)
-Returns: 0 on success, -1 on error
+::
-struct kvm_xcr {
+ struct kvm_xcr {
__u32 xcr;
__u32 reserved;
__u64 value;
-};
+ };
-struct kvm_xcrs {
+ struct kvm_xcrs {
__u32 nr_xcrs;
__u32 flags;
struct kvm_xcr xcrs[KVM_MAX_XCRS];
__u64 padding[16];
-};
+ };
This ioctl sets the vcpu's xcrs to the values userspace specified.
4.46 KVM_GET_SUPPORTED_CPUID
+----------------------------
-Capability: KVM_CAP_EXT_CPUID
-Architectures: x86
-Type: system ioctl
-Parameters: struct kvm_cpuid2 (in/out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_EXT_CPUID
+:Architectures: x86
+:Type: system ioctl
+:Parameters: struct kvm_cpuid2 (in/out)
+:Returns: 0 on success, -1 on error
-struct kvm_cpuid2 {
+::
+
+ struct kvm_cpuid2 {
__u32 nent;
__u32 padding;
struct kvm_cpuid_entry2 entries[0];
-};
+ };
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
-#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
-#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
+ #define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
+ #define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
+ #define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
-struct kvm_cpuid_entry2 {
+ struct kvm_cpuid_entry2 {
__u32 function;
__u32 index;
__u32 flags;
@@ -1439,7 +1586,7 @@ struct kvm_cpuid_entry2 {
__u32 ecx;
__u32 edx;
__u32 padding[3];
-};
+ };
This ioctl returns x86 cpuid features which are supported by both the
hardware and kvm in its default configuration. Userspace can use the
@@ -1467,10 +1614,16 @@ with unknown or unsupported features masked out. Some features (for example,
x2apic), may not be present in the host cpu, but are exposed by kvm if it can
emulate them efficiently. The fields in each entry are defined as follows:
- function: the eax value used to obtain the entry
- index: the ecx value used to obtain the entry (for entries that are
+ function:
+ the eax value used to obtain the entry
+
+ index:
+ the ecx value used to obtain the entry (for entries that are
affected by ecx)
- flags: an OR of zero or more of the following:
+
+ flags:
+ an OR of zero or more of the following:
+
KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
if the index field is valid
KVM_CPUID_FLAG_STATEFUL_FUNC:
@@ -1480,12 +1633,14 @@ emulate them efficiently. The fields in each entry are defined as follows:
KVM_CPUID_FLAG_STATE_READ_NEXT:
for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
the first entry to be read by a cpu
- eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+
+ eax, ebx, ecx, edx:
+ the values returned by the cpuid instruction for
this function/index combination
The TSC deadline timer feature (CPUID leaf 1, ecx[24]) is always returned
as false, since the feature depends on KVM_CREATE_IRQCHIP for local APIC
-support. Instead it is reported via
+support. Instead it is reported via::
ioctl(KVM_CHECK_EXTENSION, KVM_CAP_TSC_DEADLINE_TIMER)
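Since the kernel reports E2BIG when the caller's nent is too small, userspace
typically sizes the buffer in a retry loop. A sketch (kvm_fd is an assumed
open file descriptor for /dev/kvm)::

  #include <errno.h>
  #include <stdlib.h>

  static struct kvm_cpuid2 *get_supported_cpuid(int kvm_fd)
  {
          int nent = 8;

          for (;;) {
                  struct kvm_cpuid2 *cpuid =
                          calloc(1, sizeof(*cpuid) +
                                    nent * sizeof(struct kvm_cpuid_entry2));

                  cpuid->nent = nent;
                  if (ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid) == 0)
                          return cpuid;
                  free(cpuid);
                  if (errno != E2BIG)
                          return NULL;
                  nent *= 2;   /* buffer was too small; retry larger */
          }
  }
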
@@ -1494,18 +1649,21 @@ feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
4.47 KVM_PPC_GET_PVINFO
+-----------------------
-Capability: KVM_CAP_PPC_GET_PVINFO
-Architectures: ppc
-Type: vm ioctl
-Parameters: struct kvm_ppc_pvinfo (out)
-Returns: 0 on success, !0 on error
+:Capability: KVM_CAP_PPC_GET_PVINFO
+:Architectures: ppc
+:Type: vm ioctl
+:Parameters: struct kvm_ppc_pvinfo (out)
+:Returns: 0 on success, !0 on error
-struct kvm_ppc_pvinfo {
+::
+
+ struct kvm_ppc_pvinfo {
__u32 flags;
__u32 hcall[4];
__u8 pad[108];
-};
+ };
This ioctl fetches PV-specific information that needs to be passed to the guest
using the device tree or other means from vm context.
@@ -1515,33 +1673,39 @@ The hcall array defines 4 instructions that make up a hypercall.
If any additional field gets added to this structure later on, a bit for that
additional piece of information will be set in the flags bitmap.
-The flags bitmap is defined as:
+The flags bitmap is defined as::
/* the host supports the ePAPR idle hcall */
#define KVM_PPC_PVINFO_FLAGS_EV_IDLE (1<<0)
4.52 KVM_SET_GSI_ROUTING
+------------------------
-Capability: KVM_CAP_IRQ_ROUTING
-Architectures: x86 s390 arm arm64
-Type: vm ioctl
-Parameters: struct kvm_irq_routing (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQ_ROUTING
+:Architectures: x86 s390 arm arm64
+:Type: vm ioctl
+:Parameters: struct kvm_irq_routing (in)
+:Returns: 0 on success, -1 on error
Sets the GSI routing table entries, overwriting any previously set entries.
On arm/arm64, GSI routing has the following limitation:
+
- GSI routing does not apply to KVM_IRQ_LINE but only to KVM_IRQFD.
-struct kvm_irq_routing {
+::
+
+ struct kvm_irq_routing {
__u32 nr;
__u32 flags;
struct kvm_irq_routing_entry entries[0];
-};
+ };
No flags are specified so far; the corresponding field must be set to zero.
-struct kvm_irq_routing_entry {
+::
+
+ struct kvm_irq_routing_entry {
__u32 gsi;
__u32 type;
__u32 flags;
@@ -1553,15 +1717,16 @@ struct kvm_irq_routing_entry {
struct kvm_irq_routing_hv_sint hv_sint;
__u32 pad[8];
} u;
-};
+ };
-/* gsi routing entry types */
-#define KVM_IRQ_ROUTING_IRQCHIP 1
-#define KVM_IRQ_ROUTING_MSI 2
-#define KVM_IRQ_ROUTING_S390_ADAPTER 3
-#define KVM_IRQ_ROUTING_HV_SINT 4
+ /* gsi routing entry types */
+ #define KVM_IRQ_ROUTING_IRQCHIP 1
+ #define KVM_IRQ_ROUTING_MSI 2
+ #define KVM_IRQ_ROUTING_S390_ADAPTER 3
+ #define KVM_IRQ_ROUTING_HV_SINT 4
flags:
+
- KVM_MSI_VALID_DEVID: used along with KVM_IRQ_ROUTING_MSI routing entry
type, specifies that the devid field contains a valid value. The per-VM
KVM_CAP_MSI_DEVID capability advertises the requirement to provide
@@ -1569,12 +1734,14 @@ flags:
never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
- zero otherwise
-struct kvm_irq_routing_irqchip {
+::
+
+ struct kvm_irq_routing_irqchip {
__u32 irqchip;
__u32 pin;
-};
+ };
-struct kvm_irq_routing_msi {
+ struct kvm_irq_routing_msi {
__u32 address_lo;
__u32 address_hi;
__u32 data;
@@ -1582,7 +1749,7 @@ struct kvm_irq_routing_msi {
__u32 pad;
__u32 devid;
};
-};
+ };
If KVM_MSI_VALID_DEVID is set, devid contains a unique device identifier
for the device that wrote the MSI message. For PCI, this is usually a
@@ -1593,39 +1760,43 @@ feature of KVM_CAP_X2APIC_API capability is enabled. If it is enabled,
address_hi bits 31-8 provide bits 31-8 of the destination id. Bits 7-0 of
address_hi must be zero.
-struct kvm_irq_routing_s390_adapter {
+::
+
+ struct kvm_irq_routing_s390_adapter {
__u64 ind_addr;
__u64 summary_addr;
__u64 ind_offset;
__u32 summary_offset;
__u32 adapter_id;
-};
+ };
-struct kvm_irq_routing_hv_sint {
+ struct kvm_irq_routing_hv_sint {
__u32 vcpu;
__u32 sint;
-};
+ };
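A hedged end-to-end sketch that installs a single MSI route on GSI 0 (the
address and data values are hypothetical, and vm_fd is assumed)::

  static int route_msi_to_gsi0(int vm_fd)
  {
          struct {
                  struct kvm_irq_routing hdr;
                  struct kvm_irq_routing_entry entry;
          } route = {
                  .hdr.nr = 1,          /* one entry follows the header */
                  .entry = {
                          .gsi   = 0,
                          .type  = KVM_IRQ_ROUTING_MSI,
                          .u.msi = {
                                  .address_lo = 0xfee00000, /* hypothetical */
                                  .data       = 0x30,       /* hypothetical */
                          },
                  },
          };

          return ioctl(vm_fd, KVM_SET_GSI_ROUTING, &route.hdr);
  }
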
4.55 KVM_SET_TSC_KHZ
+--------------------
-Capability: KVM_CAP_TSC_CONTROL
-Architectures: x86
-Type: vcpu ioctl
-Parameters: virtual tsc_khz
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_TSC_CONTROL
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: virtual tsc_khz
+:Returns: 0 on success, -1 on error
Specifies the tsc frequency for the virtual machine. The unit of the
frequency is KHz.
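For instance (a sketch; the 2.5 GHz value is arbitrary and vcpu_fd is
assumed)::

  ioctl(vcpu_fd, KVM_SET_TSC_KHZ, 2500000UL);   /* request a 2.5 GHz guest TSC */
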
4.56 KVM_GET_TSC_KHZ
+--------------------
-Capability: KVM_CAP_GET_TSC_KHZ
-Architectures: x86
-Type: vcpu ioctl
-Parameters: none
-Returns: virtual tsc-khz on success, negative value on error
+:Capability: KVM_CAP_GET_TSC_KHZ
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: virtual tsc-khz on success, negative value on error
Returns the tsc frequency of the guest. The unit of the return value is
KHz. If the host has unstable tsc this ioctl returns -EIO instead as an
@@ -1633,17 +1804,20 @@ error.
4.57 KVM_GET_LAPIC
+------------------
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_lapic_state (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQCHIP
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_lapic_state (out)
+:Returns: 0 on success, -1 on error
-#define KVM_APIC_REG_SIZE 0x400
-struct kvm_lapic_state {
+::
+
+ #define KVM_APIC_REG_SIZE 0x400
+ struct kvm_lapic_state {
char regs[KVM_APIC_REG_SIZE];
-};
+ };
Reads the Local APIC registers and copies them into the input argument. The
data format and layout are the same as documented in the architecture manual.
@@ -1661,17 +1835,20 @@ always uses xAPIC format.
4.58 KVM_SET_LAPIC
+------------------
-Capability: KVM_CAP_IRQCHIP
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_lapic_state (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQCHIP
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_lapic_state (in)
+:Returns: 0 on success, -1 on error
-#define KVM_APIC_REG_SIZE 0x400
-struct kvm_lapic_state {
+::
+
+ #define KVM_APIC_REG_SIZE 0x400
+ struct kvm_lapic_state {
char regs[KVM_APIC_REG_SIZE];
-};
+ };
Copies the input argument into the Local APIC registers. The data format
and layout are the same as documented in the architecture manual.
@@ -1682,35 +1859,38 @@ See the note in KVM_GET_LAPIC.
4.59 KVM_IOEVENTFD
+------------------
-Capability: KVM_CAP_IOEVENTFD
-Architectures: all
-Type: vm ioctl
-Parameters: struct kvm_ioeventfd (in)
-Returns: 0 on success, !0 on error
+:Capability: KVM_CAP_IOEVENTFD
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_ioeventfd (in)
+:Returns: 0 on success, !0 on error
This ioctl attaches or detaches an ioeventfd to a legal pio/mmio address
within the guest. A guest write to the registered address will signal the
provided event instead of triggering an exit.
-struct kvm_ioeventfd {
+::
+
+ struct kvm_ioeventfd {
__u64 datamatch;
__u64 addr; /* legal pio/mmio address */
__u32 len; /* 0, 1, 2, 4, or 8 bytes */
__s32 fd;
__u32 flags;
__u8 pad[36];
-};
+ };
For the special case of virtio-ccw devices on s390, the ioevent is matched
to a subchannel/virtqueue tuple instead.
-The following flags are defined:
+The following flags are defined::
-#define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
-#define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio)
-#define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign)
-#define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \
+ #define KVM_IOEVENTFD_FLAG_DATAMATCH (1 << kvm_ioeventfd_flag_nr_datamatch)
+ #define KVM_IOEVENTFD_FLAG_PIO (1 << kvm_ioeventfd_flag_nr_pio)
+ #define KVM_IOEVENTFD_FLAG_DEASSIGN (1 << kvm_ioeventfd_flag_nr_deassign)
+ #define KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY \
(1 << kvm_ioeventfd_flag_nr_virtio_ccw_notify)
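As a usage sketch (the doorbell address and matched value are hypothetical;
the datamatch flag is described just below)::

  #include <sys/eventfd.h>

  static int wire_doorbell(int vm_fd)
  {
          int efd = eventfd(0, EFD_CLOEXEC);
          struct kvm_ioeventfd ioev = {
                  .datamatch = 1,            /* only a write of 1 signals */
                  .addr      = 0xd0000000,   /* hypothetical mmio doorbell */
                  .len       = 4,
                  .fd        = efd,
                  .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
          };

          if (efd < 0 || ioctl(vm_fd, KVM_IOEVENTFD, &ioev) < 0)
                  return -1;
          return efd;   /* poll this fd to observe matching guest writes */
  }
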
If the datamatch flag is set, the event will be signaled only if the written value
@@ -1725,17 +1905,20 @@ The speedup may only apply to specific architectures, but the ioeventfd will
work anyway.
4.60 KVM_DIRTY_TLB
+------------------
+
+:Capability: KVM_CAP_SW_TLB
+:Architectures: ppc
+:Type: vcpu ioctl
+:Parameters: struct kvm_dirty_tlb (in)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_SW_TLB
-Architectures: ppc
-Type: vcpu ioctl
-Parameters: struct kvm_dirty_tlb (in)
-Returns: 0 on success, -1 on error
+::
-struct kvm_dirty_tlb {
+ struct kvm_dirty_tlb {
__u64 bitmap;
__u32 num_dirty;
-};
+ };
This must be called whenever userspace has changed an entry in the shared
TLB, prior to calling KVM_RUN on the associated vcpu.
@@ -1758,23 +1941,26 @@ be set to the number of set bits in the bitmap.
4.62 KVM_CREATE_SPAPR_TCE
+-------------------------
-Capability: KVM_CAP_SPAPR_TCE
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_create_spapr_tce (in)
-Returns: file descriptor for manipulating the created TCE table
+:Capability: KVM_CAP_SPAPR_TCE
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: struct kvm_create_spapr_tce (in)
+:Returns: file descriptor for manipulating the created TCE table
This creates a virtual TCE (translation control entry) table, which
is an IOMMU for PAPR-style virtual I/O. It is used to translate
logical addresses used in virtual I/O into guest physical addresses,
and provides a scatter/gather capability for PAPR virtual I/O.
-/* for KVM_CAP_SPAPR_TCE */
-struct kvm_create_spapr_tce {
+::
+
+ /* for KVM_CAP_SPAPR_TCE */
+ struct kvm_create_spapr_tce {
__u64 liobn;
__u32 window_size;
-};
+ };
The liobn field gives the logical IO bus number for which to create a
TCE table. The window_size field specifies the size of the DMA window
@@ -1794,12 +1980,13 @@ circumstances.
4.63 KVM_ALLOCATE_RMA
+---------------------
-Capability: KVM_CAP_PPC_RMA
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_allocate_rma (out)
-Returns: file descriptor for mapping the allocated RMA
+:Capability: KVM_CAP_PPC_RMA
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: struct kvm_allocate_rma (out)
+:Returns: file descriptor for mapping the allocated RMA
This allocates a Real Mode Area (RMA) from the pool allocated at boot
time by the kernel. An RMA is a physically-contiguous, aligned region
@@ -1808,10 +1995,12 @@ will be accessed by real-mode (MMU off) accesses in a KVM guest.
POWER processors support a set of sizes for the RMA that usually
includes 64MB, 128MB, 256MB and some larger powers of two.
-/* for KVM_ALLOCATE_RMA */
-struct kvm_allocate_rma {
+::
+
+ /* for KVM_ALLOCATE_RMA */
+ struct kvm_allocate_rma {
__u64 rma_size;
-};
+ };
The return value is a file descriptor which can be passed to mmap(2)
to map the allocated RMA into userspace. The mapped area can then be
@@ -1827,12 +2016,13 @@ because it supports the Virtual RMA (VRMA) facility.
4.64 KVM_NMI
+------------
-Capability: KVM_CAP_USER_NMI
-Architectures: x86
-Type: vcpu ioctl
-Parameters: none
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_USER_NMI
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0 on success, -1 on error
Queues an NMI on the thread's vcpu. Note this is well defined only
when KVM_CREATE_IRQCHIP has not been called, since this is an interface
@@ -1853,14 +2043,16 @@ debugging.
4.65 KVM_S390_UCAS_MAP
+----------------------
-Capability: KVM_CAP_S390_UCONTROL
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_ucas_mapping (in)
-Returns: 0 in case of success
+:Capability: KVM_CAP_S390_UCONTROL
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_s390_ucas_mapping (in)
+:Returns: 0 in case of success
+
+The parameter is defined like this::
-The parameter is defined like this:
struct kvm_s390_ucas_mapping {
__u64 user_addr;
__u64 vcpu_addr;
@@ -1873,14 +2065,16 @@ be aligned by 1 megabyte.
4.66 KVM_S390_UCAS_UNMAP
+------------------------
-Capability: KVM_CAP_S390_UCONTROL
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_ucas_mapping (in)
-Returns: 0 in case of success
+:Capability: KVM_CAP_S390_UCONTROL
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_s390_ucas_mapping (in)
+:Returns: 0 in case of success
+
+The parameter is defined like this::
-The parameter is defined like this:
struct kvm_s390_ucas_mapping {
__u64 user_addr;
__u64 vcpu_addr;
@@ -1893,12 +2087,13 @@ All parameters need to be aligned by 1 megabyte.
4.67 KVM_S390_VCPU_FAULT
+------------------------
-Capability: KVM_CAP_S390_UCONTROL
-Architectures: s390
-Type: vcpu ioctl
-Parameters: vcpu absolute address (in)
-Returns: 0 in case of success
+:Capability: KVM_CAP_S390_UCONTROL
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: vcpu absolute address (in)
+:Returns: 0 in case of success
This call creates a page table entry on the virtual cpu's address space
(for user controlled virtual machines) or the virtual machine's address
@@ -1910,23 +2105,31 @@ prior to calling the KVM_RUN ioctl.
4.68 KVM_SET_ONE_REG
+--------------------
+
+:Capability: KVM_CAP_ONE_REG
+:Architectures: all
+:Type: vcpu ioctl
+:Parameters: struct kvm_one_reg (in)
+:Returns: 0 on success, negative value on failure
-Capability: KVM_CAP_ONE_REG
-Architectures: all
-Type: vcpu ioctl
-Parameters: struct kvm_one_reg (in)
-Returns: 0 on success, negative value on failure
Errors:
-  ENOENT:   no such register
-  EINVAL:   invalid register ID, or no such register
-  EPERM:    (arm64) register access not allowed before vcpu finalization
+
+  ======== ============================================================
+  ENOENT   no such register
+  EINVAL   invalid register ID, or no such register
+  EPERM    (arm64) register access not allowed before vcpu finalization
+  ======== ============================================================
+
(These error codes are indicative only: do not rely on a specific error
code being returned in a specific situation.)
-struct kvm_one_reg {
+::
+
+ struct kvm_one_reg {
__u64 id;
__u64 addr;
-};
+ };
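A helper sketch for the common 64-bit case (id comes from the register list
below, and vcpu_fd is assumed)::

  static int set_one_reg(int vcpu_fd, __u64 id, __u64 val)
  {
          struct kvm_one_reg reg = {
                  .id   = id,
                  .addr = (__u64)(unsigned long)&val,   /* userspace buffer */
          };

          return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
  }
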
Using this ioctl, a single vcpu register can be set to a specific value
defined by user space with the passed in struct kvm_one_reg, where id
@@ -1936,217 +2139,226 @@ and architecture specific registers. Each have their own range of operation
and their own constants and width. To keep track of the implemented
registers, find a list below:
- Arch | Register | Width (bits)
- | |
- PPC | KVM_REG_PPC_HIOR | 64
- PPC | KVM_REG_PPC_IAC1 | 64
- PPC | KVM_REG_PPC_IAC2 | 64
- PPC | KVM_REG_PPC_IAC3 | 64
- PPC | KVM_REG_PPC_IAC4 | 64
- PPC | KVM_REG_PPC_DAC1 | 64
- PPC | KVM_REG_PPC_DAC2 | 64
- PPC | KVM_REG_PPC_DABR | 64
- PPC | KVM_REG_PPC_DSCR | 64
- PPC | KVM_REG_PPC_PURR | 64
- PPC | KVM_REG_PPC_SPURR | 64
- PPC | KVM_REG_PPC_DAR | 64
- PPC | KVM_REG_PPC_DSISR | 32
- PPC | KVM_REG_PPC_AMR | 64
- PPC | KVM_REG_PPC_UAMOR | 64
- PPC | KVM_REG_PPC_MMCR0 | 64
- PPC | KVM_REG_PPC_MMCR1 | 64
- PPC | KVM_REG_PPC_MMCRA | 64
- PPC | KVM_REG_PPC_MMCR2 | 64
- PPC | KVM_REG_PPC_MMCRS | 64
- PPC | KVM_REG_PPC_SIAR | 64
- PPC | KVM_REG_PPC_SDAR | 64
- PPC | KVM_REG_PPC_SIER | 64
- PPC | KVM_REG_PPC_PMC1 | 32
- PPC | KVM_REG_PPC_PMC2 | 32
- PPC | KVM_REG_PPC_PMC3 | 32
- PPC | KVM_REG_PPC_PMC4 | 32
- PPC | KVM_REG_PPC_PMC5 | 32
- PPC | KVM_REG_PPC_PMC6 | 32
- PPC | KVM_REG_PPC_PMC7 | 32
- PPC | KVM_REG_PPC_PMC8 | 32
- PPC | KVM_REG_PPC_FPR0 | 64
- ...
- PPC | KVM_REG_PPC_FPR31 | 64
- PPC | KVM_REG_PPC_VR0 | 128
- ...
- PPC | KVM_REG_PPC_VR31 | 128
- PPC | KVM_REG_PPC_VSR0 | 128
- ...
- PPC | KVM_REG_PPC_VSR31 | 128
- PPC | KVM_REG_PPC_FPSCR | 64
- PPC | KVM_REG_PPC_VSCR | 32
- PPC | KVM_REG_PPC_VPA_ADDR | 64
- PPC | KVM_REG_PPC_VPA_SLB | 128
- PPC | KVM_REG_PPC_VPA_DTL | 128
- PPC | KVM_REG_PPC_EPCR | 32
- PPC | KVM_REG_PPC_EPR | 32
- PPC | KVM_REG_PPC_TCR | 32
- PPC | KVM_REG_PPC_TSR | 32
- PPC | KVM_REG_PPC_OR_TSR | 32
- PPC | KVM_REG_PPC_CLEAR_TSR | 32
- PPC | KVM_REG_PPC_MAS0 | 32
- PPC | KVM_REG_PPC_MAS1 | 32
- PPC | KVM_REG_PPC_MAS2 | 64
- PPC | KVM_REG_PPC_MAS7_3 | 64
- PPC | KVM_REG_PPC_MAS4 | 32
- PPC | KVM_REG_PPC_MAS6 | 32
- PPC | KVM_REG_PPC_MMUCFG | 32
- PPC | KVM_REG_PPC_TLB0CFG | 32
- PPC | KVM_REG_PPC_TLB1CFG | 32
- PPC | KVM_REG_PPC_TLB2CFG | 32
- PPC | KVM_REG_PPC_TLB3CFG | 32
- PPC | KVM_REG_PPC_TLB0PS | 32
- PPC | KVM_REG_PPC_TLB1PS | 32
- PPC | KVM_REG_PPC_TLB2PS | 32
- PPC | KVM_REG_PPC_TLB3PS | 32
- PPC | KVM_REG_PPC_EPTCFG | 32
- PPC | KVM_REG_PPC_ICP_STATE | 64
- PPC | KVM_REG_PPC_VP_STATE | 128
- PPC | KVM_REG_PPC_TB_OFFSET | 64
- PPC | KVM_REG_PPC_SPMC1 | 32
- PPC | KVM_REG_PPC_SPMC2 | 32
- PPC | KVM_REG_PPC_IAMR | 64
- PPC | KVM_REG_PPC_TFHAR | 64
- PPC | KVM_REG_PPC_TFIAR | 64
- PPC | KVM_REG_PPC_TEXASR | 64
- PPC | KVM_REG_PPC_FSCR | 64
- PPC | KVM_REG_PPC_PSPB | 32
- PPC | KVM_REG_PPC_EBBHR | 64
- PPC | KVM_REG_PPC_EBBRR | 64
- PPC | KVM_REG_PPC_BESCR | 64
- PPC | KVM_REG_PPC_TAR | 64
- PPC | KVM_REG_PPC_DPDES | 64
- PPC | KVM_REG_PPC_DAWR | 64
- PPC | KVM_REG_PPC_DAWRX | 64
- PPC | KVM_REG_PPC_CIABR | 64
- PPC | KVM_REG_PPC_IC | 64
- PPC | KVM_REG_PPC_VTB | 64
- PPC | KVM_REG_PPC_CSIGR | 64
- PPC | KVM_REG_PPC_TACR | 64
- PPC | KVM_REG_PPC_TCSCR | 64
- PPC | KVM_REG_PPC_PID | 64
- PPC | KVM_REG_PPC_ACOP | 64
- PPC | KVM_REG_PPC_VRSAVE | 32
- PPC | KVM_REG_PPC_LPCR | 32
- PPC | KVM_REG_PPC_LPCR_64 | 64
- PPC | KVM_REG_PPC_PPR | 64
- PPC | KVM_REG_PPC_ARCH_COMPAT | 32
- PPC | KVM_REG_PPC_DABRX | 32
- PPC | KVM_REG_PPC_WORT | 64
- PPC | KVM_REG_PPC_SPRG9 | 64
- PPC | KVM_REG_PPC_DBSR | 32
- PPC | KVM_REG_PPC_TIDR | 64
- PPC | KVM_REG_PPC_PSSCR | 64
- PPC | KVM_REG_PPC_DEC_EXPIRY | 64
- PPC | KVM_REG_PPC_PTCR | 64
- PPC | KVM_REG_PPC_TM_GPR0 | 64
- ...
- PPC | KVM_REG_PPC_TM_GPR31 | 64
- PPC | KVM_REG_PPC_TM_VSR0 | 128
- ...
- PPC | KVM_REG_PPC_TM_VSR63 | 128
- PPC | KVM_REG_PPC_TM_CR | 64
- PPC | KVM_REG_PPC_TM_LR | 64
- PPC | KVM_REG_PPC_TM_CTR | 64
- PPC | KVM_REG_PPC_TM_FPSCR | 64
- PPC | KVM_REG_PPC_TM_AMR | 64
- PPC | KVM_REG_PPC_TM_PPR | 64
- PPC | KVM_REG_PPC_TM_VRSAVE | 64
- PPC | KVM_REG_PPC_TM_VSCR | 32
- PPC | KVM_REG_PPC_TM_DSCR | 64
- PPC | KVM_REG_PPC_TM_TAR | 64
- PPC | KVM_REG_PPC_TM_XER | 64
- | |
- MIPS | KVM_REG_MIPS_R0 | 64
- ...
- MIPS | KVM_REG_MIPS_R31 | 64
- MIPS | KVM_REG_MIPS_HI | 64
- MIPS | KVM_REG_MIPS_LO | 64
- MIPS | KVM_REG_MIPS_PC | 64
- MIPS | KVM_REG_MIPS_CP0_INDEX | 32
- MIPS | KVM_REG_MIPS_CP0_ENTRYLO0 | 64
- MIPS | KVM_REG_MIPS_CP0_ENTRYLO1 | 64
- MIPS | KVM_REG_MIPS_CP0_CONTEXT | 64
- MIPS | KVM_REG_MIPS_CP0_CONTEXTCONFIG| 32
- MIPS | KVM_REG_MIPS_CP0_USERLOCAL | 64
- MIPS | KVM_REG_MIPS_CP0_XCONTEXTCONFIG| 64
- MIPS | KVM_REG_MIPS_CP0_PAGEMASK | 32
- MIPS | KVM_REG_MIPS_CP0_PAGEGRAIN | 32
- MIPS | KVM_REG_MIPS_CP0_SEGCTL0 | 64
- MIPS | KVM_REG_MIPS_CP0_SEGCTL1 | 64
- MIPS | KVM_REG_MIPS_CP0_SEGCTL2 | 64
- MIPS | KVM_REG_MIPS_CP0_PWBASE | 64
- MIPS | KVM_REG_MIPS_CP0_PWFIELD | 64
- MIPS | KVM_REG_MIPS_CP0_PWSIZE | 64
- MIPS | KVM_REG_MIPS_CP0_WIRED | 32
- MIPS | KVM_REG_MIPS_CP0_PWCTL | 32
- MIPS | KVM_REG_MIPS_CP0_HWRENA | 32
- MIPS | KVM_REG_MIPS_CP0_BADVADDR | 64
- MIPS | KVM_REG_MIPS_CP0_BADINSTR | 32
- MIPS | KVM_REG_MIPS_CP0_BADINSTRP | 32
- MIPS | KVM_REG_MIPS_CP0_COUNT | 32
- MIPS | KVM_REG_MIPS_CP0_ENTRYHI | 64
- MIPS | KVM_REG_MIPS_CP0_COMPARE | 32
- MIPS | KVM_REG_MIPS_CP0_STATUS | 32
- MIPS | KVM_REG_MIPS_CP0_INTCTL | 32
- MIPS | KVM_REG_MIPS_CP0_CAUSE | 32
- MIPS | KVM_REG_MIPS_CP0_EPC | 64
- MIPS | KVM_REG_MIPS_CP0_PRID | 32
- MIPS | KVM_REG_MIPS_CP0_EBASE | 64
- MIPS | KVM_REG_MIPS_CP0_CONFIG | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG1 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG2 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG3 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG4 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG5 | 32
- MIPS | KVM_REG_MIPS_CP0_CONFIG7 | 32
- MIPS | KVM_REG_MIPS_CP0_XCONTEXT | 64
- MIPS | KVM_REG_MIPS_CP0_ERROREPC | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH1 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH2 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH3 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH4 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH5 | 64
- MIPS | KVM_REG_MIPS_CP0_KSCRATCH6 | 64
- MIPS | KVM_REG_MIPS_CP0_MAAR(0..63) | 64
- MIPS | KVM_REG_MIPS_COUNT_CTL | 64
- MIPS | KVM_REG_MIPS_COUNT_RESUME | 64
- MIPS | KVM_REG_MIPS_COUNT_HZ | 64
- MIPS | KVM_REG_MIPS_FPR_32(0..31) | 32
- MIPS | KVM_REG_MIPS_FPR_64(0..31) | 64
- MIPS | KVM_REG_MIPS_VEC_128(0..31) | 128
- MIPS | KVM_REG_MIPS_FCR_IR | 32
- MIPS | KVM_REG_MIPS_FCR_CSR | 32
- MIPS | KVM_REG_MIPS_MSA_IR | 32
- MIPS | KVM_REG_MIPS_MSA_CSR | 32
+  ======= =============================== ============
+  Arch    Register                        Width (bits)
+  ======= =============================== ============
+  PPC     KVM_REG_PPC_HIOR                64
+  PPC     KVM_REG_PPC_IAC1                64
+  PPC     KVM_REG_PPC_IAC2                64
+  PPC     KVM_REG_PPC_IAC3                64
+  PPC     KVM_REG_PPC_IAC4                64
+  PPC     KVM_REG_PPC_DAC1                64
+  PPC     KVM_REG_PPC_DAC2                64
+  PPC     KVM_REG_PPC_DABR                64
+  PPC     KVM_REG_PPC_DSCR                64
+  PPC     KVM_REG_PPC_PURR                64
+  PPC     KVM_REG_PPC_SPURR               64
+  PPC     KVM_REG_PPC_DAR                 64
+  PPC     KVM_REG_PPC_DSISR               32
+  PPC     KVM_REG_PPC_AMR                 64
+  PPC     KVM_REG_PPC_UAMOR               64
+  PPC     KVM_REG_PPC_MMCR0               64
+  PPC     KVM_REG_PPC_MMCR1               64
+  PPC     KVM_REG_PPC_MMCRA               64
+  PPC     KVM_REG_PPC_MMCR2               64
+  PPC     KVM_REG_PPC_MMCRS               64
+  PPC     KVM_REG_PPC_SIAR                64
+  PPC     KVM_REG_PPC_SDAR                64
+  PPC     KVM_REG_PPC_SIER                64
+  PPC     KVM_REG_PPC_PMC1                32
+  PPC     KVM_REG_PPC_PMC2                32
+  PPC     KVM_REG_PPC_PMC3                32
+  PPC     KVM_REG_PPC_PMC4                32
+  PPC     KVM_REG_PPC_PMC5                32
+  PPC     KVM_REG_PPC_PMC6                32
+  PPC     KVM_REG_PPC_PMC7                32
+  PPC     KVM_REG_PPC_PMC8                32
+  PPC     KVM_REG_PPC_FPR0                64
+  ...
+  PPC     KVM_REG_PPC_FPR31               64
+  PPC     KVM_REG_PPC_VR0                 128
+  ...
+  PPC     KVM_REG_PPC_VR31                128
+  PPC     KVM_REG_PPC_VSR0                128
+  ...
+  PPC     KVM_REG_PPC_VSR31               128
+  PPC     KVM_REG_PPC_FPSCR               64
+  PPC     KVM_REG_PPC_VSCR                32
+  PPC     KVM_REG_PPC_VPA_ADDR            64
+  PPC     KVM_REG_PPC_VPA_SLB             128
+  PPC     KVM_REG_PPC_VPA_DTL             128
+  PPC     KVM_REG_PPC_EPCR                32
+  PPC     KVM_REG_PPC_EPR                 32
+  PPC     KVM_REG_PPC_TCR                 32
+  PPC     KVM_REG_PPC_TSR                 32
+  PPC     KVM_REG_PPC_OR_TSR              32
+  PPC     KVM_REG_PPC_CLEAR_TSR           32
+  PPC     KVM_REG_PPC_MAS0                32
+  PPC     KVM_REG_PPC_MAS1                32
+  PPC     KVM_REG_PPC_MAS2                64
+  PPC     KVM_REG_PPC_MAS7_3              64
+  PPC     KVM_REG_PPC_MAS4                32
+  PPC     KVM_REG_PPC_MAS6                32
+  PPC     KVM_REG_PPC_MMUCFG              32
+  PPC     KVM_REG_PPC_TLB0CFG             32
+  PPC     KVM_REG_PPC_TLB1CFG             32
+  PPC     KVM_REG_PPC_TLB2CFG             32
+  PPC     KVM_REG_PPC_TLB3CFG             32
+  PPC     KVM_REG_PPC_TLB0PS              32
+  PPC     KVM_REG_PPC_TLB1PS              32
+  PPC     KVM_REG_PPC_TLB2PS              32
+  PPC     KVM_REG_PPC_TLB3PS              32
+  PPC     KVM_REG_PPC_EPTCFG              32
+  PPC     KVM_REG_PPC_ICP_STATE           64
+  PPC     KVM_REG_PPC_VP_STATE            128
+  PPC     KVM_REG_PPC_TB_OFFSET           64
+  PPC     KVM_REG_PPC_SPMC1               32
+  PPC     KVM_REG_PPC_SPMC2               32
+  PPC     KVM_REG_PPC_IAMR                64
+  PPC     KVM_REG_PPC_TFHAR               64
+  PPC     KVM_REG_PPC_TFIAR               64
+  PPC     KVM_REG_PPC_TEXASR              64
+  PPC     KVM_REG_PPC_FSCR                64
+  PPC     KVM_REG_PPC_PSPB                32
+  PPC     KVM_REG_PPC_EBBHR               64
+  PPC     KVM_REG_PPC_EBBRR               64
+  PPC     KVM_REG_PPC_BESCR               64
+  PPC     KVM_REG_PPC_TAR                 64
+  PPC     KVM_REG_PPC_DPDES               64
+  PPC     KVM_REG_PPC_DAWR                64
+  PPC     KVM_REG_PPC_DAWRX               64
+  PPC     KVM_REG_PPC_CIABR               64
+  PPC     KVM_REG_PPC_IC                  64
+  PPC     KVM_REG_PPC_VTB                 64
+  PPC     KVM_REG_PPC_CSIGR               64
+  PPC     KVM_REG_PPC_TACR                64
+  PPC     KVM_REG_PPC_TCSCR               64
+  PPC     KVM_REG_PPC_PID                 64
+  PPC     KVM_REG_PPC_ACOP                64
+  PPC     KVM_REG_PPC_VRSAVE              32
+  PPC     KVM_REG_PPC_LPCR                32
+  PPC     KVM_REG_PPC_LPCR_64             64
+  PPC     KVM_REG_PPC_PPR                 64
+  PPC     KVM_REG_PPC_ARCH_COMPAT         32
+  PPC     KVM_REG_PPC_DABRX               32
+  PPC     KVM_REG_PPC_WORT                64
+  PPC     KVM_REG_PPC_SPRG9               64
+  PPC     KVM_REG_PPC_DBSR                32
+  PPC     KVM_REG_PPC_TIDR                64
+  PPC     KVM_REG_PPC_PSSCR               64
+  PPC     KVM_REG_PPC_DEC_EXPIRY          64
+  PPC     KVM_REG_PPC_PTCR                64
+  PPC     KVM_REG_PPC_TM_GPR0             64
+  ...
+  PPC     KVM_REG_PPC_TM_GPR31            64
+  PPC     KVM_REG_PPC_TM_VSR0             128
+  ...
+  PPC     KVM_REG_PPC_TM_VSR63            128
+  PPC     KVM_REG_PPC_TM_CR               64
+  PPC     KVM_REG_PPC_TM_LR               64
+  PPC     KVM_REG_PPC_TM_CTR              64
+  PPC     KVM_REG_PPC_TM_FPSCR            64
+  PPC     KVM_REG_PPC_TM_AMR              64
+  PPC     KVM_REG_PPC_TM_PPR              64
+  PPC     KVM_REG_PPC_TM_VRSAVE           64
+  PPC     KVM_REG_PPC_TM_VSCR             32
+  PPC     KVM_REG_PPC_TM_DSCR             64
+  PPC     KVM_REG_PPC_TM_TAR              64
+  PPC     KVM_REG_PPC_TM_XER              64
+
+  MIPS    KVM_REG_MIPS_R0                 64
+  ...
+  MIPS    KVM_REG_MIPS_R31                64
+  MIPS    KVM_REG_MIPS_HI                 64
+  MIPS    KVM_REG_MIPS_LO                 64
+  MIPS    KVM_REG_MIPS_PC                 64
+  MIPS    KVM_REG_MIPS_CP0_INDEX          32
+  MIPS    KVM_REG_MIPS_CP0_ENTRYLO0       64
+  MIPS    KVM_REG_MIPS_CP0_ENTRYLO1       64
+  MIPS    KVM_REG_MIPS_CP0_CONTEXT        64
+  MIPS    KVM_REG_MIPS_CP0_CONTEXTCONFIG  32
+  MIPS    KVM_REG_MIPS_CP0_USERLOCAL      64
+  MIPS    KVM_REG_MIPS_CP0_XCONTEXTCONFIG 64
+  MIPS    KVM_REG_MIPS_CP0_PAGEMASK       32
+  MIPS    KVM_REG_MIPS_CP0_PAGEGRAIN      32
+  MIPS    KVM_REG_MIPS_CP0_SEGCTL0        64
+  MIPS    KVM_REG_MIPS_CP0_SEGCTL1        64
+  MIPS    KVM_REG_MIPS_CP0_SEGCTL2        64
+  MIPS    KVM_REG_MIPS_CP0_PWBASE         64
+  MIPS    KVM_REG_MIPS_CP0_PWFIELD        64
+  MIPS    KVM_REG_MIPS_CP0_PWSIZE         64
+  MIPS    KVM_REG_MIPS_CP0_WIRED          32
+  MIPS    KVM_REG_MIPS_CP0_PWCTL          32
+  MIPS    KVM_REG_MIPS_CP0_HWRENA         32
+  MIPS    KVM_REG_MIPS_CP0_BADVADDR       64
+  MIPS    KVM_REG_MIPS_CP0_BADINSTR       32
+  MIPS    KVM_REG_MIPS_CP0_BADINSTRP      32
+  MIPS    KVM_REG_MIPS_CP0_COUNT          32
+  MIPS    KVM_REG_MIPS_CP0_ENTRYHI        64
+  MIPS    KVM_REG_MIPS_CP0_COMPARE        32
+  MIPS    KVM_REG_MIPS_CP0_STATUS         32
+  MIPS    KVM_REG_MIPS_CP0_INTCTL         32
+  MIPS    KVM_REG_MIPS_CP0_CAUSE          32
+  MIPS    KVM_REG_MIPS_CP0_EPC            64
+  MIPS    KVM_REG_MIPS_CP0_PRID           32
+  MIPS    KVM_REG_MIPS_CP0_EBASE          64
+  MIPS    KVM_REG_MIPS_CP0_CONFIG         32
+  MIPS    KVM_REG_MIPS_CP0_CONFIG1        32
+  MIPS    KVM_REG_MIPS_CP0_CONFIG2        32
+  MIPS    KVM_REG_MIPS_CP0_CONFIG3        32
+  MIPS    KVM_REG_MIPS_CP0_CONFIG4        32
+  MIPS    KVM_REG_MIPS_CP0_CONFIG5        32
+  MIPS    KVM_REG_MIPS_CP0_CONFIG7        32
+  MIPS    KVM_REG_MIPS_CP0_XCONTEXT       64
+  MIPS    KVM_REG_MIPS_CP0_ERROREPC       64
+  MIPS    KVM_REG_MIPS_CP0_KSCRATCH1      64
+  MIPS    KVM_REG_MIPS_CP0_KSCRATCH2      64
+  MIPS    KVM_REG_MIPS_CP0_KSCRATCH3      64
+  MIPS    KVM_REG_MIPS_CP0_KSCRATCH4      64
+  MIPS    KVM_REG_MIPS_CP0_KSCRATCH5      64
+  MIPS    KVM_REG_MIPS_CP0_KSCRATCH6      64
+  MIPS    KVM_REG_MIPS_CP0_MAAR(0..63)    64
+  MIPS    KVM_REG_MIPS_COUNT_CTL          64
+  MIPS    KVM_REG_MIPS_COUNT_RESUME       64
+  MIPS    KVM_REG_MIPS_COUNT_HZ           64
+  MIPS    KVM_REG_MIPS_FPR_32(0..31)      32
+  MIPS    KVM_REG_MIPS_FPR_64(0..31)      64
+  MIPS    KVM_REG_MIPS_VEC_128(0..31)     128
+  MIPS    KVM_REG_MIPS_FCR_IR             32
+  MIPS    KVM_REG_MIPS_FCR_CSR            32
+  MIPS    KVM_REG_MIPS_MSA_IR             32
+  MIPS    KVM_REG_MIPS_MSA_CSR            32
+  ======= =============================== ============
ARM registers are mapped using the lower 32 bits. The upper 16 of that
is the register group type, or coprocessor number:
-ARM core registers have the following id bit patterns:
+ARM core registers have the following id bit patterns::
+
0x4020 0000 0010 <index into the kvm_regs struct:16>
-ARM 32-bit CP15 registers have the following id bit patterns:
+ARM 32-bit CP15 registers have the following id bit patterns::
+
0x4020 0000 000F <zero:1> <crn:4> <crm:4> <opc1:4> <opc2:3>
-ARM 64-bit CP15 registers have the following id bit patterns:
+ARM 64-bit CP15 registers have the following id bit patterns::
+
0x4030 0000 000F <zero:1> <zero:4> <crm:4> <opc1:4> <zero:3>
-ARM CCSIDR registers are demultiplexed by CSSELR value:
+ARM CCSIDR registers are demultiplexed by CSSELR value::
+
0x4020 0000 0011 00 <csselr:8>
-ARM 32-bit VFP control registers have the following id bit patterns:
+ARM 32-bit VFP control registers have the following id bit patterns::
+
0x4020 0000 0012 1 <regno:12>
-ARM 64-bit FP registers have the following id bit patterns:
+ARM 64-bit FP registers have the following id bit patterns::
+
0x4030 0000 0012 0 <regno:12>
-ARM firmware pseudo-registers have the following bit pattern:
+ARM firmware pseudo-registers have the following bit pattern::
+
0x4030 0000 0014 <regno:16>
@@ -2156,15 +2368,18 @@ that is the register group type, or coprocessor number:
arm64 core/FP-SIMD registers have the following id bit patterns. Note
that the size of the access is variable, as the kvm_regs structure
contains elements ranging from 32 to 128 bits. The index is a 32bit
-value in the kvm_regs structure seen as a 32bit array.
+value in the kvm_regs structure seen as a 32bit array::
+
0x60x0 0000 0010 <index into the kvm_regs struct:16>
Specifically:
+
+======================= ========= ===== =======================================
 Encoding               Register  Bits  kvm_regs member
-----------------------------------------------------------------
+======================= ========= ===== =======================================
 0x6030 0000 0010 0000  X0         64   regs.regs[0]
 0x6030 0000 0010 0002  X1         64   regs.regs[1]
-   ...
+   ...
 0x6030 0000 0010 003c  X30        64   regs.regs[30]
 0x6030 0000 0010 003e  SP         64   regs.sp
 0x6030 0000 0010 0040  PC         64   regs.pc
@@ -2176,27 +2391,31 @@ Specifically:
 0x6030 0000 0010 004c  SPSR_UND   64   spsr[KVM_SPSR_UND]
 0x6030 0000 0010 004e  SPSR_IRQ   64   spsr[KVM_SPSR_IRQ]
 0x6060 0000 0010 0050  SPSR_FIQ   64   spsr[KVM_SPSR_FIQ]
- 0x6040 0000 0010 0054  V0        128   fp_regs.vregs[0] (*)
- 0x6040 0000 0010 0058  V1        128   fp_regs.vregs[1] (*)
-   ...
- 0x6040 0000 0010 00d0  V31       128   fp_regs.vregs[31] (*)
+ 0x6040 0000 0010 0054  V0        128   fp_regs.vregs[0]    [1]_
+ 0x6040 0000 0010 0058  V1        128   fp_regs.vregs[1]    [1]_
+   ...
+ 0x6040 0000 0010 00d0  V31       128   fp_regs.vregs[31]   [1]_
 0x6020 0000 0010 00d4  FPSR       32   fp_regs.fpsr
 0x6020 0000 0010 00d5  FPCR       32   fp_regs.fpcr
+======================= ========= ===== =======================================
+
+.. [1] These encodings are not accepted for SVE-enabled vcpus. See
+ KVM_ARM_VCPU_INIT.
-(*) These encodings are not accepted for SVE-enabled vcpus. See
- KVM_ARM_VCPU_INIT.
+ The equivalent register content can be accessed via bits [127:0] of
+ the corresponding SVE Zn registers instead for vcpus that have SVE
+ enabled (see below).
- The equivalent register content can be accessed via bits [127:0] of
- the corresponding SVE Zn registers instead for vcpus that have SVE
- enabled (see below).
+arm64 CCSIDR registers are demultiplexed by CSSELR value::
-arm64 CCSIDR registers are demultiplexed by CSSELR value:
0x6020 0000 0011 00 <csselr:8>
-arm64 system registers have the following id bit patterns:
+arm64 system registers have the following id bit patterns::
+
0x6030 0000 0013 <op0:2> <op1:3> <crn:4> <crm:4> <op2:3>
-WARNING:
+.. warning::
+
Two system register IDs do not follow the specified pattern. These
are KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT, which map to
system registers CNTV_CVAL_EL0 and CNTVCT_EL0 respectively. These
@@ -2205,10 +2424,12 @@ WARNING:
derived from the register encoding for CNTV_CVAL_EL0. As this is
API, it must remain this way.
-arm64 firmware pseudo-registers have the following bit pattern:
+arm64 firmware pseudo-registers have the following bit pattern::
+
0x6030 0000 0014 <regno:16>
-arm64 SVE registers have the following bit patterns:
+arm64 SVE registers have the following bit patterns::
+
0x6080 0000 0015 00 <n:5> <slice:5> Zn bits[2048*slice + 2047 : 2048*slice]
0x6050 0000 0015 04 <n:4> <slice:5> Pn bits[256*slice + 255 : 256*slice]
0x6050 0000 0015 060 <slice:5> FFR bits[256*slice + 255 : 256*slice]
@@ -2216,7 +2437,7 @@ arm64 SVE registers have the following bit patterns:
Access to register IDs where 2048 * slice >= 128 * max_vq will fail with
ENOENT. max_vq is the vcpu's maximum supported vector length in 128-bit
-quadwords: see (**) below.
+quadwords: see [2]_ below.
These registers are only accessible on vcpus for which SVE is enabled.
See KVM_ARM_VCPU_INIT for details.
@@ -2231,21 +2452,21 @@ lengths supported by the vcpu to be discovered and configured by
userspace. When transferred to or from user memory via KVM_GET_ONE_REG
or KVM_SET_ONE_REG, the value of this register is of type
__u64[KVM_ARM64_SVE_VLS_WORDS], and encodes the set of vector lengths as
-follows:
+follows::
-__u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS];
+ __u64 vector_lengths[KVM_ARM64_SVE_VLS_WORDS];
-if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX &&
- ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
+ if (vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX &&
+ ((vector_lengths[(vq - KVM_ARM64_SVE_VQ_MIN) / 64] >>
((vq - KVM_ARM64_SVE_VQ_MIN) % 64)) & 1))
/* Vector length vq * 16 bytes supported */
-else
+ else
/* Vector length vq * 16 bytes not supported */
-(**) The maximum value vq for which the above condition is true is
-max_vq. This is the maximum vector length available to the guest on
-this vcpu, and determines which register slices are visible through
-this ioctl interface.
+.. [2] The maximum value vq for which the above condition is true is
+ max_vq. This is the maximum vector length available to the guest on
+ this vcpu, and determines which register slices are visible through
+ this ioctl interface.
(See Documentation/arm64/sve.rst for an explanation of the "vq"
nomenclature.)
@@ -2270,11 +2491,13 @@ write this register will fail with EPERM.
MIPS registers are mapped using the lower 32 bits. The upper 16 of that is
the register group type:
-MIPS core registers (see above) have the following id bit patterns:
+MIPS core registers (see above) have the following id bit patterns::
+
0x7030 0000 0000 <reg:16>
MIPS CP0 registers (see KVM_REG_MIPS_CP0_* above) have the following id bit
-patterns depending on whether they're 32-bit or 64-bit registers:
+patterns depending on whether they're 32-bit or 64-bit registers::
+
0x7020 0000 0001 00 <reg:5> <sel:3> (32-bit)
0x7030 0000 0001 00 <reg:5> <sel:3> (64-bit)
@@ -2285,10 +2508,12 @@ with the RI and XI bits (if they exist) in bits 63 and 62 respectively, and
the PFNX field starting at bit 30.
MIPS MAARs (see KVM_REG_MIPS_CP0_MAAR(*) above) have the following id bit
-patterns:
+patterns::
+
0x7030 0000 0001 01 <reg:8>
-MIPS KVM control registers (see above) have the following id bit patterns:
+MIPS KVM control registers (see above) have the following id bit patterns::
+
0x7030 0000 0002 <reg:16>
MIPS FPU registers (see KVM_REG_MIPS_FPR_{32,64}() above) have the following
@@ -2297,31 +2522,40 @@ always accessed according to the current guest FPU mode (Status.FR and
Config5.FRE), i.e. as the guest would see them, and they become unpredictable
if the guest FPU mode is changed. MIPS SIMD Architecture (MSA) vector
registers (see KVM_REG_MIPS_VEC_128() above) have similar patterns as they
-overlap the FPU registers:
+overlap the FPU registers::
+
0x7020 0000 0003 00 <0:3> <reg:5> (32-bit FPU registers)
0x7030 0000 0003 00 <0:3> <reg:5> (64-bit FPU registers)
0x7040 0000 0003 00 <0:3> <reg:5> (128-bit MSA vector registers)
MIPS FPU control registers (see KVM_REG_MIPS_FCR_{IR,CSR} above) have the
-following id bit patterns:
+following id bit patterns::
+
0x7020 0000 0003 01 <0:3> <reg:5>
MIPS MSA control registers (see KVM_REG_MIPS_MSA_{IR,CSR} above) have the
-following id bit patterns:
+following id bit patterns::
+
0x7020 0000 0003 02 <0:3> <reg:5>
4.69 KVM_GET_ONE_REG
+--------------------
+
+:Capability: KVM_CAP_ONE_REG
+:Architectures: all
+:Type: vcpu ioctl
+:Parameters: struct kvm_one_reg (in and out)
+:Returns: 0 on success, negative value on failure
-Capability: KVM_CAP_ONE_REG
-Architectures: all
-Type: vcpu ioctl
-Parameters: struct kvm_one_reg (in and out)
-Returns: 0 on success, negative value on failure
Errors include:
-  ENOENT:   no such register
-  EINVAL:   invalid register ID, or no such register
-  EPERM:    (arm64) register access not allowed before vcpu finalization
+
+  ======== ============================================================
+  ENOENT   no such register
+  EINVAL   invalid register ID, or no such register
+  EPERM    (arm64) register access not allowed before vcpu finalization
+  ======== ============================================================
+
(These error codes are indicative only: do not rely on a specific error
code being returned in a specific situation.)
@@ -2335,12 +2569,13 @@ list in 4.68.
4.70 KVM_KVMCLOCK_CTRL
+----------------------
-Capability: KVM_CAP_KVMCLOCK_CTRL
-Architectures: Any that implement pvclocks (currently x86 only)
-Type: vcpu ioctl
-Parameters: None
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_KVMCLOCK_CTRL
+:Architectures: Any that implement pvclocks (currently x86 only)
+:Type: vcpu ioctl
+:Parameters: None
+:Returns: 0 on success, -1 on error
This signals to the host kernel that the specified guest is being paused by
userspace. The host will set a flag in the pvclock structure that is checked
@@ -2356,26 +2591,30 @@ after pausing the vcpu, but before it is resumed.
4.71 KVM_SIGNAL_MSI
+-------------------
-Capability: KVM_CAP_SIGNAL_MSI
-Architectures: x86 arm arm64
-Type: vm ioctl
-Parameters: struct kvm_msi (in)
-Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
+:Capability: KVM_CAP_SIGNAL_MSI
+:Architectures: x86 arm arm64
+:Type: vm ioctl
+:Parameters: struct kvm_msi (in)
+:Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
Directly inject an MSI message. Only valid with an in-kernel irqchip that
handles MSI messages.
-struct kvm_msi {
+::
+
+ struct kvm_msi {
__u32 address_lo;
__u32 address_hi;
__u32 data;
__u32 flags;
__u32 devid;
__u8 pad[12];
-};
+ };
-flags: KVM_MSI_VALID_DEVID: devid contains a valid value. The per-VM
+flags:
+ KVM_MSI_VALID_DEVID: devid contains a valid value. The per-VM
KVM_CAP_MSI_DEVID capability advertises the requirement to provide
the device ID. If this capability is not available, userspace
should never set the KVM_MSI_VALID_DEVID flag as the ioctl might fail.
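A sketch of a single injection (the x86-style address and vector are
hypothetical, and vm_fd is assumed)::

  struct kvm_msi msi = {
          .address_lo = 0xfee00000,   /* hypothetical destination */
          .data       = 0x30,         /* hypothetical vector */
  };

  int ret = ioctl(vm_fd, KVM_SIGNAL_MSI, &msi);
  /* ret > 0: delivered; ret == 0: guest blocked it; ret < 0: error */
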
@@ -2391,30 +2630,31 @@ address_hi must be zero.
4.71 KVM_CREATE_PIT2
+--------------------
-Capability: KVM_CAP_PIT2
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pit_config (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PIT2
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_pit_config (in)
+:Returns: 0 on success, -1 on error
Creates an in-kernel device model for the i8254 PIT. This call is only valid
after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP. The following
-parameters have to be passed:
+parameters have to be passed::
-struct kvm_pit_config {
+ struct kvm_pit_config {
__u32 flags;
__u32 pad[15];
-};
+ };
-Valid flags are:
+Valid flags are::
-#define KVM_PIT_SPEAKER_DUMMY 1 /* emulate speaker port stub */
+ #define KVM_PIT_SPEAKER_DUMMY 1 /* emulate speaker port stub */
PIT timer interrupts may use a per-VM kernel thread for injection. If it
-exists, this thread will have a name of the following pattern:
+exists, this thread will have a name of the following pattern::
-kvm-pit/<owner-process-pid>
+ kvm-pit/<owner-process-pid>
When running a guest with elevated priorities, the scheduling parameters of
this thread may have to be adjusted accordingly.
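A minimal creation sketch (vm_fd assumed; KVM_CREATE_IRQCHIP must already
have been issued)::

  struct kvm_pit_config pit = { .flags = KVM_PIT_SPEAKER_DUMMY };

  if (ioctl(vm_fd, KVM_CREATE_PIT2, &pit) < 0)
          perror("KVM_CREATE_PIT2");
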
@@ -2423,37 +2663,39 @@ This IOCTL replaces the obsolete KVM_CREATE_PIT.
4.72 KVM_GET_PIT2
+-----------------
-Capability: KVM_CAP_PIT_STATE2
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pit_state2 (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PIT_STATE2
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_pit_state2 (out)
+:Returns: 0 on success, -1 on error
Retrieves the state of the in-kernel PIT model. Only valid after
-KVM_CREATE_PIT2. The state is returned in the following structure:
+KVM_CREATE_PIT2. The state is returned in the following structure::
-struct kvm_pit_state2 {
+ struct kvm_pit_state2 {
struct kvm_pit_channel_state channels[3];
__u32 flags;
__u32 reserved[9];
-};
+ };
-Valid flags are:
+Valid flags are::
-/* disable PIT in HPET legacy mode */
-#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
+ /* disable PIT in HPET legacy mode */
+ #define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
This IOCTL replaces the obsolete KVM_GET_PIT.
4.73 KVM_SET_PIT2
+-----------------
-Capability: KVM_CAP_PIT_STATE2
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pit_state2 (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PIT_STATE2
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_pit_state2 (in)
+:Returns: 0 on success, -1 on error
Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
See KVM_GET_PIT2 for details on struct kvm_pit_state2.
@@ -2462,12 +2704,13 @@ This IOCTL replaces the obsolete KVM_SET_PIT.
4.74 KVM_PPC_GET_SMMU_INFO
+--------------------------
-Capability: KVM_CAP_PPC_GET_SMMU_INFO
-Architectures: powerpc
-Type: vm ioctl
-Parameters: None
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PPC_GET_SMMU_INFO
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: None
+:Returns: 0 on success, -1 on error
This populates and returns a structure describing the features of
the "Server" class MMU emulation supported by KVM.
@@ -2475,7 +2718,7 @@ This can in turn be used by userspace to generate the appropriate
device-tree properties for the guest operating system.
The structure contains some global information, followed by an
-array of supported segment page sizes:
+array of supported segment page sizes::
struct kvm_ppc_smmu_info {
__u64 flags;
@@ -2503,7 +2746,7 @@ The "slb_size" field indicates how many SLB entries are supported
The "sps" array contains 8 entries indicating the supported base
page sizes for a segment in increasing order. Each entry is defined
-as follow:
+as follows::
struct kvm_ppc_one_seg_page_size {
__u32 page_shift; /* Base page shift of segment (or 0) */
@@ -2524,7 +2767,7 @@ size provides the list of supported actual page sizes (which can be
only larger or equal to the base page size), along with the
corresponding encoding in the hash PTE. Similarly, the array is
8 entries sorted by increasing sizes and an entry with a "0" shift
-is an empty entry and a terminator:
+is an empty entry and a terminator::
struct kvm_ppc_one_page_size {
__u32 page_shift; /* Page shift (or 0) */
@@ -2536,12 +2779,13 @@ PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
into the hash PTE second double word).
4.75 KVM_IRQFD
+--------------
-Capability: KVM_CAP_IRQFD
-Architectures: x86 s390 arm arm64
-Type: vm ioctl
-Parameters: struct kvm_irqfd (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_IRQFD
+:Architectures: x86 s390 arm arm64
+:Type: vm ioctl
+:Parameters: struct kvm_irqfd (in)
+:Returns: 0 on success, -1 on error
Allows setting an eventfd to directly trigger a guest interrupt.
kvm_irqfd.fd specifies the file descriptor to use as the eventfd and
@@ -2565,6 +2809,7 @@ irqfd. The KVM_IRQFD_FLAG_RESAMPLE is only necessary on assignment
and need not be specified with KVM_IRQFD_FLAG_DEASSIGN.
On arm/arm64, gsi routing being supported, the following can happen:
+
- in case no routing entry is associated to this gsi, injection fails
- in case the gsi is associated to an irqchip routing entry,
irqchip.pin + 32 corresponds to the injected SPI ID.
@@ -2573,12 +2818,13 @@ On arm/arm64, gsi routing being supported, the following can happen:
to GICv3 ITS in-kernel emulation).
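+
+An illustrative assignment (a sketch, not normative; event_fd is
+assumed to come from eventfd(0, 0) and gsi 10 is an arbitrary line)::
+
+  struct kvm_irqfd irqfd = {
+          .fd  = event_fd,        /* eventfd that triggers injection */
+          .gsi = 10,              /* guest interrupt to raise */
+  };
+
+  if (ioctl(vm_fd, KVM_IRQFD, &irqfd) < 0)
+          perror("KVM_IRQFD");
+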
4.76 KVM_PPC_ALLOCATE_HTAB
+--------------------------
-Capability: KVM_CAP_PPC_ALLOC_HTAB
-Architectures: powerpc
-Type: vm ioctl
-Parameters: Pointer to u32 containing hash table order (in/out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PPC_ALLOC_HTAB
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: Pointer to u32 containing hash table order (in/out)
+:Returns: 0 on success, -1 on error
This requests the host kernel to allocate an MMU hash table for a
guest using the PAPR paravirtualization interface. This only does
@@ -2609,75 +2855,88 @@ real-mode area (VRMA) facility, the kernel will re-create the VMRA
HPTEs on the next KVM_RUN of any vcpu.
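+
+For example (a sketch; the order value is arbitrary and the kernel may
+allocate a larger table, writing the actual order back)::
+
+  __u32 order = 18;   /* log2 of the requested HPT size in bytes */
+
+  if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) < 0)
+          perror("KVM_PPC_ALLOCATE_HTAB");
+  /* on success, order holds the order actually allocated */
+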
4.77 KVM_S390_INTERRUPT
+-----------------------
-Capability: basic
-Architectures: s390
-Type: vm ioctl, vcpu ioctl
-Parameters: struct kvm_s390_interrupt (in)
-Returns: 0 on success, -1 on error
+:Capability: basic
+:Architectures: s390
+:Type: vm ioctl, vcpu ioctl
+:Parameters: struct kvm_s390_interrupt (in)
+:Returns: 0 on success, -1 on error
Allows to inject an interrupt to the guest. Interrupts can be floating
(vm ioctl) or per cpu (vcpu ioctl), depending on the interrupt type.
-Interrupt parameters are passed via kvm_s390_interrupt:
+Interrupt parameters are passed via kvm_s390_interrupt::
-struct kvm_s390_interrupt {
+ struct kvm_s390_interrupt {
__u32 type;
__u32 parm;
__u64 parm64;
-};
+ };
type can be one of the following:
-KVM_S390_SIGP_STOP (vcpu) - sigp stop; optional flags in parm
-KVM_S390_PROGRAM_INT (vcpu) - program check; code in parm
-KVM_S390_SIGP_SET_PREFIX (vcpu) - sigp set prefix; prefix address in parm
-KVM_S390_RESTART (vcpu) - restart
-KVM_S390_INT_CLOCK_COMP (vcpu) - clock comparator interrupt
-KVM_S390_INT_CPU_TIMER (vcpu) - CPU timer interrupt
-KVM_S390_INT_VIRTIO (vm) - virtio external interrupt; external interrupt
- parameters in parm and parm64
-KVM_S390_INT_SERVICE (vm) - sclp external interrupt; sclp parameter in parm
-KVM_S390_INT_EMERGENCY (vcpu) - sigp emergency; source cpu in parm
-KVM_S390_INT_EXTERNAL_CALL (vcpu) - sigp external call; source cpu in parm
-KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm) - compound value to indicate an
- I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel);
- I/O interruption parameters in parm (subchannel) and parm64 (intparm,
- interruption subclass)
-KVM_S390_MCHK (vm, vcpu) - machine check interrupt; cr 14 bits in parm,
- machine check interrupt code in parm64 (note that
- machine checks needing further payload are not
- supported by this ioctl)
+KVM_S390_SIGP_STOP (vcpu)
+ - sigp stop; optional flags in parm
+KVM_S390_PROGRAM_INT (vcpu)
+ - program check; code in parm
+KVM_S390_SIGP_SET_PREFIX (vcpu)
+ - sigp set prefix; prefix address in parm
+KVM_S390_RESTART (vcpu)
+ - restart
+KVM_S390_INT_CLOCK_COMP (vcpu)
+ - clock comparator interrupt
+KVM_S390_INT_CPU_TIMER (vcpu)
+ - CPU timer interrupt
+KVM_S390_INT_VIRTIO (vm)
+ - virtio external interrupt; external interrupt
+ parameters in parm and parm64
+KVM_S390_INT_SERVICE (vm)
+ - sclp external interrupt; sclp parameter in parm
+KVM_S390_INT_EMERGENCY (vcpu)
+ - sigp emergency; source cpu in parm
+KVM_S390_INT_EXTERNAL_CALL (vcpu)
+ - sigp external call; source cpu in parm
+KVM_S390_INT_IO(ai,cssid,ssid,schid) (vm)
+ - compound value to indicate an
+ I/O interrupt (ai - adapter interrupt; cssid,ssid,schid - subchannel);
+ I/O interruption parameters in parm (subchannel) and parm64 (intparm,
+ interruption subclass)
+KVM_S390_MCHK (vm, vcpu)
+ - machine check interrupt; cr 14 bits in parm, machine check interrupt
+ code in parm64 (note that machine checks needing further payload are not
+ supported by this ioctl)
This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.78 KVM_PPC_GET_HTAB_FD
+------------------------
-Capability: KVM_CAP_PPC_HTAB_FD
-Architectures: powerpc
-Type: vm ioctl
-Parameters: Pointer to struct kvm_get_htab_fd (in)
-Returns: file descriptor number (>= 0) on success, -1 on error
+:Capability: KVM_CAP_PPC_HTAB_FD
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: Pointer to struct kvm_get_htab_fd (in)
+:Returns: file descriptor number (>= 0) on success, -1 on error
This returns a file descriptor that can be used either to read out the
entries in the guest's hashed page table (HPT), or to write entries to
initialize the HPT. The returned fd can only be written to if the
KVM_GET_HTAB_WRITE bit is set in the flags field of the argument, and
can only be read if that bit is clear. The argument struct looks like
-this:
+this::
-/* For KVM_PPC_GET_HTAB_FD */
-struct kvm_get_htab_fd {
+ /* For KVM_PPC_GET_HTAB_FD */
+ struct kvm_get_htab_fd {
__u64 flags;
__u64 start_index;
__u64 reserved[2];
-};
+ };
-/* Values for kvm_get_htab_fd.flags */
-#define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1)
-#define KVM_GET_HTAB_WRITE ((__u64)0x2)
+ /* Values for kvm_get_htab_fd.flags */
+ #define KVM_GET_HTAB_BOLTED_ONLY ((__u64)0x1)
+ #define KVM_GET_HTAB_WRITE ((__u64)0x2)
-The `start_index' field gives the index in the HPT of the entry at
+The 'start_index' field gives the index in the HPT of the entry at
which to start reading. It is ignored when writing.
Reads on the fd will initially supply information about all
@@ -2692,29 +2951,34 @@ Data read or written is structured as a header (8 bytes) followed by a
series of valid HPT entries (16 bytes) each. The header indicates how
many valid HPT entries there are and how many invalid entries follow
the valid entries. The invalid entries are not represented explicitly
-in the stream. The header format is:
+in the stream. The header format is::
-struct kvm_get_htab_header {
+ struct kvm_get_htab_header {
__u32 index;
__u16 n_valid;
__u16 n_invalid;
-};
+ };
Writes to the fd create HPT entries starting at the index given in the
-header; first `n_valid' valid entries with contents from the data
-written, then `n_invalid' invalid entries, invalidating any previously
+header; first 'n_valid' valid entries with contents from the data
+written, then 'n_invalid' invalid entries, invalidating any previously
valid entries found.
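+
+A dump loop might look like this (a sketch only; the buffer size is
+arbitrary and parsing of kvm_get_htab_header records is left out)::
+
+  struct kvm_get_htab_fd htab = { .start_index = 0 };
+  char buf[65536];
+  ssize_t n;
+  int fd;
+
+  fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &htab);
+  while ((n = read(fd, buf, sizeof(buf))) > 0)
+          ;   /* parse kvm_get_htab_header records in buf */
+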
4.79 KVM_CREATE_DEVICE
+----------------------
+
+:Capability: KVM_CAP_DEVICE_CTRL
+:Type: vm ioctl
+:Parameters: struct kvm_create_device (in/out)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_DEVICE_CTRL
-Type: vm ioctl
-Parameters: struct kvm_create_device (in/out)
-Returns: 0 on success, -1 on error
Errors:
- ENODEV: The device type is unknown or unsupported
- EEXIST: Device already created, and this type of device may not
+
+ ====== =======================================================
+ ENODEV The device type is unknown or unsupported
+ EEXIST Device already created, and this type of device may not
be instantiated multiple times
+ ====== =======================================================
Other error conditions may be defined by individual device types or
have their standard meanings.
@@ -2730,25 +2994,32 @@ Individual devices should not define flags. Attributes should be used
for specifying any behavior that is not implied by the device type
number.
-struct kvm_create_device {
+::
+
+ struct kvm_create_device {
__u32 type; /* in: KVM_DEV_TYPE_xxx */
__u32 fd; /* out: device handle */
__u32 flags; /* in: KVM_CREATE_DEVICE_xxx */
-};
+ };
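+
+Creating a device could look like this (a sketch;
+KVM_DEV_TYPE_ARM_VGIC_V3 is just one example type, and device_fd is
+assumed to be declared by the caller)::
+
+  struct kvm_create_device cd = { .type = KVM_DEV_TYPE_ARM_VGIC_V3 };
+
+  if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
+          device_fd = cd.fd;   /* fd for subsequent device ioctls */
+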
4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR
+--------------------------------------------
+
+:Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+ KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+:Type: device ioctl, vm ioctl, vcpu ioctl
+:Parameters: struct kvm_device_attr
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
- KVM_CAP_VCPU_ATTRIBUTES for vcpu device
-Type: device ioctl, vm ioctl, vcpu ioctl
-Parameters: struct kvm_device_attr
-Returns: 0 on success, -1 on error
Errors:
- ENXIO: The group or attribute is unknown/unsupported for this device
+
+ ===== =============================================================
+ ENXIO The group or attribute is unknown/unsupported for this device
or hardware support is missing.
- EPERM: The attribute cannot (currently) be accessed this way
+ EPERM The attribute cannot (currently) be accessed this way
(e.g. read-only attribute, or attribute that only makes
sense when the device is in a different state)
+ ===== =============================================================
Other error conditions may be defined by individual device types.
@@ -2757,23 +3028,30 @@ semantics are device-specific. See individual device documentation in
the "devices" directory. As with ONE_REG, the size of the data
transferred is defined by the particular attribute.
-struct kvm_device_attr {
+::
+
+ struct kvm_device_attr {
__u32 flags; /* no flags currently defined */
__u32 group; /* device-defined */
__u64 attr; /* group-defined */
__u64 addr; /* userspace address of attr data */
-};
+ };
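+
+Setting an attribute might look like this (a sketch; the group and
+attr values are device-specific placeholders, and device_fd comes
+from KVM_CREATE_DEVICE)::
+
+  __u64 val = 0;
+  struct kvm_device_attr attr = {
+          .group = 0,    /* placeholder: device-defined */
+          .attr  = 0,    /* placeholder: group-defined */
+          .addr  = (__u64)(unsigned long)&val,
+  };
+
+  if (ioctl(device_fd, KVM_SET_DEVICE_ATTR, &attr) < 0)
+          perror("KVM_SET_DEVICE_ATTR");
+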
4.81 KVM_HAS_DEVICE_ATTR
+------------------------
+
+:Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+ KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+:Type: device ioctl, vm ioctl, vcpu ioctl
+:Parameters: struct kvm_device_attr
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
- KVM_CAP_VCPU_ATTRIBUTES for vcpu device
-Type: device ioctl, vm ioctl, vcpu ioctl
-Parameters: struct kvm_device_attr
-Returns: 0 on success, -1 on error
Errors:
- ENXIO: The group or attribute is unknown/unsupported for this device
+
+ ===== =============================================================
+ ENXIO The group or attribute is unknown/unsupported for this device
or hardware support is missing.
+ ===== =============================================================
Tests whether a device supports a particular attribute. A successful
return indicates the attribute is implemented. It does not necessarily
@@ -2781,15 +3059,20 @@ indicate that the attribute can be read or written in the device's
current state. "addr" is ignored.
4.82 KVM_ARM_VCPU_INIT
+----------------------
+
+:Capability: basic
+:Architectures: arm, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_vcpu_init (in)
+:Returns: 0 on success; -1 on error
-Capability: basic
-Architectures: arm, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_vcpu_init (in)
-Returns: 0 on success; -1 on error
Errors:
-  EINVAL:    the target is unknown, or the combination of features is invalid.
-  ENOENT:    a features bit specified is unknown.
+
+ ====== =================================================================
+ EINVAL the target is unknown, or the combination of features is invalid.
+ ENOENT a features bit specified is unknown.
+ ====== =================================================================
This tells KVM what type of CPU to present to the guest, and what
optional features it should have.  This will cause a reset of the cpu
@@ -2805,6 +3088,7 @@ state. All calls to this function after the initial call must use the same
target and same set of feature flags, otherwise EINVAL will be returned.
Possible features:
+
- KVM_ARM_VCPU_POWER_OFF: Starts the CPU in a power-off state.
Depends on KVM_CAP_ARM_PSCI. If not set, the CPU will be powered on
and execute guest code when KVM_RUN is called.
@@ -2861,14 +3145,19 @@ Possible features:
no longer be written using KVM_SET_ONE_REG.
4.83 KVM_ARM_PREFERRED_TARGET
+-----------------------------
+
+:Capability: basic
+:Architectures: arm, arm64
+:Type: vm ioctl
+:Parameters: struct kvm_vcpu_init (out)
+:Returns: 0 on success; -1 on error
-Capability: basic
-Architectures: arm, arm64
-Type: vm ioctl
-Parameters: struct struct kvm_vcpu_init (out)
-Returns: 0 on success; -1 on error
Errors:
- ENODEV: no preferred target available for the host
+
+ ====== ==========================================
+ ENODEV no preferred target available for the host
+ ====== ==========================================
This queries KVM for preferred CPU target type which can be emulated
by KVM on underlying host.
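+
+A typical sequence pairs this call with KVM_ARM_VCPU_INIT (a sketch;
+no optional features are requested here)::
+
+  struct kvm_vcpu_init init;
+
+  if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) == 0)
+          ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
+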
@@ -2885,43 +3174,57 @@ in VCPU matching underlying host.
4.84 KVM_GET_REG_LIST
+---------------------
+
+:Capability: basic
+:Architectures: arm, arm64, mips
+:Type: vcpu ioctl
+:Parameters: struct kvm_reg_list (in/out)
+:Returns: 0 on success; -1 on error
-Capability: basic
-Architectures: arm, arm64, mips
-Type: vcpu ioctl
-Parameters: struct kvm_reg_list (in/out)
-Returns: 0 on success; -1 on error
Errors:
-  E2BIG:     the reg index list is too big to fit in the array specified by
+
+ ===== ==============================================================
+ E2BIG the reg index list is too big to fit in the array specified by
            the user (the number required will be written into n).
+ ===== ==============================================================
+
+::
-struct kvm_reg_list {
+ struct kvm_reg_list {
__u64 n; /* number of registers in reg[] */
__u64 reg[0];
-};
+ };
This ioctl returns the guest registers that are supported for the
KVM_GET_ONE_REG/KVM_SET_ONE_REG calls.
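+
+The usual pattern is to probe for the required size first (a sketch;
+malloc error handling omitted)::
+
+  struct kvm_reg_list probe = { .n = 0 }, *list;
+
+  ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);   /* fails with E2BIG */
+  list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
+  list->n = probe.n;
+  ioctl(vcpu_fd, KVM_GET_REG_LIST, list);
+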
4.85 KVM_ARM_SET_DEVICE_ADDR (deprecated)
+-----------------------------------------
+
+:Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
+:Architectures: arm, arm64
+:Type: vm ioctl
+:Parameters: struct kvm_arm_device_address (in)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_ARM_SET_DEVICE_ADDR
-Architectures: arm, arm64
-Type: vm ioctl
-Parameters: struct kvm_arm_device_address (in)
-Returns: 0 on success, -1 on error
Errors:
- ENODEV: The device id is unknown
- ENXIO: Device not supported on current system
- EEXIST: Address already set
- E2BIG: Address outside guest physical address space
- EBUSY: Address overlaps with other device range
-struct kvm_arm_device_addr {
+ ====== ============================================
+ ENODEV The device id is unknown
+ ENXIO Device not supported on current system
+ EEXIST Address already set
+ E2BIG Address outside guest physical address space
+ EBUSY Address overlaps with other device range
+ ====== ============================================
+
+::
+
+ struct kvm_arm_device_addr {
__u64 id;
__u64 addr;
-};
+ };
Specify a device address in the guest's physical address space where guests
can access emulated or directly exposed devices, which the host kernel needs
@@ -2929,7 +3232,7 @@ to know about. The id field is an architecture specific identifier for a
specific device.
ARM/arm64 divides the id field into two parts, a device id and an
-address type id specific to the individual device.
+address type id specific to the individual device::
 bits: | 63 ... 32 | 31 ... 16 | 15 ... 0 |
field: | 0x00000000 | device id | addr type id |
@@ -2947,12 +3250,13 @@ should be used instead.
4.86 KVM_PPC_RTAS_DEFINE_TOKEN
+------------------------------
-Capability: KVM_CAP_PPC_RTAS
-Architectures: ppc
-Type: vm ioctl
-Parameters: struct kvm_rtas_token_args
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PPC_RTAS
+:Architectures: ppc
+:Type: vm ioctl
+:Parameters: struct kvm_rtas_token_args
+:Returns: 0 on success, -1 on error
Defines a token value for a RTAS (Run Time Abstraction Services)
service in order to allow it to be handled in the kernel. The
@@ -2966,18 +3270,21 @@ calls by the guest for that service will be passed to userspace to be
handled.
4.87 KVM_SET_GUEST_DEBUG
+------------------------
-Capability: KVM_CAP_SET_GUEST_DEBUG
-Architectures: x86, s390, ppc, arm64
-Type: vcpu ioctl
-Parameters: struct kvm_guest_debug (in)
-Returns: 0 on success; -1 on error
+:Capability: KVM_CAP_SET_GUEST_DEBUG
+:Architectures: x86, s390, ppc, arm64
+:Type: vcpu ioctl
+:Parameters: struct kvm_guest_debug (in)
+:Returns: 0 on success; -1 on error
-struct kvm_guest_debug {
+::
+
+ struct kvm_guest_debug {
__u32 control;
__u32 pad;
struct kvm_guest_debug_arch arch;
-};
+ };
Set up the processor specific debug registers and configure vcpu for
handling guest debug events. There are two parts to the structure, the
@@ -3019,26 +3326,31 @@ KVM_EXIT_DEBUG with the kvm_debug_exit_arch part of the kvm_run
structure containing architecture specific debug information.
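+
+Enabling single-step could look like this (a sketch; all other control
+flags and the arch-specific part are left zero)::
+
+  struct kvm_guest_debug dbg = {
+          .control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP,
+  };
+
+  if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
+          perror("KVM_SET_GUEST_DEBUG");
+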
4.88 KVM_GET_EMULATED_CPUID
+---------------------------
+
+:Capability: KVM_CAP_EXT_EMUL_CPUID
+:Architectures: x86
+:Type: system ioctl
+:Parameters: struct kvm_cpuid2 (in/out)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_EXT_EMUL_CPUID
-Architectures: x86
-Type: system ioctl
-Parameters: struct kvm_cpuid2 (in/out)
-Returns: 0 on success, -1 on error
+::
-struct kvm_cpuid2 {
+ struct kvm_cpuid2 {
__u32 nent;
__u32 flags;
struct kvm_cpuid_entry2 entries[0];
-};
+ };
The member 'flags' is used for passing flags from userspace.
-#define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
-#define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
-#define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
+::
-struct kvm_cpuid_entry2 {
+ #define KVM_CPUID_FLAG_SIGNIFCANT_INDEX BIT(0)
+ #define KVM_CPUID_FLAG_STATEFUL_FUNC BIT(1)
+ #define KVM_CPUID_FLAG_STATE_READ_NEXT BIT(2)
+
+ struct kvm_cpuid_entry2 {
__u32 function;
__u32 index;
__u32 flags;
@@ -3047,7 +3359,7 @@ struct kvm_cpuid_entry2 {
__u32 ecx;
__u32 edx;
__u32 padding[3];
-};
+ };
This ioctl returns x86 cpuid features which are emulated by
kvm. Userspace can use the information returned by this ioctl to query
@@ -3072,10 +3384,14 @@ emulated efficiently and thus not included here.
The fields in each entry are defined as follows:
- function: the eax value used to obtain the entry
- index: the ecx value used to obtain the entry (for entries that are
+ function:
+ the eax value used to obtain the entry
+ index:
+ the ecx value used to obtain the entry (for entries that are
affected by ecx)
- flags: an OR of zero or more of the following:
+ flags:
+ an OR of zero or more of the following:
+
KVM_CPUID_FLAG_SIGNIFCANT_INDEX:
if the index field is valid
KVM_CPUID_FLAG_STATEFUL_FUNC:
@@ -3085,24 +3401,28 @@ The fields in each entry are defined as follows:
KVM_CPUID_FLAG_STATE_READ_NEXT:
for KVM_CPUID_FLAG_STATEFUL_FUNC entries, set if this entry is
the first entry to be read by a cpu
- eax, ebx, ecx, edx: the values returned by the cpuid instruction for
+
+ eax, ebx, ecx, edx:
+ the values returned by the cpuid instruction for
this function/index combination
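+
+An illustrative call (the capacity of 64 entries is an arbitrary
+choice; kvm_fd is the /dev/kvm file descriptor)::
+
+  struct kvm_cpuid2 *cpuid;
+
+  cpuid = calloc(1, sizeof(*cpuid) + 64 * sizeof(cpuid->entries[0]));
+  cpuid->nent = 64;   /* capacity of the entries array */
+  if (ioctl(kvm_fd, KVM_GET_EMULATED_CPUID, cpuid) < 0)
+          perror("KVM_GET_EMULATED_CPUID");
+  /* on success, cpuid->nent holds the number of valid entries */
+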
4.89 KVM_S390_MEM_OP
+--------------------
-Capability: KVM_CAP_S390_MEM_OP
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_mem_op (in)
-Returns: = 0 on success,
- < 0 on generic error (e.g. -EFAULT or -ENOMEM),
- > 0 if an exception occurred while walking the page tables
+:Capability: KVM_CAP_S390_MEM_OP
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_s390_mem_op (in)
+:Returns: = 0 on success,
+ < 0 on generic error (e.g. -EFAULT or -ENOMEM),
+ > 0 if an exception occurred while walking the page tables
Read or write data from/to the logical (virtual) memory of a VCPU.
-Parameters are specified via the following structure:
+Parameters are specified via the following structure::
-struct kvm_s390_mem_op {
+ struct kvm_s390_mem_op {
__u64 gaddr; /* the guest address */
__u64 flags; /* flags */
__u32 size; /* amount of bytes */
@@ -3110,7 +3430,7 @@ struct kvm_s390_mem_op {
__u64 buf; /* buffer in userspace */
__u8 ar; /* the access register number */
__u8 reserved[31]; /* should be set to 0 */
-};
+ };
The type of operation is specified in the "op" field. It is either
KVM_S390_MEMOP_LOGICAL_READ for reading from logical memory space or
@@ -3137,24 +3457,25 @@ The "reserved" field is meant for future extensions. It is not used by
KVM with the currently defined set of flags.
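+
+Reading guest memory might look like this (a sketch; the guest address
+is arbitrary and the access-register and key-checking extras are
+unused)::
+
+  __u8 data[256];
+  struct kvm_s390_mem_op memop = {
+          .gaddr = 0x1000,
+          .size  = sizeof(data),
+          .op    = KVM_S390_MEMOP_LOGICAL_READ,
+          .buf   = (__u64)(unsigned long)data,
+  };
+
+  int ret = ioctl(vcpu_fd, KVM_S390_MEM_OP, &memop);
+  /* ret < 0: generic error; ret > 0: exception during translation */
+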
4.90 KVM_S390_GET_SKEYS
+-----------------------
-Capability: KVM_CAP_S390_SKEYS
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_skeys
-Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
- keys, negative value on error
+:Capability: KVM_CAP_S390_SKEYS
+:Architectures: s390
+:Type: vm ioctl
+:Parameters: struct kvm_s390_skeys
+:Returns: 0 on success, KVM_S390_GET_KEYS_NONE if guest is not using storage
+ keys, negative value on error
This ioctl is used to get guest storage key values on the s390
-architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
+architecture. The ioctl takes parameters via the kvm_s390_skeys struct::
-struct kvm_s390_skeys {
+ struct kvm_s390_skeys {
__u64 start_gfn;
__u64 count;
__u64 skeydata_addr;
__u32 flags;
__u32 reserved[9];
-};
+ };
The start_gfn field is the number of the first guest frame whose storage keys
you want to get.
@@ -3168,12 +3489,13 @@ The skeydata_addr field is the address to a buffer large enough to hold count
bytes. This buffer will be filled with storage key data by the ioctl.
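+
+Fetching the keys of the first 128 frames might look like this (a
+sketch; the count is arbitrary)::
+
+  __u8 keys[128];
+  struct kvm_s390_skeys skeys = {
+          .start_gfn     = 0,
+          .count         = sizeof(keys),
+          .skeydata_addr = (__u64)(unsigned long)keys,
+  };
+
+  if (ioctl(vm_fd, KVM_S390_GET_SKEYS, &skeys) < 0)
+          perror("KVM_S390_GET_SKEYS");
+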
4.91 KVM_S390_SET_SKEYS
+-----------------------
-Capability: KVM_CAP_S390_SKEYS
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_skeys
-Returns: 0 on success, negative value on error
+:Capability: KVM_CAP_S390_SKEYS
+:Architectures: s390
+:Type: vm ioctl
+:Parameters: struct kvm_s390_skeys
+:Returns: 0 on success, negative value on error
This ioctl is used to set guest storage key values on the s390
architecture. The ioctl takes parameters via the kvm_s390_skeys struct.
@@ -3195,21 +3517,27 @@ Note: If any architecturally invalid key value is found in the given data then
the ioctl will return -EINVAL.
4.92 KVM_S390_IRQ
+-----------------
+
+:Capability: KVM_CAP_S390_INJECT_IRQ
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_s390_irq (in)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_S390_INJECT_IRQ
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_irq (in)
-Returns: 0 on success, -1 on error
Errors:
- EINVAL: interrupt type is invalid
- type is KVM_S390_SIGP_STOP and flag parameter is invalid value
+
+ ====== =================================================================
+ EINVAL interrupt type is invalid
+ type is KVM_S390_SIGP_STOP and flag parameter is invalid value,
type is KVM_S390_INT_EXTERNAL_CALL and code is bigger
- than the maximum of VCPUs
- EBUSY: type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped
- type is KVM_S390_SIGP_STOP and a stop irq is already pending
+ than the maximum of VCPUs
+ EBUSY type is KVM_S390_SIGP_SET_PREFIX and vcpu is not stopped,
+ type is KVM_S390_SIGP_STOP and a stop irq is already pending,
type is KVM_S390_INT_EXTERNAL_CALL and an external call interrupt
- is already pending
+ is already pending
+ ====== =================================================================
Allows to inject an interrupt to the guest.
@@ -3217,9 +3545,9 @@ Using struct kvm_s390_irq as a parameter allows
to inject additional payload which is not
possible via KVM_S390_INTERRUPT.
-Interrupt parameters are passed via kvm_s390_irq:
+Interrupt parameters are passed via kvm_s390_irq::
-struct kvm_s390_irq {
+ struct kvm_s390_irq {
__u64 type;
union {
struct kvm_s390_io_info io;
@@ -3232,44 +3560,45 @@ struct kvm_s390_irq {
struct kvm_s390_mchk_info mchk;
char reserved[64];
} u;
-};
+ };
type can be one of the following:
-KVM_S390_SIGP_STOP - sigp stop; parameter in .stop
-KVM_S390_PROGRAM_INT - program check; parameters in .pgm
-KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix
-KVM_S390_RESTART - restart; no parameters
-KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters
-KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters
-KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
-KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
-KVM_S390_MCHK - machine check interrupt; parameters in .mchk
+- KVM_S390_SIGP_STOP - sigp stop; parameter in .stop
+- KVM_S390_PROGRAM_INT - program check; parameters in .pgm
+- KVM_S390_SIGP_SET_PREFIX - sigp set prefix; parameters in .prefix
+- KVM_S390_RESTART - restart; no parameters
+- KVM_S390_INT_CLOCK_COMP - clock comparator interrupt; no parameters
+- KVM_S390_INT_CPU_TIMER - CPU timer interrupt; no parameters
+- KVM_S390_INT_EMERGENCY - sigp emergency; parameters in .emerg
+- KVM_S390_INT_EXTERNAL_CALL - sigp external call; parameters in .extcall
+- KVM_S390_MCHK - machine check interrupt; parameters in .mchk
This is an asynchronous vcpu ioctl and can be invoked from any thread.
4.94 KVM_S390_GET_IRQ_STATE
+---------------------------
-Capability: KVM_CAP_S390_IRQ_STATE
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_irq_state (out)
-Returns: >= number of bytes copied into buffer,
- -EINVAL if buffer size is 0,
- -ENOBUFS if buffer size is too small to fit all pending interrupts,
- -EFAULT if the buffer address was invalid
+:Capability: KVM_CAP_S390_IRQ_STATE
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_s390_irq_state (out)
+:Returns: >= number of bytes copied into buffer,
+ -EINVAL if buffer size is 0,
+ -ENOBUFS if buffer size is too small to fit all pending interrupts,
+ -EFAULT if the buffer address was invalid
This ioctl allows userspace to retrieve the complete state of all currently
pending interrupts in a single buffer. Use cases include migration
and introspection. The parameter structure contains the address of a
-userspace buffer and its length:
+userspace buffer and its length::
-struct kvm_s390_irq_state {
+ struct kvm_s390_irq_state {
__u64 buf;
__u32 flags; /* will stay unused for compatibility reasons */
__u32 len;
__u32 reserved[4]; /* will stay unused for compatibility reasons */
-};
+ };
Userspace passes in the above struct and for each pending interrupt a
struct kvm_s390_irq is copied to the provided buffer.
@@ -3283,29 +3612,30 @@ If -ENOBUFS is returned the buffer provided was too small and userspace
may retry with a bigger buffer.
4.95 KVM_S390_SET_IRQ_STATE
-
-Capability: KVM_CAP_S390_IRQ_STATE
-Architectures: s390
-Type: vcpu ioctl
-Parameters: struct kvm_s390_irq_state (in)
-Returns: 0 on success,
- -EFAULT if the buffer address was invalid,
- -EINVAL for an invalid buffer length (see below),
- -EBUSY if there were already interrupts pending,
- errors occurring when actually injecting the
+---------------------------
+
+:Capability: KVM_CAP_S390_IRQ_STATE
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: struct kvm_s390_irq_state (in)
+:Returns: 0 on success,
+ -EFAULT if the buffer address was invalid,
+ -EINVAL for an invalid buffer length (see below),
+ -EBUSY if there were already interrupts pending,
+ errors occurring when actually injecting the
interrupt. See KVM_S390_IRQ.
This ioctl allows userspace to set the complete state of all cpu-local
interrupts currently pending for the vcpu. It is intended for restoring
interrupt state after a migration. The input parameter is a userspace buffer
-containing a struct kvm_s390_irq_state:
+containing a struct kvm_s390_irq_state::
-struct kvm_s390_irq_state {
+ struct kvm_s390_irq_state {
__u64 buf;
__u32 flags; /* will stay unused for compatibility reasons */
__u32 len;
__u32 reserved[4]; /* will stay unused for compatibility reasons */
-};
+ };
The restrictions for flags and reserved apply as well.
(see KVM_S390_GET_IRQ_STATE)
@@ -3320,20 +3650,22 @@ and it must not exceed (max_vcpus + 32) * sizeof(struct kvm_s390_irq),
which is the maximum number of possibly pending cpu-local interrupts.
4.96 KVM_SMI
+------------
-Capability: KVM_CAP_X86_SMM
-Architectures: x86
-Type: vcpu ioctl
-Parameters: none
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_X86_SMM
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0 on success, -1 on error
Queues an SMI on the thread's vcpu.
4.97 KVM_CAP_PPC_MULTITCE
+-------------------------
-Capability: KVM_CAP_PPC_MULTITCE
-Architectures: ppc
-Type: vm
+:Capability: KVM_CAP_PPC_MULTITCE
+:Architectures: ppc
+:Type: vm
This capability means the kernel is capable of handling hypercalls
H_PUT_TCE_INDIRECT and H_STUFF_TCE without passing those into the user
@@ -3355,26 +3687,27 @@ an implementation for these despite the in kernel acceleration.
This capability is always enabled.
4.98 KVM_CREATE_SPAPR_TCE_64
+----------------------------
-Capability: KVM_CAP_SPAPR_TCE_64
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_create_spapr_tce_64 (in)
-Returns: file descriptor for manipulating the created TCE table
+:Capability: KVM_CAP_SPAPR_TCE_64
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: struct kvm_create_spapr_tce_64 (in)
+:Returns: file descriptor for manipulating the created TCE table
This is an extension for KVM_CAP_SPAPR_TCE which only supports 32bit
windows, described in 4.62 KVM_CREATE_SPAPR_TCE
-This capability uses extended struct in ioctl interface:
+This capability uses extended struct in ioctl interface::
-/* for KVM_CAP_SPAPR_TCE_64 */
-struct kvm_create_spapr_tce_64 {
+ /* for KVM_CAP_SPAPR_TCE_64 */
+ struct kvm_create_spapr_tce_64 {
__u64 liobn;
__u32 page_shift;
__u32 flags;
__u64 offset; /* in pages */
__u64 size; /* in pages */
-};
+ };
The aim of extension is to support an additional bigger DMA window with
a variable page size.
@@ -3387,12 +3720,13 @@ of IOMMU pages.
The rest of functionality is identical to KVM_CREATE_SPAPR_TCE.
4.99 KVM_REINJECT_CONTROL
+-------------------------
-Capability: KVM_CAP_REINJECT_CONTROL
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_reinject_control (in)
-Returns: 0 on success,
+:Capability: KVM_CAP_REINJECT_CONTROL
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_reinject_control (in)
+:Returns: 0 on success,
-EFAULT if struct kvm_reinject_control cannot be read,
-ENXIO if KVM_CREATE_PIT or KVM_CREATE_PIT2 didn't succeed earlier.
@@ -3402,21 +3736,24 @@ vector(s) that i8254 injects. Reinject mode dequeues a tick and injects its
interrupt whenever there isn't a pending interrupt from i8254.
!reinject mode injects an interrupt as soon as a tick arrives.
-struct kvm_reinject_control {
+::
+
+ struct kvm_reinject_control {
__u8 pit_reinject;
__u8 reserved[31];
-};
+ };
pit_reinject = 0 (!reinject mode) is recommended, unless running an old
operating system that uses the PIT for timing (e.g. Linux 2.4.x).
4.100 KVM_PPC_CONFIGURE_V3_MMU
+------------------------------
-Capability: KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3
-Architectures: ppc
-Type: vm ioctl
-Parameters: struct kvm_ppc_mmuv3_cfg (in)
-Returns: 0 on success,
+:Capability: KVM_CAP_PPC_RADIX_MMU or KVM_CAP_PPC_HASH_MMU_V3
+:Architectures: ppc
+:Type: vm ioctl
+:Parameters: struct kvm_ppc_mmuv3_cfg (in)
+:Returns: 0 on success,
-EFAULT if struct kvm_ppc_mmuv3_cfg cannot be read,
-EINVAL if the configuration is invalid
@@ -3424,10 +3761,12 @@ This ioctl controls whether the guest will use radix or HPT (hashed
page table) translation, and sets the pointer to the process table for
the guest.
-struct kvm_ppc_mmuv3_cfg {
+::
+
+ struct kvm_ppc_mmuv3_cfg {
__u64 flags;
__u64 process_table;
-};
+ };
There are two bits that can be set in flags; KVM_PPC_MMUV3_RADIX and
KVM_PPC_MMUV3_GTSE. KVM_PPC_MMUV3_RADIX, if set, configures the guest
@@ -3442,12 +3781,13 @@ as the second doubleword of the partition table entry, as defined in
the Power ISA V3.00, Book III section 5.7.6.1.
4.101 KVM_PPC_GET_RMMU_INFO
+---------------------------
-Capability: KVM_CAP_PPC_RADIX_MMU
-Architectures: ppc
-Type: vm ioctl
-Parameters: struct kvm_ppc_rmmu_info (out)
-Returns: 0 on success,
+:Capability: KVM_CAP_PPC_RADIX_MMU
+:Architectures: ppc
+:Type: vm ioctl
+:Parameters: struct kvm_ppc_rmmu_info (out)
+:Returns: 0 on success,
-EFAULT if struct kvm_ppc_rmmu_info cannot be written,
-EINVAL if no useful information can be returned
@@ -3456,14 +3796,16 @@ containing supported radix tree geometries, and (b) a list that maps
page sizes to put in the "AP" (actual page size) field for the tlbie
(TLB invalidate entry) instruction.
-struct kvm_ppc_rmmu_info {
+::
+
+ struct kvm_ppc_rmmu_info {
struct kvm_ppc_radix_geom {
__u8 page_shift;
__u8 level_bits[4];
__u8 pad[3];
} geometries[8];
__u32 ap_encodings[8];
-};
+ };
The geometries[] field gives up to 8 supported geometries for the
radix page table, in terms of the log base 2 of the smallest page
@@ -3476,19 +3818,54 @@ encodings, encoded with the AP value in the top 3 bits and the log
base 2 of the page size in the bottom 6 bits.
4.102 KVM_PPC_RESIZE_HPT_PREPARE
+--------------------------------
-Capability: KVM_CAP_SPAPR_RESIZE_HPT
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_ppc_resize_hpt (in)
-Returns: 0 on successful completion,
+:Capability: KVM_CAP_SPAPR_RESIZE_HPT
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: struct kvm_ppc_resize_hpt (in)
+:Returns: 0 on successful completion,
>0 if a new HPT is being prepared, the value is an estimated
- number of milliseconds until preparation is complete
+ number of milliseconds until preparation is complete,
-EFAULT if struct kvm_reinject_control cannot be read,
- -EINVAL if the supplied shift or flags are invalid
- -ENOMEM if unable to allocate the new HPT
- -ENOSPC if there was a hash collision when moving existing
- HPT entries to the new HPT
+ -EINVAL if the supplied shift or flags are invalid,
+ -ENOMEM if unable to allocate the new HPT,
+ -ENOSPC if there was a hash collision when moving existing
+ HPT entries to the new HPT,
-EIO on other error conditions
Used to implement the PAPR extension for runtime resizing of a guest's
@@ -3506,6 +3883,7 @@ requested in the parameters, discards the existing pending HPT and
creates a new one as above.
If called when there is a pending HPT of the size requested, will:
+
* If preparation of the pending HPT is already complete, return 0
* If preparation of the pending HPT has failed, return an error
code, then discard the pending HPT.
@@ -3522,26 +3900,29 @@ Normally this will be called repeatedly with the same parameters until
it returns <= 0. The first call will initiate preparation, subsequent
ones will monitor preparation until it completes or fails.
-struct kvm_ppc_resize_hpt {
+::
+
+ struct kvm_ppc_resize_hpt {
__u64 flags;
__u32 shift;
__u32 pad;
-};
+ };
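+
+A polling loop following that rule might look like this (a sketch;
+shift 30 requests a 1 GiB HPT)::
+
+  struct kvm_ppc_resize_hpt rhpt = { .shift = 30 };
+  int ret;
+
+  while ((ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt)) > 0)
+          usleep(ret * 1000);   /* estimated milliseconds remaining */
+  if (ret == 0)
+          ret = ioctl(vm_fd, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
+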
4.103 KVM_PPC_RESIZE_HPT_COMMIT
+-------------------------------
-Capability: KVM_CAP_SPAPR_RESIZE_HPT
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_ppc_resize_hpt (in)
-Returns: 0 on successful completion,
+:Capability: KVM_CAP_SPAPR_RESIZE_HPT
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: struct kvm_ppc_resize_hpt (in)
+:Returns: 0 on successful completion,
-EFAULT if struct kvm_reinject_control cannot be read,
- -EINVAL if the supplied shift or flags are invalid
+ -EINVAL if the supplied shift or flags are invalid,
-ENXIO if there is no pending HPT, or the pending HPT doesn't
- have the requested size
- -EBUSY if the pending HPT is not fully prepared
+ have the requested size,
+ -EBUSY if the pending HPT is not fully prepared,
-ENOSPC if there was a hash collision when moving existing
- HPT entries to the new HPT
+ HPT entries to the new HPT,
-EIO on other error conditions
Used to implement the PAPR extension for runtime resizing of a guest's
@@ -3564,31 +3945,35 @@ HPT and the previous HPT will be discarded.
On failure, the guest will still be operating on its previous HPT.
-struct kvm_ppc_resize_hpt {
+::
+
+ struct kvm_ppc_resize_hpt {
__u64 flags;
__u32 shift;
__u32 pad;
-};
+ };
4.104 KVM_X86_GET_MCE_CAP_SUPPORTED
+-----------------------------------
-Capability: KVM_CAP_MCE
-Architectures: x86
-Type: system ioctl
-Parameters: u64 mce_cap (out)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_MCE
+:Architectures: x86
+:Type: system ioctl
+:Parameters: u64 mce_cap (out)
+:Returns: 0 on success, -1 on error
Returns supported MCE capabilities. The u64 mce_cap parameter
has the same format as the MSR_IA32_MCG_CAP register. Supported
capabilities will have the corresponding bits set.
4.105 KVM_X86_SETUP_MCE
+-----------------------
-Capability: KVM_CAP_MCE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: u64 mcg_cap (in)
-Returns: 0 on success,
+:Capability: KVM_CAP_MCE
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: u64 mcg_cap (in)
+:Returns: 0 on success,
-EFAULT if u64 mcg_cap cannot be read,
-EINVAL if the requested number of banks is invalid,
-EINVAL if requested MCE capability is not supported.
@@ -3601,20 +3986,21 @@ checking for KVM_CAP_MCE. The supported capabilities can be
retrieved with KVM_X86_GET_MCE_CAP_SUPPORTED.
4.106 KVM_X86_SET_MCE
+---------------------
-Capability: KVM_CAP_MCE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_x86_mce (in)
-Returns: 0 on success,
+:Capability: KVM_CAP_MCE
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_x86_mce (in)
+:Returns: 0 on success,
-EFAULT if struct kvm_x86_mce cannot be read,
-EINVAL if the bank number is invalid,
-EINVAL if VAL bit is not set in status field.
Inject a machine check error (MCE) into the guest. The input
-parameter is:
+parameter is::
-struct kvm_x86_mce {
+ struct kvm_x86_mce {
__u64 status;
__u64 addr;
__u64 misc;
@@ -3622,7 +4008,7 @@ struct kvm_x86_mce {
__u8 bank;
__u8 pad1[7];
__u64 pad2[3];
-};
+ };
If the MCE being reported is an uncorrected error, KVM will
inject it as an MCE exception into the guest. If the guest
@@ -3634,15 +4020,17 @@ store it in the corresponding bank (provided this bank is
not holding a previously reported uncorrected error).
4.107 KVM_S390_GET_CMMA_BITS
+----------------------------
-Capability: KVM_CAP_S390_CMMA_MIGRATION
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_cmma_log (in, out)
-Returns: 0 on success, a negative value on error
+:Capability: KVM_CAP_S390_CMMA_MIGRATION
+:Architectures: s390
+:Type: vm ioctl
+:Parameters: struct kvm_s390_cmma_log (in, out)
+:Returns: 0 on success, a negative value on error
This ioctl is used to get the values of the CMMA bits on the s390
architecture. It is meant to be used in two scenarios:
+
- During live migration to save the CMMA values. Live migration needs
to be enabled via the KVM_REQ_START_MIGRATION VM property.
- To non-destructively peek at the CMMA values, with the flag
@@ -3652,9 +4040,12 @@ The ioctl takes parameters via the kvm_s390_cmma_log struct. The desired
values are written to a buffer whose location is indicated via the "values"
member in the kvm_s390_cmma_log struct. The values in the input struct are
also updated as needed.
+
Each CMMA value takes up one byte.
-struct kvm_s390_cmma_log {
+::
+
+ struct kvm_s390_cmma_log {
__u64 start_gfn;
__u32 count;
__u32 flags;
@@ -3663,7 +4054,7 @@ struct kvm_s390_cmma_log {
__u64 mask;
};
__u64 values;
-};
+ };
start_gfn is the number of the first guest frame whose CMMA values are
to be retrieved,
@@ -3724,12 +4115,13 @@ KVM_S390_CMMA_PEEK is not set but migration mode was not enabled, with
present for the addresses (e.g. when using hugepages).
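+
+Peeking at the first 512 values could look like this (a sketch; peek
+mode is chosen so migration state is left untouched)::
+
+  __u8 vals[512];
+  struct kvm_s390_cmma_log log = {
+          .start_gfn = 0,
+          .count     = sizeof(vals),
+          .flags     = KVM_S390_CMMA_PEEK,
+          .values    = (__u64)(unsigned long)vals,
+  };
+
+  if (ioctl(vm_fd, KVM_S390_GET_CMMA_BITS, &log) < 0)
+          perror("KVM_S390_GET_CMMA_BITS");
+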
4.108 KVM_S390_SET_CMMA_BITS
+----------------------------
-Capability: KVM_CAP_S390_CMMA_MIGRATION
-Architectures: s390
-Type: vm ioctl
-Parameters: struct kvm_s390_cmma_log (in)
-Returns: 0 on success, a negative value on error
+:Capability: KVM_CAP_S390_CMMA_MIGRATION
+:Architectures: s390
+:Type: vm ioctl
+:Parameters: struct kvm_s390_cmma_log (in)
+:Returns: 0 on success, a negative value on error
This ioctl is used to set the values of the CMMA bits on the s390
architecture. It is meant to be used during live migration to restore
@@ -3737,16 +4129,18 @@ the CMMA values, but there are no restrictions on its use.
The ioctl takes parameters via the kvm_s390_cmma_values struct.
Each CMMA value takes up one byte.
-struct kvm_s390_cmma_log {
+::
+
+ struct kvm_s390_cmma_log {
__u64 start_gfn;
__u32 count;
__u32 flags;
union {
__u64 remaining;
__u64 mask;
- };
+ };
__u64 values;
-};
+ };
start_gfn indicates the starting guest frame number,
@@ -3769,26 +4163,27 @@ or if no page table is present for the addresses (e.g. when using
hugepages).
4.109 KVM_PPC_GET_CPU_CHAR
+--------------------------
-Capability: KVM_CAP_PPC_GET_CPU_CHAR
-Architectures: powerpc
-Type: vm ioctl
-Parameters: struct kvm_ppc_cpu_char (out)
-Returns: 0 on successful completion
+:Capability: KVM_CAP_PPC_GET_CPU_CHAR
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: struct kvm_ppc_cpu_char (out)
+:Returns: 0 on successful completion,
-EFAULT if struct kvm_ppc_cpu_char cannot be written
This ioctl gives userspace information about certain characteristics
of the CPU relating to speculative execution of instructions and
possible information leakage resulting from speculative execution (see
CVE-2017-5715, CVE-2017-5753 and CVE-2017-5754). The information is
-returned in struct kvm_ppc_cpu_char, which looks like this:
+returned in struct kvm_ppc_cpu_char, which looks like this::
-struct kvm_ppc_cpu_char {
+ struct kvm_ppc_cpu_char {
__u64 character; /* characteristics of the CPU */
__u64 behaviour; /* recommended software behaviour */
__u64 character_mask; /* valid bits in character */
__u64 behaviour_mask; /* valid bits in behaviour */
-};
+ };
For extensibility, the character_mask and behaviour_mask fields
indicate which bits of character and behaviour have been filled in by
@@ -3815,12 +4210,13 @@ These fields use the same bit definitions as the new
H_GET_CPU_CHARACTERISTICS hypercall.
4.110 KVM_MEMORY_ENCRYPT_OP
+---------------------------
-Capability: basic
-Architectures: x86
-Type: system
-Parameters: an opaque platform specific structure (in/out)
-Returns: 0 on success; -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: system
+:Parameters: an opaque platform specific structure (in/out)
+:Returns: 0 on success; -1 on error
If the platform supports creating encrypted VMs then this ioctl can be used
for issuing platform-specific memory encryption commands to manage those
@@ -3831,12 +4227,13 @@ Currently, this ioctl is used for issuing Secure Encrypted Virtualization
Documentation/virt/kvm/amd-memory-encryption.rst.
4.111 KVM_MEMORY_ENCRYPT_REG_REGION
+-----------------------------------
-Capability: basic
-Architectures: x86
-Type: system
-Parameters: struct kvm_enc_region (in)
-Returns: 0 on success; -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: system
+:Parameters: struct kvm_enc_region (in)
+:Returns: 0 on success; -1 on error
This ioctl can be used to register a guest memory region which may
contain encrypted data (e.g. guest RAM, SMRAM etc).
@@ -3854,60 +4251,71 @@ swap or migrate (move) ciphertext pages. Hence, for now we pin the guest
memory region registered with the ioctl.
4.112 KVM_MEMORY_ENCRYPT_UNREG_REGION
+-------------------------------------
-Capability: basic
-Architectures: x86
-Type: system
-Parameters: struct kvm_enc_region (in)
-Returns: 0 on success; -1 on error
+:Capability: basic
+:Architectures: x86
+:Type: system
+:Parameters: struct kvm_enc_region (in)
+:Returns: 0 on success; -1 on error
This ioctl can be used to unregister the guest memory region registered
with KVM_MEMORY_ENCRYPT_REG_REGION ioctl above.
4.113 KVM_HYPERV_EVENTFD
+------------------------
-Capability: KVM_CAP_HYPERV_EVENTFD
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_hyperv_eventfd (in)
+:Capability: KVM_CAP_HYPERV_EVENTFD
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_hyperv_eventfd (in)
This ioctl (un)registers an eventfd to receive notifications from the guest on
the specified Hyper-V connection id through the SIGNAL_EVENT hypercall, without
causing a user exit. SIGNAL_EVENT hypercall with non-zero event flag number
(bits 24-31) still triggers a KVM_EXIT_HYPERV_HCALL user exit.
-struct kvm_hyperv_eventfd {
+::
+
+ struct kvm_hyperv_eventfd {
__u32 conn_id;
__s32 fd;
__u32 flags;
__u32 padding[3];
-};
+ };
-The conn_id field should fit within 24 bits:
+The conn_id field should fit within 24 bits::
-#define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
+ #define KVM_HYPERV_CONN_ID_MASK 0x00ffffff
-The acceptable values for the flags field are:
+The acceptable values for the flags field are::
-#define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
+ #define KVM_HYPERV_EVENTFD_DEASSIGN (1 << 0)
-Returns: 0 on success,
- -EINVAL if conn_id or flags is outside the allowed range
- -ENOENT on deassign if the conn_id isn't registered
- -EEXIST on assign if the conn_id is already registered
+:Returns: 0 on success,
+ -EINVAL if conn_id or flags is outside the allowed range,
+ -ENOENT on deassign if the conn_id isn't registered,
+ -EEXIST on assign if the conn_id is already registered
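+
+Registering a connection id could look like this (a sketch; the id is
+a made-up value and the eventfd comes from eventfd(2))::
+
+  struct kvm_hyperv_eventfd hvefd = {
+          .conn_id = 0x1234,                  /* must fit in 24 bits */
+          .fd      = eventfd(0, EFD_CLOEXEC),
+  };
+
+  if (ioctl(vm_fd, KVM_HYPERV_EVENTFD, &hvefd) < 0)
+          perror("KVM_HYPERV_EVENTFD");
+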
4.114 KVM_GET_NESTED_STATE
+--------------------------
+
+:Capability: KVM_CAP_NESTED_STATE
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_nested_state (in/out)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_NESTED_STATE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_nested_state (in/out)
-Returns: 0 on success, -1 on error
Errors:
- E2BIG: the total state size exceeds the value of 'size' specified by
+
+ ===== =============================================================
+ E2BIG the total state size exceeds the value of 'size' specified by
the user; the size required will be written into size.
+ ===== =============================================================
+
+::
-struct kvm_nested_state {
+ struct kvm_nested_state {
__u16 flags;
__u16 format;
__u32 size;
@@ -3924,33 +4332,33 @@ struct kvm_nested_state {
struct kvm_vmx_nested_state_data vmx[0];
struct kvm_svm_nested_state_data svm[0];
} data;
-};
+ };
-#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
-#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
-#define KVM_STATE_NESTED_EVMCS 0x00000004
+ #define KVM_STATE_NESTED_GUEST_MODE 0x00000001
+ #define KVM_STATE_NESTED_RUN_PENDING 0x00000002
+ #define KVM_STATE_NESTED_EVMCS 0x00000004
-#define KVM_STATE_NESTED_FORMAT_VMX 0
-#define KVM_STATE_NESTED_FORMAT_SVM 1
+ #define KVM_STATE_NESTED_FORMAT_VMX 0
+ #define KVM_STATE_NESTED_FORMAT_SVM 1
-#define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
+ #define KVM_STATE_NESTED_VMX_VMCS_SIZE 0x1000
-#define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE 0x00000001
-#define KVM_STATE_NESTED_VMX_SMM_VMXON 0x00000002
+ #define KVM_STATE_NESTED_VMX_SMM_GUEST_MODE 0x00000001
+ #define KVM_STATE_NESTED_VMX_SMM_VMXON 0x00000002
-struct kvm_vmx_nested_state_hdr {
+ struct kvm_vmx_nested_state_hdr {
__u64 vmxon_pa;
__u64 vmcs12_pa;
struct {
__u16 flags;
} smm;
-};
+ };
-struct kvm_vmx_nested_state_data {
+ struct kvm_vmx_nested_state_data {
__u8 vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
__u8 shadow_vmcs12[KVM_STATE_NESTED_VMX_VMCS_SIZE];
-};
+ };
This ioctl copies the vcpu's nested virtualization state from the kernel to
userspace.
@@ -3959,24 +4367,26 @@ The maximum size of the state can be retrieved by passing KVM_CAP_NESTED_STATE
to the KVM_CHECK_EXTENSION ioctl().
4.115 KVM_SET_NESTED_STATE
+--------------------------
-Capability: KVM_CAP_NESTED_STATE
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_nested_state (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_NESTED_STATE
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_nested_state (in)
+:Returns: 0 on success, -1 on error
This copies the vcpu's kvm_nested_state struct from userspace to the kernel.
For the definition of struct kvm_nested_state, see KVM_GET_NESTED_STATE.
4.116 KVM_(UN)REGISTER_COALESCED_MMIO
+-------------------------------------
-Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio)
- KVM_CAP_COALESCED_PIO (for coalesced pio)
-Architectures: all
-Type: vm ioctl
-Parameters: struct kvm_coalesced_mmio_zone
-Returns: 0 on success, < 0 on error
+:Capability: KVM_CAP_COALESCED_MMIO (for coalesced mmio)
+ KVM_CAP_COALESCED_PIO (for coalesced pio)
+:Architectures: all
+:Type: vm ioctl
+:Parameters: struct kvm_coalesced_mmio_zone
+:Returns: 0 on success, < 0 on error
Coalesced I/O is a performance optimization that defers hardware
register write emulation so that userspace exits are avoided. It is
@@ -3998,15 +4408,18 @@ between coalesced mmio and pio except that coalesced pio records accesses
to I/O ports.
4.117 KVM_CLEAR_DIRTY_LOG (vm ioctl)
+------------------------------------
-Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
-Architectures: x86, arm, arm64, mips
-Type: vm ioctl
-Parameters: struct kvm_dirty_log (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+:Architectures: x86, arm, arm64, mips
+:Type: vm ioctl
+:Parameters: struct kvm_dirty_log (in)
+:Returns: 0 on success, -1 on error
-/* for KVM_CLEAR_DIRTY_LOG */
-struct kvm_clear_dirty_log {
+::
+
+ /* for KVM_CLEAR_DIRTY_LOG */
+ struct kvm_clear_dirty_log {
__u32 slot;
__u32 num_pages;
__u64 first_page;
@@ -4014,7 +4427,7 @@ struct kvm_clear_dirty_log {
void __user *dirty_bitmap; /* one bit per page */
__u64 padding;
};
-};
+ };
The ioctl clears the dirty status of pages in a memory slot, according to
the bitmap that is passed in struct kvm_clear_dirty_log's dirty_bitmap
@@ -4038,20 +4451,23 @@ However, it can always be used as long as KVM_CHECK_EXTENSION confirms
that KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 is present.
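+
+Clearing the first 256 pages of slot 0 could look like this (a sketch;
+the bitmap is assumed to have been filled from KVM_GET_DIRTY_LOG)::
+
+  __u64 bitmap[4];   /* 256 bits, one per page */
+  struct kvm_clear_dirty_log clear = {
+          .slot         = 0,
+          .num_pages    = 256,
+          .first_page   = 0,
+          .dirty_bitmap = bitmap,
+  };
+
+  if (ioctl(vm_fd, KVM_CLEAR_DIRTY_LOG, &clear) < 0)
+          perror("KVM_CLEAR_DIRTY_LOG");
+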
4.118 KVM_GET_SUPPORTED_HV_CPUID
+--------------------------------
+
+:Capability: KVM_CAP_HYPERV_CPUID
+:Architectures: x86
+:Type: vcpu ioctl
+:Parameters: struct kvm_cpuid2 (in/out)
+:Returns: 0 on success, -1 on error
-Capability: KVM_CAP_HYPERV_CPUID
-Architectures: x86
-Type: vcpu ioctl
-Parameters: struct kvm_cpuid2 (in/out)
-Returns: 0 on success, -1 on error
+::
-struct kvm_cpuid2 {
+ struct kvm_cpuid2 {
__u32 nent;
__u32 padding;
struct kvm_cpuid_entry2 entries[0];
-};
+ };
-struct kvm_cpuid_entry2 {
+ struct kvm_cpuid_entry2 {
__u32 function;
__u32 index;
__u32 flags;
@@ -4060,7 +4476,7 @@ struct kvm_cpuid_entry2 {
__u32 ecx;
__u32 edx;
__u32 padding[3];
-};
+ };
This ioctl returns x86 cpuid features leaves related to Hyper-V emulation in
KVM. Userspace can use the information returned by this ioctl to construct
@@ -4073,13 +4489,13 @@ KVM_GET_SUPPORTED_CPUID ioctl because some of them intersect with KVM feature
leaves (0x40000000, 0x40000001).
Currently, the following list of CPUID leaves are returned:
- HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
- HYPERV_CPUID_INTERFACE
- HYPERV_CPUID_VERSION
- HYPERV_CPUID_FEATURES
- HYPERV_CPUID_ENLIGHTMENT_INFO
- HYPERV_CPUID_IMPLEMENT_LIMITS
- HYPERV_CPUID_NESTED_FEATURES
+ - HYPERV_CPUID_VENDOR_AND_MAX_FUNCTIONS
+ - HYPERV_CPUID_INTERFACE
+ - HYPERV_CPUID_VERSION
+ - HYPERV_CPUID_FEATURES
+ - HYPERV_CPUID_ENLIGHTMENT_INFO
+ - HYPERV_CPUID_IMPLEMENT_LIMITS
+ - HYPERV_CPUID_NESTED_FEATURES
HYPERV_CPUID_NESTED_FEATURES leaf is only exposed when Enlightened VMCS was
enabled on the corresponding vCPU (KVM_CAP_HYPERV_ENLIGHTENED_VMCS).
@@ -4095,17 +4511,25 @@ number of valid entries in the 'entries' array, which is then filled.
userspace should not expect to get any particular value there.
4.119 KVM_ARM_VCPU_FINALIZE
+---------------------------
+
+:Architectures: arm, arm64
+:Type: vcpu ioctl
+:Parameters: int feature (in)
+:Returns: 0 on success, -1 on error
-Architectures: arm, arm64
-Type: vcpu ioctl
-Parameters: int feature (in)
-Returns: 0 on success, -1 on error
Errors:
- EPERM: feature not enabled, needs configuration, or already finalized
- EINVAL: feature unknown or not present
+
+ ====== ==============================================================
+ EPERM feature not enabled, needs configuration, or already finalized
+ EINVAL feature unknown or not present
+ ====== ==============================================================
Recognised values for feature:
+
+ ===== ===========================================
arm64 KVM_ARM_VCPU_SVE (requires KVM_CAP_ARM_SVE)
+ ===== ===========================================
Finalizes the configuration of the specified vcpu feature.
@@ -4129,21 +4553,24 @@ See KVM_ARM_VCPU_INIT for details of vcpu features that require finalization
using this ioctl.
4.120 KVM_SET_PMU_EVENT_FILTER
+------------------------------
-Capability: KVM_CAP_PMU_EVENT_FILTER
-Architectures: x86
-Type: vm ioctl
-Parameters: struct kvm_pmu_event_filter (in)
-Returns: 0 on success, -1 on error
+:Capability: KVM_CAP_PMU_EVENT_FILTER
+:Architectures: x86
+:Type: vm ioctl
+:Parameters: struct kvm_pmu_event_filter (in)
+:Returns: 0 on success, -1 on error
-struct kvm_pmu_event_filter {
+::
+
+ struct kvm_pmu_event_filter {
__u32 action;
__u32 nevents;
__u32 fixed_counter_bitmap;
__u32 flags;
__u32 pad[4];
__u64 events[0];
-};
+ };
This ioctl restricts the set of PMU events that the guest can program.
The argument holds a list of events which will be allowed or denied.
@@ -4154,20 +4581,26 @@ counters are controlled by the fixed_counter_bitmap.
No flags are defined yet, the field must be zero.
-Valid values for 'action':
-#define KVM_PMU_EVENT_ALLOW 0
-#define KVM_PMU_EVENT_DENY 1
+Valid values for 'action'::
+
+ #define KVM_PMU_EVENT_ALLOW 0
+ #define KVM_PMU_EVENT_DENY 1
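+
+Denying two events could look like this (a sketch; the event encodings
+are placeholders, not meaningful selector values)::
+
+  struct kvm_pmu_event_filter *f;
+
+  f = calloc(1, sizeof(*f) + 2 * sizeof(__u64));
+  f->action    = KVM_PMU_EVENT_DENY;
+  f->nevents   = 2;
+  f->events[0] = 0xc0;   /* placeholder encoding */
+  f->events[1] = 0x2e;   /* placeholder encoding */
+  if (ioctl(vm_fd, KVM_SET_PMU_EVENT_FILTER, f) < 0)
+          perror("KVM_SET_PMU_EVENT_FILTER");
+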
4.121 KVM_PPC_SVM_OFF
+---------------------
+
+:Capability: basic
+:Architectures: powerpc
+:Type: vm ioctl
+:Parameters: none
+:Returns: 0 on successful completion,
-Capability: basic
-Architectures: powerpc
-Type: vm ioctl
-Parameters: none
-Returns: 0 on successful completion,
Errors:
- EINVAL: if ultravisor failed to terminate the secure guest
- ENOMEM: if hypervisor failed to allocate new radix page tables for guest
+
+ ====== ================================================================
+ EINVAL if ultravisor failed to terminate the secure guest
+ ENOMEM if hypervisor failed to allocate new radix page tables for guest
+ ====== ================================================================
This ioctl is used to turn off the secure mode of the guest or transition
the guest from secure mode to normal mode. This is invoked when the guest
@@ -4178,35 +4611,38 @@ unpins the VPA pages and releases all the device pages that are used to
track the secure pages by hypervisor.
4.122 KVM_S390_NORMAL_RESET
+---------------------------
-Capability: KVM_CAP_S390_VCPU_RESETS
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: KVM_CAP_S390_VCPU_RESETS
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
This ioctl resets VCPU registers and control structures according to
the cpu reset definition in the POP (Principles Of Operation).
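
A sketch of issuing the reset, assuming ``vcpu_fd`` is an s390 vCPU
descriptor; the ioctl carries no payload, and the same pattern applies to the
two stronger resets below::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int normal_reset(int vcpu_fd)
  {
          return ioctl(vcpu_fd, KVM_S390_NORMAL_RESET, 0);
  }
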
4.123 KVM_S390_INITIAL_RESET
+----------------------------
-Capability: none
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: none
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
This ioctl resets VCPU registers and control structures according to
the initial cpu reset definition in the POP. However, the cpu is not
put into ESA mode. This reset is a superset of the normal reset.
4.124 KVM_S390_CLEAR_RESET
+--------------------------
-Capability: KVM_CAP_S390_VCPU_RESETS
-Architectures: s390
-Type: vcpu ioctl
-Parameters: none
-Returns: 0
+:Capability: KVM_CAP_S390_VCPU_RESETS
+:Architectures: s390
+:Type: vcpu ioctl
+:Parameters: none
+:Returns: 0
This ioctl resets VCPU registers and control structures according to
the clear cpu reset definition in the POP. However, the cpu is not put
@@ -4214,7 +4650,7 @@ into ESA mode. This reset is a superset of the initial reset.
5. The kvm_run structure
-------------------------
+========================
Application code obtains a pointer to the kvm_run structure by
mmap()ing a vcpu fd. From that point, application code can control
@@ -4222,13 +4658,17 @@ execution by changing fields in kvm_run prior to calling the KVM_RUN
ioctl, and obtain information about the reason KVM_RUN returned by
looking up structure members.
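
For illustration, a minimal sketch of obtaining that pointer, assuming
``kvm_fd`` is the /dev/kvm descriptor and ``vcpu_fd`` an already-created vCPU::

  #include <sys/ioctl.h>
  #include <sys/mman.h>
  #include <linux/kvm.h>

  static struct kvm_run *map_run(int kvm_fd, int vcpu_fd)
  {
          long size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
          void *p;

          if (size < 0)
                  return NULL;
          /* The mapping remains valid for the lifetime of the vCPU. */
          p = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
                   vcpu_fd, 0);
          return p == MAP_FAILED ? NULL : p;
  }
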
-struct kvm_run {
+::
+
+ struct kvm_run {
/* in */
__u8 request_interrupt_window;
Request that KVM_RUN return when it becomes possible to inject external
interrupts into the guest. Useful in conjunction with KVM_INTERRUPT.
+::
+
__u8 immediate_exit;
This field is polled once when KVM_RUN starts; if non-zero, KVM_RUN
@@ -4240,6 +4680,8 @@ a signal handler that sets run->immediate_exit to a non-zero value.
This field is ignored if KVM_CAP_IMMEDIATE_EXIT is not available.
+::
+
__u8 padding1[6];
/* out */
@@ -4249,16 +4691,22 @@ When KVM_RUN has returned successfully (return value 0), this informs
application code why KVM_RUN has returned. Allowable values for this
field are detailed below.
+::
+
__u8 ready_for_interrupt_injection;
If request_interrupt_window has been specified, this field indicates
an interrupt can be injected now with KVM_INTERRUPT.
+::
+
__u8 if_flag;
The value of the current interrupt flag. Only valid if in-kernel
local APIC is not used.
+::
+
__u16 flags;
More architecture-specific flags detailing state of the VCPU that may
@@ -4266,17 +4714,23 @@ affect the device's behavior. The only currently defined flag is
KVM_RUN_X86_SMM, which is valid on x86 machines and is set if the
VCPU is in system management mode.
+::
+
/* in (pre_kvm_run), out (post_kvm_run) */
__u64 cr8;
The value of the cr8 register. Only valid if in-kernel local APIC is
not used. Both input and output.
+::
+
__u64 apic_base;
The value of the APIC BASE msr. Only valid if in-kernel local
APIC is not used. Both input and output.
+::
+
union {
/* KVM_EXIT_UNKNOWN */
struct {
@@ -4287,6 +4741,8 @@ If exit_reason is KVM_EXIT_UNKNOWN, the vcpu has exited due to unknown
reasons. Further architecture-specific information is available in
hardware_exit_reason.
+::
+
/* KVM_EXIT_FAIL_ENTRY */
struct {
__u64 hardware_entry_failure_reason;
@@ -4296,6 +4752,8 @@ If exit_reason is KVM_EXIT_FAIL_ENTRY, the vcpu could not be run due
to unknown reasons. Further architecture-specific information is
available in hardware_entry_failure_reason.
+::
+
/* KVM_EXIT_EXCEPTION */
struct {
__u32 exception;
@@ -4304,10 +4762,12 @@ available in hardware_entry_failure_reason.
Unused.
+::
+
/* KVM_EXIT_IO */
struct {
-#define KVM_EXIT_IO_IN 0
-#define KVM_EXIT_IO_OUT 1
+ #define KVM_EXIT_IO_IN 0
+ #define KVM_EXIT_IO_OUT 1
__u8 direction;
__u8 size; /* bytes */
__u16 port;
@@ -4321,6 +4781,8 @@ data_offset describes where the data is located (KVM_EXIT_IO_OUT) or
where kvm expects application code to place the data for the next
KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array.
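
A hedged sketch of consuming such an exit, assuming ``run`` points at the
mmap'ed kvm_run and ``pio_out``/``pio_in`` are hypothetical port handlers::

  #include <linux/kvm.h>

  extern void pio_out(__u16 port, const void *data, __u8 size);
  extern void pio_in(__u16 port, void *data, __u8 size);

  static void handle_io_exit(struct kvm_run *run)
  {
          /* The packed data array lives inside the kvm_run mapping. */
          __u8 *data = (__u8 *)run + run->io.data_offset;
          __u32 i;

          for (i = 0; i < run->io.count; i++, data += run->io.size) {
                  if (run->io.direction == KVM_EXIT_IO_OUT)
                          pio_out(run->io.port, data, run->io.size);
                  else
                          pio_in(run->io.port, data, run->io.size);
          }
  }
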
+::
+
/* KVM_EXIT_DEBUG */
struct {
struct kvm_debug_exit_arch arch;
@@ -4329,6 +4791,8 @@ KVM_RUN invocation (KVM_EXIT_IO_IN). Data format is a packed array.
If the exit_reason is KVM_EXIT_DEBUG, then a vcpu is processing a debug event
for which architecture specific information is returned.
+::
+
/* KVM_EXIT_MMIO */
struct {
__u64 phys_addr;
@@ -4346,14 +4810,19 @@ The 'data' member contains, in its first 'len' bytes, the value as it would
appear if the VCPU performed a load or store of the appropriate width directly
to the byte array.
-NOTE: For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR and
+.. note::
+
+  For KVM_EXIT_IO, KVM_EXIT_MMIO, KVM_EXIT_OSI, KVM_EXIT_PAPR and
+  KVM_EXIT_EPR the corresponding operations are complete (and guest
+  state is consistent) only after userspace has re-entered the kernel
+  with KVM_RUN. The kernel side will first finish incomplete operations
+  and then check for pending signals. Userspace can re-enter the guest
+  with an unmasked signal pending to complete pending operations.
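
A similar hedged sketch for the MMIO case, with ``bus_read``/``bus_write`` as
hypothetical device-model callbacks::

  #include <linux/kvm.h>

  extern void bus_write(__u64 addr, const void *data, __u32 len);
  extern void bus_read(__u64 addr, void *data, __u32 len);

  static void handle_mmio_exit(struct kvm_run *run)
  {
          if (run->mmio.is_write)
                  bus_write(run->mmio.phys_addr, run->mmio.data,
                            run->mmio.len);
          else
                  /* The result must be in 'data' before re-entering
                   * the guest with KVM_RUN. */
                  bus_read(run->mmio.phys_addr, run->mmio.data,
                           run->mmio.len);
  }
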
+::
+
/* KVM_EXIT_HYPERCALL */
struct {
__u64 nr;
@@ -4365,7 +4834,10 @@ pending operations.
Unused. This was once used for 'hypercall to userspace'. To implement
such functionality, use KVM_EXIT_IO (x86) or KVM_EXIT_MMIO (all except s390).
-Note KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
+
+.. note:: KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
+
+::
/* KVM_EXIT_TPR_ACCESS */
struct {
@@ -4376,6 +4848,8 @@ Note KVM_EXIT_IO is significantly faster than KVM_EXIT_MMIO.
To be documented (KVM_TPR_ACCESS_REPORTING).
+::
+
/* KVM_EXIT_S390_SIEIC */
struct {
__u8 icptcode;
@@ -4387,16 +4861,20 @@ To be documented (KVM_TPR_ACCESS_REPORTING).
s390 specific.
+::
+
/* KVM_EXIT_S390_RESET */
-#define KVM_S390_RESET_POR 1
-#define KVM_S390_RESET_CLEAR 2
-#define KVM_S390_RESET_SUBSYSTEM 4
-#define KVM_S390_RESET_CPU_INIT 8
-#define KVM_S390_RESET_IPL 16
+ #define KVM_S390_RESET_POR 1
+ #define KVM_S390_RESET_CLEAR 2
+ #define KVM_S390_RESET_SUBSYSTEM 4
+ #define KVM_S390_RESET_CPU_INIT 8
+ #define KVM_S390_RESET_IPL 16
__u64 s390_reset_flags;
s390 specific.
+::
+
/* KVM_EXIT_S390_UCONTROL */
struct {
__u64 trans_exc_code;
@@ -4411,6 +4889,8 @@ in the cpu's lowcore are presented here as defined by the z Architecture
Principles of Operation Book in the Chapter for Dynamic Address Translation
(DAT)
+::
+
/* KVM_EXIT_DCR */
struct {
__u32 dcrn;
@@ -4420,6 +4900,8 @@ Principles of Operation Book in the Chapter for Dynamic Address Translation
Deprecated - was used for 440 KVM.
+::
+
/* KVM_EXIT_OSI */
struct {
__u64 gprs[32];
@@ -4433,6 +4915,8 @@ Userspace can now handle the hypercall and when it's done modify the gprs as
necessary. Upon guest entry all guest GPRs will then be replaced by the values
in this struct.
+::
+
/* KVM_EXIT_PAPR_HCALL */
struct {
__u64 nr;
@@ -4450,6 +4934,8 @@ The possible hypercalls are defined in the Power Architecture Platform
Requirements (PAPR) document available from www.power.org (free
developer registration required to access it).
+::
+
/* KVM_EXIT_S390_TSCH */
struct {
__u16 subchannel_id;
@@ -4466,6 +4952,8 @@ interrupt for the target subchannel has been dequeued and subchannel_id,
subchannel_nr, io_int_parm and io_int_word contain the parameters for that
interrupt. ipb is needed for instruction parameter decoding.
+::
+
/* KVM_EXIT_EPR */
struct {
__u32 epr;
@@ -4485,11 +4973,13 @@ It gets triggered whenever both KVM_CAP_PPC_EPR are enabled and an
external interrupt has just been delivered into the guest. User space
should put the acknowledged interrupt vector into the 'epr' field.
+::
+
/* KVM_EXIT_SYSTEM_EVENT */
struct {
-#define KVM_SYSTEM_EVENT_SHUTDOWN 1
-#define KVM_SYSTEM_EVENT_RESET 2
-#define KVM_SYSTEM_EVENT_CRASH 3
+ #define KVM_SYSTEM_EVENT_SHUTDOWN 1
+ #define KVM_SYSTEM_EVENT_RESET 2
+ #define KVM_SYSTEM_EVENT_CRASH 3
__u32 type;
__u64 flags;
} system_event;
@@ -4502,18 +4992,21 @@ the system-level event type. The 'flags' field describes architecture
specific flags for the system-level event.
Valid values for 'type' are:
- KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
+
+ - KVM_SYSTEM_EVENT_SHUTDOWN -- the guest has requested a shutdown of the
   VM. Userspace is not obliged to honour this, and if it does, it need
   not destroy the VM synchronously (i.e. it may call KVM_RUN again
   before shutdown finally occurs).
- KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
+ - KVM_SYSTEM_EVENT_RESET -- the guest has requested a reset of the VM.
As with SHUTDOWN, userspace can choose to ignore the request, or
to schedule the reset to occur in the future and may call KVM_RUN again.
- KVM_SYSTEM_EVENT_CRASH -- the guest crash occurred and the guest
+ - KVM_SYSTEM_EVENT_CRASH -- the guest crash occurred and the guest
has requested a crash condition maintenance. Userspace can choose
to ignore the request, or to gather VM memory core dump and/or
reset/shutdown of the VM.
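
A sketch of a dispatch for these events, assuming a hypothetical VMM policy
layer supplies the handlers::

  #include <linux/kvm.h>

  extern void vmm_request_shutdown(void);
  extern void vmm_request_reset(void);
  extern void vmm_dump_guest_core(void);

  static void handle_system_event(struct kvm_run *run)
  {
          switch (run->system_event.type) {
          case KVM_SYSTEM_EVENT_SHUTDOWN:
                  vmm_request_shutdown();
                  break;
          case KVM_SYSTEM_EVENT_RESET:
                  vmm_request_reset();
                  break;
          case KVM_SYSTEM_EVENT_CRASH:
                  vmm_dump_guest_core();
                  break;
          }
  }
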
+::
+
/* KVM_EXIT_IOAPIC_EOI */
struct {
__u8 vector;
@@ -4526,9 +5019,11 @@ the userspace IOAPIC should process the EOI and retrigger the interrupt if
it is still asserted. Vector is the LAPIC interrupt vector for which the
EOI was received.
+::
+
struct kvm_hyperv_exit {
-#define KVM_EXIT_HYPERV_SYNIC 1
-#define KVM_EXIT_HYPERV_HCALL 2
+ #define KVM_EXIT_HYPERV_SYNIC 1
+ #define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
union {
struct {
@@ -4546,14 +5041,20 @@ EOI was received.
};
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
+
Indicates that the VCPU exits into userspace to process some tasks
related to Hyper-V emulation.
+
Valid values for 'type' are:
- KVM_EXIT_HYPERV_SYNIC -- synchronously notify user-space about
+
+ - KVM_EXIT_HYPERV_SYNIC -- synchronously notify user-space about
+   Hyper-V SynIC state change. Notification is used to remap SynIC
+   event/message pages and to enable/disable SynIC messages/events
+   processing in userspace.
+::
+
/* KVM_EXIT_ARM_NISV */
struct {
__u64 esr_iss;
@@ -4587,6 +5088,8 @@ Note that KVM does not skip the faulting instruction as it does for
KVM_EXIT_MMIO, but userspace has to emulate any change to the processing state
if it decides to decode and emulate the instruction.
+::
+
/* Fix the size of the union. */
char padding[256];
};
@@ -4611,18 +5114,20 @@ avoid some system call overhead if userspace has to handle the exit.
Userspace can query the validity of the structure by checking
kvm_valid_regs for specific bits. These bits are architecture specific
and usually define the validity of a groups of registers. (e.g. one bit
- for general purpose registers)
+for general purpose registers)
Please note that the kernel is allowed to use the kvm_run structure as the
primary storage for certain register types. Therefore, the kernel may use the
values in kvm_run even if the corresponding bit in kvm_dirty_regs is not set.
-};
+::
+
+ };
6. Capabilities that can be enabled on vCPUs
---------------------------------------------
+============================================
There are certain capabilities that change the behavior of the virtual CPU or
the virtual machine when enabled. To enable them, please see section 4.37.
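
All of the capabilities below are turned on through the same KVM_ENABLE_CAP
call; a sketch, assuming ``vcpu_fd`` and a capability number taking up to four
arguments::

  #include <string.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int enable_vcpu_cap(int vcpu_fd, __u32 cap_nr,
                             __u64 arg0, __u64 arg1)
  {
          struct kvm_enable_cap cap;

          memset(&cap, 0, sizeof(cap));
          cap.cap = cap_nr;       /* e.g. KVM_CAP_PPC_OSI */
          cap.args[0] = arg0;
          cap.args[1] = arg1;
          return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
  }
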
@@ -4631,23 +5136,28 @@ the virtual machine is when enabling them.
The following information is provided along with the description:
- Architectures: which instruction set architectures provide this ioctl.
+ Architectures:
+ which instruction set architectures provide this ioctl.
x86 includes both i386 and x86_64.
- Target: whether this is a per-vcpu or per-vm capability.
+ Target:
+ whether this is a per-vcpu or per-vm capability.
- Parameters: what parameters are accepted by the capability.
+ Parameters:
+ what parameters are accepted by the capability.
- Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
+ Returns:
+ the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
6.1 KVM_CAP_PPC_OSI
+-------------------
-Architectures: ppc
-Target: vcpu
-Parameters: none
-Returns: 0 on success; -1 on error
+:Architectures: ppc
+:Target: vcpu
+:Parameters: none
+:Returns: 0 on success; -1 on error
This capability enables interception of OSI hypercalls that otherwise would
be treated as normal system calls to be injected into the guest. OSI hypercalls
@@ -4658,11 +5168,12 @@ When this capability is enabled, KVM_EXIT_OSI can occur.
6.2 KVM_CAP_PPC_PAPR
+--------------------
-Architectures: ppc
-Target: vcpu
-Parameters: none
-Returns: 0 on success; -1 on error
+:Architectures: ppc
+:Target: vcpu
+:Parameters: none
+:Returns: 0 on success; -1 on error
This capability enables interception of PAPR hypercalls. PAPR hypercalls are
done using the hypercall instruction "sc 1".
@@ -4678,18 +5189,21 @@ When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
6.3 KVM_CAP_SW_TLB
+------------------
+
+:Architectures: ppc
+:Target: vcpu
+:Parameters: args[0] is the address of a struct kvm_config_tlb
+:Returns: 0 on success; -1 on error
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] is the address of a struct kvm_config_tlb
-Returns: 0 on success; -1 on error
+::
-struct kvm_config_tlb {
+ struct kvm_config_tlb {
__u64 params;
__u64 array;
__u32 mmu_type;
__u32 array_len;
-};
+ };
Configures the virtual CPU's TLB array, establishing a shared memory area
between userspace and KVM. The "params" and "array" fields are userspace
@@ -4708,6 +5222,7 @@ to tell KVM which entries have been changed, prior to calling KVM_RUN again
on this vcpu.
For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
+
- The "params" field is of type "struct kvm_book3e_206_tlb_params".
- The "array" field points to an array of type "struct
kvm_book3e_206_tlb_entry".
@@ -4721,11 +5236,12 @@ For mmu types KVM_MMU_FSL_BOOKE_NOHV and KVM_MMU_FSL_BOOKE_HV:
hardware ignores this value for TLB0.
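
A hedged sketch of wiring this up for the FSL Book E case, passing the
configuration address through args[0] as described above::

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int enable_sw_tlb(int vcpu_fd,
                           struct kvm_book3e_206_tlb_params *params,
                           struct kvm_book3e_206_tlb_entry *array,
                           __u32 array_len)
  {
          struct kvm_config_tlb cfg = {
                  .params    = (uintptr_t)params,
                  .array     = (uintptr_t)array,
                  .mmu_type  = KVM_MMU_FSL_BOOKE_NOHV,
                  .array_len = array_len,
          };
          struct kvm_enable_cap cap = {
                  .cap  = KVM_CAP_SW_TLB,
                  .args = { (uintptr_t)&cfg },
          };

          return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
  }
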
6.4 KVM_CAP_S390_CSS_SUPPORT
+----------------------------
-Architectures: s390
-Target: vcpu
-Parameters: none
-Returns: 0 on success; -1 on error
+:Architectures: s390
+:Target: vcpu
+:Parameters: none
+:Returns: 0 on success; -1 on error
This capability enables support for handling of channel I/O instructions.
@@ -4739,11 +5255,12 @@ Note that even though this capability is enabled per-vcpu, the complete
virtual machine is affected.
6.5 KVM_CAP_PPC_EPR
+-------------------
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] defines whether the proxy facility is active
-Returns: 0 on success; -1 on error
+:Architectures: ppc
+:Target: vcpu
+:Parameters: args[0] defines whether the proxy facility is active
+:Returns: 0 on success; -1 on error
This capability enables or disables the delivery of interrupts through the
external proxy facility.
@@ -4757,62 +5274,70 @@ When disabled (args[0] == 0), behavior is as if this facility is unsupported.
When this capability is enabled, KVM_EXIT_EPR can occur.
6.6 KVM_CAP_IRQ_MPIC
+--------------------
-Architectures: ppc
-Parameters: args[0] is the MPIC device fd
- args[1] is the MPIC CPU number for this vcpu
+:Architectures: ppc
+:Parameters: args[0] is the MPIC device fd;
+ args[1] is the MPIC CPU number for this vcpu
This capability connects the vcpu to an in-kernel MPIC device.
6.7 KVM_CAP_IRQ_XICS
+--------------------
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] is the XICS device fd
- args[1] is the XICS CPU number (server ID) for this vcpu
+:Architectures: ppc
+:Target: vcpu
+:Parameters: args[0] is the XICS device fd;
+ args[1] is the XICS CPU number (server ID) for this vcpu
This capability connects the vcpu to an in-kernel XICS device.
6.8 KVM_CAP_S390_IRQCHIP
+------------------------
-Architectures: s390
-Target: vm
-Parameters: none
+:Architectures: s390
+:Target: vm
+:Parameters: none
This capability enables the in-kernel irqchip for s390. Please refer to
"4.24 KVM_CREATE_IRQCHIP" for details.
6.9 KVM_CAP_MIPS_FPU
+--------------------
-Architectures: mips
-Target: vcpu
-Parameters: args[0] is reserved for future use (should be 0).
+:Architectures: mips
+:Target: vcpu
+:Parameters: args[0] is reserved for future use (should be 0).
This capability allows the use of the host Floating Point Unit by the guest. It
allows the Config1.FP bit to be set to enable the FPU in the guest. Once this is
-done the KVM_REG_MIPS_FPR_* and KVM_REG_MIPS_FCR_* registers can be accessed
-(depending on the current guest FPU register mode), and the Status.FR,
+done the ``KVM_REG_MIPS_FPR_*`` and ``KVM_REG_MIPS_FCR_*`` registers can be
+accessed (depending on the current guest FPU register mode), and the Status.FR,
Config5.FRE bits are accessible via the KVM API and also from the guest,
depending on them being supported by the FPU.
6.10 KVM_CAP_MIPS_MSA
+---------------------
-Architectures: mips
-Target: vcpu
-Parameters: args[0] is reserved for future use (should be 0).
+:Architectures: mips
+:Target: vcpu
+:Parameters: args[0] is reserved for future use (should be 0).
This capability allows the use of the MIPS SIMD Architecture (MSA) by the guest.
It allows the Config3.MSAP bit to be set to enable the use of MSA by the guest.
-Once this is done the KVM_REG_MIPS_VEC_* and KVM_REG_MIPS_MSA_* registers can be
-accessed, and the Config5.MSAEn bit is accessible via the KVM API and also from
-the guest.
+Once this is done the ``KVM_REG_MIPS_VEC_*`` and ``KVM_REG_MIPS_MSA_*``
+registers can be accessed, and the Config5.MSAEn bit is accessible via the
+KVM API and also from the guest.
6.74 KVM_CAP_SYNC_REGS
-Architectures: s390, x86
-Target: s390: always enabled, x86: vcpu
-Parameters: none
-Returns: x86: KVM_CHECK_EXTENSION returns a bit-array indicating which register
-sets are supported (bitfields defined in arch/x86/include/uapi/asm/kvm.h).
+----------------------
+
+:Architectures: s390, x86
+:Target: s390: always enabled, x86: vcpu
+:Parameters: none
+:Returns: x86: KVM_CHECK_EXTENSION returns a bit-array indicating which register
+ sets are supported
+ (bitfields defined in arch/x86/include/uapi/asm/kvm.h).
As described above in the kvm_sync_regs struct info in section 5 (kvm_run):
KVM_CAP_SYNC_REGS "allow[s] userspace to access certain guest registers
@@ -4825,6 +5350,7 @@ userspace.
For s390 specifics, please refer to the source code.
For x86:
+
- the register sets to be copied out to kvm_run are selectable
by userspace (rather that all sets being copied out for every exit).
- vcpu_events are available in addition to regs and sregs.
@@ -4841,23 +5367,26 @@ into the vCPU even if they've been modified.
Unused bitfields in the bitarrays must be set to zero.
-struct kvm_sync_regs {
+::
+
+ struct kvm_sync_regs {
struct kvm_regs regs;
struct kvm_sregs sregs;
struct kvm_vcpu_events events;
-};
+ };
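
A hedged x86 sketch of the resulting flow, assuming ``run`` is the mmap'ed
kvm_run of ``vcpu_fd`` and the KVM_SYNC_X86_* bits are reported as supported::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int run_with_synced_regs(int vcpu_fd, struct kvm_run *run)
  {
          /* Select which sets KVM should copy out on every exit. */
          run->kvm_valid_regs = KVM_SYNC_X86_REGS | KVM_SYNC_X86_SREGS;

          if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
                  return -1;

          /* run->s.regs now mirrors guest state; modify it and mark
           * it dirty to have KVM load it back on the next KVM_RUN. */
          run->s.regs.regs.rax = 0;
          run->kvm_dirty_regs = KVM_SYNC_X86_REGS;
          return 0;
  }
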
6.75 KVM_CAP_PPC_IRQ_XIVE
+-------------------------
-Architectures: ppc
-Target: vcpu
-Parameters: args[0] is the XIVE device fd
- args[1] is the XIVE CPU number (server ID) for this vcpu
+:Architectures: ppc
+:Target: vcpu
+:Parameters: args[0] is the XIVE device fd;
+ args[1] is the XIVE CPU number (server ID) for this vcpu
This capability connects the vcpu to an in-kernel XIVE device.
7. Capabilities that can be enabled on VMs
-------------------------------------------
+==========================================
There are certain capabilities that change the behavior of the virtual
machine when enabled. To enable them, please see section 4.37. Below
@@ -4866,20 +5395,24 @@ is when enabling them.
The following information is provided along with the description:
- Architectures: which instruction set architectures provide this ioctl.
+ Architectures:
+ which instruction set architectures provide this ioctl.
x86 includes both i386 and x86_64.
- Parameters: what parameters are accepted by the capability.
+ Parameters:
+ what parameters are accepted by the capability.
- Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
+ Returns:
+ the return value. General error numbers (EBADF, ENOMEM, EINVAL)
are not detailed, but errors with specific meanings are.
7.1 KVM_CAP_PPC_ENABLE_HCALL
+----------------------------
-Architectures: ppc
-Parameters: args[0] is the sPAPR hcall number
- args[1] is 0 to disable, 1 to enable in-kernel handling
+:Architectures: ppc
+:Parameters: args[0] is the sPAPR hcall number;
+ args[1] is 0 to disable, 1 to enable in-kernel handling
This capability controls whether individual sPAPR hypercalls (hcalls)
get handled by the kernel or not. Enabling or disabling in-kernel
@@ -4897,13 +5430,15 @@ implementation, the KVM_ENABLE_CAP ioctl will fail with an EINVAL
error.
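
A sketch of toggling one hcall, assuming ``vm_fd`` is the VM descriptor and
``hcall_nr`` a valid sPAPR hcall token::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int set_hcall_handling(int vm_fd, __u64 hcall_nr, int in_kernel)
  {
          struct kvm_enable_cap cap = {
                  .cap  = KVM_CAP_PPC_ENABLE_HCALL,
                  .args = { hcall_nr, in_kernel ? 1 : 0 },
          };

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }
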
7.2 KVM_CAP_S390_USER_SIGP
+--------------------------
-Architectures: s390
-Parameters: none
+:Architectures: s390
+:Parameters: none
This capability controls which SIGP orders will be handled completely in user
space. With this capability enabled, all fast orders will be handled completely
in the kernel:
+
- SENSE
- SENSE RUNNING
- EXTERNAL CALL
@@ -4917,48 +5452,52 @@ in the hardware prior to interception). If this capability is not enabled, the
old way of handling SIGP orders is used (partially in kernel and user space).
7.3 KVM_CAP_S390_VECTOR_REGISTERS
+---------------------------------
-Architectures: s390
-Parameters: none
-Returns: 0 on success, negative value on error
+:Architectures: s390
+:Parameters: none
+:Returns: 0 on success, negative value on error
Allows use of the vector registers introduced with the z13 processor, and
provides for the synchronization between host and user space. Will
return -EINVAL if the machine does not support vectors.
7.4 KVM_CAP_S390_USER_STSI
+--------------------------
-Architectures: s390
-Parameters: none
+:Architectures: s390
+:Parameters: none
This capability allows post-handlers for the STSI instruction. After
initial handling in the kernel, KVM exits to user space with
KVM_EXIT_S390_STSI to allow user space to insert further data.
Before exiting to userspace, kvm handlers should fill in s390_stsi field of
-vcpu->run:
-struct {
+vcpu->run::
+
+ struct {
__u64 addr;
__u8 ar;
__u8 reserved;
__u8 fc;
__u8 sel1;
__u16 sel2;
-} s390_stsi;
+ } s390_stsi;
-@addr - guest address of STSI SYSIB
-@fc - function code
-@sel1 - selector 1
-@sel2 - selector 2
-@ar - access register number
+ @addr - guest address of STSI SYSIB
+ @fc - function code
+ @sel1 - selector 1
+ @sel2 - selector 2
+ @ar - access register number
KVM handlers should exit to userspace with rc = -EREMOTE.
7.5 KVM_CAP_SPLIT_IRQCHIP
+-------------------------
-Architectures: x86
-Parameters: args[0] - number of routes reserved for userspace IOAPICs
-Returns: 0 on success, -1 on error
+:Architectures: x86
+:Parameters: args[0] - number of routes reserved for userspace IOAPICs
+:Returns: 0 on success, -1 on error
Create a local apic for each processor in the kernel. This can be used
instead of KVM_CREATE_IRQCHIP if the userspace VMM wishes to emulate the
@@ -4975,24 +5514,26 @@ Fails if VCPU has already been created, or if the irqchip is already in the
kernel (i.e. KVM_CREATE_IRQCHIP has already been called).
7.6 KVM_CAP_S390_RI
+-------------------
-Architectures: s390
-Parameters: none
+:Architectures: s390
+:Parameters: none
Allows use of runtime-instrumentation introduced with the zEC12 processor.
Will return -EINVAL if the machine does not support runtime-instrumentation.
Will return -EBUSY if a VCPU has already been created.
7.7 KVM_CAP_X2APIC_API
+----------------------
-Architectures: x86
-Parameters: args[0] - features that should be enabled
-Returns: 0 on success, -EINVAL when args[0] contains invalid features
+:Architectures: x86
+:Parameters: args[0] - features that should be enabled
+:Returns: 0 on success, -EINVAL when args[0] contains invalid features
-Valid feature flags in args[0] are
+Valid feature flags in args[0] are::
-#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
-#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+ #define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
+ #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
Enabling KVM_X2APIC_API_USE_32BIT_IDS changes the behavior of
KVM_SET_GSI_ROUTING, KVM_SIGNAL_MSI, KVM_SET_LAPIC, and KVM_GET_LAPIC,
@@ -5006,9 +5547,10 @@ without interrupt remapping. This is undesirable in logical mode,
where 0xff represents CPUs 0-7 in cluster 0.
7.8 KVM_CAP_S390_USER_INSTR0
+----------------------------
-Architectures: s390
-Parameters: none
+:Architectures: s390
+:Parameters: none
With this capability enabled, all illegal instructions 0x0000 (2 bytes) will
be intercepted and forwarded to user space. User space can use this
@@ -5020,26 +5562,29 @@ This capability can be enabled dynamically even if VCPUs were already
created and are running.
7.9 KVM_CAP_S390_GS
+-------------------
-Architectures: s390
-Parameters: none
-Returns: 0 on success; -EINVAL if the machine does not support
- guarded storage; -EBUSY if a VCPU has already been created.
+:Architectures: s390
+:Parameters: none
+:Returns: 0 on success; -EINVAL if the machine does not support
+ guarded storage; -EBUSY if a VCPU has already been created.
Allows use of guarded storage for the KVM guest.
7.10 KVM_CAP_S390_AIS
+---------------------
-Architectures: s390
-Parameters: none
+:Architectures: s390
+:Parameters: none
Allow use of adapter-interruption suppression.
-Returns: 0 on success; -EBUSY if a VCPU has already been created.
+:Returns: 0 on success; -EBUSY if a VCPU has already been created.
7.11 KVM_CAP_PPC_SMT
+--------------------
-Architectures: ppc
-Parameters: vsmt_mode, flags
+:Architectures: ppc
+:Parameters: vsmt_mode, flags
Enabling this capability on a VM provides userspace with a way to set
the desired virtual SMT mode (i.e. the number of virtual CPUs per
@@ -5054,9 +5599,10 @@ The KVM_CAP_PPC_SMT_POSSIBLE capability indicates which virtual SMT
modes are available.
7.12 KVM_CAP_PPC_FWNMI
+----------------------
-Architectures: ppc
-Parameters: none
+:Architectures: ppc
+:Parameters: none
With this capability a machine check exception in the guest address
space will cause KVM to exit the guest with NMI exit reason. This
@@ -5065,17 +5611,18 @@ machine check handling routine. Without this capability KVM will
branch to guests' 0x200 interrupt vector.
7.13 KVM_CAP_X86_DISABLE_EXITS
+------------------------------
-Architectures: x86
-Parameters: args[0] defines which exits are disabled
-Returns: 0 on success, -EINVAL when args[0] contains invalid exits
+:Architectures: x86
+:Parameters: args[0] defines which exits are disabled
+:Returns: 0 on success, -EINVAL when args[0] contains invalid exits
-Valid bits in args[0] are
+Valid bits in args[0] are::
-#define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
-#define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
-#define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
-#define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
+ #define KVM_X86_DISABLE_EXITS_MWAIT (1 << 0)
+ #define KVM_X86_DISABLE_EXITS_HLT (1 << 1)
+ #define KVM_X86_DISABLE_EXITS_PAUSE (1 << 2)
+ #define KVM_X86_DISABLE_EXITS_CSTATE (1 << 3)
Enabling this capability on a VM provides userspace with a way to no
longer intercept some instructions for improved latency in some
@@ -5087,12 +5634,13 @@ all such vmexits.
Do not enable KVM_FEATURE_PV_UNHALT if you disable HLT exits.
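
For example, a sketch disabling HLT and PAUSE interception on a VM (typically
done before vCPUs are created)::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int disable_hlt_pause_exits(int vm_fd)
  {
          struct kvm_enable_cap cap = {
                  .cap  = KVM_CAP_X86_DISABLE_EXITS,
                  .args = { KVM_X86_DISABLE_EXITS_HLT |
                            KVM_X86_DISABLE_EXITS_PAUSE },
          };

          return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
  }
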
7.14 KVM_CAP_S390_HPAGE_1M
+--------------------------
-Architectures: s390
-Parameters: none
-Returns: 0 on success, -EINVAL if hpage module parameter was not set
- or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
- flag set
+:Architectures: s390
+:Parameters: none
+:Returns: 0 on success, -EINVAL if hpage module parameter was not set
+ or cmma is enabled, or the VM has the KVM_VM_S390_UCONTROL
+ flag set
With this capability the KVM support for memory backing with 1m pages
through hugetlbfs can be enabled for a VM. After the capability is
@@ -5104,20 +5652,22 @@ While it is generally possible to create a huge page backed VM without
this capability, the VM will not be able to run.
7.15 KVM_CAP_MSR_PLATFORM_INFO
+------------------------------
-Architectures: x86
-Parameters: args[0] whether feature should be enabled or not
+:Architectures: x86
+:Parameters: args[0] whether feature should be enabled or not
With this capability, a guest may read the MSR_PLATFORM_INFO MSR. Otherwise,
a #GP would be raised when the guest tries to access it. Currently, this
capability does not enable write permissions of this MSR for the guest.
7.16 KVM_CAP_PPC_NESTED_HV
+--------------------------
-Architectures: ppc
-Parameters: none
-Returns: 0 on success, -EINVAL when the implementation doesn't support
- nested-HV virtualization.
+:Architectures: ppc
+:Parameters: none
+:Returns: 0 on success, -EINVAL when the implementation doesn't support
+ nested-HV virtualization.
HV-KVM on POWER9 and later systems allows for "nested-HV"
virtualization, which provides a way for a guest VM to run guests that
@@ -5127,9 +5677,10 @@ the necessary functionality and on the facility being enabled with a
kvm-hv module parameter.
7.17 KVM_CAP_EXCEPTION_PAYLOAD
+------------------------------
-Architectures: x86
-Parameters: args[0] whether feature should be enabled or not
+:Architectures: x86
+:Parameters: args[0] whether feature should be enabled or not
With this capability enabled, CR2 will not be modified prior to the
emulated VM-exit when L1 intercepts a #PF exception that occurs in
@@ -5140,21 +5691,21 @@ L2. As a result, when KVM_GET_VCPU_EVENTS reports a pending #PF (or
faulting address (or the new DR6 bits*) will be reported in the
exception_payload field. Similarly, when userspace injects a #PF (or
#DB) into L2 using KVM_SET_VCPU_EVENTS, it is expected to set
-exception.has_payload and to put the faulting address (or the new DR6
-bits*) in the exception_payload field.
+exception.has_payload and to put the faulting address - or the new DR6
+bits\ [#]_ - in the exception_payload field.
This capability also enables exception.pending in struct
kvm_vcpu_events, which allows userspace to distinguish between pending
and injected exceptions.
-* For the new DR6 bits, note that bit 16 is set iff the #DB exception
- will clear DR6.RTM.
+.. [#] For the new DR6 bits, note that bit 16 is set iff the #DB exception
+ will clear DR6.RTM.
7.18 KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2
+--------------------------------------
-Architectures: x86, arm, arm64, mips
-Parameters: args[0] whether feature should be enabled or not
+:Architectures: x86, arm, arm64, mips
+:Parameters: args[0] whether feature should be enabled or not
With this capability enabled, KVM_GET_DIRTY_LOG will not automatically
clear and write-protect all pages that are returned as dirty.
@@ -5181,14 +5732,15 @@ KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2 signals that those bugs are fixed.
Userspace should not try to use KVM_CAP_MANUAL_DIRTY_LOG_PROTECT.
8. Other capabilities.
-----------------------
+======================
This section lists capabilities that give information about other
features of the KVM implementation.
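
These are queried rather than enabled; a minimal sketch, assuming ``kvm_fd``
is the /dev/kvm (or VM) descriptor::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int has_cap(int kvm_fd, long cap)
  {
          /* 0 means absent; a positive value means present (and may
           * carry extra information, as for KVM_CAP_ARM_USER_IRQ). */
          return ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap) > 0;
  }
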
8.1 KVM_CAP_PPC_HWRNG
+---------------------
-Architectures: ppc
+:Architectures: ppc
This capability, if KVM_CHECK_EXTENSION indicates that it is
available, means that that the kernel has an implementation of the
@@ -5197,8 +5749,10 @@ If present, the kernel H_RANDOM handler can be enabled for guest use
with the KVM_CAP_PPC_ENABLE_HCALL capability.
8.2 KVM_CAP_HYPERV_SYNIC
+------------------------
+
+:Architectures: x86
-Architectures: x86
This capability, if KVM_CHECK_EXTENSION indicates that it is
available, means that that the kernel has an implementation of the
Hyper-V Synthetic interrupt controller(SynIC). Hyper-V SynIC is
@@ -5210,8 +5764,9 @@ will disable the use of APIC hardware virtualization even if supported
by the CPU, as it's incompatible with SynIC auto-EOI behavior.
8.3 KVM_CAP_PPC_RADIX_MMU
+-------------------------
-Architectures: ppc
+:Architectures: ppc
This capability, if KVM_CHECK_EXTENSION indicates that it is
available, means that the kernel can support guests using the
@@ -5219,8 +5774,9 @@ radix MMU defined in Power ISA V3.00 (as implemented in the POWER9
processor).
8.4 KVM_CAP_PPC_HASH_MMU_V3
+---------------------------
-Architectures: ppc
+:Architectures: ppc
This capability, if KVM_CHECK_EXTENSION indicates that it is
available, means that the kernel can support guests using the
@@ -5228,8 +5784,9 @@ hashed page table MMU defined in Power ISA V3.00 (as implemented in
the POWER9 processor), including in-memory segment tables.
8.5 KVM_CAP_MIPS_VZ
+-------------------
-Architectures: mips
+:Architectures: mips
This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
it is available, means that full hardware assisted virtualization capabilities
@@ -5247,16 +5804,19 @@ values (see below). All other values are reserved. This is to allow for the
possibility of other hardware assisted virtualization implementations which
may be incompatible with the MIPS VZ ASE.
- 0: The trap & emulate implementation is in use to run guest code in user
+== ==========================================================================
+ 0 The trap & emulate implementation is in use to run guest code in user
mode. Guest virtual memory segments are rearranged to fit the guest in the
user mode address space.
- 1: The MIPS VZ ASE is in use, providing full hardware assisted
+ 1 The MIPS VZ ASE is in use, providing full hardware assisted
virtualization, including standard guest virtual memory segments.
+== ==========================================================================
8.6 KVM_CAP_MIPS_TE
+-------------------
-Architectures: mips
+:Architectures: mips
This capability, if KVM_CHECK_EXTENSION on the main kvm handle indicates that
it is available, means that the trap & emulate implementation is available to
@@ -5268,8 +5828,9 @@ If KVM_CHECK_EXTENSION on a kvm VM handle indicates that this capability is
available, it means that the VM is using trap & emulate.
8.7 KVM_CAP_MIPS_64BIT
+----------------------
-Architectures: mips
+:Architectures: mips
This capability indicates the supported architecture type of the guest, i.e. the
supported register and address width.
@@ -5279,22 +5840,26 @@ kvm VM handle correspond roughly to the CP0_Config.AT register field, and should
be checked specifically against known values (see below). All other values are
reserved.
- 0: MIPS32 or microMIPS32.
+== ========================================================================
+ 0 MIPS32 or microMIPS32.
Both registers and addresses are 32-bits wide.
It will only be possible to run 32-bit guest code.
- 1: MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
+ 1 MIPS64 or microMIPS64 with access only to 32-bit compatibility segments.
Registers are 64-bits wide, but addresses are 32-bits wide.
64-bit guest code may run but cannot access MIPS64 memory segments.
It will also be possible to run 32-bit guest code.
- 2: MIPS64 or microMIPS64 with access to all address segments.
+ 2 MIPS64 or microMIPS64 with access to all address segments.
Both registers and addresses are 64-bits wide.
It will be possible to run 64-bit or 32-bit guest code.
+== ========================================================================
8.9 KVM_CAP_ARM_USER_IRQ
+------------------------
+
+:Architectures: arm, arm64
-Architectures: arm, arm64
This capability, if KVM_CHECK_EXTENSION indicates that it is available, means
that if userspace creates a VM without an in-kernel interrupt controller, it
will be notified of changes to the output level of in-kernel emulated devices,
@@ -5321,7 +5886,7 @@ If KVM_CAP_ARM_USER_IRQ is supported, the KVM_CHECK_EXTENSION ioctl returns a
number larger than 0 indicating the version of this capability is implemented
and thereby which bits in in run->s.regs.device_irq_level can signal values.
-Currently the following bits are defined for the device_irq_level bitmap:
+Currently the following bits are defined for the device_irq_level bitmap::
KVM_CAP_ARM_USER_IRQ >= 1:
@@ -5334,8 +5899,9 @@ indicated by returning a higher number from KVM_CHECK_EXTENSION and will be
listed above.
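
A hedged sketch of forwarding the timer lines after an exit, assuming ``run``
is the vCPU's kvm_run and ``set_irq_line`` is a hypothetical
interrupt-controller hook (the line numbers are illustrative)::

  #include <linux/kvm.h>

  extern void set_irq_line(int line, int asserted);

  #define VTIMER_LINE 0  /* illustrative userspace line ids */
  #define PTIMER_LINE 1

  static void sync_timer_lines(struct kvm_run *run)
  {
          __u64 level = run->s.regs.device_irq_level;

          set_irq_line(VTIMER_LINE, !!(level & KVM_ARM_DEV_EL1_VTIMER));
          set_irq_line(PTIMER_LINE, !!(level & KVM_ARM_DEV_EL1_PTIMER));
  }
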
8.10 KVM_CAP_PPC_SMT_POSSIBLE
+-----------------------------
-Architectures: ppc
+:Architectures: ppc
Querying this capability returns a bitmap indicating the possible
virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
@@ -5343,8 +5909,9 @@ virtual SMT modes that can be set using KVM_CAP_PPC_SMT. If bit N
available.
8.11 KVM_CAP_HYPERV_SYNIC2
+--------------------------
-Architectures: x86
+:Architectures: x86
This capability enables a newer version of Hyper-V Synthetic interrupt
controller (SynIC). The only difference with KVM_CAP_HYPERV_SYNIC is that KVM
@@ -5352,8 +5919,9 @@ doesn't clear SynIC message and event flags pages when they are enabled by
writing to the respective MSRs.
8.12 KVM_CAP_HYPERV_VP_INDEX
+----------------------------
-Architectures: x86
+:Architectures: x86
This capability indicates that userspace can load HV_X64_MSR_VP_INDEX msr. Its
value is used to denote the target vcpu for a SynIC interrupt. For
compatibility, KVM initializes this msr to KVM's internal vcpu index. When this
capability is absent, userspace can still query this msr's value.
8.13 KVM_CAP_S390_AIS_MIGRATION
+-------------------------------
-Architectures: s390
-Parameters: none
+:Architectures: s390
+:Parameters: none
This capability indicates if the flic device will be able to get/set the
AIS states for migration via the KVM_DEV_FLIC_AISM_ALL attribute and allows
to discover this without having to create a flic device.
8.14 KVM_CAP_S390_PSW
+---------------------
-Architectures: s390
+:Architectures: s390
This capability indicates that the PSW is exposed via the kvm_run structure.
8.15 KVM_CAP_S390_GMAP
+----------------------
-Architectures: s390
+:Architectures: s390
This capability indicates that the user space memory used as guest mapping can
be anywhere in the user memory address space, as long as the memory slots are
aligned and sized to a segment (1MB) boundary.
8.16 KVM_CAP_S390_COW
+---------------------
-Architectures: s390
+:Architectures: s390
This capability indicates that the user space memory used as guest mapping can
use copy-on-write semantics as well as dirty pages tracking via read-only page
tables.
8.17 KVM_CAP_S390_BPB
+---------------------
-Architectures: s390
+:Architectures: s390
This capability indicates that kvm will implement the interfaces to handle
reset, migration and nested KVM for branch prediction blocking. The stfle
facility 82 should not be provided to the guest without this capability.
8.18 KVM_CAP_HYPERV_TLBFLUSH
+----------------------------
-Architectures: x86
+:Architectures: x86
This capability indicates that KVM supports paravirtualized Hyper-V TLB Flush
hypercalls:
@@ -5409,8 +5983,9 @@ HvFlushVirtualAddressSpace, HvFlushVirtualAddressSpaceEx,
HvFlushVirtualAddressList, HvFlushVirtualAddressListEx.
8.19 KVM_CAP_ARM_INJECT_SERROR_ESR
+----------------------------------
-Architectures: arm, arm64
+:Architectures: arm, arm64
This capability indicates that userspace can specify (via the
KVM_SET_VCPU_EVENTS ioctl) the syndrome value reported to the guest when it
@@ -5421,16 +5996,20 @@ CPU when the exception is taken. If this virtual SError is taken to EL1 using
AArch64, this value will be reported in the ISS field of ESR_ELx.
See KVM_CAP_VCPU_EVENTS for more details.
+
8.20 KVM_CAP_HYPERV_SEND_IPI
+----------------------------
-Architectures: x86
+:Architectures: x86
This capability indicates that KVM supports paravirtualized Hyper-V IPI send
hypercalls:
HvCallSendSyntheticClusterIpi, HvCallSendSyntheticClusterIpiEx.
+
8.21 KVM_CAP_HYPERV_DIRECT_TLBFLUSH
+-----------------------------------
-Architecture: x86
+:Architectures: x86
This capability indicates that KVM running on top of Hyper-V hypervisor
enables Direct TLB flush for its guests meaning that TLB flush
diff --git a/Documentation/virt/kvm/arm/hyp-abi.txt b/Documentation/virt/kvm/arm/hyp-abi.rst
index a20a0bee268d..d1fc27d848e9 100644
--- a/Documentation/virt/kvm/arm/hyp-abi.txt
+++ b/Documentation/virt/kvm/arm/hyp-abi.rst
@@ -1,4 +1,8 @@
-* Internal ABI between the kernel and HYP
+.. SPDX-License-Identifier: GPL-2.0
+
+=======================================
+Internal ABI between the kernel and HYP
+=======================================
This file documents the interaction between the Linux kernel and the
hypervisor layer when running Linux as a hypervisor (for example
@@ -19,25 +23,31 @@ and only act on individual CPUs.
Unless specified otherwise, any built-in hypervisor must implement
these functions (see arch/arm{,64}/include/asm/virt.h):
-* r0/x0 = HVC_SET_VECTORS
- r1/x1 = vectors
+* ::
+
+ r0/x0 = HVC_SET_VECTORS
+ r1/x1 = vectors
Set HVBAR/VBAR_EL2 to 'vectors' to enable a hypervisor. 'vectors'
must be a physical address, and respect the alignment requirements
of the architecture. Only implemented by the initial stubs, not by
Linux hypervisors.
-* r0/x0 = HVC_RESET_VECTORS
+* ::
+
+ r0/x0 = HVC_RESET_VECTORS
Turn HYP/EL2 MMU off, and reset HVBAR/VBAR_EL2 to the initial
stubs' exception vector value. This effectively disables an existing
hypervisor.
-* r0/x0 = HVC_SOFT_RESTART
- r1/x1 = restart address
- x2 = x0's value when entering the next payload (arm64)
- x3 = x1's value when entering the next payload (arm64)
- x4 = x2's value when entering the next payload (arm64)
+* ::
+
+ r0/x0 = HVC_SOFT_RESTART
+ r1/x1 = restart address
+ x2 = x0's value when entering the next payload (arm64)
+ x3 = x1's value when entering the next payload (arm64)
+ x4 = x2's value when entering the next payload (arm64)
Mask all exceptions, disable the MMU, move the arguments into place
(arm64 only), and jump to the restart address while at HYP/EL2. This
diff --git a/Documentation/virt/kvm/arm/index.rst b/Documentation/virt/kvm/arm/index.rst
new file mode 100644
index 000000000000..3e2b2aba90fc
--- /dev/null
+++ b/Documentation/virt/kvm/arm/index.rst
@@ -0,0 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===
+ARM
+===
+
+.. toctree::
+ :maxdepth: 2
+
+ hyp-abi
+ psci
+ pvtime
diff --git a/Documentation/virt/kvm/arm/psci.txt b/Documentation/virt/kvm/arm/psci.rst
index 559586fc9d37..d52c2e83b5b8 100644
--- a/Documentation/virt/kvm/arm/psci.txt
+++ b/Documentation/virt/kvm/arm/psci.rst
@@ -1,3 +1,9 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================================
+Power State Coordination Interface (PSCI)
+=========================================
+
KVM implements the PSCI (Power State Coordination Interface)
specification in order to provide services such as CPU on/off, reset
and power-off to the guest.
@@ -30,32 +36,42 @@ The following register is defined:
- Affects the whole VM (even if the register view is per-vcpu)
* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
- Holds the state of the firmware support to mitigate CVE-2017-5715, as
- offered by KVM to the guest via a HVC call. The workaround is described
- under SMCCC_ARCH_WORKAROUND_1 in [1].
+ Holds the state of the firmware support to mitigate CVE-2017-5715, as
+ offered by KVM to the guest via a HVC call. The workaround is described
+ under SMCCC_ARCH_WORKAROUND_1 in [1]_.
+
Accepted values are:
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL: KVM does not offer
+
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL:
+ KVM does not offer
firmware support for the workaround. The mitigation status for the
guest is unknown.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL: The workaround HVC call is
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL:
+ The workaround HVC call is
available to the guest and required for the mitigation.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED: The workaround HVC call
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED:
+ The workaround HVC call
is available to the guest, but it is not needed on this VCPU.
* KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
- Holds the state of the firmware support to mitigate CVE-2018-3639, as
- offered by KVM to the guest via a HVC call. The workaround is described
- under SMCCC_ARCH_WORKAROUND_2 in [1].
+ Holds the state of the firmware support to mitigate CVE-2018-3639, as
+ offered by KVM to the guest via a HVC call. The workaround is described
+ under SMCCC_ARCH_WORKAROUND_2 in [1]_.
+
Accepted values are:
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL: A workaround is not
+
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+ A workaround is not
available. KVM does not offer firmware support for the workaround.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN: The workaround state is
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+ The workaround state is
unknown. KVM does not offer firmware support for the workaround.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL: The workaround is available,
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
+ The workaround is available,
and can be disabled by a vCPU. If
KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED is set, it is active for
this vCPU.
- KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED: The workaround is
- always active on this vCPU or it is not needed.
+ KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
+ The workaround is always active on this vCPU or it is not needed.
-[1] https://developer.arm.com/-/media/developer/pdf/ARM_DEN_0070A_Firmware_interfaces_for_mitigating_CVE-2017-5715.pdf
+.. [1] https://developer.arm.com/-/media/developer/pdf/ARM_DEN_0070A_Firmware_interfaces_for_mitigating_CVE-2017-5715.pdf
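
As an illustrative sketch, reading the first workaround's state with the
ONE_REG interface, assuming ``vcpu_fd`` is an arm64 vCPU descriptor::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int read_wa1_state(int vcpu_fd, __u64 *state)
  {
          struct kvm_one_reg reg = {
                  .id   = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1,
                  .addr = (__u64)(unsigned long)state,
          };

          return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
  }
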
diff --git a/Documentation/virt/kvm/devices/arm-vgic-its.txt b/Documentation/virt/kvm/devices/arm-vgic-its.rst
index eeaa95b893a8..6c304fd2b1b4 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-its.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic-its.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===============================================
ARM Virtual Interrupt Translation Service (ITS)
===============================================
@@ -12,22 +15,32 @@ There can be multiple ITS controllers per guest, each of them has to have
a separate, non-overlapping MMIO region.
-Groups:
- KVM_DEV_ARM_VGIC_GRP_ADDR
+Groups
+======
+
+KVM_DEV_ARM_VGIC_GRP_ADDR
+-------------------------
+
Attributes:
KVM_VGIC_ITS_ADDR_TYPE (rw, 64-bit)
Base address in the guest physical address space of the GICv3 ITS
control register frame.
This address needs to be 64K aligned and the region covers 128K.
+
Errors:
- -E2BIG: Address outside of addressable IPA range
- -EINVAL: Incorrectly aligned address
- -EEXIST: Address already configured
- -EFAULT: Invalid user pointer for attr->addr.
- -ENODEV: Incorrect attribute or the ITS is not supported.
+ ======= =================================================
+ -E2BIG Address outside of addressable IPA range
+ -EINVAL Incorrectly aligned address
+ -EEXIST Address already configured
+ -EFAULT Invalid user pointer for attr->addr.
+ -ENODEV Incorrect attribute or the ITS is not supported.
+ ======= =================================================
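
A sketch of programming this attribute, assuming ``its_fd`` is the fd returned
by KVM_CREATE_DEVICE for the ITS::

  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static int set_its_base(int its_fd, __u64 base)
  {
          struct kvm_device_attr attr = {
                  .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
                  .attr  = KVM_VGIC_ITS_ADDR_TYPE,
                  .addr  = (__u64)(unsigned long)&base,
          };

          return ioctl(its_fd, KVM_SET_DEVICE_ATTR, &attr);
  }
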
+
+
+KVM_DEV_ARM_VGIC_GRP_CTRL
+-------------------------
- KVM_DEV_ARM_VGIC_GRP_CTRL
Attributes:
KVM_DEV_ARM_VGIC_CTRL_INIT
request the initialization of the ITS, no additional parameter in
@@ -58,16 +71,21 @@ Groups:
"ITS Restore Sequence".
Errors:
- -ENXIO: ITS not properly configured as required prior to setting
+
+ ======= ==========================================================
+ -ENXIO ITS not properly configured as required prior to setting
this attribute
- -ENOMEM: Memory shortage when allocating ITS internal data
- -EINVAL: Inconsistent restored data
- -EFAULT: Invalid guest ram access
- -EBUSY: One or more VCPUS are running
- -EACCES: The virtual ITS is backed by a physical GICv4 ITS, and the
+ -ENOMEM Memory shortage when allocating ITS internal data
+ -EINVAL Inconsistent restored data
+ -EFAULT Invalid guest ram access
+ -EBUSY One or more VCPUS are running
+ -EACCES The virtual ITS is backed by a physical GICv4 ITS, and the
state is not available
+ ======= ==========================================================
+
+KVM_DEV_ARM_VGIC_GRP_ITS_REGS
+-----------------------------
- KVM_DEV_ARM_VGIC_GRP_ITS_REGS
Attributes:
The attr field of kvm_device_attr encodes the offset of the
ITS register, relative to the ITS control frame base address
@@ -78,6 +96,7 @@ Groups:
be accessed with full length.
Writes to read-only registers are ignored by the kernel except for:
+
- GITS_CREADR. It must be restored otherwise commands in the queue
will be re-executed after restoring CWRITER. GITS_CREADR must be
restored before restoring the GITS_CTLR which is likely to enable the
@@ -91,30 +110,36 @@ Groups:
For other registers, getting or setting a register has the same
effect as reading/writing the register on real hardware.
+
Errors:
- -ENXIO: Offset does not correspond to any supported register
- -EFAULT: Invalid user pointer for attr->addr
- -EINVAL: Offset is not 64-bit aligned
- -EBUSY: one or more VCPUS are running
- ITS Restore Sequence:
- -------------------------
+ ======= ====================================================
+ -ENXIO Offset does not correspond to any supported register
+ -EFAULT Invalid user pointer for attr->addr
+ -EINVAL Offset is not 64-bit aligned
+ -EBUSY One or more VCPUS are running
+ ======= ====================================================
+
+ITS Restore Sequence:
+---------------------
The following ordering must be followed when restoring the GIC and the ITS:
+
a) restore all guest memory and create vcpus
b) restore all redistributors
c) provide the ITS base address
(KVM_DEV_ARM_VGIC_GRP_ADDR)
d) restore the ITS in the following order:
- 1. Restore GITS_CBASER
- 2. Restore all other GITS_ registers, except GITS_CTLR!
- 3. Load the ITS table data (KVM_DEV_ARM_ITS_RESTORE_TABLES)
- 4. Restore GITS_CTLR
+
+ 1. Restore GITS_CBASER
+ 2. Restore all other ``GITS_`` registers, except GITS_CTLR!
+ 3. Load the ITS table data (KVM_DEV_ARM_ITS_RESTORE_TABLES)
+ 4. Restore GITS_CTLR
Then vcpus can be started.
- ITS Table ABI REV0:
- -------------------
+ITS Table ABI REV0:
+-------------------
Revision 0 of the ABI only supports the features of a virtual GICv3, and does
not support a virtual GICv4 with support for direct injection of virtual
@@ -125,12 +150,13 @@ Then vcpus can be started.
entries in the collection are listed in no particular order.
All entries are 8 bytes.
- Device Table Entry (DTE):
+ Device Table Entry (DTE)::
- bits: | 63| 62 ... 49 | 48 ... 5 | 4 ... 0 |
- values: | V | next | ITT_addr | Size |
+ bits: | 63| 62 ... 49 | 48 ... 5 | 4 ... 0 |
+ values: | V | next | ITT_addr | Size |
+
+ where:
- where;
- V indicates whether the entry is valid. If not, other fields
are not meaningful.
- next: equals to 0 if this entry is the last one; otherwise it
@@ -140,32 +166,34 @@ Then vcpus can be started.
- Size specifies the supported number of bits for the EventID,
minus one
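
A hedged decode sketch of this layout (struct and field names are
illustrative)::

  #include <linux/types.h>

  struct dte {
          unsigned int valid;     /* bit  63        */
          __u64 next;             /* bits 62 ... 49 */
          __u64 itt_addr;         /* bits 48 ... 5  */
          unsigned int size;      /* bits 4  ... 0  */
  };

  static struct dte decode_dte(__u64 e)
  {
          struct dte d = {
                  .valid    = e >> 63,
                  .next     = (e >> 49) & 0x3fff,
                  .itt_addr = (e >> 5) & 0xfffffffffff,
                  .size     = e & 0x1f,
          };
          return d;
  }
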
- Collection Table Entry (CTE):
+ Collection Table Entry (CTE)::
- bits: | 63| 62 .. 52 | 51 ... 16 | 15 ... 0 |
- values: | V | RES0 | RDBase | ICID |
+ bits: | 63| 62 .. 52 | 51 ... 16 | 15 ... 0 |
+ values: | V | RES0 | RDBase | ICID |
where:
+
- V indicates whether the entry is valid. If not, other fields are
not meaningful.
- RES0: reserved field with Should-Be-Zero-or-Preserved behavior.
- RDBase is the PE number (GICR_TYPER.Processor_Number semantic),
- ICID is the collection ID
- Interrupt Translation Entry (ITE):
+ Interrupt Translation Entry (ITE)::
- bits: | 63 ... 48 | 47 ... 16 | 15 ... 0 |
- values: | next | pINTID | ICID |
+ bits: | 63 ... 48 | 47 ... 16 | 15 ... 0 |
+ values: | next | pINTID | ICID |
where:
+
- next: equals to 0 if this entry is the last one; otherwise it corresponds
to the EventID offset to the next ITE capped by 2^16 -1.
- pINTID is the physical LPI ID; if zero, it means the entry is not valid
and other fields are not meaningful.
- ICID is the collection ID
- ITS Reset State:
- ----------------
+ITS Reset State:
+----------------
RESET returns the ITS to the same state that it was when first created and
initialized. When the RESET command returns, the following things are
diff --git a/Documentation/virt/kvm/devices/arm-vgic-v3.txt b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
index ff290b43c8e5..5dd3bff51978 100644
--- a/Documentation/virt/kvm/devices/arm-vgic-v3.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic-v3.rst
@@ -1,9 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==============================================================
ARM Virtual Generic Interrupt Controller v3 and later (VGICv3)
==============================================================
Device types supported:
- KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0
+ - KVM_DEV_TYPE_ARM_VGIC_V3 ARM Generic Interrupt Controller v3.0
Only one VGIC instance may be instantiated through this API. The created VGIC
will act as the VM interrupt controller, requiring emulated user-space devices
@@ -15,7 +18,8 @@ Creating a guest GICv3 device requires a host GICv3 as well.
Groups:
KVM_DEV_ARM_VGIC_GRP_ADDR
- Attributes:
+ Attributes:
+
KVM_VGIC_V3_ADDR_TYPE_DIST (rw, 64-bit)
Base address in the guest physical address space of the GICv3 distributor
register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
@@ -29,21 +33,25 @@ Groups:
This address needs to be 64K aligned.
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION (rw, 64-bit)
- The attribute data pointed to by kvm_device_attr.addr is a __u64 value:
- bits: | 63 .... 52 | 51 .... 16 | 15 - 12 |11 - 0
- values: | count | base | flags | index
+ The attribute data pointed to by kvm_device_attr.addr is a __u64 value::
+
+ bits: | 63 .... 52 | 51 .... 16 | 15 - 12 |11 - 0
+ values: | count | base | flags | index
+
- index encodes the unique redistributor region index
- flags: reserved for future use, currently 0
- base field encodes bits [51:16] of the guest physical base address
of the first redistributor in the region.
- count encodes the number of redistributors in the region. Must be
greater than 0.
+
There are two 64K pages for each redistributor in the region and
redistributors are laid out contiguously within the region. Regions
are filled with redistributors in the index order. The sum of all
region count fields must be greater than or equal to the number of
VCPUs. Redistributor regions must be registered in the incremental
index order, starting from index 0.
+
The characteristics of a specific redistributor region can be read
by presetting the index field in the attr data.
Only valid for KVM_DEV_TYPE_ARM_VGIC_V3.
@@ -52,23 +60,27 @@ Groups:
KVM_VGIC_V3_ADDR_TYPE_REDIST_REGION attributes.
Errors:
- -E2BIG: Address outside of addressable IPA range
- -EINVAL: Incorrectly aligned address, bad redistributor region
+
+ ======= =============================================================
+ -E2BIG Address outside of addressable IPA range
+ -EINVAL Incorrectly aligned address, bad redistributor region
count/index, mixed redistributor region attribute usage
- -EEXIST: Address already configured
- -ENOENT: Attempt to read the characteristics of a non existing
+ -EEXIST Address already configured
+ -ENOENT Attempt to read the characteristics of a non-existent
redistributor region
- -ENXIO: The group or attribute is unknown/unsupported for this device
+ -ENXIO The group or attribute is unknown/unsupported for this device
or hardware support is missing.
- -EFAULT: Invalid user pointer for attr->addr.
+ -EFAULT Invalid user pointer for attr->addr.
+ ======= =============================================================
+
+ KVM_DEV_ARM_VGIC_GRP_DIST_REGS, KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
+ Attributes:
- KVM_DEV_ARM_VGIC_GRP_DIST_REGS
- KVM_DEV_ARM_VGIC_GRP_REDIST_REGS
- Attributes:
- The attr field of kvm_device_attr encodes two values:
- bits: | 63 .... 32 | 31 .... 0 |
- values: | mpidr | offset |
+ The attr field of kvm_device_attr encodes two values::
+
+ bits: | 63 .... 32 | 31 .... 0 |
+ values: | mpidr | offset |
All distributor regs are (rw, 32-bit) and kvm_device_attr.addr points to a
__u32 value. 64-bit registers must be accessed by separately accessing the
@@ -93,7 +105,8 @@ Groups:
redistributor is accessed. The mpidr is ignored for the distributor.
The mpidr encoding is based on the affinity information in the
- architecture defined MPIDR, and the field is encoded as follows:
+ architecture defined MPIDR, and the field is encoded as follows::
+
| 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
| Aff3 | Aff2 | Aff1 | Aff0 |
@@ -148,24 +161,30 @@ Groups:
ignored.
Errors:
- -ENXIO: Getting or setting this register is not yet supported
- -EBUSY: One or more VCPUs are running
+
+ ====== =====================================================
+ -ENXIO Getting or setting this register is not yet supported
+ -EBUSY One or more VCPUs are running
+ ====== =====================================================
KVM_DEV_ARM_VGIC_GRP_CPU_SYSREGS
- Attributes:
- The attr field of kvm_device_attr encodes two values:
- bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 |
- values: | mpidr | RES | instr |
+ Attributes:
+
+ The attr field of kvm_device_attr encodes two values::
+
+ bits: | 63 .... 32 | 31 .... 16 | 15 .... 0 |
+ values: | mpidr | RES | instr |
The mpidr field encodes the CPU ID based on the affinity information in the
- architecture defined MPIDR, and the field is encoded as follows:
+ architecture defined MPIDR, and the field is encoded as follows::
+
| 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
| Aff3 | Aff2 | Aff1 | Aff0 |
The instr field encodes the system register to access based on the fields
defined in the A64 instruction set encoding for system register access
- (RES means the bits are reserved for future use and should be zero):
+ (RES means the bits are reserved for future use and should be zero)::
| 15 ... 14 | 13 ... 11 | 10 ... 7 | 6 ... 3 | 2 ... 0 |
| Op 0 | Op1 | CRn | CRm | Op2 |
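
A hypothetical helper that builds the attr value from these fields; the name and the shift arithmetic simply follow the two tables above and are not defined by the kernel headers::

    #include <linux/types.h>

    /* instr = Op0 [15:14] | Op1 [13:11] | CRn [10:7] | CRm [6:3] | Op2 [2:0] */
    static __u64 vgic_sysreg_attr(__u64 mpidr, __u16 op0, __u16 op1,
                                  __u16 crn, __u16 crm, __u16 op2)
    {
        __u16 instr = (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | op2;

        return (mpidr << 32) | instr;   /* RES bits [31:16] must stay zero */
    }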
@@ -178,26 +197,35 @@ Groups:
CPU interface registers access is not implemented for AArch32 mode.
Error -ENXIO is returned when accessed in AArch32 mode.
+
Errors:
- -ENXIO: Getting or setting this register is not yet supported
- -EBUSY: VCPU is running
- -EINVAL: Invalid mpidr or register value supplied
+
+ ======= =====================================================
+ -ENXIO Getting or setting this register is not yet supported
+ -EBUSY VCPU is running
+ -EINVAL Invalid mpidr or register value supplied
+ ======= =====================================================
KVM_DEV_ARM_VGIC_GRP_NR_IRQS
- Attributes:
+ Attributes:
+
A value describing the number of interrupts (SGI, PPI and SPI) for
this GIC instance, ranging from 64 to 1024, in increments of 32.
kvm_device_attr.addr points to a __u32 value.
Errors:
- -EINVAL: Value set is out of the expected range
- -EBUSY: Value has already be set.
+
+ ======= ======================================
+ -EINVAL Value set is out of the expected range
+ -EBUSY Value has already been set.
+ ======= ======================================
KVM_DEV_ARM_VGIC_GRP_CTRL
- Attributes:
+ Attributes:
+
KVM_DEV_ARM_VGIC_CTRL_INIT
request the initialization of the VGIC, no additional parameter in
kvm_device_attr.addr.
@@ -205,20 +233,26 @@ Groups:
save all LPI pending bits into guest RAM pending tables.
The first kB of the pending table is not altered by this operation.
+
Errors:
- -ENXIO: VGIC not properly configured as required prior to calling
- this attribute
- -ENODEV: no online VCPU
- -ENOMEM: memory shortage when allocating vgic internal data
- -EFAULT: Invalid guest ram access
- -EBUSY: One or more VCPUS are running
+
+ ======= ========================================================
+ -ENXIO VGIC not properly configured as required prior to calling
+ this attribute
+ -ENODEV no online VCPU
+ -ENOMEM memory shortage when allocating vgic internal data
+ -EFAULT Invalid guest ram access
+ -EBUSY One or more VCPUs are running
+ ======= ========================================================
KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO
- Attributes:
- The attr field of kvm_device_attr encodes the following values:
- bits: | 63 .... 32 | 31 .... 10 | 9 .... 0 |
- values: | mpidr | info | vINTID |
+ Attributes:
+
+ The attr field of kvm_device_attr encodes the following values::
+
+ bits: | 63 .... 32 | 31 .... 10 | 9 .... 0 |
+ values: | mpidr | info | vINTID |
The vINTID specifies which set of IRQs is reported on.
@@ -228,6 +262,7 @@ Groups:
VGIC_LEVEL_INFO_LINE_LEVEL:
Get/Set the input level of the IRQ line for a set of 32 contiguously
numbered interrupts.
+
vINTID must be a multiple of 32.
kvm_device_attr.addr points to a __u32 value which will contain a
@@ -243,9 +278,14 @@ Groups:
reported with the same value regardless of the mpidr specified.
The mpidr field encodes the CPU ID based on the affinity information in the
- architecture defined MPIDR, and the field is encoded as follows:
+ architecture defined MPIDR, and the field is encoded as follows::
+
| 63 .... 56 | 55 .... 48 | 47 .... 40 | 39 .... 32 |
| Aff3 | Aff2 | Aff1 | Aff0 |
+
Errors:
- -EINVAL: vINTID is not multiple of 32 or
- info field is not VGIC_LEVEL_INFO_LINE_LEVEL
+
+ ======= =============================================
+ -EINVAL vINTID is not multiple of 32 or info field is
+ not VGIC_LEVEL_INFO_LINE_LEVEL
+ ======= =============================================
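
A sketch of querying the line levels of one block of 32 interrupts with this encoding. ``vgic_fd`` and the helper name are assumptions; VGIC_LEVEL_INFO_LINE_LEVEL comes from the arm64 uapi headers::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int get_line_levels(int vgic_fd, __u64 mpidr, __u32 vintid, __u32 *levels)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_LEVEL_INFO,
            /* info field in bits [31:10], vINTID (a multiple of 32) in [9:0] */
            .attr  = (mpidr << 32) | (VGIC_LEVEL_INFO_LINE_LEVEL << 10) | vintid,
            .addr  = (__u64)(unsigned long)levels,  /* one bit per interrupt */
        };
        return ioctl(vgic_fd, KVM_GET_DEVICE_ATTR, &attr);
    }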
diff --git a/Documentation/virt/kvm/devices/arm-vgic.txt b/Documentation/virt/kvm/devices/arm-vgic.rst
index 97b6518148f8..40bdeea1d86e 100644
--- a/Documentation/virt/kvm/devices/arm-vgic.txt
+++ b/Documentation/virt/kvm/devices/arm-vgic.rst
@@ -1,8 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==================================================
ARM Virtual Generic Interrupt Controller v2 (VGIC)
==================================================
Device types supported:
- KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0
+
+ - KVM_DEV_TYPE_ARM_VGIC_V2 ARM Generic Interrupt Controller v2.0
Only one VGIC instance may be instantiated through either this API or the
legacy KVM_CREATE_IRQCHIP API. The created VGIC will act as the VM interrupt
@@ -17,7 +21,8 @@ create both a GICv3 and GICv2 device on the same VM.
Groups:
KVM_DEV_ARM_VGIC_GRP_ADDR
- Attributes:
+ Attributes:
+
KVM_VGIC_V2_ADDR_TYPE_DIST (rw, 64-bit)
Base address in the guest physical address space of the GIC distributor
register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
@@ -27,19 +32,25 @@ Groups:
Base address in the guest physical address space of the GIC virtual cpu
interface register mappings. Only valid for KVM_DEV_TYPE_ARM_VGIC_V2.
This address needs to be 4K aligned and the region covers 4 KByte.
+
Errors:
- -E2BIG: Address outside of addressable IPA range
- -EINVAL: Incorrectly aligned address
- -EEXIST: Address already configured
- -ENXIO: The group or attribute is unknown/unsupported for this device
+
+ ======= =============================================================
+ -E2BIG Address outside of addressable IPA range
+ -EINVAL Incorrectly aligned address
+ -EEXIST Address already configured
+ -ENXIO The group or attribute is unknown/unsupported for this device
or hardware support is missing.
- -EFAULT: Invalid user pointer for attr->addr.
+ -EFAULT Invalid user pointer for attr->addr.
+ ======= =============================================================
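
For illustration, a sketch of setting the VGICv2 distributor base with this group; ``vgic_fd`` and the address value are assumptions::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int set_v2_dist_base(int vgic_fd, __u64 gpa /* 4K aligned */)
    {
        struct kvm_device_attr attr = {
            .group = KVM_DEV_ARM_VGIC_GRP_ADDR,
            .attr  = KVM_VGIC_V2_ADDR_TYPE_DIST,
            .addr  = (__u64)(unsigned long)&gpa,  /* points to the 64-bit base */
        };
        return ioctl(vgic_fd, KVM_SET_DEVICE_ATTR, &attr);
    }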
KVM_DEV_ARM_VGIC_GRP_DIST_REGS
- Attributes:
- The attr field of kvm_device_attr encodes two values:
- bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 |
- values: | reserved | vcpu_index | offset |
+ Attributes:
+
+ The attr field of kvm_device_attr encodes two values::
+
+ bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 |
+ values: | reserved | vcpu_index | offset |
All distributor regs are (rw, 32-bit)
@@ -58,16 +69,22 @@ Groups:
KVM_DEV_ARM_VGIC_GRP_DIST_REGS and KVM_DEV_ARM_VGIC_GRP_CPU_REGS) to ensure
the expected behavior. Unless GICD_IIDR has been set from userspace, writes
to the interrupt group registers (GICD_IGROUPR) are ignored.
+
Errors:
- -ENXIO: Getting or setting this register is not yet supported
- -EBUSY: One or more VCPUs are running
- -EINVAL: Invalid vcpu_index supplied
+
+ ======= =====================================================
+ -ENXIO Getting or setting this register is not yet supported
+ -EBUSY One or more VCPUs are running
+ -EINVAL Invalid vcpu_index supplied
+ ======= =====================================================
KVM_DEV_ARM_VGIC_GRP_CPU_REGS
- Attributes:
- The attr field of kvm_device_attr encodes two values:
- bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 |
- values: | reserved | vcpu_index | offset |
+ Attributes:
+
+ The attr field of kvm_device_attr encodes two values::
+
+ bits: | 63 .... 40 | 39 .. 32 | 31 .... 0 |
+ values: | reserved | vcpu_index | offset |
All CPU interface regs are (rw, 32-bit)
@@ -101,27 +118,39 @@ Groups:
value left by 3 places to obtain the actual priority mask level.
Errors:
- -ENXIO: Getting or setting this register is not yet supported
- -EBUSY: One or more VCPUs are running
- -EINVAL: Invalid vcpu_index supplied
+
+ ======= =====================================================
+ -ENXIO Getting or setting this register is not yet supported
+ -EBUSY One or more VCPUs are running
+ -EINVAL Invalid vcpu_index supplied
+ ======= =====================================================
KVM_DEV_ARM_VGIC_GRP_NR_IRQS
- Attributes:
+ Attributes:
+
A value describing the number of interrupts (SGI, PPI and SPI) for
this GIC instance, ranging from 64 to 1024, in increments of 32.
Errors:
- -EINVAL: Value set is out of the expected range
- -EBUSY: Value has already be set, or GIC has already been initialized
- with default values.
+
+ ======= =============================================================
+ -EINVAL Value set is out of the expected range
+ -EBUSY Value has already been set, or GIC has already been initialized
+ with default values.
+ ======= =============================================================
KVM_DEV_ARM_VGIC_GRP_CTRL
- Attributes:
+ Attributes:
+
KVM_DEV_ARM_VGIC_CTRL_INIT
request the initialization of the VGIC or ITS, no additional parameter
in kvm_device_attr.addr.
+
Errors:
- -ENXIO: VGIC not properly configured as required prior to calling
- this attribute
- -ENODEV: no online VCPU
- -ENOMEM: memory shortage when allocating vgic internal data
+
+ ======= =========================================================
+ -ENXIO VGIC not properly configured as required prior to calling
+ this attribute
+ -ENODEV no online VCPU
+ -ENOMEM memory shortage when allocating vgic internal data
+ ======= =========================================================
diff --git a/Documentation/virt/kvm/devices/index.rst b/Documentation/virt/kvm/devices/index.rst
new file mode 100644
index 000000000000..192cda7405c8
--- /dev/null
+++ b/Documentation/virt/kvm/devices/index.rst
@@ -0,0 +1,19 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=======
+Devices
+=======
+
+.. toctree::
+ :maxdepth: 2
+
+ arm-vgic-its
+ arm-vgic
+ arm-vgic-v3
+ mpic
+ s390_flic
+ vcpu
+ vfio
+ vm
+ xics
+ xive
diff --git a/Documentation/virt/kvm/devices/mpic.txt b/Documentation/virt/kvm/devices/mpic.rst
index 8257397adc3c..55cefe030d41 100644
--- a/Documentation/virt/kvm/devices/mpic.txt
+++ b/Documentation/virt/kvm/devices/mpic.rst
@@ -1,9 +1,13 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
MPIC interrupt controller
=========================
Device types supported:
- KVM_DEV_TYPE_FSL_MPIC_20 Freescale MPIC v2.0
- KVM_DEV_TYPE_FSL_MPIC_42 Freescale MPIC v4.2
+
+ - KVM_DEV_TYPE_FSL_MPIC_20 Freescale MPIC v2.0
+ - KVM_DEV_TYPE_FSL_MPIC_42 Freescale MPIC v4.2
Only one MPIC instance, of any type, may be instantiated. The created
MPIC will act as the system interrupt controller, connecting to each
@@ -11,7 +15,8 @@ vcpu's interrupt inputs.
Groups:
KVM_DEV_MPIC_GRP_MISC
- Attributes:
+ Attributes:
+
KVM_DEV_MPIC_BASE_ADDR (rw, 64-bit)
Base address of the 256 KiB MPIC register space. Must be
naturally aligned. A value of zero disables the mapping.
diff --git a/Documentation/virt/kvm/devices/s390_flic.txt b/Documentation/virt/kvm/devices/s390_flic.rst
index a4e20a090174..954190da7d04 100644
--- a/Documentation/virt/kvm/devices/s390_flic.txt
+++ b/Documentation/virt/kvm/devices/s390_flic.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================================
FLIC (floating interrupt controller)
====================================
@@ -31,8 +34,10 @@ Groups:
Copies all floating interrupts into a buffer provided by userspace.
When the buffer is too small it returns -ENOMEM, which is the indication
for userspace to try again with a bigger buffer.
+
-ENOBUFS is returned when the allocation of a kernelspace buffer has
failed.
+
-EFAULT is returned when copying data to userspace failed.
All interrupts remain pending, i.e. are not deleted from the list of
currently pending interrupts.
@@ -60,38 +65,41 @@ Groups:
KVM_DEV_FLIC_ADAPTER_REGISTER
Register an I/O adapter interrupt source. Takes a kvm_s390_io_adapter
- describing the adapter to register:
+ describing the adapter to register::
-struct kvm_s390_io_adapter {
- __u32 id;
- __u8 isc;
- __u8 maskable;
- __u8 swap;
- __u8 flags;
-};
+ struct kvm_s390_io_adapter {
+ __u32 id;
+ __u8 isc;
+ __u8 maskable;
+ __u8 swap;
+ __u8 flags;
+ };
id contains the unique id for the adapter, isc the I/O interruption subclass
to use, maskable whether this adapter may be masked (interrupts turned off),
swap whether the indicators need to be byte swapped, and flags contains
further characteristics of the adapter.
+
Currently defined values for 'flags' are:
+
- KVM_S390_ADAPTER_SUPPRESSIBLE: adapter is subject to AIS
(adapter-interrupt-suppression) facility. This flag only has an effect if
the AIS capability is enabled.
+
Unknown flag values are ignored.
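
A minimal sketch of registering such an adapter on an s390 build; ``flic_fd``, the id and the isc value are assumptions. Note that for FLIC the operation is selected via the group field of kvm_device_attr::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int register_adapter(int flic_fd)
    {
        struct kvm_s390_io_adapter adapter = {
            .id       = 0,                              /* unique adapter id */
            .isc      = 3,                              /* assumed subclass  */
            .maskable = 1,
            .flags    = KVM_S390_ADAPTER_SUPPRESSIBLE,  /* subject to AIS    */
        };
        struct kvm_device_attr attr = {
            .group = KVM_DEV_FLIC_ADAPTER_REGISTER,
            .addr  = (__u64)(unsigned long)&adapter,
        };
        return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
    }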
KVM_DEV_FLIC_ADAPTER_MODIFY
Modifies attributes of an existing I/O adapter interrupt source. Takes
- a kvm_s390_io_adapter_req specifying the adapter and the operation:
+ a kvm_s390_io_adapter_req specifying the adapter and the operation::
-struct kvm_s390_io_adapter_req {
- __u32 id;
- __u8 type;
- __u8 mask;
- __u16 pad0;
- __u64 addr;
-};
+ struct kvm_s390_io_adapter_req {
+ __u32 id;
+ __u8 type;
+ __u8 mask;
+ __u16 pad0;
+ __u64 addr;
+ };
id specifies the adapter and type the operation. The supported operations
are:
@@ -103,8 +111,9 @@ struct kvm_s390_io_adapter_req {
perform a gmap translation for the guest address provided in addr,
pin a userspace page for the translated address and add it to the
list of mappings
- Note: A new mapping will be created unconditionally; therefore,
- the calling code should avoid making duplicate mappings.
+
+ .. note:: A new mapping will be created unconditionally; therefore,
+ the calling code should avoid making duplicate mappings.
KVM_S390_IO_ADAPTER_UNMAP
release a userspace page for the translated address specified in addr
@@ -112,16 +121,17 @@ struct kvm_s390_io_adapter_req {
KVM_DEV_FLIC_AISM
modify the adapter-interruption-suppression mode for a given isc if the
- AIS capability is enabled. Takes a kvm_s390_ais_req describing:
+ AIS capability is enabled. Takes a kvm_s390_ais_req describing::
-struct kvm_s390_ais_req {
- __u8 isc;
- __u16 mode;
-};
+ struct kvm_s390_ais_req {
+ __u8 isc;
+ __u16 mode;
+ };
isc contains the target I/O interruption subclass, mode the target
adapter-interruption-suppression mode. The following modes are
currently supported:
+
- KVM_S390_AIS_MODE_ALL: ALL-Interruptions Mode, i.e. airq injection
is always allowed;
- KVM_S390_AIS_MODE_SINGLE: SINGLE-Interruption Mode, i.e. airq
@@ -139,12 +149,12 @@ struct kvm_s390_ais_req {
KVM_DEV_FLIC_AISM_ALL
Gets or sets the adapter-interruption-suppression mode for all ISCs. Takes
- a kvm_s390_ais_all describing:
+ a kvm_s390_ais_all describing::
-struct kvm_s390_ais_all {
- __u8 simm; /* Single-Interruption-Mode mask */
- __u8 nimm; /* No-Interruption-Mode mask *
-};
+ struct kvm_s390_ais_all {
+ __u8 simm; /* Single-Interruption-Mode mask */
+ __u8 nimm; /* No-Interruption-Mode mask */
+ };
simm contains Single-Interruption-Mode mask for all ISCs, nimm contains
No-Interruption-Mode mask for all ISCs. Each bit in simm and nimm corresponds
@@ -159,5 +169,5 @@ ENXIO, as specified in the API documentation). It is not possible to conclude
that a FLIC operation is unavailable based on the error code resulting from a
usage attempt.
-Note: The KVM_DEV_FLIC_CLEAR_IO_IRQ ioctl will return EINVAL in case a zero
-schid is specified.
+.. note:: The KVM_DEV_FLIC_CLEAR_IO_IRQ ioctl will return EINVAL in case a
+ zero schid is specified.
diff --git a/Documentation/virt/kvm/devices/vcpu.rst b/Documentation/virt/kvm/devices/vcpu.rst
new file mode 100644
index 000000000000..9963e680770a
--- /dev/null
+++ b/Documentation/virt/kvm/devices/vcpu.rst
@@ -0,0 +1,114 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
+Generic vcpu interface
+======================
+
+The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
+KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct
+kvm_device_attr as other devices, but targets VCPU-wide settings and controls.
+
+The groups and attributes per virtual cpu, if any, are architecture specific.
+
+1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL
+==================================
+
+:Architectures: ARM64
+
+1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ
+---------------------------------------
+
+:Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a
+ pointer to an int
+
+Returns:
+
+ ======= ========================================================
+ -EBUSY The PMU overflow interrupt is already set
+ -ENXIO The overflow interrupt not set when attempting to get it
+ -ENODEV PMUv3 not supported
+ -EINVAL Invalid PMU overflow interrupt number supplied or
+ trying to set the IRQ number without using an in-kernel
+ irqchip.
+ ======= ========================================================
+
+A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
+number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt
+type must be same for each vcpu. As a PPI, the interrupt number is the same for
+all vcpus, while as an SPI it must be a separate number per vcpu.
+
+1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
+---------------------------------------
+
+:Parameters: no additional parameter in kvm_device_attr.addr
+
+Returns:
+
+ ======= ======================================================
+ -ENODEV PMUv3 not supported or GIC not initialized
+ -ENXIO PMUv3 not properly configured or in-kernel irqchip not
+ configured as required prior to calling this attribute
+ -EBUSY PMUv3 already initialized
+ ======= ======================================================
+
+Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
+virtual GIC implementation, this must be done after initializing the in-kernel
+irqchip.
+
+
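+
+Putting 1.1 and 1.2 together, a sketch of the expected call sequence on the
+vcpu fd; ``vcpu_fd``, the helper name and the PPI number are assumptions::
+
+    #include <linux/kvm.h>
+    #include <sys/ioctl.h>
+
+    static int setup_pmu(int vcpu_fd)
+    {
+        int irq = 23;                      /* an assumed PPI (16 <= intid < 32) */
+        struct kvm_device_attr attr = {
+            .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+            .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
+            .addr  = (__u64)(unsigned long)&irq,   /* pointer to an int */
+        };
+
+        if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
+            return -1;
+
+        attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;      /* after the irqchip is ready */
+        attr.addr = 0;
+        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
+    }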
+2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
+=================================
+
+:Architectures: ARM, ARM64
+
+2.1. ATTRIBUTES: KVM_ARM_VCPU_TIMER_IRQ_VTIMER, KVM_ARM_VCPU_TIMER_IRQ_PTIMER
+-----------------------------------------------------------------------------
+
+:Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
+ pointer to an int
+
+Returns:
+
+ ======= =================================
+ -EINVAL Invalid timer interrupt number
+ -EBUSY One or more VCPUs has already run
+ ======= =================================
+
+A value describing the architected timer interrupt number when connected to an
+in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
+attribute overrides the default values (see below).
+
+============================= ==========================================
+KVM_ARM_VCPU_TIMER_IRQ_VTIMER The EL1 virtual timer intid (default: 27)
+KVM_ARM_VCPU_TIMER_IRQ_PTIMER The EL1 physical timer intid (default: 30)
+============================= ==========================================
+
+Setting the same PPI for different timers will prevent the VCPUs from running.
+Setting the interrupt number on a VCPU configures all VCPUs created at that
+time to use the number provided for a given timer, overwriting any previously
+configured values on other VCPUs. Userspace should configure the interrupt
+numbers on at least one VCPU after creating all VCPUs and before running any
+VCPUs.
+
+3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL
+==================================
+
+:Architectures: ARM64
+
+3.1 ATTRIBUTE: KVM_ARM_VCPU_PVTIME_IPA
+--------------------------------------
+
+:Parameters: 64-bit base address
+
+Returns:
+
+ ======= ======================================
+ -ENXIO Stolen time not implemented
+ -EEXIST Base address already set for this VCPU
+ -EINVAL Base address not 64 byte aligned
+ ======= ======================================
+
+Specifies the base address of the stolen time structure for this VCPU. The
+base address must be 64 byte aligned and exist within a valid guest memory
+region. See Documentation/virt/kvm/arm/pvtime.txt for more information
+including the layout of the stolen time structure.
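+
+A sketch, assuming an open vcpu fd and a 64 byte aligned IPA inside a
+registered guest memory region::
+
+    #include <linux/kvm.h>
+    #include <sys/ioctl.h>
+
+    static int set_pvtime_base(int vcpu_fd, __u64 ipa)
+    {
+        struct kvm_device_attr attr = {
+            .group = KVM_ARM_VCPU_PVTIME_CTRL,
+            .attr  = KVM_ARM_VCPU_PVTIME_IPA,
+            .addr  = (__u64)(unsigned long)&ipa,   /* 64-bit base address */
+        };
+        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
+    }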
diff --git a/Documentation/virt/kvm/devices/vcpu.txt b/Documentation/virt/kvm/devices/vcpu.txt
deleted file mode 100644
index 6f3bd64a05b0..000000000000
--- a/Documentation/virt/kvm/devices/vcpu.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-Generic vcpu interface
-====================================
-
-The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
-KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct
-kvm_device_attr as other devices, but targets VCPU-wide settings and controls.
-
-The groups and attributes per virtual cpu, if any, are architecture specific.
-
-1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL
-Architectures: ARM64
-
-1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ
-Parameters: in kvm_device_attr.addr the address for PMU overflow interrupt is a
- pointer to an int
-Returns: -EBUSY: The PMU overflow interrupt is already set
- -ENXIO: The overflow interrupt not set when attempting to get it
- -ENODEV: PMUv3 not supported
- -EINVAL: Invalid PMU overflow interrupt number supplied or
- trying to set the IRQ number without using an in-kernel
- irqchip.
-
-A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
-number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt
-type must be same for each vcpu. As a PPI, the interrupt number is the same for
-all vcpus, while as an SPI it must be a separate number per vcpu.
-
-1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
-Parameters: no additional parameter in kvm_device_attr.addr
-Returns: -ENODEV: PMUv3 not supported or GIC not initialized
- -ENXIO: PMUv3 not properly configured or in-kernel irqchip not
- configured as required prior to calling this attribute
- -EBUSY: PMUv3 already initialized
-
-Request the initialization of the PMUv3. If using the PMUv3 with an in-kernel
-virtual GIC implementation, this must be done after initializing the in-kernel
-irqchip.
-
-
-2. GROUP: KVM_ARM_VCPU_TIMER_CTRL
-Architectures: ARM,ARM64
-
-2.1. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_VTIMER
-2.2. ATTRIBUTE: KVM_ARM_VCPU_TIMER_IRQ_PTIMER
-Parameters: in kvm_device_attr.addr the address for the timer interrupt is a
- pointer to an int
-Returns: -EINVAL: Invalid timer interrupt number
- -EBUSY: One or more VCPUs has already run
-
-A value describing the architected timer interrupt number when connected to an
-in-kernel virtual GIC. These must be a PPI (16 <= intid < 32). Setting the
-attribute overrides the default values (see below).
-
-KVM_ARM_VCPU_TIMER_IRQ_VTIMER: The EL1 virtual timer intid (default: 27)
-KVM_ARM_VCPU_TIMER_IRQ_PTIMER: The EL1 physical timer intid (default: 30)
-
-Setting the same PPI for different timers will prevent the VCPUs from running.
-Setting the interrupt number on a VCPU configures all VCPUs created at that
-time to use the number provided for a given timer, overwriting any previously
-configured values on other VCPUs. Userspace should configure the interrupt
-numbers on at least one VCPU after creating all VCPUs and before running any
-VCPUs.
-
-3. GROUP: KVM_ARM_VCPU_PVTIME_CTRL
-Architectures: ARM64
-
-3.1 ATTRIBUTE: KVM_ARM_VCPU_PVTIME_IPA
-Parameters: 64-bit base address
-Returns: -ENXIO: Stolen time not implemented
- -EEXIST: Base address already set for this VCPU
- -EINVAL: Base address not 64 byte aligned
-
-Specifies the base address of the stolen time structure for this VCPU. The
-base address must be 64 byte aligned and exist within a valid guest memory
-region. See Documentation/virt/kvm/arm/pvtime.txt for more information
-including the layout of the stolen time structure.
diff --git a/Documentation/virt/kvm/devices/vfio.txt b/Documentation/virt/kvm/devices/vfio.rst
index 528c77c8022c..2d20dc561069 100644
--- a/Documentation/virt/kvm/devices/vfio.txt
+++ b/Documentation/virt/kvm/devices/vfio.rst
@@ -1,8 +1,12 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
VFIO virtual device
===================
Device types supported:
- KVM_DEV_TYPE_VFIO
+
+ - KVM_DEV_TYPE_VFIO
Only one VFIO instance may be created per VM. The created device
tracks VFIO groups in use by the VM and features of those groups
@@ -23,14 +27,15 @@ KVM_DEV_VFIO_GROUP attributes:
for the VFIO group.
KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE: attaches a guest visible TCE table
allocated by sPAPR KVM.
- kvm_device_attr.addr points to a struct:
+ kvm_device_attr.addr points to a struct::
+
+ struct kvm_vfio_spapr_tce {
+ __s32 groupfd;
+ __s32 tablefd;
+ };
- struct kvm_vfio_spapr_tce {
- __s32 groupfd;
- __s32 tablefd;
- };
+ where:
- where
- @groupfd is a file descriptor for a VFIO group;
- @tablefd is a file descriptor for a TCE table allocated via
- KVM_CREATE_SPAPR_TCE.
+ - @groupfd is a file descriptor for a VFIO group;
+ - @tablefd is a file descriptor for a TCE table allocated via
+ KVM_CREATE_SPAPR_TCE.
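
For illustration, a sketch of attaching a TCE table; the three fds are assumptions and must already be open or created as described above::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int attach_spapr_tce(int kvm_vfio_fd, __s32 groupfd, __s32 tablefd)
    {
        struct kvm_vfio_spapr_tce param = {
            .groupfd = groupfd,   /* fd of the VFIO group          */
            .tablefd = tablefd,   /* from KVM_CREATE_SPAPR_TCE     */
        };
        struct kvm_device_attr attr = {
            .group = KVM_DEV_VFIO_GROUP,
            .attr  = KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE,
            .addr  = (__u64)(unsigned long)&param,
        };
        return ioctl(kvm_vfio_fd, KVM_SET_DEVICE_ATTR, &attr);
    }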
diff --git a/Documentation/virt/kvm/devices/vm.txt b/Documentation/virt/kvm/devices/vm.rst
index 4ffb82b02468..0aa5b1cfd700 100644
--- a/Documentation/virt/kvm/devices/vm.txt
+++ b/Documentation/virt/kvm/devices/vm.rst
@@ -1,5 +1,8 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+====================
Generic vm interface
-====================================
+====================
The virtual machine "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same
@@ -10,30 +13,38 @@ The groups and attributes per virtual machine, if any, are architecture
specific.
1. GROUP: KVM_S390_VM_MEM_CTRL
-Architectures: s390
+==============================
+
+:Architectures: s390
1.1. ATTRIBUTE: KVM_S390_VM_MEM_ENABLE_CMMA
-Parameters: none
-Returns: -EBUSY if a vcpu is already defined, otherwise 0
+-------------------------------------------
+
+:Parameters: none
+:Returns: -EBUSY if a vcpu is already defined, otherwise 0
Enables Collaborative Memory Management Assist (CMMA) for the virtual machine.
1.2. ATTRIBUTE: KVM_S390_VM_MEM_CLR_CMMA
-Parameters: none
-Returns: -EINVAL if CMMA was not enabled
- 0 otherwise
+----------------------------------------
+
+:Parameters: none
+:Returns: -EINVAL if CMMA was not enabled;
+ 0 otherwise
Clear the CMMA status for all guest pages, so any pages the guest marked
as unused are again used and may not be reclaimed by the host.
1.3. ATTRIBUTE KVM_S390_VM_MEM_LIMIT_SIZE
-Parameters: in attr->addr the address for the new limit of guest memory
-Returns: -EFAULT if the given address is not accessible
- -EINVAL if the virtual machine is of type UCONTROL
- -E2BIG if the given guest memory is to big for that machine
- -EBUSY if a vcpu is already defined
- -ENOMEM if not enough memory is available for a new shadow guest mapping
- 0 otherwise
+-----------------------------------------
+
+:Parameters: in attr->addr the address for the new limit of guest memory
+:Returns: -EFAULT if the given address is not accessible;
+ -EINVAL if the virtual machine is of type UCONTROL;
+ -E2BIG if the given guest memory is too big for that machine;
+ -EBUSY if a vcpu is already defined;
+ -ENOMEM if not enough memory is available for a new shadow guest mapping;
+ 0 otherwise.
Allows userspace to query the actual limit and set a new limit for
the maximum guest memory size. The limit will be rounded up to
@@ -42,78 +53,92 @@ the number of page table levels. In the case that there is no limit we will set
the limit to KVM_S390_NO_MEM_LIMIT (U64_MAX).
2. GROUP: KVM_S390_VM_CPU_MODEL
-Architectures: s390
+===============================
+
+:Architectures: s390
2.1. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE (r/o)
+---------------------------------------------
-Allows user space to retrieve machine and kvm specific cpu related information:
+Allows user space to retrieve machine and kvm specific cpu related information::
-struct kvm_s390_vm_cpu_machine {
+ struct kvm_s390_vm_cpu_machine {
__u64 cpuid; # CPUID of host
__u32 ibc; # IBC level range offered by host
__u8 pad[4];
__u64 fac_mask[256]; # set of cpu facilities enabled by KVM
__u64 fac_list[256]; # set of cpu facilities offered by host
-}
+ }
-Parameters: address of buffer to store the machine related cpu data
- of type struct kvm_s390_vm_cpu_machine*
-Returns: -EFAULT if the given address is not accessible from kernel space
- -ENOMEM if not enough memory is available to process the ioctl
- 0 in case of success
+:Parameters: address of buffer to store the machine related cpu data
+ of type struct kvm_s390_vm_cpu_machine*
+:Returns: -EFAULT if the given address is not accessible from kernel space;
+ -ENOMEM if not enough memory is available to process the ioctl;
+ 0 in case of success.
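
A sketch of fetching this data on the VM fd; ``vm_fd`` and the helper name are assumptions, and the struct comes from the s390 uapi headers::

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int get_cpu_machine(int vm_fd, struct kvm_s390_vm_cpu_machine *m)
    {
        struct kvm_device_attr attr = {
            .group = KVM_S390_VM_CPU_MODEL,
            .attr  = KVM_S390_VM_CPU_MACHINE,
            .addr  = (__u64)(unsigned long)m,
        };
        return ioctl(vm_fd, KVM_GET_DEVICE_ATTR, &attr);
    }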
2.2. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR (r/w)
+-----------------------------------------------
-Allows user space to retrieve or request to change cpu related information for a vcpu:
+Allows user space to retrieve or request to change cpu related information for a vcpu::
-struct kvm_s390_vm_cpu_processor {
+ struct kvm_s390_vm_cpu_processor {
__u64 cpuid; # CPUID currently (to be) used by this vcpu
__u16 ibc; # IBC level currently (to be) used by this vcpu
__u8 pad[6];
__u64 fac_list[256]; # set of cpu facilities currently (to be) used
- # by this vcpu
-}
+ # by this vcpu
+ }
KVM does not enforce or limit the cpu model data in any form. Take the information
retrieved by means of KVM_S390_VM_CPU_MACHINE as hint for reasonable configuration
setups. Instruction interceptions triggered by additionally set facility bits that
are not handled by KVM need to be implemented in the VM driver code.
-Parameters: address of buffer to store/set the processor related cpu
- data of type struct kvm_s390_vm_cpu_processor*.
-Returns: -EBUSY in case 1 or more vcpus are already activated (only in write case)
- -EFAULT if the given address is not accessible from kernel space
- -ENOMEM if not enough memory is available to process the ioctl
- 0 in case of success
+:Parameters: address of buffer to store/set the processor related cpu
+ data of type struct kvm_s390_vm_cpu_processor*.
+:Returns: -EBUSY in case 1 or more vcpus are already activated (only in write case);
+ -EFAULT if the given address is not accessible from kernel space;
+ -ENOMEM if not enough memory is available to process the ioctl;
+ 0 in case of success.
+
+.. _KVM_S390_VM_CPU_MACHINE_FEAT:
2.3. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_FEAT (r/o)
+--------------------------------------------------
Allows user space to retrieve available cpu features. A feature is available if
provided by the hardware and supported by kvm. In theory, cpu features could
even be completely emulated by kvm.
-struct kvm_s390_vm_cpu_feat {
- __u64 feat[16]; # Bitmap (1 = feature available), MSB 0 bit numbering
-};
+::
-Parameters: address of a buffer to load the feature list from.
-Returns: -EFAULT if the given address is not accessible from kernel space.
- 0 in case of success.
+ struct kvm_s390_vm_cpu_feat {
+ __u64 feat[16]; # Bitmap (1 = feature available), MSB 0 bit numbering
+ };
+
+:Parameters: address of a buffer to load the feature list from.
+:Returns: -EFAULT if the given address is not accessible from kernel space;
+ 0 in case of success.
2.4. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_FEAT (r/w)
+----------------------------------------------------
Allows user space to retrieve or change enabled cpu features for all VCPUs of a
VM. Features that are not available cannot be enabled.
-See 2.3. for a description of the parameter struct.
+See :ref:`KVM_S390_VM_CPU_MACHINE_FEAT` for
+a description of the parameter struct.
-Parameters: address of a buffer to store/load the feature list from.
-Returns: -EFAULT if the given address is not accessible from kernel space.
- -EINVAL if a cpu feature that is not available is to be enabled.
- -EBUSY if at least one VCPU has already been defined.
+:Parameters: address of a buffer to store/load the feature list from.
+:Returns: -EFAULT if the given address is not accessible from kernel space;
+ -EINVAL if a cpu feature that is not available is to be enabled;
+ -EBUSY if at least one VCPU has already been defined;
0 in case of success.
+.. _KVM_S390_VM_CPU_MACHINE_SUBFUNC:
+
2.5. ATTRIBUTE: KVM_S390_VM_CPU_MACHINE_SUBFUNC (r/o)
+-----------------------------------------------------
Allows user space to retrieve available cpu subfunctions without any filtering
done by a set IBC. These subfunctions are indicated to the guest VCPU via
@@ -126,7 +151,9 @@ contained in the returned struct. If the affected instruction
indicates subfunctions via a "test bit" mechanism, the subfunction codes are
contained in the returned struct in MSB 0 bit numbering.
-struct kvm_s390_vm_cpu_subfunc {
+::
+
+ struct kvm_s390_vm_cpu_subfunc {
u8 plo[32]; # always valid (ESA/390 feature)
u8 ptff[16]; # valid with TOD-clock steering
u8 kmac[16]; # valid with Message-Security-Assist
@@ -143,13 +170,14 @@ struct kvm_s390_vm_cpu_subfunc {
u8 kma[16]; # valid with Message-Security-Assist-Extension 8
u8 kdsa[16]; # valid with Message-Security-Assist-Extension 9
u8 reserved[1792]; # reserved for future instructions
-};
+ };
-Parameters: address of a buffer to load the subfunction blocks from.
-Returns: -EFAULT if the given address is not accessible from kernel space.
+:Parameters: address of a buffer to load the subfunction blocks from.
+:Returns: -EFAULT if the given address is not accessible from kernel space;
0 in case of success.
2.6. ATTRIBUTE: KVM_S390_VM_CPU_PROCESSOR_SUBFUNC (r/w)
+-------------------------------------------------------
Allows user space to retrieve or change cpu subfunctions to be indicated for
all VCPUs of a VM. This attribute will only be available if kernel and
@@ -164,107 +192,125 @@ As long as no data has been written, a read will fail. The IBC will be used
to determine available subfunctions in this case, this will guarantee backward
compatibility.
-See 2.5. for a description of the parameter struct.
+See :ref:`KVM_S390_VM_CPU_MACHINE_SUBFUNC` for a
+description of the parameter struct.
-Parameters: address of a buffer to store/load the subfunction blocks from.
-Returns: -EFAULT if the given address is not accessible from kernel space.
- -EINVAL when reading, if there was no write yet.
- -EBUSY if at least one VCPU has already been defined.
+:Parameters: address of a buffer to store/load the subfunction blocks from.
+:Returns: -EFAULT if the given address is not accessible from kernel space;
+ -EINVAL when reading, if there was no write yet;
+ -EBUSY if at least one VCPU has already been defined;
0 in case of success.
3. GROUP: KVM_S390_VM_TOD
-Architectures: s390
+=========================
+
+:Architectures: s390
3.1. ATTRIBUTE: KVM_S390_VM_TOD_HIGH
+------------------------------------
Allows user space to set/get the TOD clock extension (u8) (superseded by
KVM_S390_VM_TOD_EXT).
-Parameters: address of a buffer in user space to store the data (u8) to
-Returns: -EFAULT if the given address is not accessible from kernel space
+:Parameters: address of a buffer in user space to store the data (u8) to
+:Returns: -EFAULT if the given address is not accessible from kernel space;
-EINVAL if setting the TOD clock extension to != 0 is not supported
3.2. ATTRIBUTE: KVM_S390_VM_TOD_LOW
+-----------------------------------
Allows user space to set/get bits 0-63 of the TOD clock register as defined in
the POP (u64).
-Parameters: address of a buffer in user space to store the data (u64) to
-Returns: -EFAULT if the given address is not accessible from kernel space
+:Parameters: address of a buffer in user space to store the data (u64) to
+:Returns: -EFAULT if the given address is not accessible from kernel space
3.3. ATTRIBUTE: KVM_S390_VM_TOD_EXT
+-----------------------------------
+
Allows user space to set/get bits 0-63 of the TOD clock register as defined in
the POP (u64). If the guest CPU model supports the TOD clock extension (u8), it
also allows user space to get/set it. If the guest CPU model does not support
it, it is stored as 0 and not allowed to be set to a value != 0.
-Parameters: address of a buffer in user space to store the data
- (kvm_s390_vm_tod_clock) to
-Returns: -EFAULT if the given address is not accessible from kernel space
+:Parameters: address of a buffer in user space to store the data
+ (kvm_s390_vm_tod_clock) to
+:Returns: -EFAULT if the given address is not accessible from kernel space;
-EINVAL if setting the TOD clock extension to != 0 is not supported
4. GROUP: KVM_S390_VM_CRYPTO
-Architectures: s390
+============================
+
+:Architectures: s390
4.1. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_AES_KW (w/o)
+------------------------------------------------------
Allows user space to enable aes key wrapping, including generating a new
wrapping key.
-Parameters: none
-Returns: 0
+:Parameters: none
+:Returns: 0
4.2. ATTRIBUTE: KVM_S390_VM_CRYPTO_ENABLE_DEA_KW (w/o)
+------------------------------------------------------
Allows user space to enable dea key wrapping, including generating a new
wrapping key.
-Parameters: none
-Returns: 0
+:Parameters: none
+:Returns: 0
4.3. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_AES_KW (w/o)
+-------------------------------------------------------
Allows user space to disable aes key wrapping, clearing the wrapping key.
-Parameters: none
-Returns: 0
+:Parameters: none
+:Returns: 0
4.4. ATTRIBUTE: KVM_S390_VM_CRYPTO_DISABLE_DEA_KW (w/o)
+-------------------------------------------------------
Allows user space to disable dea key wrapping, clearing the wrapping key.
-Parameters: none
-Returns: 0
+:Parameters: none
+:Returns: 0
5. GROUP: KVM_S390_VM_MIGRATION
-Architectures: s390
+===============================
+
+:Architectures: s390
5.1. ATTRIBUTE: KVM_S390_VM_MIGRATION_STOP (w/o)
+------------------------------------------------
Allows userspace to stop migration mode, needed for PGSTE migration.
Setting this attribute when migration mode is not active will have no
effects.
-Parameters: none
-Returns: 0
+:Parameters: none
+:Returns: 0
5.2. ATTRIBUTE: KVM_S390_VM_MIGRATION_START (w/o)
+-------------------------------------------------
Allows userspace to start migration mode, needed for PGSTE migration.
Setting this attribute when migration mode is already active will have
no effects.
-Parameters: none
-Returns: -ENOMEM if there is not enough free memory to start migration mode
- -EINVAL if the state of the VM is invalid (e.g. no memory defined)
+:Parameters: none
+:Returns: -ENOMEM if there is not enough free memory to start migration mode;
+ -EINVAL if the state of the VM is invalid (e.g. no memory defined);
0 in case of success.
5.3. ATTRIBUTE: KVM_S390_VM_MIGRATION_STATUS (r/o)
+--------------------------------------------------
Allows userspace to query the status of migration mode.
-Parameters: address of a buffer in user space to store the data (u64) to;
- the data itself is either 0 if migration mode is disabled or 1
- if it is enabled
-Returns: -EFAULT if the given address is not accessible from kernel space
+:Parameters: address of a buffer in user space to store the data (u64) to;
+ the data itself is either 0 if migration mode is disabled or 1
+ if it is enabled
+:Returns: -EFAULT if the given address is not accessible from kernel space;
0 in case of success.
diff --git a/Documentation/virt/kvm/devices/xics.txt b/Documentation/virt/kvm/devices/xics.rst
index 423332dda7bc..2d6927e0b776 100644
--- a/Documentation/virt/kvm/devices/xics.txt
+++ b/Documentation/virt/kvm/devices/xics.rst
@@ -1,20 +1,31 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=========================
XICS interrupt controller
+=========================
Device type supported: KVM_DEV_TYPE_XICS
Groups:
1. KVM_DEV_XICS_GRP_SOURCES
- Attributes: One per interrupt source, indexed by the source number.
+ Attributes:
+ One per interrupt source, indexed by the source number.
2. KVM_DEV_XICS_GRP_CTRL
- Attributes:
- 2.1 KVM_DEV_XICS_NR_SERVERS (write only)
+ Attributes:
+
+ 2.1 KVM_DEV_XICS_NR_SERVERS (write only)
+
The kvm_device_attr.addr points to a __u32 value which is the number of
interrupt server numbers (ie, highest possible vcpu id plus one).
+
Errors:
- -EINVAL: Value greater than KVM_MAX_VCPU_ID.
- -EFAULT: Invalid user pointer for attr->addr.
- -EBUSY: A vcpu is already connected to the device.
+
+ ======= ==========================================
+ -EINVAL Value greater than KVM_MAX_VCPU_ID.
+ -EFAULT Invalid user pointer for attr->addr.
+ -EBUSY A vcpu is already connected to the device.
+ ======= ==========================================
This device emulates the XICS (eXternal Interrupt Controller
Specification) defined in PAPR. The XICS has a set of interrupt
@@ -53,24 +64,29 @@ the interrupt source number. The 64 bit state word has the following
bitfields, starting from the least-significant end of the word:
* Destination (server number), 32 bits
+
This specifies where the interrupt should be sent, and is the
interrupt server number specified for the destination vcpu.
* Priority, 8 bits
+
This is the priority specified for this interrupt source, where 0 is
the highest priority and 255 is the lowest. An interrupt with a
priority of 255 will never be delivered.
* Level sensitive flag, 1 bit
+
This bit is 1 for a level-sensitive interrupt source, or 0 for
edge-sensitive (or MSI).
* Masked flag, 1 bit
+
This bit is set to 1 if the interrupt is masked (cannot be delivered
regardless of its priority), for example by the ibm,int-off RTAS
call, or 0 if it is not masked.
* Pending flag, 1 bit
+
This bit is 1 if the source has a pending interrupt, otherwise 0.
Only one XICS instance may be created per VM.
diff --git a/Documentation/virt/kvm/devices/xive.txt b/Documentation/virt/kvm/devices/xive.rst
index f5d1d6b5af61..8bdf3dc38f01 100644
--- a/Documentation/virt/kvm/devices/xive.txt
+++ b/Documentation/virt/kvm/devices/xive.rst
@@ -1,8 +1,11 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================================================
POWER9 eXternal Interrupt Virtualization Engine (XIVE Gen1)
-==========================================================
+===========================================================
Device types supported:
- KVM_DEV_TYPE_XIVE POWER9 XIVE Interrupt Controller generation 1
+ - KVM_DEV_TYPE_XIVE POWER9 XIVE Interrupt Controller generation 1
This device acts as a VM interrupt controller. It provides the KVM
interface to configure the interrupt sources of a VM in the underlying
@@ -64,72 +67,100 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
* Groups:
- 1. KVM_DEV_XIVE_GRP_CTRL
- Provides global controls on the device
+1. KVM_DEV_XIVE_GRP_CTRL
+ Provides global controls on the device
+
Attributes:
1.1 KVM_DEV_XIVE_RESET (write only)
Resets the interrupt controller configuration for sources and event
queues. To be used by kexec and kdump.
+
Errors: none
1.2 KVM_DEV_XIVE_EQ_SYNC (write only)
Sync all the sources and queues and mark the EQ pages dirty. This is
to make sure that a consistent memory state is captured when
migrating the VM.
+
Errors: none
1.3 KVM_DEV_XIVE_NR_SERVERS (write only)
The kvm_device_attr.addr points to a __u32 value which is the number of
interrupt server numbers (ie, highest possible vcpu id plus one).
+
Errors:
- -EINVAL: Value greater than KVM_MAX_VCPU_ID.
- -EFAULT: Invalid user pointer for attr->addr.
- -EBUSY: A vCPU is already connected to the device.
- 2. KVM_DEV_XIVE_GRP_SOURCE (write only)
- Initializes a new source in the XIVE device and mask it.
+ ======= ==========================================
+ -EINVAL Value greater than KVM_MAX_VCPU_ID.
+ -EFAULT Invalid user pointer for attr->addr.
+ -EBUSY A vCPU is already connected to the device.
+ ======= ==========================================
+
+2. KVM_DEV_XIVE_GRP_SOURCE (write only)
+ Initializes a new source in the XIVE device and masks it.
+
Attributes:
Interrupt source number (64-bit)
- The kvm_device_attr.addr points to a __u64 value:
- bits: | 63 .... 2 | 1 | 0
- values: | unused | level | type
+
+ The kvm_device_attr.addr points to a __u64 value::
+
+ bits: | 63 .... 2 | 1 | 0
+ values: | unused | level | type
+
- type: 0:MSI 1:LSI
- level: assertion level in case of an LSI.
+
Errors:
- -E2BIG: Interrupt source number is out of range
- -ENOMEM: Could not create a new source block
- -EFAULT: Invalid user pointer for attr->addr.
- -ENXIO: Could not allocate underlying HW interrupt
- 3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only)
- Configures source targeting
+ ======= ==========================================
+ -E2BIG Interrupt source number is out of range
+ -ENOMEM Could not create a new source block
+ -EFAULT Invalid user pointer for attr->addr.
+ -ENXIO Could not allocate underlying HW interrupt
+ ======= ==========================================
+
+3. KVM_DEV_XIVE_GRP_SOURCE_CONFIG (write only)
+ Configures source targeting
+
Attributes:
Interrupt source number (64-bit)
- The kvm_device_attr.addr points to a __u64 value:
- bits: | 63 .... 33 | 32 | 31 .. 3 | 2 .. 0
- values: | eisn | mask | server | priority
+
+ The kvm_device_attr.addr points to a __u64 value::
+
+ bits: | 63 .... 33 | 32 | 31 .. 3 | 2 .. 0
+ values: | eisn | mask | server | priority
+
- priority: 0-7 interrupt priority level
- server: CPU number chosen to handle the interrupt
- mask: mask flag (unused)
- eisn: Effective Interrupt Source Number
+
Errors:
- -ENOENT: Unknown source number
- -EINVAL: Not initialized source number
- -EINVAL: Invalid priority
- -EINVAL: Invalid CPU number.
- -EFAULT: Invalid user pointer for attr->addr.
- -ENXIO: CPU event queues not configured or configuration of the
- underlying HW interrupt failed
- -EBUSY: No CPU available to serve interrupt
-
- 4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write)
- Configures an event queue of a CPU
+
+ ======= =======================================================
+ -ENOENT Unknown source number
+ -EINVAL Not initialized source number
+ -EINVAL Invalid priority
+ -EINVAL Invalid CPU number.
+ -EFAULT Invalid user pointer for attr->addr.
+ -ENXIO CPU event queues not configured or configuration of the
+ underlying HW interrupt failed
+ -EBUSY No CPU available to serve interrupt
+ ======= =======================================================
+
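+
+A sketch of targeting one source with this layout; ``xive_fd`` and the helper
+name are assumptions, and the source must already have been initialized via
+KVM_DEV_XIVE_GRP_SOURCE::
+
+    #include <linux/kvm.h>
+    #include <sys/ioctl.h>
+
+    static int xive_source_config(int xive_fd, __u64 srcnum, __u64 eisn,
+                                  __u64 server, __u64 priority)
+    {
+        /* eisn [63:33] | mask [32] (unused) | server [31:3] | priority [2:0] */
+        __u64 val = (eisn << 33) | (server << 3) | (priority & 7);
+        struct kvm_device_attr attr = {
+            .group = KVM_DEV_XIVE_GRP_SOURCE_CONFIG,
+            .attr  = srcnum,                       /* interrupt source number */
+            .addr  = (__u64)(unsigned long)&val,
+        };
+        return ioctl(xive_fd, KVM_SET_DEVICE_ATTR, &attr);
+    }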
+4. KVM_DEV_XIVE_GRP_EQ_CONFIG (read-write)
+ Configures an event queue of a CPU
+
Attributes:
EQ descriptor identifier (64-bit)
- The EQ descriptor identifier is a tuple (server, priority) :
- bits: | 63 .... 32 | 31 .. 3 | 2 .. 0
- values: | unused | server | priority
- The kvm_device_attr.addr points to :
+
+ The EQ descriptor identifier is a tuple (server, priority)::
+
+ bits: | 63 .... 32 | 31 .. 3 | 2 .. 0
+ values: | unused | server | priority
+
+ The kvm_device_attr.addr points to::
+
struct kvm_ppc_xive_eq {
__u32 flags;
__u32 qshift;
@@ -138,8 +169,9 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
__u32 qindex;
__u8 pad[40];
};
+
- flags: queue flags
- KVM_XIVE_EQ_ALWAYS_NOTIFY (required)
+ KVM_XIVE_EQ_ALWAYS_NOTIFY (required)
forces notification without using the coalescing mechanism
provided by the XIVE END ESBs.
- qshift: queue size (power of 2)
@@ -147,22 +179,31 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
- qtoggle: current queue toggle bit
- qindex: current queue index
- pad: reserved for future use
+
Errors:
- -ENOENT: Invalid CPU number
- -EINVAL: Invalid priority
- -EINVAL: Invalid flags
- -EINVAL: Invalid queue size
- -EINVAL: Invalid queue address
- -EFAULT: Invalid user pointer for attr->addr.
- -EIO: Configuration of the underlying HW failed
-
- 5. KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only)
- Synchronize the source to flush event notifications
+
+ ======= =========================================
+ -ENOENT Invalid CPU number
+ -EINVAL Invalid priority
+ -EINVAL Invalid flags
+ -EINVAL Invalid queue size
+ -EINVAL Invalid queue address
+ -EFAULT Invalid user pointer for attr->addr.
+ -EIO Configuration of the underlying HW failed
+ ======= =========================================
+
+5. KVM_DEV_XIVE_GRP_SOURCE_SYNC (write only)
+ Synchronize the source to flush event notifications
+
Attributes:
Interrupt source number (64-bit)
+
Errors:
- -ENOENT: Unknown source number
- -EINVAL: Not initialized source number
+
+ ======= =============================
+ -ENOENT Unknown source number
+ -EINVAL Not initialized source number
+ ======= =============================
* VCPU state
@@ -175,11 +216,12 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
as it synthesizes the priorities of the pending interrupts. We
capture a bit more to report debug information.
- KVM_REG_PPC_VP_STATE (2 * 64bits)
- bits: | 63 .... 32 | 31 .... 0 |
- values: | TIMA word0 | TIMA word1 |
- bits: | 127 .......... 64 |
- values: | unused |
+ KVM_REG_PPC_VP_STATE (2 * 64bits)::
+
+ bits: | 63 .... 32 | 31 .... 0 |
+ values: | TIMA word0 | TIMA word1 |
+ bits: | 127 .......... 64 |
+ values: | unused |
* Migration:
@@ -196,7 +238,7 @@ the legacy interrupt mode, referred as XICS (POWER7/8).
3. Capture the state of the source targeting, the EQs configuration
and the state of thread interrupt context registers.
- Restore is similar :
+ Restore is similar:
1. Restore the EQ configuration. As targeting depends on it.
2. Restore targeting
diff --git a/Documentation/virt/kvm/halt-polling.txt b/Documentation/virt/kvm/halt-polling.rst
index 4f791b128dd2..4922e4a15f18 100644
--- a/Documentation/virt/kvm/halt-polling.txt
+++ b/Documentation/virt/kvm/halt-polling.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+===========================
The KVM halt polling system
===========================
@@ -68,7 +71,8 @@ steady state polling interval but will only really do a good job for wakeups
which come at an approximately constant rate, otherwise there will be constant
adjustment of the polling interval.
-[0] total block time: the time between when the halt polling function is
+[0] total block time:
+ the time between when the halt polling function is
invoked and a wakeup source received (irrespective of
whether the scheduler is invoked within that function).
@@ -81,31 +85,32 @@ shrunk. These variables are defined in include/linux/kvm_host.h and as module
parameters in virt/kvm/kvm_main.c, or arch/powerpc/kvm/book3s_hv.c in the
powerpc kvm-hv case.
-Module Parameter | Description | Default Value
---------------------------------------------------------------------------------
-halt_poll_ns | The global max polling | KVM_HALT_POLL_NS_DEFAULT
- | interval which defines |
- | the ceiling value of the |
- | polling interval for | (per arch value)
- | each vcpu. |
---------------------------------------------------------------------------------
-halt_poll_ns_grow | The value by which the | 2
- | halt polling interval is |
- | multiplied in the |
- | grow_halt_poll_ns() |
- | function. |
---------------------------------------------------------------------------------
-halt_poll_ns_grow_start | The initial value to grow | 10000
- | to from zero in the |
- | grow_halt_poll_ns() |
- | function. |
---------------------------------------------------------------------------------
-halt_poll_ns_shrink | The value by which the | 0
- | halt polling interval is |
- | divided in the |
- | shrink_halt_poll_ns() |
- | function. |
---------------------------------------------------------------------------------
++-----------------------+---------------------------+-------------------------+
+|Module Parameter | Description | Default Value |
++-----------------------+---------------------------+-------------------------+
+|halt_poll_ns | The global max polling | KVM_HALT_POLL_NS_DEFAULT|
+| | interval which defines | |
+| | the ceiling value of the | |
+| | polling interval for | (per arch value) |
+| | each vcpu. | |
++-----------------------+---------------------------+-------------------------+
+|halt_poll_ns_grow | The value by which the | 2 |
+| | halt polling interval is | |
+| | multiplied in the | |
+| | grow_halt_poll_ns() | |
+| | function. | |
++-----------------------+---------------------------+-------------------------+
+|halt_poll_ns_grow_start| The initial value to grow | 10000 |
+| | to from zero in the | |
+| | grow_halt_poll_ns() | |
+| | function. | |
++-----------------------+---------------------------+-------------------------+
+|halt_poll_ns_shrink | The value by which the | 0 |
+| | halt polling interval is | |
+| | divided in the | |
+| | shrink_halt_poll_ns() | |
+| | function. | |
++-----------------------+---------------------------+-------------------------+
These module parameters can be set from the debugfs files in:
@@ -117,20 +122,19 @@ Note: that these module parameters are system wide values and are not able to
Further Notes
=============
-- Care should be taken when setting the halt_poll_ns module parameter as a
-large value has the potential to drive the cpu usage to 100% on a machine which
-would be almost entirely idle otherwise. This is because even if a guest has
-wakeups during which very little work is done and which are quite far apart, if
-the period is shorter than the global max polling interval (halt_poll_ns) then
-the host will always poll for the entire block time and thus cpu utilisation
-will go to 100%.
-
-- Halt polling essentially presents a trade off between power usage and latency
-and the module parameters should be used to tune the affinity for this. Idle
-cpu time is essentially converted to host kernel time with the aim of decreasing
-latency when entering the guest.
-
-- Halt polling will only be conducted by the host when no other tasks are
-runnable on that cpu, otherwise the polling will cease immediately and
-schedule will be invoked to allow that other task to run. Thus this doesn't
-allow a guest to denial of service the cpu.
+- Care should be taken when setting the halt_poll_ns module parameter as a large value
+ has the potential to drive the cpu usage to 100% on a machine which would be almost
+ entirely idle otherwise. This is because even if a guest has wakeups during which very
+ little work is done and which are quite far apart, if the period is shorter than the
+ global max polling interval (halt_poll_ns) then the host will always poll for the
+ entire block time and thus cpu utilisation will go to 100%.
+
+- Halt polling essentially presents a trade off between power usage and latency and
+ the module parameters should be used to tune the affinity for this. Idle cpu time is
+ essentially converted to host kernel time with the aim of decreasing latency when
+ entering the guest.
+
+- Halt polling will only be conducted by the host when no other tasks are runnable on
+ that cpu, otherwise the polling will cease immediately and schedule will be invoked to
+ allow that other task to run. Thus this doesn't allow a guest to mount a denial of
+ service attack on the cpu.
diff --git a/Documentation/virt/kvm/hypercalls.txt b/Documentation/virt/kvm/hypercalls.rst
index 5f6d291bd004..dbaf207e560d 100644
--- a/Documentation/virt/kvm/hypercalls.txt
+++ b/Documentation/virt/kvm/hypercalls.rst
@@ -1,5 +1,9 @@
-Linux KVM Hypercall:
+.. SPDX-License-Identifier: GPL-2.0
+
+===================
+Linux KVM Hypercall
===================
+
X86:
KVM Hypercalls have a three-byte sequence of either the vmcall or the vmmcall
instruction. The hypervisor can replace it with instructions that are
@@ -20,7 +24,7 @@ S390:
For further information on the S390 diagnose call as supported by KVM,
refer to Documentation/virt/kvm/s390-diag.txt.
- PowerPC:
+PowerPC:
It uses R3-R10 and hypercall number in R11. R4-R11 are used as output registers.
Return value is placed in R3.
@@ -34,7 +38,8 @@ MIPS:
the return value is placed in $2 (v0).
KVM Hypercalls Documentation
-===========================
+============================
+
The template for each hypercall is:
1. Hypercall name.
2. Architecture(s)
@@ -43,56 +48,64 @@ The template for each hypercall is:
1. KVM_HC_VAPIC_POLL_IRQ
------------------------
-Architecture: x86
-Status: active
-Purpose: Trigger guest exit so that the host can check for pending
-interrupts on reentry.
+
+:Architecture: x86
+:Status: active
+:Purpose: Trigger guest exit so that the host can check for pending
+ interrupts on reentry.
2. KVM_HC_MMU_OP
-------------------------
-Architecture: x86
-Status: deprecated.
-Purpose: Support MMU operations such as writing to PTE,
-flushing TLB, release PT.
+----------------
+
+:Architecture: x86
+:Status: deprecated.
+:Purpose: Support MMU operations such as writing to PTE,
+ flushing TLB, release PT.
3. KVM_HC_FEATURES
-------------------------
-Architecture: PPC
-Status: active
-Purpose: Expose hypercall availability to the guest. On x86 platforms, cpuid
-used to enumerate which hypercalls are available. On PPC, either device tree
-based lookup ( which is also what EPAPR dictates) OR KVM specific enumeration
-mechanism (which is this hypercall) can be used.
+------------------
+
+:Architecture: PPC
+:Status: active
+:Purpose: Expose hypercall availability to the guest. On x86 platforms, cpuid
+          is used to enumerate which hypercalls are available. On PPC, either
+          device tree based lookup (which is also what EPAPR dictates) or a
+          KVM-specific enumeration mechanism (which is this hypercall) can
+          be used.
4. KVM_HC_PPC_MAP_MAGIC_PAGE
-------------------------
-Architecture: PPC
-Status: active
-Purpose: To enable communication between the hypervisor and guest there is a
-shared page that contains parts of supervisor visible register state.
-The guest can map this shared page to access its supervisor register through
-memory using this hypercall.
+----------------------------
+
+:Architecture: PPC
+:Status: active
+:Purpose: To enable communication between the hypervisor and guest there is a
+          shared page that contains parts of supervisor visible register state.
+          The guest can map this shared page to access its supervisor register
+          state through memory using this hypercall.
5. KVM_HC_KICK_CPU
-------------------------
-Architecture: x86
-Status: active
-Purpose: Hypercall used to wakeup a vcpu from HLT state
-Usage example : A vcpu of a paravirtualized guest that is busywaiting in guest
-kernel mode for an event to occur (ex: a spinlock to become available) can
-execute HLT instruction once it has busy-waited for more than a threshold
-time-interval. Execution of HLT instruction would cause the hypervisor to put
-the vcpu to sleep until occurrence of an appropriate event. Another vcpu of the
-same guest can wakeup the sleeping vcpu by issuing KVM_HC_KICK_CPU hypercall,
-specifying APIC ID (a1) of the vcpu to be woken up. An additional argument (a0)
-is used in the hypercall for future use.
+------------------
+
+:Architecture: x86
+:Status: active
+:Purpose: Hypercall used to wake up a vcpu from HLT state
+:Usage example:
+  A vcpu of a paravirtualized guest that is busy-waiting in guest
+  kernel mode for an event to occur (e.g. a spinlock to become available) can
+  execute the HLT instruction once it has busy-waited for more than a threshold
+  time interval. Execution of the HLT instruction would cause the hypervisor to put
+  the vcpu to sleep until an appropriate event occurs. Another vcpu of the
+  same guest can wake up the sleeping vcpu by issuing the KVM_HC_KICK_CPU hypercall,
+  specifying the APIC ID (a1) of the vcpu to be woken up. An additional argument (a0)
+  is reserved in the hypercall for future use.
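
A guest-side sketch of issuing this hypercall; kvm_hypercall2() and
KVM_HC_KICK_CPU are the x86 helpers from the kvm_para headers, while the
wrapper function itself is made up::

	#include <linux/kvm_para.h>

	/* Wake the halted vcpu whose APIC ID is 'apicid'; a0 is passed as 0
	 * since it is currently unused (see above). */
	static void kick_vcpu(unsigned long apicid)
	{
		kvm_hypercall2(KVM_HC_KICK_CPU, 0, apicid);
	}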
6. KVM_HC_CLOCK_PAIRING
-------------------------
-Architecture: x86
-Status: active
-Purpose: Hypercall used to synchronize host and guest clocks.
+-----------------------
+
+:Architecture: x86
+:Status: active
+:Purpose: Hypercall used to synchronize host and guest clocks.
+
Usage:
a0: guest physical address where host copies
@@ -101,6 +114,8 @@ a0: guest physical address where host copies
a1: clock_type, at the moment only KVM_CLOCK_PAIRING_WALLCLOCK (0)
is supported (corresponding to the host's CLOCK_REALTIME clock).
+ ::
+
struct kvm_clock_pairing {
__s64 sec;
__s64 nsec;
@@ -123,15 +138,16 @@ Returns KVM_EOPNOTSUPP if the host does not use TSC clocksource,
or if clock type is different than KVM_CLOCK_PAIRING_WALLCLOCK.
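
A hedged guest-side sketch of the call sequence; gpa_of() stands in for
whatever virtual-to-guest-physical conversion the guest uses, everything
else is from the kvm_para headers::

	#include <asm/kvm_para.h>

	struct kvm_clock_pairing pair;	/* must reside in guest RAM */
	long ret;

	ret = kvm_hypercall2(KVM_HC_CLOCK_PAIRING, gpa_of(&pair),
			     KVM_CLOCK_PAIRING_WALLCLOCK);
	/* on success (ret == 0), pair.sec/pair.nsec hold the host's
	 * CLOCK_REALTIME and pair.tsc the matching guest TSC value */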
7. KVM_HC_SEND_IPI
-------------------------
-Architecture: x86
-Status: active
-Purpose: Send IPIs to multiple vCPUs.
+------------------
+
+:Architecture: x86
+:Status: active
+:Purpose: Send IPIs to multiple vCPUs.
-a0: lower part of the bitmap of destination APIC IDs
-a1: higher part of the bitmap of destination APIC IDs
-a2: the lowest APIC ID in bitmap
-a3: APIC ICR
+- a0: lower part of the bitmap of destination APIC IDs
+- a1: higher part of the bitmap of destination APIC IDs
+- a2: the lowest APIC ID in bitmap
+- a3: APIC ICR
The hypercall lets a guest send multicast IPIs, with at most 128
destinations per hypercall in 64-bit mode and 64 vCPUs per
@@ -143,12 +159,13 @@ corresponds to the APIC ID a2+1, and so on.
Returns the number of CPUs to which the IPIs were delivered successfully.
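
For example, a guest could IPI the vcpus with APIC IDs 64, 66 and 191 in one
hypercall (a sketch; kvm_hypercall4() is the x86 helper, 'icr' is the APIC
ICR value to deliver)::

	/* a2 = 64: bit 0 of a0 is APIC ID 64, bit 63 of a1 is APIC ID 191 */
	unsigned long low  = (1UL << 0) | (1UL << 2);	/* APIC IDs 64, 66 */
	unsigned long high = 1UL << 63;			/* APIC ID 191 */
	long delivered;

	delivered = kvm_hypercall4(KVM_HC_SEND_IPI, low, high, 64, icr);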
8. KVM_HC_SCHED_YIELD
-------------------------
-Architecture: x86
-Status: active
-Purpose: Hypercall used to yield if the IPI target vCPU is preempted
+---------------------
+
+:Architecture: x86
+:Status: active
+:Purpose: Hypercall used to yield if the IPI target vCPU is preempted
a0: destination APIC ID
-Usage example: When sending a call-function IPI-many to vCPUs, yield if
-any of the IPI target vCPUs was preempted.
+:Usage example: When sending a call-function IPI-many to vCPUs, yield if
+ any of the IPI target vCPUs was preempted.
diff --git a/Documentation/virt/kvm/index.rst b/Documentation/virt/kvm/index.rst
index ada224a511fe..774deaebf7fa 100644
--- a/Documentation/virt/kvm/index.rst
+++ b/Documentation/virt/kvm/index.rst
@@ -7,6 +7,22 @@ KVM
.. toctree::
:maxdepth: 2
+ api
amd-memory-encryption
cpuid
+ halt-polling
+ hypercalls
+ locking
+ mmu
+ msr
+ nested-vmx
+ ppc-pv
+ s390-diag
+ timekeeping
vcpu-requests
+
+ review-checklist
+
+ arm/index
+
+ devices/index
diff --git a/Documentation/virt/kvm/locking.rst b/Documentation/virt/kvm/locking.rst
new file mode 100644
index 000000000000..c02291beac3f
--- /dev/null
+++ b/Documentation/virt/kvm/locking.rst
@@ -0,0 +1,243 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+KVM Lock Overview
+=================
+
+1. Acquisition Orders
+---------------------
+
+The acquisition orders for mutexes are as follows:
+
+- kvm->lock is taken outside vcpu->mutex
+
+- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
+
+- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
+ them together is quite rare.
+
+On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
+
+Everything else is a leaf: no other lock is taken inside the critical
+sections.
+
+2. Exception
+------------
+
+Fast page fault:
+
+Fast page fault is the fast path which fixes the guest page fault out of
+the mmu-lock on x86. Currently, the page fault can be fast in one of the
+following two cases:
+
+1. Access Tracking: The SPTE is not present, but it is marked for access
+   tracking, i.e. the SPTE_SPECIAL_MASK is set. That means we need to
+   restore the saved R/X bits. This is described in more detail below.
+
+2. Write-Protection: The SPTE is present and the fault is
+ caused by write-protect. That means we just need to change the W bit of
+ the spte.
+
+What we use to avoid all the races is the SPTE_HOST_WRITEABLE bit and the
+SPTE_MMU_WRITEABLE bit on the spte:
+
+- SPTE_HOST_WRITEABLE means the gfn is writable on host.
+- SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when
+ the gfn is writable on guest mmu and it is not write-protected by shadow
+ page write-protection.
+
+On the fast page fault path, we will use cmpxchg to atomically set the spte W
+bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
+restore the saved R/X bits if the VMX_EPT_TRACK_ACCESS mask is set, or both.
+This is safe because any concurrent change to these bits is detected by cmpxchg.
+
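+As a hedged sketch of what this lockless update amounts to (the bit mask
+names are illustrative, following the description above; this is not the
+exact kernel code)::
+
+	static bool fast_set_spte_writable(u64 *sptep)
+	{
+		u64 old = READ_ONCE(*sptep);
+
+		if (!(old & SPTE_HOST_WRITEABLE) || !(old & SPTE_MMU_WRITEABLE))
+			return false;
+
+		/* cmpxchg fails iff something changed the spte under us */
+		return cmpxchg64(sptep, old, old | PT_WRITABLE_MASK) == old;
+	}
+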
+But we need to carefully check these cases:
+
+1) The mapping from gfn to pfn
+
+The mapping from gfn to pfn may be changed since we can only ensure the pfn
+is not changed during cmpxchg. This is an ABA problem; for example, the
+following case can happen:
+
++------------------------------------------------------------------------+
+| At the beginning:: |
+| |
+| gpte = gfn1 |
+| gfn1 is mapped to pfn1 on host |
+| spte is the shadow page table entry corresponding with gpte and |
+| spte = pfn1 |
++------------------------------------------------------------------------+
+| On fast page fault path: |
++------------------------------------+-----------------------------------+
+| CPU 0: | CPU 1: |
++------------------------------------+-----------------------------------+
+| :: | |
+| | |
+| old_spte = *spte; | |
++------------------------------------+-----------------------------------+
+| | pfn1 is swapped out:: |
+| | |
+| | spte = 0; |
+| | |
+| | pfn1 is re-alloced for gfn2. |
+| | |
+| | gpte is changed to point to |
+| | gfn2 by the guest:: |
+| | |
+| | spte = pfn1; |
++------------------------------------+-----------------------------------+
+| :: |
+| |
+| if (cmpxchg(spte, old_spte, old_spte+W) |
+| mark_page_dirty(vcpu->kvm, gfn1) |
+| OOPS!!! |
++------------------------------------------------------------------------+
+
+We dirty-log for gfn1; that means gfn2 is lost in the dirty bitmap.
+
+For direct sp, we can easily avoid it since the spte of direct sp is fixed
+to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
+to pin gfn to pfn, because after gfn_to_pfn_atomic():
+
+- We have held the refcount of the pfn; that means the pfn can not be freed
+  and reused for another gfn.
+- The pfn is writable; that means it can not be shared between different gfns
+  by KSM.
+
+Then, we can ensure the dirty bitmap is correctly set for a gfn.
+
+Currently, to simplify things, we disable fast page fault for
+indirect shadow pages.
+
+2) Dirty bit tracking
+
+In the original code, the spte can be fast updated (non-atomically) if the
+spte is read-only and the Accessed bit has already been set, since the
+Accessed bit and Dirty bit can not be lost.
+
+But it is not true after fast page fault since the spte can be marked
+writable between reading and updating the spte, as in the following case:
+
++------------------------------------------------------------------------+
+| At the beginning:: |
+| |
+| spte.W = 0 |
+| spte.Accessed = 1 |
++------------------------------------+-----------------------------------+
+| CPU 0: | CPU 1: |
++------------------------------------+-----------------------------------+
+| In mmu_spte_clear_track_bits():: | |
+| | |
+| old_spte = *spte; | |
+| | |
+| | |
+| /* 'if' condition is satisfied. */| |
+| if (old_spte.Accessed == 1 && | |
+| old_spte.W == 0) | |
+| spte = 0ull; | |
++------------------------------------+-----------------------------------+
+| | on fast page fault path:: |
+| | |
+| | spte.W = 1 |
+| | |
+| | memory write on the spte:: |
+| | |
+| | spte.Dirty = 1 |
++------------------------------------+-----------------------------------+
+| :: | |
+| | |
+| else | |
+| old_spte = xchg(spte, 0ull) | |
+| if (old_spte.Accessed == 1) | |
+| kvm_set_pfn_accessed(spte.pfn);| |
+| if (old_spte.Dirty == 1) | |
+| kvm_set_pfn_dirty(spte.pfn); | |
+| OOPS!!! | |
++------------------------------------+-----------------------------------+
+
+The Dirty bit is lost in this case.
+
+In order to avoid this kind of issue, we always treat the spte as "volatile"
+if it can be updated out of mmu-lock; see spte_has_volatile_bits(). This means
+the spte is always atomically updated in this case.
+
+3) Flushing TLBs due to spte updates
+
+If the spte is updated from writable to read-only, we should flush all TLBs,
+otherwise rmap_write_protect will find a read-only spte, even though the
+writable spte might be cached on a CPU's TLB.
+
+As mentioned before, the spte can be updated to writable out of mmu-lock on
+the fast page fault path. In order to easily audit the path, we check in
+mmu_spte_update() whether TLBs need to be flushed for this reason, since it
+is a common function to update the spte (present -> present).
+
+Since the spte is "volatile" if it can be updated out of mmu-lock, we always
+atomically update the spte, and the race caused by fast page fault can be
+avoided. See the comments in spte_has_volatile_bits() and mmu_spte_update().
+
+Lockless Access Tracking:
+
+This is used for Intel CPUs that are using EPT but do not support the EPT A/D
+bits. In this case, when the KVM MMU notifier is called to track accesses to a
+page (via kvm_mmu_notifier_clear_flush_young), it marks the PTE as not-present
+by clearing the RWX bits in the PTE and storing the original R & X bits in
+some unused/ignored bits. In addition, the SPTE_SPECIAL_MASK is also set on the
+PTE (using the ignored bit 62). When the VM tries to access the page later on,
+a fault is generated and the fast page fault mechanism described above is used
+to atomically restore the PTE to a Present state. The W bit is not saved when
+the PTE is marked for access tracking and during restoration to the Present
+state, the W bit is set depending on whether or not it was a write access. If
+it wasn't, then the W bit will remain clear until a write access happens, at
+which time it will be set using the Dirty tracking mechanism described above.
+
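+A hedged sketch of the marking step (the masks and the shift are placeholder
+values for illustration; only SPTE_SPECIAL_MASK in bit 62 is taken from the
+text above)::
+
+	#define RWX_MASK		0x7ull		/* EPT R/W/X, bits 0-2 */
+	#define W_MASK			0x2ull
+	#define SPTE_SPECIAL_MASK	(1ull << 62)	/* ignored bit 62 */
+	#define SAVED_BITS_SHIFT	54		/* some ignored bits */
+
+	static u64 mark_spte_for_access_track_sketch(u64 spte)
+	{
+		u64 rx = spte & (RWX_MASK & ~W_MASK);	/* the R and X bits */
+
+		spte |= SPTE_SPECIAL_MASK;		/* flag: access-tracked */
+		spte |= rx << SAVED_BITS_SHIFT;		/* stash R/X for later */
+		spte &= ~RWX_MASK;			/* PTE is now not-present */
+		return spte;
+	}
+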
+3. Reference
+------------
+
+:Name: kvm_lock
+:Type: mutex
+:Arch: any
+:Protects: - vm_list
+
+:Name: kvm_count_lock
+:Type: raw_spinlock_t
+:Arch: any
+:Protects: - hardware virtualization enable/disable
+:Comment: 'raw' because hardware enabling/disabling must be atomic with
+          respect to migration.
+
+:Name: kvm_arch::tsc_write_lock
+:Type: raw_spinlock
+:Arch: x86
+:Protects: - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
+ - tsc offset in vmcb
+:Comment: 'raw' because updating the tsc offsets must not be preempted.
+
+:Name: kvm->mmu_lock
+:Type: spinlock_t
+:Arch: any
+:Protects: - shadow page/shadow tlb entry
+:Comment: it is a spinlock since it is used in mmu notifier.
+
+:Name: kvm->srcu
+:Type: srcu lock
+:Arch: any
+:Protects: - kvm->memslots
+ - kvm->buses
+:Comment: The srcu read lock must be held while accessing memslots (e.g.
+ when using gfn_to_* functions) and while accessing in-kernel
+ MMIO/PIO address->device structure mapping (kvm->buses).
+ The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
+ if it is needed by multiple functions.
+
+:Name: blocked_vcpu_on_cpu_lock
+:Type: spinlock_t
+:Arch: x86
+:Protects: blocked_vcpu_on_cpu
+:Comment: This is a per-CPU lock and it is used for VT-d posted-interrupts.
+          When VT-d posted-interrupts are supported and the VM has assigned
+          devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu
+          protected by blocked_vcpu_on_cpu_lock. When VT-d hardware issues
+          a wakeup notification event (because external interrupts from the
+          assigned devices arrive), we find the vCPU on the list and wake
+          it up.
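+
+For instance, the kvm->srcu rule above translates into the usual SRCU
+read-side pattern (a sketch; kvm_memslots() and the lock/unlock helpers are
+the real kernel APIs, the surrounding code is made up)::
+
+	struct kvm_memslots *slots;
+	int idx;
+
+	idx = srcu_read_lock(&kvm->srcu);
+	slots = kvm_memslots(kvm);	/* valid only while the read side is held */
+	/* ... use slots, or kvm->buses, here ... */
+	srcu_read_unlock(&kvm->srcu, idx);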
diff --git a/Documentation/virt/kvm/locking.txt b/Documentation/virt/kvm/locking.txt
deleted file mode 100644
index 635cd6eaf714..000000000000
--- a/Documentation/virt/kvm/locking.txt
+++ /dev/null
@@ -1,215 +0,0 @@
-KVM Lock Overview
-=================
-
-1. Acquisition Orders
----------------------
-
-The acquisition orders for mutexes are as follows:
-
-- kvm->lock is taken outside vcpu->mutex
-
-- kvm->lock is taken outside kvm->slots_lock and kvm->irq_lock
-
-- kvm->slots_lock is taken outside kvm->irq_lock, though acquiring
- them together is quite rare.
-
-On x86, vcpu->mutex is taken outside kvm->arch.hyperv.hv_lock.
-
-Everything else is a leaf: no other lock is taken inside the critical
-sections.
-
-2: Exception
-------------
-
-Fast page fault:
-
-Fast page fault is the fast path which fixes the guest page fault out of
-the mmu-lock on x86. Currently, the page fault can be fast in one of the
-following two cases:
-
-1. Access Tracking: The SPTE is not present, but it is marked for access
-tracking i.e. the SPTE_SPECIAL_MASK is set. That means we need to
-restore the saved R/X bits. This is described in more detail later below.
-
-2. Write-Protection: The SPTE is present and the fault is
-caused by write-protect. That means we just need to change the W bit of the
-spte.
-
-What we use to avoid all the race is the SPTE_HOST_WRITEABLE bit and
-SPTE_MMU_WRITEABLE bit on the spte:
-- SPTE_HOST_WRITEABLE means the gfn is writable on host.
-- SPTE_MMU_WRITEABLE means the gfn is writable on mmu. The bit is set when
- the gfn is writable on guest mmu and it is not write-protected by shadow
- page write-protection.
-
-On fast page fault path, we will use cmpxchg to atomically set the spte W
-bit if spte.SPTE_HOST_WRITEABLE = 1 and spte.SPTE_WRITE_PROTECT = 1, or
-restore the saved R/X bits if VMX_EPT_TRACK_ACCESS mask is set, or both. This
-is safe because whenever changing these bits can be detected by cmpxchg.
-
-But we need carefully check these cases:
-1): The mapping from gfn to pfn
-The mapping from gfn to pfn may be changed since we can only ensure the pfn
-is not changed during cmpxchg. This is a ABA problem, for example, below case
-will happen:
-
-At the beginning:
-gpte = gfn1
-gfn1 is mapped to pfn1 on host
-spte is the shadow page table entry corresponding with gpte and
-spte = pfn1
-
- VCPU 0 VCPU0
-on fast page fault path:
-
- old_spte = *spte;
- pfn1 is swapped out:
- spte = 0;
-
- pfn1 is re-alloced for gfn2.
-
- gpte is changed to point to
- gfn2 by the guest:
- spte = pfn1;
-
- if (cmpxchg(spte, old_spte, old_spte+W)
- mark_page_dirty(vcpu->kvm, gfn1)
- OOPS!!!
-
-We dirty-log for gfn1, that means gfn2 is lost in dirty-bitmap.
-
-For direct sp, we can easily avoid it since the spte of direct sp is fixed
-to gfn. For indirect sp, before we do cmpxchg, we call gfn_to_pfn_atomic()
-to pin gfn to pfn, because after gfn_to_pfn_atomic():
-- We have held the refcount of pfn that means the pfn can not be freed and
- be reused for another gfn.
-- The pfn is writable that means it can not be shared between different gfns
- by KSM.
-
-Then, we can ensure the dirty bitmaps is correctly set for a gfn.
-
-Currently, to simplify the whole things, we disable fast page fault for
-indirect shadow page.
-
-2): Dirty bit tracking
-In the origin code, the spte can be fast updated (non-atomically) if the
-spte is read-only and the Accessed bit has already been set since the
-Accessed bit and Dirty bit can not be lost.
-
-But it is not true after fast page fault since the spte can be marked
-writable between reading spte and updating spte. Like below case:
-
-At the beginning:
-spte.W = 0
-spte.Accessed = 1
-
- VCPU 0 VCPU0
-In mmu_spte_clear_track_bits():
-
- old_spte = *spte;
-
- /* 'if' condition is satisfied. */
- if (old_spte.Accessed == 1 &&
- old_spte.W == 0)
- spte = 0ull;
- on fast page fault path:
- spte.W = 1
- memory write on the spte:
- spte.Dirty = 1
-
-
- else
- old_spte = xchg(spte, 0ull)
-
-
- if (old_spte.Accessed == 1)
- kvm_set_pfn_accessed(spte.pfn);
- if (old_spte.Dirty == 1)
- kvm_set_pfn_dirty(spte.pfn);
- OOPS!!!
-
-The Dirty bit is lost in this case.
-
-In order to avoid this kind of issue, we always treat the spte as "volatile"
-if it can be updated out of mmu-lock, see spte_has_volatile_bits(), it means,
-the spte is always atomically updated in this case.
-
-3): flush tlbs due to spte updated
-If the spte is updated from writable to readonly, we should flush all TLBs,
-otherwise rmap_write_protect will find a read-only spte, even though the
-writable spte might be cached on a CPU's TLB.
-
-As mentioned before, the spte can be updated to writable out of mmu-lock on
-fast page fault path, in order to easily audit the path, we see if TLBs need
-be flushed caused by this reason in mmu_spte_update() since this is a common
-function to update spte (present -> present).
-
-Since the spte is "volatile" if it can be updated out of mmu-lock, we always
-atomically update the spte, the race caused by fast page fault can be avoided,
-See the comments in spte_has_volatile_bits() and mmu_spte_update().
-
-Lockless Access Tracking:
-
-This is used for Intel CPUs that are using EPT but do not support the EPT A/D
-bits. In this case, when the KVM MMU notifier is called to track accesses to a
-page (via kvm_mmu_notifier_clear_flush_young), it marks the PTE as not-present
-by clearing the RWX bits in the PTE and storing the original R & X bits in
-some unused/ignored bits. In addition, the SPTE_SPECIAL_MASK is also set on the
-PTE (using the ignored bit 62). When the VM tries to access the page later on,
-a fault is generated and the fast page fault mechanism described above is used
-to atomically restore the PTE to a Present state. The W bit is not saved when
-the PTE is marked for access tracking and during restoration to the Present
-state, the W bit is set depending on whether or not it was a write access. If
-it wasn't, then the W bit will remain clear until a write access happens, at
-which time it will be set using the Dirty tracking mechanism described above.
-
-3. Reference
-------------
-
-Name: kvm_lock
-Type: mutex
-Arch: any
-Protects: - vm_list
-
-Name: kvm_count_lock
-Type: raw_spinlock_t
-Arch: any
-Protects: - hardware virtualization enable/disable
-Comment: 'raw' because hardware enabling/disabling must be atomic /wrt
- migration.
-
-Name: kvm_arch::tsc_write_lock
-Type: raw_spinlock
-Arch: x86
-Protects: - kvm_arch::{last_tsc_write,last_tsc_nsec,last_tsc_offset}
- - tsc offset in vmcb
-Comment: 'raw' because updating the tsc offsets must not be preempted.
-
-Name: kvm->mmu_lock
-Type: spinlock_t
-Arch: any
-Protects: -shadow page/shadow tlb entry
-Comment: it is a spinlock since it is used in mmu notifier.
-
-Name: kvm->srcu
-Type: srcu lock
-Arch: any
-Protects: - kvm->memslots
- - kvm->buses
-Comment: The srcu read lock must be held while accessing memslots (e.g.
- when using gfn_to_* functions) and while accessing in-kernel
- MMIO/PIO address->device structure mapping (kvm->buses).
- The srcu index can be stored in kvm_vcpu->srcu_idx per vcpu
- if it is needed by multiple functions.
-
-Name: blocked_vcpu_on_cpu_lock
-Type: spinlock_t
-Arch: x86
-Protects: blocked_vcpu_on_cpu
-Comment: This is a per-CPU lock and it is used for VT-d posted-interrupts.
- When VT-d posted-interrupts is supported and the VM has assigned
- devices, we put the blocked vCPU on the list blocked_vcpu_on_cpu
- protected by blocked_vcpu_on_cpu_lock, when VT-d hardware issues
- wakeup notification event since external interrupts from the
- assigned devices happens, we will find the vCPU on the list to
- wakeup.
diff --git a/Documentation/virt/kvm/mmu.txt b/Documentation/virt/kvm/mmu.rst
index dadb29e8738f..60981887d20b 100644
--- a/Documentation/virt/kvm/mmu.txt
+++ b/Documentation/virt/kvm/mmu.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+======================
The x86 kvm shadow mmu
======================
@@ -7,27 +10,37 @@ physical addresses to host physical addresses.
The mmu code attempts to satisfy the following requirements:
-- correctness: the guest should not be able to determine that it is running
+- correctness:
+ the guest should not be able to determine that it is running
on an emulated mmu except for timing (we attempt to comply
with the specification, not emulate the characteristics of
a particular implementation such as tlb size)
-- security: the guest must not be able to touch host memory not assigned
+- security:
+ the guest must not be able to touch host memory not assigned
to it
-- performance: minimize the performance penalty imposed by the mmu
-- scaling: need to scale to large memory and large vcpu guests
-- hardware: support the full range of x86 virtualization hardware
-- integration: Linux memory management code must be in control of guest memory
+- performance:
+ minimize the performance penalty imposed by the mmu
+- scaling:
+ need to scale to large memory and large vcpu guests
+- hardware:
+ support the full range of x86 virtualization hardware
+- integration:
+ Linux memory management code must be in control of guest memory
so that swapping, page migration, page merging, transparent
hugepages, and similar features work without change
-- dirty tracking: report writes to guest memory to enable live migration
+- dirty tracking:
+ report writes to guest memory to enable live migration
and framebuffer-based displays
-- footprint: keep the amount of pinned kernel memory low (most memory
+- footprint:
+ keep the amount of pinned kernel memory low (most memory
should be shrinkable)
-- reliability: avoid multipage or GFP_ATOMIC allocations
+- reliability:
+ avoid multipage or GFP_ATOMIC allocations
Acronyms
========
+==== ====================================================================
pfn host page frame number
hpa host physical address
hva host virtual address
@@ -41,6 +54,7 @@ pte page table entry (used also to refer generically to paging structure
gpte guest pte (referring to gfns)
spte shadow pte (referring to pfns)
tdp two dimensional paging (vendor neutral term for NPT and EPT)
+==== ====================================================================
Virtual and real hardware supported
===================================
@@ -90,11 +104,13 @@ Events
The mmu is driven by events, some from the guest, some from the host.
Guest generated events:
+
- writes to control registers (especially cr3)
- invlpg/invlpga instruction execution
- access to missing or protected translations
Host generated events:
+
- changes in the gpa->hpa translation (either through gpa->hva changes or
through hva->hpa changes)
- memory pressure (the shrinker)
@@ -117,16 +133,19 @@ Leaf ptes point at guest pages.
The following table shows translations encoded by leaf ptes, with higher-level
translations in parentheses:
- Non-nested guests:
+ Non-nested guests::
+
nonpaging: gpa->hpa
paging: gva->gpa->hpa
paging, tdp: (gva->)gpa->hpa
- Nested guests:
+
+ Nested guests::
+
non-tdp: ngva->gpa->hpa (*)
tdp: (ngva->)ngpa->gpa->hpa
-(*) the guest hypervisor will encode the ngva->gpa translation into its page
- tables if npt is not present
+ (*) the guest hypervisor will encode the ngva->gpa translation into its page
+ tables if npt is not present
Shadow pages contain the following information:
role.level:
@@ -291,28 +310,41 @@ Handling a page fault is performed as follows:
- if the RSV bit of the error code is set, the page fault is caused by guest
accessing MMIO and cached MMIO information is available.
+
- walk shadow page table
- check for valid generation number in the spte (see "Fast invalidation of
MMIO sptes" below)
- cache the information to vcpu->arch.mmio_gva, vcpu->arch.mmio_access and
vcpu->arch.mmio_gfn, and call the emulator
+
- If both P bit and R/W bit of error code are set, this could possibly
be handled as a "fast page fault" (fixed without taking the MMU lock). See
the description in Documentation/virt/kvm/locking.rst.
+
- if needed, walk the guest page tables to determine the guest translation
(gva->gpa or ngpa->gpa)
+
- if permissions are insufficient, reflect the fault back to the guest
+
- determine the host page
+
- if this is an mmio request, there is no host page; cache the info to
vcpu->arch.mmio_gva, vcpu->arch.mmio_access and vcpu->arch.mmio_gfn
+
- walk the shadow page table to find the spte for the translation,
instantiating missing intermediate page tables as necessary
+
- If this is an mmio request, cache the mmio info to the spte and set some
reserved bit on the spte (see callers of kvm_mmu_set_mmio_spte_mask)
+
- try to unsynchronize the page
+
- if successful, we can let the guest continue and modify the gpte
+
- emulate the instruction
+
- if failed, unshadow the page and let the guest continue
+
- update any translations that were modified by the instruction
invlpg handling:
@@ -324,10 +356,12 @@ invlpg handling:
Guest control register updates:
- mov to cr3
+
- look up new shadow roots
- synchronize newly reachable shadow pages
- mov to cr0/cr4/efer
+
- set up mmu context for new paging mode
- look up new shadow roots
- synchronize newly reachable shadow pages
@@ -358,6 +392,7 @@ on fault type:
(user write faults generate a #PF)
In the first case there are two additional complications:
+
- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
the kernel may now execute it. We handle this by also setting spte.nx.
If we get a user fetch or read fault, we'll change spte.u=1 and
@@ -446,4 +481,3 @@ Further reading
- NPT presentation from KVM Forum 2008
http://www.linux-kvm.org/images/c/c8/KvmForum2008%24kdf2008_21.pdf
-
diff --git a/Documentation/virt/kvm/msr.txt b/Documentation/virt/kvm/msr.rst
index df1f4338b3ca..33892036672d 100644
--- a/Documentation/virt/kvm/msr.txt
+++ b/Documentation/virt/kvm/msr.rst
@@ -1,6 +1,10 @@
-KVM-specific MSRs.
-Glauber Costa <[email protected]>, Red Hat Inc, 2010
-=====================================================
+.. SPDX-License-Identifier: GPL-2.0
+
+=================
+KVM-specific MSRs
+=================
+
+:Author: Glauber Costa <[email protected]>, Red Hat Inc, 2010
KVM makes use of some custom MSRs to service some requests.
@@ -9,34 +13,39 @@ Custom MSRs have a range reserved for them, that goes from
but they are deprecated and their use is discouraged.
Custom MSR list
---------
+---------------
The current supported Custom MSR list is:
-MSR_KVM_WALL_CLOCK_NEW: 0x4b564d00
+MSR_KVM_WALL_CLOCK_NEW:
+ 0x4b564d00
- data: 4-byte alignment physical address of a memory area which must be
+data:
+	4-byte aligned physical address of a memory area which must be
in guest RAM. This memory is expected to hold a copy of the following
- structure:
+ structure::
- struct pvclock_wall_clock {
+ struct pvclock_wall_clock {
u32 version;
u32 sec;
u32 nsec;
- } __attribute__((__packed__));
+ } __attribute__((__packed__));
whose data will be filled in by the hypervisor. The hypervisor is only
guaranteed to update this data at the moment of MSR write.
Users that want to reliably query this information more than once have
to write more than once to this MSR. Fields have the following meanings:
- version: guest has to check version before and after grabbing
+ version:
+ guest has to check version before and after grabbing
time information and check that they are both equal and even.
An odd version indicates an in-progress update.
- sec: number of seconds for wallclock at time of boot.
+ sec:
+ number of seconds for wallclock at time of boot.
- nsec: number of nanoseconds for wallclock at time of boot.
+ nsec:
+ number of nanoseconds for wallclock at time of boot.
In order to get the current wallclock time, the system_time from
MSR_KVM_SYSTEM_TIME_NEW needs to be added.
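
A guest-side read might look as follows (a hedged sketch: 'wc' is assumed to
be the registered pvclock_wall_clock area and wall_clock_gpa its guest
physical address; wrmsrl() and rmb() are the usual x86 helpers)::

	u32 version, sec, nsec;

	do {
		/* the data is only guaranteed fresh at the MSR write */
		wrmsrl(MSR_KVM_WALL_CLOCK_NEW, wall_clock_gpa);
		version = wc.version;
		rmb();			/* version before payload */
		sec = wc.sec;
		nsec = wc.nsec;
		rmb();			/* payload before re-check */
	} while ((version & 1) || version != wc.version);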
@@ -47,13 +56,15 @@ MSR_KVM_WALL_CLOCK_NEW: 0x4b564d00
Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid
leaf prior to usage.
-MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
+MSR_KVM_SYSTEM_TIME_NEW:
+ 0x4b564d01
- data: 4-byte aligned physical address of a memory area which must be in
+data:
+ 4-byte aligned physical address of a memory area which must be in
guest RAM, plus an enable bit in bit 0. This memory is expected to hold
- a copy of the following structure:
+ a copy of the following structure::
- struct pvclock_vcpu_time_info {
+ struct pvclock_vcpu_time_info {
u32 version;
u32 pad0;
u64 tsc_timestamp;
@@ -62,7 +73,7 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
s8 tsc_shift;
u8 flags;
u8 pad[2];
- } __attribute__((__packed__)); /* 32 bytes */
+ } __attribute__((__packed__)); /* 32 bytes */
whose data will be filled in by the hypervisor periodically. Only one
write, or registration, is needed for each VCPU. The interval between
@@ -72,23 +83,28 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
Fields have the following meanings:
- version: guest has to check version before and after grabbing
+ version:
+ guest has to check version before and after grabbing
time information and check that they are both equal and even.
An odd version indicates an in-progress update.
- tsc_timestamp: the tsc value at the current VCPU at the time
+ tsc_timestamp:
+ the tsc value at the current VCPU at the time
of the update of this structure. Guests can subtract this value
from current tsc to derive a notion of elapsed time since the
structure update.
- system_time: a host notion of monotonic time, including sleep
+ system_time:
+ a host notion of monotonic time, including sleep
time at the time this structure was last updated. Unit is
nanoseconds.
- tsc_to_system_mul: multiplier to be used when converting
+ tsc_to_system_mul:
+ multiplier to be used when converting
tsc-related quantity to nanoseconds
- tsc_shift: shift to be used when converting tsc-related
+ tsc_shift:
+ shift to be used when converting tsc-related
quantity to nanoseconds. This shift will ensure that
multiplication with tsc_to_system_mul does not overflow.
A positive value denotes a left shift, a negative value
@@ -96,7 +112,7 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
The conversion from tsc to nanoseconds involves an additional
right shift by 32 bits. With this information, guests can
- derive per-CPU time by doing:
+ derive per-CPU time by doing::
time = (current_tsc - tsc_timestamp)
if (tsc_shift >= 0)
@@ -106,29 +122,34 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
time = (time * tsc_to_system_mul) >> 32
time = time + system_time
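
Written out as C, the same conversion might look like this (a sketch; the
function name is made up, and mul_u64_u32_shr() from linux/math64.h is used
so the 64x32-bit multiply cannot overflow)::

	#include <linux/math64.h>

	static u64 pvclock_cycles_to_ns(const struct pvclock_vcpu_time_info *src,
					u64 current_tsc)
	{
		u64 delta = current_tsc - src->tsc_timestamp;

		if (src->tsc_shift >= 0)
			delta <<= src->tsc_shift;
		else
			delta >>= -src->tsc_shift;

		/* (delta * tsc_to_system_mul) >> 32, then add system_time */
		return mul_u64_u32_shr(delta, src->tsc_to_system_mul, 32) +
		       src->system_time;
	}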
- flags: bits in this field indicate extended capabilities
+ flags:
+ bits in this field indicate extended capabilities
coordinated between the guest and the hypervisor. Availability
of specific flags has to be checked in 0x40000001 cpuid leaf.
Current flags are:
- flag bit | cpuid bit | meaning
- -------------------------------------------------------------
- | | time measures taken across
- 0 | 24 | multiple cpus are guaranteed to
- | | be monotonic
- -------------------------------------------------------------
- | | guest vcpu has been paused by
- 1 | N/A | the host
- | | See 4.70 in api.txt
- -------------------------------------------------------------
+
+ +-----------+--------------+----------------------------------+
+ | flag bit | cpuid bit | meaning |
+ +-----------+--------------+----------------------------------+
+ | | | time measures taken across |
+ | 0 | 24 | multiple cpus are guaranteed to |
+ | | | be monotonic |
+ +-----------+--------------+----------------------------------+
+ | | | guest vcpu has been paused by |
+ | 1 | N/A | the host |
+ | | | See 4.70 in api.txt |
+ +-----------+--------------+----------------------------------+
Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid
leaf prior to usage.
-MSR_KVM_WALL_CLOCK: 0x11
+MSR_KVM_WALL_CLOCK:
+ 0x11
- data and functioning: same as MSR_KVM_WALL_CLOCK_NEW. Use that instead.
+data and functioning:
+ same as MSR_KVM_WALL_CLOCK_NEW. Use that instead.
This MSR falls outside the reserved KVM range and may be removed in the
future. Its usage is deprecated.
@@ -136,9 +157,11 @@ MSR_KVM_WALL_CLOCK: 0x11
Availability of this MSR must be checked via bit 0 in 0x40000001 cpuid
leaf prior to usage.
-MSR_KVM_SYSTEM_TIME: 0x12
+MSR_KVM_SYSTEM_TIME:
+ 0x12
- data and functioning: same as MSR_KVM_SYSTEM_TIME_NEW. Use that instead.
+data and functioning:
+ same as MSR_KVM_SYSTEM_TIME_NEW. Use that instead.
This MSR falls outside the reserved KVM range and may be removed in the
future. Its usage is deprecated.
@@ -146,7 +169,7 @@ MSR_KVM_SYSTEM_TIME: 0x12
Availability of this MSR must be checked via bit 0 in 0x40000001 cpuid
leaf prior to usage.
- The suggested algorithm for detecting kvmclock presence is then:
+ The suggested algorithm for detecting kvmclock presence is then::
if (!kvm_para_available()) /* refer to cpuid.txt */
return NON_PRESENT;
@@ -163,8 +186,11 @@ MSR_KVM_SYSTEM_TIME: 0x12
} else
return NON_PRESENT;
-MSR_KVM_ASYNC_PF_EN: 0x4b564d02
- data: Bits 63-6 hold 64-byte aligned physical address of a
+MSR_KVM_ASYNC_PF_EN:
+ 0x4b564d02
+
+data:
+ Bits 63-6 hold 64-byte aligned physical address of a
64 byte memory area which must be in guest RAM and must be
zeroed. Bits 5-3 are reserved and should be zero. Bit 0 is 1
when asynchronous page faults are enabled on the vcpu, 0 when
@@ -200,20 +226,22 @@ MSR_KVM_ASYNC_PF_EN: 0x4b564d02
Currently type 2 APF will always be delivered on the same vcpu as
type 1 was, but the guest should not rely on that.
-MSR_KVM_STEAL_TIME: 0x4b564d03
+MSR_KVM_STEAL_TIME:
+ 0x4b564d03
- data: 64-byte alignment physical address of a memory area which must be
+data:
+	64-byte aligned physical address of a memory area which must be
in guest RAM, plus an enable bit in bit 0. This memory is expected to
- hold a copy of the following structure:
+ hold a copy of the following structure::
- struct kvm_steal_time {
+ struct kvm_steal_time {
__u64 steal;
__u32 version;
__u32 flags;
__u8 preempted;
__u8 u8_pad[3];
__u32 pad[11];
- }
+ }
whose data will be filled in by the hypervisor periodically. Only one
write, or registration, is needed for each VCPU. The interval between
@@ -224,25 +252,32 @@ MSR_KVM_STEAL_TIME: 0x4b564d03
Fields have the following meanings:
- version: a sequence counter. In other words, guest has to check
+ version:
+ a sequence counter. In other words, guest has to check
this field before and after grabbing time information and make
sure they are both equal and even. An odd version indicates an
in-progress update.
- flags: At this point, always zero. May be used to indicate
+ flags:
+ At this point, always zero. May be used to indicate
changes in this structure in the future.
- steal: the amount of time in which this vCPU did not run, in
+ steal:
+ the amount of time in which this vCPU did not run, in
	nanoseconds. Time during which the vcpu is idle will not be
reported as steal time.
- preempted: indicate the vCPU who owns this struct is running or
+ preempted:
+		indicates whether the vCPU that owns this struct is running or
not. Non-zero values mean the vCPU has been preempted. Zero
	means the vCPU is not preempted. NOTE, it is always zero if the
	hypervisor doesn't support this field.
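
Putting the version rules together, a guest might read the steal value like
this (a hedged sketch; 'st' is assumed to point at the registered
kvm_steal_time area)::

	u64 steal;
	u32 version;

	do {
		version = READ_ONCE(st->version);
		smp_rmb();		/* version before payload */
		steal = READ_ONCE(st->steal);
		smp_rmb();		/* payload before re-check */
	} while ((version & 1) || READ_ONCE(st->version) != version);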
-MSR_KVM_EOI_EN: 0x4b564d04
- data: Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
+MSR_KVM_EOI_EN:
+ 0x4b564d04
+
+data:
+ Bit 0 is 1 when PV end of interrupt is enabled on the vcpu; 0
when disabled. Bit 1 is reserved and must be zero. When PV end of
interrupt is enabled (bit 0 set), bits 63-2 hold a 4-byte aligned
physical address of a 4 byte memory area which must be in guest RAM and
@@ -274,11 +309,13 @@ MSR_KVM_EOI_EN: 0x4b564d04
clear it using a single CPU instruction, such as test and clear, or
compare and exchange.
-MSR_KVM_POLL_CONTROL: 0x4b564d05
+MSR_KVM_POLL_CONTROL:
+ 0x4b564d05
+
Control host-side polling.
- data: Bit 0 enables (1) or disables (0) host-side HLT polling logic.
+data:
+ Bit 0 enables (1) or disables (0) host-side HLT polling logic.
KVM guests can request the host not to poll on HLT, for example if
they are performing polling themselves.
-
diff --git a/Documentation/virt/kvm/nested-vmx.txt b/Documentation/virt/kvm/nested-vmx.rst
index 97eb1353e962..592b0ab6970b 100644
--- a/Documentation/virt/kvm/nested-vmx.txt
+++ b/Documentation/virt/kvm/nested-vmx.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+==========
Nested VMX
==========
@@ -41,9 +44,9 @@ No modifications are required to user space (qemu). However, qemu's default
emulated CPU type (qemu64) does not list the "VMX" CPU feature, so it must be
explicitly enabled, by giving qemu one of the following options:
- -cpu host (emulated CPU has all features of the real CPU)
+ - -cpu host (emulated CPU has all features of the real CPU)
- -cpu qemu64,+vmx (add just the vmx feature to a named CPU type)
+ - -cpu qemu64,+vmx (add just the vmx feature to a named CPU type)
ABIs
@@ -75,6 +78,8 @@ of this structure changes, this can break live migration across KVM versions.
VMCS12_REVISION (from vmx.c) should be changed if struct vmcs12 or its inner
struct shadow_vmcs is ever changed.
+::
+
typedef u64 natural_width;
struct __packed vmcs12 {
/* According to the Intel spec, a VMCS region must start with
@@ -220,21 +225,21 @@ Authors
-------
These patches were written by:
- Abel Gordon, abelg <at> il.ibm.com
- Nadav Har'El, nyh <at> il.ibm.com
- Orit Wasserman, oritw <at> il.ibm.com
- Ben-Ami Yassor, benami <at> il.ibm.com
- Muli Ben-Yehuda, muli <at> il.ibm.com
+ - Abel Gordon, abelg <at> il.ibm.com
+ - Nadav Har'El, nyh <at> il.ibm.com
+ - Orit Wasserman, oritw <at> il.ibm.com
+ - Ben-Ami Yassor, benami <at> il.ibm.com
+ - Muli Ben-Yehuda, muli <at> il.ibm.com
With contributions by:
- Anthony Liguori, aliguori <at> us.ibm.com
- Mike Day, mdday <at> us.ibm.com
- Michael Factor, factor <at> il.ibm.com
- Zvi Dubitzky, dubi <at> il.ibm.com
+ - Anthony Liguori, aliguori <at> us.ibm.com
+ - Mike Day, mdday <at> us.ibm.com
+ - Michael Factor, factor <at> il.ibm.com
+ - Zvi Dubitzky, dubi <at> il.ibm.com
And valuable reviews by:
- Avi Kivity, avi <at> redhat.com
- Gleb Natapov, gleb <at> redhat.com
- Marcelo Tosatti, mtosatti <at> redhat.com
- Kevin Tian, kevin.tian <at> intel.com
- and others.
+ - Avi Kivity, avi <at> redhat.com
+ - Gleb Natapov, gleb <at> redhat.com
+ - Marcelo Tosatti, mtosatti <at> redhat.com
+ - Kevin Tian, kevin.tian <at> intel.com
+ - and others.
diff --git a/Documentation/virt/kvm/ppc-pv.txt b/Documentation/virt/kvm/ppc-pv.rst
index e26115ce4258..5fdb907670be 100644
--- a/Documentation/virt/kvm/ppc-pv.txt
+++ b/Documentation/virt/kvm/ppc-pv.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=================================
The PPC KVM paravirtual interface
=================================
@@ -34,8 +37,9 @@ up the hypercall. To call a hypercall, just call these instructions.
The parameters are as follows:
+ ======== ================ ================
Register IN OUT
-
+ ======== ================ ================
r0 - volatile
r3 1st parameter Return code
r4 2nd parameter 1st output value
@@ -47,6 +51,7 @@ The parameters are as follows:
r10 8th parameter 7th output value
r11 hypercall number 8th output value
r12 - volatile
+ ======== ================ ================
Hypercall definitions are shared in generic code, so the same hypercall numbers
apply for x86 and powerpc alike with the exception that each KVM hypercall
@@ -54,11 +59,13 @@ also needs to be ORed with the KVM vendor code which is (42 << 16).
Return codes can be as follows:
+ ==== =========================
Code Meaning
-
+ ==== =========================
0 Success
12 Hypercall not implemented
<0 Error
+ ==== =========================
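
For example, the vendor code rule above means a PPC guest forms the value it
puts in r11 like this (the macro names are made up; the real definitions
live in the ePAPR/KVM headers)::

	#define KVM_VENDOR_CODE		(42 << 16)
	#define PPC_KVM_HCALL(nr)	((nr) | KVM_VENDOR_CODE)

	/* e.g. KVM_HC_FEATURES (3) becomes 0x2a0003 */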
The magic page
==============
@@ -72,7 +79,7 @@ desired location. The first parameter indicates the effective address when the
MMU is enabled. The second parameter indicates the address in real mode, if
applicable to the target. For now, we always map the page to -4096. This way we
can access it using absolute load and store functions. The following
-instruction reads the first field of the magic page:
+instruction reads the first field of the magic page::
ld rX, -4096(0)
@@ -93,8 +100,10 @@ a bitmap of available features inside the magic page.
The following enhancements to the magic page are currently available:
+ ============================ =======================================
KVM_MAGIC_FEAT_SR Maps SR registers r/w in the magic page
KVM_MAGIC_FEAT_MAS0_TO_SPRG7 Maps MASn, ESR, PIR and high SPRGs
+ ============================ =======================================
For enhanced features in the magic page, please check for the existence of the
feature before using them!
@@ -121,8 +130,8 @@ when entering the guest or don't have any impact on the hypervisor's behavior.
The following bits are safe to be set inside the guest:
- MSR_EE
- MSR_RI
+ - MSR_EE
+ - MSR_RI
If any other bit changes in the MSR, please still use mtmsr(d).
@@ -138,9 +147,9 @@ guest. Implementing any of those mappings is optional, as the instruction traps
also act on the shared page. So calling privileged instructions still works as
before.
+======================= ================================
From To
-==== ==
-
+======================= ================================
mfmsr rX ld rX, magic_page->msr
mfsprg rX, 0 ld rX, magic_page->sprg0
mfsprg rX, 1 ld rX, magic_page->sprg1
@@ -173,7 +182,7 @@ mtsrin rX, rY b <special mtsrin section>
[BookE only]
wrteei [0|1] b <special wrteei section>
-
+======================= ================================
Some instructions require more logic to determine what's going on than a load
or store instruction can deliver. To enable patching of those, we keep some
@@ -191,6 +200,7 @@ for example.
Hypercall ABIs in KVM on PowerPC
=================================
+
1) KVM hypercalls (ePAPR)
These are ePAPR compliant hypercall implementation (mentioned above). Even
diff --git a/Documentation/virt/kvm/review-checklist.txt b/Documentation/virt/kvm/review-checklist.rst
index 499af499e296..1f86a9d3f705 100644
--- a/Documentation/virt/kvm/review-checklist.txt
+++ b/Documentation/virt/kvm/review-checklist.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+================================
Review checklist for kvm patches
================================
diff --git a/Documentation/virt/kvm/s390-diag.txt b/Documentation/virt/kvm/s390-diag.rst
index 7c52e5f8b210..eaac4864d3d6 100644
--- a/Documentation/virt/kvm/s390-diag.txt
+++ b/Documentation/virt/kvm/s390-diag.rst
@@ -1,3 +1,6 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+=============================
The s390 DIAGNOSE call on KVM
=============================
@@ -16,12 +19,12 @@ DIAGNOSE calls by the guest cause a mandatory intercept. This implies
all supported DIAGNOSE calls need to be handled by either KVM or its
userspace.
-All DIAGNOSE calls supported by KVM use the RS-a format:
+All DIAGNOSE calls supported by KVM use the RS-a format::
---------------------------------------
-| '83' | R1 | R3 | B2 | D2 |
---------------------------------------
-0 8 12 16 20 31
+ --------------------------------------
+ | '83' | R1 | R3 | B2 | D2 |
+ --------------------------------------
+ 0 8 12 16 20 31
The second-operand address (obtained by the base/displacement calculation)
is not used to address data. Instead, bits 48-63 of this address specify
diff --git a/Documentation/virt/kvm/timekeeping.txt b/Documentation/virt/kvm/timekeeping.rst
index 76808a17ad84..21ae7efa29ba 100644
--- a/Documentation/virt/kvm/timekeeping.txt
+++ b/Documentation/virt/kvm/timekeeping.rst
@@ -1,17 +1,21 @@
+.. SPDX-License-Identifier: GPL-2.0
- Timekeeping Virtualization for X86-Based Architectures
+======================================================
+Timekeeping Virtualization for X86-Based Architectures
+======================================================
- Zachary Amsden <[email protected]>
- Copyright (c) 2010, Red Hat. All rights reserved.
+:Author: Zachary Amsden <[email protected]>
+:Copyright: (c) 2010, Red Hat. All rights reserved.
-1) Overview
-2) Timing Devices
-3) TSC Hardware
-4) Virtualization Problems
+.. Contents
-=========================================================================
+ 1) Overview
+ 2) Timing Devices
+ 3) TSC Hardware
+ 4) Virtualization Problems
-1) Overview
+1. Overview
+===========
One of the most complicated parts of the X86 platform, and specifically,
the virtualization of this platform is the plethora of timing devices available
@@ -27,15 +31,15 @@ The purpose of this document is to collect data and information relevant to
timekeeping which may be difficult to find elsewhere, specifically,
information relevant to KVM and hardware-based virtualization.
-=========================================================================
-
-2) Timing Devices
+2. Timing Devices
+=================
First we discuss the basic hardware devices available. TSC and the related
KVM clock are special enough to warrant a full exposition and are described in
the following section.
-2.1) i8254 - PIT
+2.1. i8254 - PIT
+----------------
One of the first timer devices available is the programmable interrupt timer,
or PIT. The PIT has a fixed frequency 1.193182 MHz base clock and three
@@ -50,13 +54,13 @@ The PIT uses I/O ports 0x40 - 0x43. Access to the 16-bit counters is done
using single or multiple byte access to the I/O ports. There are 6 modes
available, but not all modes are available to all timers, as only timer 2
has a connected gate input, required for modes 1 and 5. The gate line is
-controlled by port 61h, bit 0, as illustrated in the following diagram.
+controlled by port 61h, bit 0, as illustrated in the following diagram::
- -------------- ----------------
-| | | |
-| 1.1932 MHz |---------->| CLOCK OUT | ---------> IRQ 0
-| Clock | | | |
- -------------- | +->| GATE TIMER 0 |
+ -------------- ----------------
+ | | | |
+ | 1.1932 MHz|---------->| CLOCK OUT | ---------> IRQ 0
+ | Clock | | | |
+ -------------- | +->| GATE TIMER 0 |
| ----------------
|
| ----------------
@@ -70,29 +74,33 @@ controlled by port 61h, bit 0, as illustrated in the following diagram.
| | |
|------>| CLOCK OUT | ---------> Port 61h, bit 5
| | |
-Port 61h, bit 0 ---------->| GATE TIMER 2 | \_.---- ____
+ Port 61h, bit 0 -------->| GATE TIMER 2 | \_.---- ____
---------------- _| )--|LPF|---Speaker
/ *---- \___/
-Port 61h, bit 1 -----------------------------------/
+ Port 61h, bit 1 ---------------------------------/
The timer modes are now described.
-Mode 0: Single Timeout. This is a one-shot software timeout that counts down
+Mode 0: Single Timeout.
+ This is a one-shot software timeout that counts down
when the gate is high (always true for timers 0 and 1). When the count
reaches zero, the output goes high.
-Mode 1: Triggered One-shot. The output is initially set high. When the gate
+Mode 1: Triggered One-shot.
+ The output is initially set high. When the gate
line is set high, a countdown is initiated (which does not stop if the gate is
lowered), during which the output is set low. When the count reaches zero,
the output goes high.
-Mode 2: Rate Generator. The output is initially set high. When the countdown
+Mode 2: Rate Generator.
+ The output is initially set high. When the countdown
reaches 1, the output goes low for one count and then returns high. The value
is reloaded and the countdown automatically resumes. If the gate line goes
low, the count is halted. If the output is low when the gate is lowered, the
output automatically goes high (this only affects timer 2).
-Mode 3: Square Wave. This generates a high / low square wave. The count
+Mode 3: Square Wave.
+ This generates a high / low square wave. The count
determines the length of the pulse, which alternates between high and low
when zero is reached. The count only proceeds when gate is high and is
automatically reloaded on reaching zero. The count is decremented twice at
@@ -103,12 +111,14 @@ Mode 3: Square Wave. This generates a high / low square wave. The count
values are not observed when reading. This is the intended mode for timer 2,
which generates sine-like tones by low-pass filtering the square wave output.
-Mode 4: Software Strobe. After programming this mode and loading the counter,
+Mode 4: Software Strobe.
+ After programming this mode and loading the counter,
the output remains high until the counter reaches zero. Then the output
goes low for 1 clock cycle and returns high. The counter is not reloaded.
Counting only occurs when gate is high.
-Mode 5: Hardware Strobe. After programming and loading the counter, the
+Mode 5: Hardware Strobe.
+ After programming and loading the counter, the
output remains high. When the gate is raised, a countdown is initiated
(which does not stop if the gate is lowered). When the counter reaches zero,
the output goes low for 1 clock cycle and then returns high. The counter is
@@ -118,49 +128,49 @@ In addition to normal binary counting, the PIT supports BCD counting. The
command port, 0x43 is used to set the counter and mode for each of the three
timers.
-PIT commands, issued to port 0x43, using the following bit encoding:
+PIT commands, issued to port 0x43, using the following bit encoding::
-Bit 7-4: Command (See table below)
-Bit 3-1: Mode (000 = Mode 0, 101 = Mode 5, 11X = undefined)
-Bit 0 : Binary (0) / BCD (1)
+ Bit 7-4: Command (See table below)
+ Bit 3-1: Mode (000 = Mode 0, 101 = Mode 5, 11X = undefined)
+ Bit 0 : Binary (0) / BCD (1)
-Command table:
+Command table::
-0000 - Latch Timer 0 count for port 0x40
+ 0000 - Latch Timer 0 count for port 0x40
sample and hold the count to be read in port 0x40;
additional commands ignored until counter is read;
mode bits ignored.
-0001 - Set Timer 0 LSB mode for port 0x40
+ 0001 - Set Timer 0 LSB mode for port 0x40
set timer to read LSB only and force MSB to zero;
mode bits set timer mode
-0010 - Set Timer 0 MSB mode for port 0x40
+ 0010 - Set Timer 0 MSB mode for port 0x40
set timer to read MSB only and force LSB to zero;
mode bits set timer mode
-0011 - Set Timer 0 16-bit mode for port 0x40
+ 0011 - Set Timer 0 16-bit mode for port 0x40
set timer to read / write LSB first, then MSB;
mode bits set timer mode
-0100 - Latch Timer 1 count for port 0x41 - as described above
-0101 - Set Timer 1 LSB mode for port 0x41 - as described above
-0110 - Set Timer 1 MSB mode for port 0x41 - as described above
-0111 - Set Timer 1 16-bit mode for port 0x41 - as described above
+ 0100 - Latch Timer 1 count for port 0x41 - as described above
+ 0101 - Set Timer 1 LSB mode for port 0x41 - as described above
+ 0110 - Set Timer 1 MSB mode for port 0x41 - as described above
+ 0111 - Set Timer 1 16-bit mode for port 0x41 - as described above
-1000 - Latch Timer 2 count for port 0x42 - as described above
-1001 - Set Timer 2 LSB mode for port 0x42 - as described above
-1010 - Set Timer 2 MSB mode for port 0x42 - as described above
-1011 - Set Timer 2 16-bit mode for port 0x42 as described above
+ 1000 - Latch Timer 2 count for port 0x42 - as described above
+ 1001 - Set Timer 2 LSB mode for port 0x42 - as described above
+ 1010 - Set Timer 2 MSB mode for port 0x42 - as described above
+ 1011 - Set Timer 2 16-bit mode for port 0x42 as described above
-1101 - General counter latch
+ 1101 - General counter latch
Latch combination of counters into corresponding ports
Bit 3 = Counter 2
Bit 2 = Counter 1
Bit 1 = Counter 0
Bit 0 = Unused
-1110 - Latch timer status
+ 1110 - Latch timer status
Latch combination of counter mode into corresponding ports
Bit 3 = Counter 2
Bit 2 = Counter 1
@@ -177,7 +187,8 @@ Command table:
Bit 3-1 = Mode
Bit 0 = Binary (0) / BCD mode (1)
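
As an example of the encoding above, programming timer 0 as a 100 Hz rate
generator (mode 2) takes one command byte and a 16-bit reload value (a
sketch using the kernel's outb(value, port) convention)::

	#define PIT_BASE_HZ	1193182

	u16 count = PIT_BASE_HZ / 100;	/* 11931 -> ~100 Hz */

	/* 0x34 = command 0011 (set timer 0, 16-bit), mode 010 (2), binary */
	outb(0x34, 0x43);
	outb(count & 0xff, 0x40);	/* LSB first... */
	outb(count >> 8, 0x40);		/* ...then MSB */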
-2.2) RTC
+2.2. RTC
+--------
The second device which was available in the original PC was the MC146818 real
time clock. The original device is now obsolete, and usually emulated by the
@@ -201,21 +212,21 @@ in progress, as indicated in the status register.
The clock uses a 32.768kHz crystal, so bits 6-4 of register A should be
programmed to a 32kHz divider if the RTC is to count seconds.
-This is the RAM map originally used for the RTC/CMOS:
-
-Location Size Description
-------------------------------------------
-00h byte Current second (BCD)
-01h byte Seconds alarm (BCD)
-02h byte Current minute (BCD)
-03h byte Minutes alarm (BCD)
-04h byte Current hour (BCD)
-05h byte Hours alarm (BCD)
-06h byte Current day of week (BCD)
-07h byte Current day of month (BCD)
-08h byte Current month (BCD)
-09h byte Current year (BCD)
-0Ah byte Register A
+This is the RAM map originally used for the RTC/CMOS::
+
+ Location Size Description
+ ------------------------------------------
+ 00h byte Current second (BCD)
+ 01h byte Seconds alarm (BCD)
+ 02h byte Current minute (BCD)
+ 03h byte Minutes alarm (BCD)
+ 04h byte Current hour (BCD)
+ 05h byte Hours alarm (BCD)
+ 06h byte Current day of week (BCD)
+ 07h byte Current day of month (BCD)
+ 08h byte Current month (BCD)
+ 09h byte Current year (BCD)
+ 0Ah byte Register A
bit 7 = Update in progress
bit 6-4 = Divider for clock
000 = 4.194 MHz
@@ -234,7 +245,7 @@ Location Size Description
1101 = 125 mS
1110 = 250 mS
1111 = 500 mS
-0Bh byte Register B
+ 0Bh byte Register B
bit 7 = Run (0) / Halt (1)
bit 6 = Periodic interrupt enable
bit 5 = Alarm interrupt enable
@@ -243,19 +254,20 @@ Location Size Description
bit 2 = BCD calendar (0) / Binary (1)
bit 1 = 12-hour mode (0) / 24-hour mode (1)
bit 0 = 0 (DST off) / 1 (DST enabled)
-OCh byte Register C (read only)
+	0Ch	byte	Register C (read only)
bit 7 = interrupt request flag (IRQF)
bit 6 = periodic interrupt flag (PF)
bit 5 = alarm interrupt flag (AF)
bit 4 = update interrupt flag (UF)
bit 3-0 = reserved
-ODh byte Register D (read only)
+	0Dh	byte	Register D (read only)
bit 7 = RTC has power
bit 6-0 = reserved
-32h byte Current century BCD (*)
+ 32h byte Current century BCD (*)
(*) location vendor specific and now determined from ACPI global tables
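
To illustrate the map above, reading the current second through the PC's
conventional 0x70/0x71 index/data port pair might look like this (a sketch;
it assumes BCD mode and ignores the NMI-disable bit in port 0x70)::

	static u8 cmos_read(u8 reg)
	{
		outb(reg, 0x70);	/* index port */
		return inb(0x71);	/* data port */
	}

	u8 sec;

	/* wait out any update in progress: register A, bit 7 */
	while (cmos_read(0x0a) & 0x80)
		cpu_relax();

	sec = cmos_read(0x00);			/* location 00h, BCD */
	sec = (sec >> 4) * 10 + (sec & 0x0f);	/* BCD decode */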
-2.3) APIC
+2.3. APIC
+---------
On Pentium and later processors, an on-board timer is available to each CPU
as part of the Advanced Programmable Interrupt Controller. The APIC is
@@ -276,7 +288,8 @@ timer is programmed through the LVT (local vector timer) register, is capable
of one-shot or periodic operation, and is based on the bus clock divided down
by the programmable divider register.
-2.4) HPET
+2.4. HPET
+---------
HPET is quite complex, and was originally intended to replace the PIT / RTC
support of the X86 PC. It remains to be seen whether that will be the case, as
@@ -297,7 +310,8 @@ indicated through ACPI tables by the BIOS.
Detailed specification of the HPET is beyond the current scope of this
document, as it is also very well documented elsewhere.
-2.5) Offboard Timers
+2.5. Offboard Timers
+--------------------
Several cards, both proprietary (watchdog boards) and commonplace (e1000) have
timing chips built into the cards which may have registers which are accessible
@@ -307,9 +321,8 @@ general frowned upon as not playing by the agreed rules of the game. Such a
timer device would require additional support to be virtualized properly and is
not considered important at this time as no known operating system does this.
-=========================================================================
-
-3) TSC Hardware
+3. TSC Hardware
+===============
The TSC or time stamp counter is relatively simple in theory; it counts
instruction cycles issued by the processor, which can be used as a measure of
@@ -340,7 +353,8 @@ allows the guest visible TSC to be offset by a constant. Newer implementations
promise to allow the TSC to additionally be scaled, but this hardware is not
yet widely available.
-3.1) TSC synchronization
+3.1. TSC synchronization
+------------------------
The TSC is a CPU-local clock in most implementations. This means, on SMP
platforms, the TSCs of different CPUs may start at different times depending
@@ -357,7 +371,8 @@ practice, getting a perfectly synchronized TSC will not be possible unless all
values are read from the same clock, which generally only is possible on single
socket systems or those with special hardware support.
-3.2) TSC and CPU hotplug
+3.2. TSC and CPU hotplug
+------------------------
As touched on already, CPUs which arrive later than the boot time of the system
may not have a TSC value that is synchronized with the rest of the system.
@@ -367,7 +382,8 @@ a guarantee. This can have the effect of bringing a system from a state where
TSC is synchronized back to a state where TSC synchronization flaws, however
small, may be exposed to the OS and any virtualization environment.
-3.3) TSC and multi-socket / NUMA
+3.3. TSC and multi-socket / NUMA
+--------------------------------
Multi-socket systems, especially large multi-socket systems are likely to have
individual clocksources rather than a single, universally distributed clock.
@@ -385,7 +401,8 @@ standards for telecommunications and computer equipment.
It is recommended not to trust the TSCs to remain synchronized on NUMA or
multiple socket systems for these reasons.
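One hedged way to see whether the kernel itself has demoted the TSC on such
a machine is to look for clocksource watchdog messages in the kernel log::

  $ dmesg | grep -i -E 'clocksource|tsc'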
-3.4) TSC and C-states
+3.4. TSC and C-states
+---------------------
C-states, or idling states of the processor, especially C1E and deeper sleep
states may be problematic for TSC as well. The TSC may stop advancing in such
@@ -396,7 +413,8 @@ based on CPU and chipset identifications.
The TSC in such a case may be corrected by catching it up to a known external
clocksource.
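When deep C-states are suspected, they can be capped for testing from the
kernel command line; these are standard x86 boot parameters, not something
specific to this document::

  processor.max_cstate=1    # cap ACPI C-states at C1
  intel_idle.max_cstate=0   # disable intel_idle so the cap above applies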
-3.5) TSC frequency change / P-states
+3.5. TSC frequency change / P-states
+------------------------------------
To make things slightly more interesting, some CPUs may change frequency. They
may or may not run the TSC at the same rate, and because the frequency change
@@ -416,14 +434,16 @@ other processors. In such cases, the TSC on halted CPUs could advance faster
than that of non-halted processors. AMD Turion processors are known to have
this problem.
-3.6) TSC and STPCLK / T-states
+3.6. TSC and STPCLK / T-states
+------------------------------
External signals given to the processor may also have the effect of stopping
the TSC. This is typically done for thermal emergency power control to prevent
an overheating condition, and typically, there is no way to detect that this
condition has happened.
-3.7) TSC virtualization - VMX
+3.7. TSC virtualization - VMX
+-----------------------------
VMX provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
instructions, which is enough for full virtualization of TSC in any manner. In
@@ -431,14 +451,16 @@ addition, VMX allows passing through the host TSC plus an additional TSC_OFFSET
field specified in the VMCS. Special instructions must be used to read and
write the VMCS field.
-3.8) TSC virtualization - SVM
+3.8. TSC virtualization - SVM
+-----------------------------
SVM provides conditional trapping of RDTSC, RDMSR, WRMSR and RDTSCP
instructions, which is enough for full virtualization of TSC in any manner. In
addition, SVM allows passing through the host TSC plus an additional offset
field specified in the SVM control block.
-3.9) TSC feature bits in Linux
+3.9. TSC feature bits in Linux
+------------------------------
In summary, there is no way to guarantee the TSC remains in perfect
synchronization unless it is explicitly guaranteed by the architecture. Even
@@ -448,13 +470,16 @@ despite being locally consistent.
The following feature bits are used by Linux to signal various TSC attributes,
but they can only be taken to be meaningful for UP or single node systems.
-X86_FEATURE_TSC : The TSC is available in hardware
-X86_FEATURE_RDTSCP : The RDTSCP instruction is available
-X86_FEATURE_CONSTANT_TSC : The TSC rate is unchanged with P-states
-X86_FEATURE_NONSTOP_TSC : The TSC does not stop in C-states
-X86_FEATURE_TSC_RELIABLE : TSC sync checks are skipped (VMware)
+========================= =======================================
+X86_FEATURE_TSC The TSC is available in hardware
+X86_FEATURE_RDTSCP The RDTSCP instruction is available
+X86_FEATURE_CONSTANT_TSC The TSC rate is unchanged with P-states
+X86_FEATURE_NONSTOP_TSC The TSC does not stop in C-states
+X86_FEATURE_TSC_RELIABLE TSC sync checks are skipped (VMware)
+========================= =======================================
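On a live system these attributes surface as lowercase flags in
/proc/cpuinfo; a quick way to check which of them apply::

  $ grep -o -E 'constant_tsc|nonstop_tsc|tsc_reliable|rdtscp' /proc/cpuinfo | sort -u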
-4) Virtualization Problems
+4. Virtualization Problems
+==========================
Timekeeping is especially problematic for virtualization because a number of
challenges arise. The most obvious problem is that time is now shared between
@@ -473,7 +498,8 @@ BIOS, but not in such an extreme fashion. However, the fact that SMM mode may
cause similar problems to virtualization makes it a good justification for
solving many of these problems on bare metal.
-4.1) Interrupt clocking
+4.1. Interrupt clocking
+-----------------------
One of the most immediate problems that occurs with legacy operating systems
is that the system timekeeping routines are often designed to keep track of
thus requires interrupt slewing to keep proper time. It does, however, use a
low enough rate (ed: is it 18.2 Hz?) that it has not yet been a problem in
practice.
-4.2) TSC sampling and serialization
+4.2. TSC sampling and serialization
+-----------------------------------
As the highest precision time source available, the cycle counter of the CPU
has aroused much interest from developers. As explained above, this timer has
@@ -524,7 +551,8 @@ it may be necessary for an implementation to guard against "backwards" reads of
the TSC as seen from other CPUs, even in an otherwise perfectly synchronized
system.
-4.3) Timespec aliasing
+4.3. Timespec aliasing
+----------------------
Additionally, this lack of serialization from the TSC poses another challenge
when using results of the TSC when measured against another time source. As
@@ -548,7 +576,8 @@ This aliasing requires care in the computation and recalibration of kvmclock
and any other values derived from TSC computation (such as TSC virtualization
itself).
-4.4) Migration
+4.4. Migration
+--------------
Migration of a virtual machine raises problems for timekeeping in two ways.
First, the migration itself may take time, during which interrupts cannot be
@@ -566,7 +595,8 @@ always be caught up to the original rate. KVM clock avoids these problems by
simply storing multipliers and offsets against the TSC for the guest to convert
back into nanosecond resolution values.
-4.5) Scheduling
+4.5. Scheduling
+---------------
Since scheduling may be based on precise timing and firing of interrupts, the
scheduling algorithms of an operating system may be adversely affected by
@@ -579,7 +609,8 @@ In an attempt to work around this, several implementations have provided a
paravirtualized scheduler clock, which reveals the true amount of CPU time for
which a virtual machine has been running.
-4.6) Watchdogs
+4.6. Watchdogs
+--------------
Watchdog timers, such as the lock detector in Linux may fire accidentally when
running under hardware virtualization due to timer interrupts being delayed or
@@ -587,7 +618,8 @@ misinterpretation of the passage of real time. Usually, these warnings are
spurious and can be ignored, but in some circumstances it may be necessary to
disable such detection.
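For Linux's lockup detectors in particular, the standard knobs are the
nosoftlockup / nowatchdog boot parameters, or the watchdog sysctl at runtime
(illustrative, requires root)::

  $ sysctl kernel.watchdog=0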
-4.7) Delays and precision timing
+4.7. Delays and precision timing
+--------------------------------
Precise timing and delays may not be possible in a virtualized system. This
can happen if the system is controlling physical hardware, or issues delays to
@@ -600,7 +632,8 @@ The second issue may cause performance problems, but this is unlikely to be a
significant issue. In many cases these delays may be eliminated through
configuration or paravirtualization.
-4.8) Covert channels and leaks
+4.8. Covert channels and leaks
+------------------------------
In addition to the above problems, time information will inevitably leak to the
guest about the host in anything but a perfect implementation of virtualized
diff --git a/Documentation/virt/uml/UserModeLinux-HOWTO.txt b/Documentation/virt/uml/user_mode_linux.rst
index 87b80f589e1c..de0f0b2c9d5b 100644
--- a/Documentation/virt/uml/UserModeLinux-HOWTO.txt
+++ b/Documentation/virt/uml/user_mode_linux.rst
@@ -1,12 +1,17 @@
- User Mode Linux HOWTO
- User Mode Linux Core Team
- Mon Nov 18 14:16:16 EST 2002
+.. SPDX-License-Identifier: GPL-2.0
- This document describes the use and abuse of Jeff Dike's User Mode
- Linux: a port of the Linux kernel as a normal Intel Linux process.
- ______________________________________________________________________
+=====================
+User Mode Linux HOWTO
+=====================
- Table of Contents
+:Author: User Mode Linux Core Team
+:Last-updated: Sat Jan 25 16:07:55 CET 2020
+
+This document describes the use and abuse of Jeff Dike's User Mode
+Linux: a port of the Linux kernel as a normal Intel Linux process.
+
+
+.. Table of Contents
1. Introduction
@@ -132,19 +137,19 @@
15.5 Other contributions
- ______________________________________________________________________
-
- 1. Introduction
+1. Introduction
+================
Welcome to User Mode Linux. It's going to be fun.
- 1.1. How is User Mode Linux Different?
+1.1. How is User Mode Linux Different?
+---------------------------------------
Normally, the Linux Kernel talks straight to your hardware (video
card, keyboard, hard drives, etc), and any programs which run ask the
- kernel to operate the hardware, like so:
+ kernel to operate the hardware, like so::
@@ -160,10 +165,10 @@
The User Mode Linux Kernel is different; instead of talking to the
- hardware, it talks to a `real' Linux kernel (called the `host kernel'
+ hardware, it talks to a `real` Linux kernel (called the `host kernel`
from now on), like any other program. Programs can then run inside
User-Mode Linux as if they were running under a normal kernel, like
- so:
+ so::
@@ -181,7 +186,8 @@
- 1.2. Why Would I Want User Mode Linux?
+1.2. Why Would I Want User Mode Linux?
+---------------------------------------
1. If User Mode Linux crashes, your host kernel is still fine.
@@ -204,83 +210,41 @@
+.. _Compiling_the_kernel_and_modules:
-
- 2. Compiling the kernel and modules
+2. Compiling the kernel and modules
+====================================
- 2.1. Compiling the kernel
+2.1. Compiling the kernel
+--------------------------
Compiling the user mode kernel is just like compiling any other
- kernel. Let's go through the steps, using 2.4.0-prerelease (current
- as of this writing) as an example:
-
-
- 1. Download the latest UML patch from
-
- the download page <http://user-mode-linux.sourceforge.net/
-
- In this example, the file is uml-patch-2.4.0-prerelease.bz2.
+ kernel.
- 2. Download the matching kernel from your favourite kernel mirror,
+ 1. Download the latest kernel from your favourite kernel mirror,
such as:
- ftp://ftp.ca.kernel.org/pub/kernel/v2.4/linux-2.4.0-prerelease.tar.bz2
- <ftp://ftp.ca.kernel.org/pub/kernel/v2.4/linux-2.4.0-prerelease.tar.bz2>
- .
-
-
- 3. Make a directory and unpack the kernel into it.
-
+ https://mirrors.edge.kernel.org/pub/linux/kernel/v5.x/linux-5.4.14.tar.xz
+ 2. Make a directory and unpack the kernel into it::
host%
mkdir ~/uml
-
-
-
-
-
host%
cd ~/uml
-
-
-
-
-
- host%
- tar -xzvf linux-2.4.0-prerelease.tar.bz2
-
-
-
-
-
-
- 4. Apply the patch using
-
-
-
- host%
- cd ~/uml/linux
-
-
-
host%
- bzcat uml-patch-2.4.0-prerelease.bz2 | patch -p1
+ tar xvf linux-5.4.14.tar.xz
-
-
-
-
- 5. Run your favorite config; `make xconfig ARCH=um' is the most
- convenient. `make config ARCH=um' and 'make menuconfig ARCH=um'
+ 3. Run your favorite config; ``make xconfig ARCH=um`` is the most
+ convenient. ``make config ARCH=um`` and ``make menuconfig ARCH=um``
will work as well. The defaults will give you a useful kernel. If
you want to change something, go ahead, it probably won't hurt
anything.
@@ -288,44 +252,20 @@
Note: If the host is configured with a 2G/2G address space split
rather than the usual 3G/1G split, then the packaged UML binaries
- will not run. They will immediately segfault. See ``UML on 2G/2G
- hosts'' for the scoop on running UML on your system.
-
-
-
- 6. Finish with `make linux ARCH=um': the result is a file called
- `linux' in the top directory of your source tree.
-
- Make sure that you don't build this kernel in /usr/src/linux. On some
- distributions, /usr/include/asm is a link into this pool. The user-
- mode build changes the other end of that link, and things that include
- <asm/anything.h> stop compiling.
-
- The sources are also available from cvs at the project's cvs page,
- which has directions on getting the sources. You can also browse the
- CVS pool from there.
+ will not run. They will immediately segfault. See
+ :ref:`UML_on_2G/2G_hosts` for the scoop on running UML on your system.
- If you get the CVS sources, you will have to check them out into an
- empty directory. You will then have to copy each file into the
- corresponding directory in the appropriate kernel pool.
- If you don't have the latest kernel pool, you can get the
- corresponding user-mode sources with
+ 4. Finish with ``make linux ARCH=um``: the result is a file called
+ ``linux`` in the top directory of your source tree.
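     Putting the steps together, a complete minimal build might look like
     this (``defconfig`` is used here only as a convenient stand-in for the
     menu-driven config targets of step 3)::

       host% cd ~/uml/linux-5.4.14
       host% make defconfig ARCH=um
       host% make linux ARCH=um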
- host% cvs co -r v_2_3_x linux
-
-
-
- where 'x' is the version in your pool. Note that you will not get the
- bug fixes and enhancements that have gone into subsequent releases.
-
-
- 2.2. Compiling and installing kernel modules
+2.2. Compiling and installing kernel modules
+---------------------------------------------
UML modules are built in the same way as the native kernel (with the
- exception of the 'ARCH=um' that you always need for UML):
+ exception of the 'ARCH=um' that you always need for UML)::
host% make modules ARCH=um
@@ -337,12 +277,12 @@
the user-mode pool. Modules from the native kernel won't work.
You can install them by using ftp or something to copy them into the
- virtual machine and dropping them into /lib/modules/`uname -r`.
+ virtual machine and dropping them into ``/lib/modules/$(uname -r)``.
You can also get the kernel build process to install them as follows:
1. with the kernel not booted, mount the root filesystem in the top
- level of the kernel pool:
+ level of the kernel pool::
host% mount root_fs mnt -o loop
@@ -352,7 +292,7 @@
- 2. run
+ 2. run::
host%
@@ -363,7 +303,7 @@
- 3. unmount the filesystem
+ 3. unmount the filesystem::
host% umount mnt
@@ -381,27 +321,28 @@
as modules, especially filesystems and network protocols and filters,
so most symbols which need to be exported probably already are.
However, if you do find symbols that need exporting, let us
- <http://user-mode-linux.sourceforge.net/> know, and
+ know at http://user-mode-linux.sourceforge.net/, and
they'll be "taken care of".
- 2.3. Compiling and installing uml_utilities
+2.3. Compiling and installing uml_utilities
+--------------------------------------------
Many features of the UML kernel require a user-space helper program,
so a uml_utilities package is distributed separately from the kernel
patch which provides these helpers. Included within this is:
- o port-helper - Used by consoles which connect to xterms or ports
+ - port-helper - Used by consoles which connect to xterms or ports
- o tunctl - Configuration tool to create and delete tap devices
+ - tunctl - Configuration tool to create and delete tap devices
- o uml_net - Setuid binary for automatic tap device configuration
+ - uml_net - Setuid binary for automatic tap device configuration
- o uml_switch - User-space virtual switch required for daemon
+ - uml_switch - User-space virtual switch required for daemon
transport
- The uml_utilities tree is compiled with:
+ The uml_utilities tree is compiled with::
host#
@@ -423,38 +364,42 @@
- 3. Running UML and logging in
+3. Running UML and logging in
+==============================
- 3.1. Running UML
+3.1. Running UML
+-----------------
- It runs on 2.2.15 or later, and all 2.4 kernels.
+ It runs on 2.2.15 or later, and all kernel versions since 2.4.
Booting UML is straightforward. Simply run 'linux': it will try to
- mount the file `root_fs' in the current directory. You do not need to
- run it as root. If your root filesystem is not named `root_fs', then
- you need to put a `ubd0=root_fs_whatever' switch on the linux command
+ mount the file ``root_fs`` in the current directory. You do not need to
+ run it as root. If your root filesystem is not named ``root_fs``, then
+ you need to put a ``ubd0=root_fs_whatever`` switch on the linux command
line.
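  For example, with a filesystem image named ``root_fs_debian`` (an
  illustrative name) in the current directory::

    host% ./linux ubd0=root_fs_debian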
You will need a filesystem to boot UML from. There are a number
- available for download from here <http://user-mode-
- linux.sourceforge.net/> . There are also several tools
- <http://user-mode-linux.sourceforge.net/> which can be
+ available for download from http://user-mode-linux.sourceforge.net.
+ There are also several tools at
+ http://user-mode-linux.sourceforge.net/ which can be
used to generate UML-compatible filesystem images from media.
The kernel will boot up and present you with a login prompt.
- Note: If the host is configured with a 2G/2G address space split
+Note:
+ If the host is configured with a 2G/2G address space split
rather than the usual 3G/1G split, then the packaged UML binaries will
- not run. They will immediately segfault. See ``UML on 2G/2G hosts''
+ not run. They will immediately segfault. See :ref:`UML_on_2G/2G_hosts`
for the scoop on running UML on your system.
- 3.2. Logging in
+3.2. Logging in
+----------------
@@ -468,22 +413,22 @@
There are a couple of other ways to log in:
- o On a virtual console
+ - On a virtual console
Each virtual console that is configured (i.e. the device exists in
/dev and /etc/inittab runs a getty on it) will come up in its own
- xterm. If you get tired of the xterms, read ``Setting up serial
- lines and consoles'' to see how to attach the consoles to
- something else, like host ptys.
+ xterm. If you get tired of the xterms, read
+ :ref:`setting_up_serial_lines_and_consoles` to see how to attach
+ the consoles to something else, like host ptys.
- o Over the serial line
+ - Over the serial line
- In the boot output, find a line that looks like:
+ In the boot output, find a line that looks like::
@@ -493,7 +438,7 @@
Attach your favorite terminal program to the corresponding tty. I.e.
- for minicom, the command would be
+ for minicom, the command would be::
host% minicom -o -p /dev/ttyp1
@@ -503,37 +448,40 @@
- o Over the net
+ - Over the net
If the network is running, then you can telnet to the virtual
- machine and log in to it. See ``Setting up the network'' to learn
+ machine and log in to it. See :ref:`Setting_up_the_network` to learn
about setting up a virtual network.
When you're done using it, run halt, and the kernel will bring itself
down and the process will exit.
- 3.3. Examples
+3.3. Examples
+--------------
Here are some examples of UML in action:
- o A login session <http://user-mode-linux.sourceforge.net/login.html>
+ - A login session http://user-mode-linux.sourceforge.net/old/login.html
- o A virtual network <http://user-mode-linux.sourceforge.net/net.html>
+ - A virtual network http://user-mode-linux.sourceforge.net/old/net.html
+.. _UML_on_2G/2G_hosts:
+4. UML on 2G/2G hosts
+======================
- 4. UML on 2G/2G hosts
-
- 4.1. Introduction
+4.1. Introduction
+------------------
Most Linux machines are configured so that the kernel occupies the
@@ -546,7 +494,8 @@
- 4.2. The problem
+4.2. The problem
+-----------------
The prebuilt UML binaries on this site will not run on 2G/2G hosts
@@ -558,13 +507,14 @@
- 4.3. The solution
+4.3. The solution
+------------------
The fix for this is to rebuild UML from source after enabling
CONFIG_HOST_2G_2G (under 'General Setup'). This will cause UML to
load itself in the top .5G of that smaller process address space,
- where it will run fine. See ``Compiling the kernel and modules'' if
+ where it will run fine. See :ref:`Compiling_the_kernel_and_modules` if
you need help building UML from source.
@@ -573,10 +523,11 @@
+.. _setting_up_serial_lines_and_consoles:
-
- 5. Setting up serial lines and consoles
+5. Setting up serial lines and consoles
+========================================
It is possible to attach UML serial lines and consoles to many types
@@ -584,22 +535,23 @@
You can attach them to host ptys, ttys, file descriptors, and ports.
- This allows you to do things like
+ This allows you to do things like:
- o have a UML console appear on an unused host console,
+ - have a UML console appear on an unused host console,
- o hook two virtual machines together by having one attach to a pty
+ - hook two virtual machines together by having one attach to a pty
and having the other attach to the corresponding tty
- o make a virtual machine accessible from the net by attaching a
+ - make a virtual machine accessible from the net by attaching a
console to a port on the host.
- The general format of the command line option is device=channel.
+ The general format of the command line option is ``device=channel``.
- 5.1. Specifying the device
+5.1. Specifying the device
+---------------------------
Devices are specified with "con" or "ssl" (console or serial line,
respectively), optionally with a device number if you are talking
@@ -613,7 +565,7 @@
A specific device name will override a less general "con=" or "ssl=".
So, for example, you can assign a pty to each of the serial lines
- except for the first two like this:
+ except for the first two like this::
ssl=pty ssl0=tty:/dev/tty0 ssl1=tty:/dev/tty1
@@ -626,13 +578,14 @@
- 5.2. Specifying the channel
+5.2. Specifying the channel
+----------------------------
There are a number of different types of channels to attach a UML
device to, each with a different way of specifying exactly what to
attach to.
- o pseudo-terminals - device=pty pts terminals - device=pts
+ - pseudo-terminals - device=pty pts terminals - device=pts
This will cause UML to allocate a free host pseudo-terminal for the
@@ -640,23 +593,23 @@
log. You access it by attaching a terminal program to the
corresponding tty:
- o screen /dev/pts/n
+ - screen /dev/pts/n
- o screen /dev/ttyxx
+ - screen /dev/ttyxx
- o minicom -o -p /dev/ttyxx - minicom seems not able to handle pts
+ - minicom -o -p /dev/ttyxx - minicom seems not able to handle pts
devices
- o kermit - start it up, 'open' the device, then 'connect'
+ - kermit - start it up, 'open' the device, then 'connect'
- o terminals - device=tty:tty device file
+ - terminals - device=tty:tty device file
- This will make UML attach the device to the specified tty (i.e
+  This will make UML attach the device to the specified tty (i.e.::
con1=tty:/dev/tty3
@@ -672,7 +625,7 @@
- o xterms - device=xterm
+ - xterms - device=xterm
UML will run an xterm and the device will be attached to it.
@@ -681,12 +634,12 @@
- o Port - device=port:port number
+ - Port - device=port:port number
This will attach the UML devices to the specified host port.
Attaching console 1 to the host's port 9000 would be done like
- this:
+ this::
con1=port:9000
@@ -694,7 +647,7 @@
- Attaching all the serial lines to that port would be done similarly:
+ Attaching all the serial lines to that port would be done similarly::
ssl=port:9000
@@ -702,8 +655,8 @@
- You access these devices by telnetting to that port. Each active tel-
- net session gets a different device. If there are more telnets to a
+ You access these devices by telnetting to that port. Each active
+ telnet session gets a different device. If there are more telnets to a
port than UML devices attached to it, then the extra telnet sessions
will block until an existing telnet detaches, or until another device
becomes active (i.e. by being activated in /etc/inittab).
@@ -725,13 +678,13 @@
- o already-existing file descriptors - device=file descriptor
+ - already-existing file descriptors - device=file descriptor
If you set up a file descriptor on the UML command line, you can
attach a UML device to it. This is most commonly used to put the
main console back on stdin and stdout after assigning all the other
- consoles to something else:
+ consoles to something else::
con0=fd:0,fd:1 con=pts
@@ -743,7 +696,7 @@
- o Nothing - device=null
+ - Nothing - device=null
This allows the device to be opened, in contrast to 'none', but
@@ -754,7 +707,7 @@
- o None - device=none
+ - None - device=none
This causes the device to disappear.
@@ -762,7 +715,7 @@
You can also specify different input and output channels for a device
- by putting a comma between them:
+ by putting a comma between them::
ssl3=tty:/dev/tty2,xterm
@@ -785,14 +738,15 @@
- 5.3. Examples
+5.3. Examples
+--------------
There are a number of interesting things you can do with this
capability.
First, this is how you get rid of those bleeding console xterms by
- attaching them to host ptys:
+ attaching them to host ptys::
con=pty con0=fd:0,fd:1
@@ -802,7 +756,7 @@
This will make a UML console take over an unused host virtual console,
so that when you switch to it, you will see the UML login prompt
- rather than the host login prompt:
+ rather than the host login prompt::
con1=tty:/dev/tty6
@@ -813,7 +767,7 @@
You can attach two virtual machines together with what amounts to a
serial line as follows:
- Run one UML with a serial line attached to a pty -
+ Run one UML with a serial line attached to a pty::
ssl1=pty
@@ -825,7 +779,7 @@
that it got /dev/ptyp1).
Boot the other UML with a serial line attached to the corresponding
- tty -
+ tty::
ssl1=tty:/dev/ttyp1
@@ -838,7 +792,10 @@
prompt of the other virtual machine.
- 6. Setting up the network
+.. _setting_up_the_network:
+
+6. Setting up the network
+==========================
@@ -858,19 +815,19 @@
There are currently five transport types available for a UML virtual
machine to exchange packets with other hosts:
- o ethertap
+ - ethertap
- o TUN/TAP
+ - TUN/TAP
- o Multicast
+ - Multicast
- o a switch daemon
+ - a switch daemon
- o slip
+ - slip
- o slirp
+ - slirp
- o pcap
+ - pcap
The TUN/TAP, ethertap, slip, and slirp transports allow a UML
instance to exchange packets with the host. They may be directed
@@ -893,28 +850,28 @@
With so many host transports, which one should you use? Here's when
you should use each one:
- o ethertap - if you want access to the host networking and it is
+ - ethertap - if you want access to the host networking and it is
running 2.2
- o TUN/TAP - if you want access to the host networking and it is
+ - TUN/TAP - if you want access to the host networking and it is
running 2.4. Also, the TUN/TAP transport is able to use a
preconfigured device, allowing it to avoid using the setuid uml_net
helper, which is a security advantage.
- o Multicast - if you want a purely virtual network and you don't want
+ - Multicast - if you want a purely virtual network and you don't want
to set up anything but the UML
- o a switch daemon - if you want a purely virtual network and you
+ - a switch daemon - if you want a purely virtual network and you
don't mind running the daemon in order to get somewhat better
performance
- o slip - there is no particular reason to run the slip backend unless
+ - slip - there is no particular reason to run the slip backend unless
ethertap and TUN/TAP are just not available for some reason
- o slirp - if you don't have root access on the host to setup
+ - slirp - if you don't have root access on the host to setup
networking, or if you don't want to allocate an IP to your UML
- o pcap - not much use for actual network connectivity, but great for
+ - pcap - not much use for actual network connectivity, but great for
monitoring traffic on the host
Ethertap is available on 2.4 and works fine. TUN/TAP is preferred
@@ -926,7 +883,8 @@
exploit the helper's root privileges.
- 6.1. General setup
+6.1. General setup
+-------------------
  First, you must have the virtual network enabled in your UML. If you are
running a prebuilt kernel from this site, everything is already
@@ -938,7 +896,7 @@
The next step is to provide a network device to the virtual machine.
This is done by describing it on the kernel command line.
- The general format is
+ The general format is::
eth <n> = <transport> , <transport args>
@@ -947,7 +905,7 @@
For example, a virtual ethernet device may be attached to a host
- ethertap device as follows:
+ ethertap device as follows::
eth0=ethertap,tap0,fe:fd:0:0:0:1,192.168.0.254
@@ -978,7 +936,7 @@
You can also add devices to a UML and remove them at runtime. See the
- ``The Management Console'' page for details.
+ :ref:`The_Management_Console` page for details.
The sections below describe this in more detail.
@@ -995,7 +953,8 @@
- 6.2. Userspace daemons
+6.2. Userspace daemons
+-----------------------
You will likely need the setuid helper, or the switch daemon, or both.
They are both installed with the RPM and deb, so if you've installed
@@ -1011,7 +970,8 @@
- 6.3. Specifying ethernet addresses
+6.3. Specifying ethernet addresses
+-----------------------------------
Below, you will see that the TUN/TAP, ethertap, and daemon interfaces
allow you to specify hardware addresses for the virtual ethernet
@@ -1023,21 +983,21 @@
sufficient to guarantee a unique hardware address for the device. A
couple of exceptions are:
- o Another set of virtual ethernet devices are on the same network and
+ - Another set of virtual ethernet devices are on the same network and
they are assigned hardware addresses using a different scheme which
may conflict with the UML IP address-based scheme
- o You aren't going to use the device for IP networking, so you don't
+ - You aren't going to use the device for IP networking, so you don't
assign the device an IP address
If you let the driver provide the hardware address, you should make
sure that the device IP address is known before the interface is
- brought up. So, inside UML, this will guarantee that:
+ brought up. So, inside UML, this will guarantee that::
- UML#
- ifconfig eth0 192.168.0.250 up
+ UML#
+ ifconfig eth0 192.168.0.250 up
@@ -1049,13 +1009,14 @@
- 6.4. UML interface setup
+6.4. UML interface setup
+-------------------------
Once the network devices have been described on the command line, you
should boot UML and log in.
- The first thing to do is bring the interface up:
+ The first thing to do is bring the interface up::
UML# ifconfig ethn ip-address up
@@ -1067,7 +1028,7 @@
To reach the rest of the world, you should set a default route to the
- host:
+ host::
UML# route add default gw host ip
@@ -1075,7 +1036,7 @@
- Again, with host ip of 192.168.0.4:
+ Again, with host ip of 192.168.0.4::
UML# route add default gw 192.168.0.4
@@ -1097,29 +1058,25 @@
Note: If you can't communicate with other hosts on your physical
ethernet, it's probably because of a network route that's
automatically set up. If you run 'route -n' and see a route that
- looks like this:
+ looks like this::
- Destination Gateway Genmask Flags Metric Ref Use Iface
- 192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
+ Destination Gateway Genmask Flags Metric Ref Use Iface
+ 192.168.0.0 0.0.0.0 255.255.255.0 U 0 0 0 eth0
with a mask that's not 255.255.255.255, then replace it with a route
- to your host:
+ to your host::
UML#
route del -net 192.168.0.0 dev eth0 netmask 255.255.255.0
-
-
-
-
UML#
route add -host 192.168.0.4 dev eth0
@@ -1131,7 +1088,8 @@
- 6.5. Multicast
+6.5. Multicast
+---------------
The simplest way to set up a virtual network between multiple UMLs is
to use the mcast transport. This was written by Harald Welte and is
@@ -1142,7 +1100,7 @@
messages when you bring the device up inside UML.
- To use it, run two UMLs with
+ To use it, run two UMLs with::
eth0=mcast
@@ -1151,16 +1109,12 @@
on their command lines. Log in, configure the ethernet device in each
- machine with different IP addresses:
+ machine with different IP addresses::
UML1# ifconfig eth0 192.168.0.254
-
-
-
-
UML2# ifconfig eth0 192.168.0.253
@@ -1168,7 +1122,7 @@
and they should be able to talk to each other.
- The full set of command line options for this transport are
+ The full set of command line options for this transport are::
@@ -1177,16 +1131,11 @@
-
- Harald's original README is here <http://user-mode-linux.source-
- forge.net/> and explains these in detail, as well as
- some other issues.
-
There is also a related point-to-point only "ucast" transport.
This is useful when your network does not support multicast, and
all network connections are simple point to point links.
- The full set of command line options for this transport are
+ The full set of command line options for this transport are::
ethn=ucast,ethernet address,remote address,listen port,remote port
@@ -1194,7 +1143,8 @@
- 6.6. TUN/TAP with the uml_net helper
+6.6. TUN/TAP with the uml_net helper
+-------------------------------------
TUN/TAP is the preferred mechanism on 2.4 to exchange packets with the
host. The TUN/TAP backend has been in UML since 2.4.9-3um.
@@ -1216,7 +1166,7 @@
kernel or as the tun.o module.
The format of the command line switch to attach a device to a TUN/TAP
- device is
+ device is::
eth <n> =tuntap,,, <IP address>
@@ -1226,7 +1176,7 @@
For example, this argument will attach the UML's eth0 to the next
available tap device and assign an ethernet address to it based on its
- IP address
+ IP address::
eth0=tuntap,,,192.168.0.254
@@ -1247,10 +1197,10 @@
  There are a couple of potential problems with running the TUN/TAP
  transport on a 2.4 host kernel:
- o TUN/TAP seems not to work on 2.4.3 and earlier. Upgrade the host
+ - TUN/TAP seems not to work on 2.4.3 and earlier. Upgrade the host
kernel or use the ethertap transport.
- o With an upgraded kernel, TUN/TAP may fail with
+ - With an upgraded kernel, TUN/TAP may fail with::
File descriptor in bad state
@@ -1263,13 +1213,12 @@
make sure that /usr/src/linux points to the headers for the running
kernel.
- These were pointed out by Tim Robinson <timro at trkr dot net> in
- <http://www.geocrawler.com/> name="this uml-
- user post"> .
+ These were pointed out by Tim Robinson <timro at trkr dot net> in the past.
- 6.7. TUN/TAP with a preconfigured tap device
+6.7. TUN/TAP with a preconfigured tap device
+---------------------------------------------
If you prefer not to have UML use uml_net (which is somewhat
insecure), with UML 2.4.17-11, you can set up a TUN/TAP device
@@ -1277,8 +1226,8 @@
there is no need for root assistance. Setting up the device is done
as follows:
- o Create the device with tunctl (available from the UML utilities
- tarball)
+ - Create the device with tunctl (available from the UML utilities
+ tarball)::
@@ -1291,8 +1240,8 @@
where uid is the user id or username that UML will be run as. This
will tell you what device was created.
- o Configure the device IP (change IP addresses and device name to
- suit)
+ - Configure the device IP (change IP addresses and device name to
+ suit)::
@@ -1303,8 +1252,8 @@
- o Set up routing and arping if desired - this is my recipe, there are
- other ways of doing the same thing
+ - Set up routing and arping if desired - this is my recipe, there are
+ other ways of doing the same thing::
host#
@@ -1313,19 +1262,9 @@
host#
route add -host 192.168.0.253 dev tap0
-
-
-
-
-
host#
bash -c 'echo 1 > /proc/sys/net/ipv4/conf/tap0/proxy_arp'
-
-
-
-
-
host#
arp -Ds 192.168.0.253 eth0 pub
@@ -1338,76 +1277,43 @@
utility which reads the information from a config file and sets up
devices at boot time.
- o Rather than using up two IPs and ARPing for one of them, you can
+ - Rather than using up two IPs and ARPing for one of them, you can
also provide direct access to your LAN by the UML by using a
- bridge.
+ bridge::
host#
brctl addbr br0
-
-
-
-
host#
ifconfig eth0 0.0.0.0 promisc up
-
-
-
-
host#
ifconfig tap0 0.0.0.0 promisc up
-
-
-
-
host#
ifconfig br0 192.168.0.1 netmask 255.255.255.0 up
-
-
-
-
-
- host#
- brctl stp br0 off
-
-
-
-
+ host#
+ brctl stp br0 off
host#
brctl setfd br0 1
-
-
-
-
host#
brctl sethello br0 1
-
-
-
-
host#
brctl addif br0 eth0
-
-
-
-
host#
brctl addif br0 tap0
@@ -1417,12 +1323,12 @@
Note that 'br0' should be setup using ifconfig with the existing IP
address of eth0, as eth0 no longer has its own IP.
- o
+ -
Also, the /dev/net/tun device must be writable by the user running
UML in order for the UML to use the device that's been configured
- for it. The simplest thing to do is
+ for it. The simplest thing to do is::
host# chmod 666 /dev/net/tun
@@ -1438,14 +1344,14 @@
devices and chgrp /dev/net/tun to that group with mode 664 or 660.
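  For example, assuming a dedicated (hypothetical) ``uml-net`` group whose
  members run UML::

    host# chgrp uml-net /dev/net/tun
    host# chmod 660 /dev/net/tun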
- o Once the device is set up, run UML with 'eth0=tuntap,device name'
+ - Once the device is set up, run UML with 'eth0=tuntap,device name'
(i.e. 'eth0=tuntap,tap0') on the command line (or do it with the
mconsole config command).
- o Bring the eth device up in UML and you're in business.
+ - Bring the eth device up in UML and you're in business.
If you don't want that tap device any more, you can make it non-
- persistent with
+ persistent with::
host# tunctl -d tap device
@@ -1455,7 +1361,7 @@
Finally, tunctl has a -b (for brief mode) switch which causes it to
output only the name of the tap device it created. This makes it
- suitable for capture by a script:
+ suitable for capture by a script::
host# TAP=`tunctl -u 1000 -b`
@@ -1465,7 +1371,8 @@
- 6.8. Ethertap
+6.8. Ethertap
+--------------
Ethertap is the general mechanism on 2.2 for userspace processes to
exchange packets with the kernel.
@@ -1473,7 +1380,7 @@
To use this transport, you need to describe the virtual network device
- on the UML command line. The general format for this is
+ on the UML command line. The general format for this is::
eth <n> =ethertap, <device> , <ethernet address> , <tap IP address>
@@ -1481,7 +1388,7 @@
- So, the previous example
+ So, the previous example::
eth0=ethertap,tap0,fe:fd:0:0:0:1,192.168.0.254
@@ -1521,7 +1428,7 @@
If you want to set things up yourself, you need to make sure that the
appropriate /dev entry exists. If it doesn't, become root and create
- it as follows:
+ it as follows::
mknod /dev/tap <minor> c 36 <minor> + 16
@@ -1529,7 +1436,7 @@
- For example, this is how to create /dev/tap0:
+ For example, this is how to create /dev/tap0::
mknod /dev/tap0 c 36 0 + 16
@@ -1539,7 +1446,7 @@
You also need to make sure that the host kernel has ethertap support.
If ethertap is enabled as a module, you apparently need to insmod
- ethertap once for each ethertap device you want to enable. So,
+  ethertap once for each ethertap device you want to enable. So::
host#
@@ -1549,7 +1456,7 @@
will give you the tap0 interface. To get the tap1 interface, you need
- to run
+ to run::
host#
@@ -1561,7 +1468,8 @@
- 6.9. The switch daemon
+6.9. The switch daemon
+-----------------------
Note: This is the daemon formerly known as uml_router, but which was
renamed so the network weenies of the world would stop growling at me.
@@ -1577,7 +1485,7 @@
sockets.
- If you want it to listen on a different pair of sockets, use
+ If you want it to listen on a different pair of sockets, use::
-unix control socket data socket
@@ -1586,7 +1494,7 @@
- If you want it to act as a hub rather than a switch, use
+ If you want it to act as a hub rather than a switch, use::
-hub
@@ -1596,7 +1504,7 @@
If you want the switch to be connected to host networking (allowing
- the umls to get access to the outside world through the host), use
+ the umls to get access to the outside world through the host), use::
-tap tap0
@@ -1610,7 +1518,7 @@
device than tap0, specify that instead of tap0.
- uml_switch can be backgrounded as follows
+ uml_switch can be backgrounded as follows::
host%
@@ -1623,7 +1531,7 @@
stdin for EOF. When it sees that, it exits.
- The general format of the kernel command line switch is
+ The general format of the kernel command line switch is::
@@ -1639,7 +1547,8 @@
how to communicate with the daemon. You should only specify them if
you told the daemon to use different sockets than the default. So, if
you ran the daemon with no arguments, running the UML on the same
- machine with
+ machine with::
+
eth0=daemon
@@ -1649,7 +1558,8 @@
- 6.10. Slip
+6.10. Slip
+-----------
Slip is another, less general, mechanism for a process to communicate
with the host networking. In contrast to the ethertap interface,
@@ -1658,7 +1568,7 @@
IP.
- The general format of the command line switch is
+ The general format of the command line switch is::
@@ -1681,7 +1591,8 @@
- 6.11. Slirp
+6.11. Slirp
+------------
slirp uses an external program, usually /usr/bin/slirp, to provide IP
only networking connectivity through the host. This is similar to IP
@@ -1691,7 +1602,7 @@
root access or setuid binaries on the host.
- The general format of the command line switch for slirp is:
+ The general format of the command line switch for slirp is::
@@ -1716,7 +1627,7 @@
The eth0 interface on UML should be set up with the IP 10.2.0.15,
although you can use anything as long as it is not used by a network
you will be connecting to. The default route on UML should be set to
- use
+ use::
UML#
@@ -1737,10 +1648,11 @@
- 6.12. pcap
+6.12. pcap
+-----------
The pcap transport is attached to a UML ethernet device on the command
- line or with uml_mconsole with the following syntax:
+ line or with uml_mconsole with the following syntax::
@@ -1762,7 +1674,7 @@
expression optimizer is used.
- Example:
+ Example::
@@ -1777,7 +1689,8 @@
- 6.13. Setting up the host yourself
+6.13. Setting up the host yourself
+-----------------------------------
If you don't specify an address for the host side of the ethertap or
slip device, UML won't do any setup on the host. So this is what is
@@ -1785,19 +1698,15 @@
192.168.0.251 and a UML-side IP of 192.168.0.250 - adjust to suit your
own network):
- o The device needs to be configured with its IP address. Tap devices
+ - The device needs to be configured with its IP address. Tap devices
are also configured with an mtu of 1484. Slip devices are
configured with a point-to-point address pointing at the UML ip
- address.
+ address::
host# ifconfig tap0 arp mtu 1484 192.168.0.251 up
-
-
-
-
host#
ifconfig sl0 192.168.0.251 pointopoint 192.168.0.250 up
@@ -1805,7 +1714,7 @@
- o If a tap device is being set up, a route is set to the UML IP.
+ - If a tap device is being set up, a route is set to the UML IP::
UML# route add -host 192.168.0.250 gw 192.168.0.251
@@ -1814,8 +1723,8 @@
- o To allow other hosts on your network to see the virtual machine,
- proxy arp is set up for it.
+ - To allow other hosts on your network to see the virtual machine,
+ proxy arp is set up for it::
host# arp -Ds 192.168.0.250 eth0 pub
@@ -1824,7 +1733,7 @@
- o Finally, the host is set up to route packets.
+ - Finally, the host is set up to route packets::
host# echo 1 > /proc/sys/net/ipv4/ip_forward
@@ -1838,12 +1747,14 @@
- 7. Sharing Filesystems between Virtual Machines
+7. Sharing Filesystems between Virtual Machines
+================================================
- 7.1. A warning
+7.1. A warning
+---------------
Don't attempt to share filesystems simply by booting two UMLs from the
same file. That's the same thing as booting two physical machines
@@ -1851,7 +1762,8 @@
- 7.2. Using layered block devices
+7.2. Using layered block devices
+---------------------------------
The way to share a filesystem between two virtual machines is to use
the copy-on-write (COW) layering capability of the ubd block driver.
@@ -1872,7 +1784,7 @@
To add a copy-on-write layer to an existing block device file, simply
- add the name of the COW file to the appropriate ubd switch:
+ add the name of the COW file to the appropriate ubd switch::
ubd0=root_fs_cow,root_fs_debian_22
@@ -1883,7 +1795,7 @@
where 'root_fs_cow' is the private COW file and 'root_fs_debian_22' is
the existing shared filesystem. The COW file need not exist. If it
doesn't, the driver will create and initialize it. Once the COW file
- has been initialized, it can be used on its own on the command line:
+ has been initialized, it can be used on its own on the command line::
ubd0=root_fs_cow
@@ -1896,14 +1808,16 @@
- 7.3. Note!
+7.3. Note!
+-----------
When checking the size of the COW file in order to see the gobs of
space that you're saving, make sure you use 'ls -ls' to see the actual
disk consumption rather than the length of the file. The COW file is
sparse, so the length will be very different from the disk usage.
Here is a 'ls -l' of a COW file and backing file from one boot and
- shutdown:
+ shutdown::
+
host% ls -l cow.debian debian2.2
-rw-r--r-- 1 jdike jdike 492504064 Aug 6 21:16 cow.debian
-rwxrw-rw- 1 jdike jdike 537919488 Aug 6 20:42 debian2.2
@@ -1911,7 +1825,7 @@
- Doesn't look like much saved space, does it? Well, here's 'ls -ls':
+ Doesn't look like much saved space, does it? Well, here's 'ls -ls'::
host% ls -ls cow.debian debian2.2
@@ -1926,7 +1840,8 @@
- 7.4. Another warning
+7.4. Another warning
+---------------------
Once a filesystem is being used as a readonly backing file for a COW
file, do not boot directly from it or modify it in any way. Doing so
@@ -1952,7 +1867,8 @@
- 7.5. uml_moo : Merging a COW file with its backing file
+7.5. uml_moo : Merging a COW file with its backing file
+--------------------------------------------------------
Depending on how you use UML and COW devices, it may be advisable to
merge the changes in the COW file into the backing file every once in
@@ -1961,7 +1877,7 @@
- The utility that does this is uml_moo. Its usage is
+ The utility that does this is uml_moo. Its usage is::
host% uml_moo COW file new backing file
@@ -1991,8 +1907,8 @@
uml_moo is installed with the UML deb and RPM. If you didn't install
UML from one of those packages, you can also get it from the UML
- utilities <http://user-mode-linux.sourceforge.net/
- utilities> tar file in tools/moo.
+ utilities http://user-mode-linux.sourceforge.net/utilities tar file
+ in tools/moo.
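  Using the COW file from the example above, a merge might look like this
  (``root_fs_merged`` is an illustrative name for the new backing file)::

    host% uml_moo root_fs_cow root_fs_merged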
@@ -2001,7 +1917,8 @@
- 8. Creating filesystems
+8. Creating filesystems
+========================
You may want to create and mount new UML filesystems, either because
@@ -2015,13 +1932,14 @@
should be easy to translate to the filesystem of your choice.
- 8.1. Create the filesystem file
+8.1. Create the filesystem file
+--------------------------------
dd is your friend. All you need to do is tell dd to create an empty
file of the appropriate size. I usually make it sparse to save time
and to avoid allocating disk space until it's actually used. For
example, the following command will create a sparse 100 meg file full
- of zeroes.
+ of zeroes::
host%
@@ -2034,9 +1952,9 @@
8.2. Assign the file to a UML device
- Add an argument like the following to the UML command line:
+ Add an argument like the following to the UML command line::
- ubd4=new_filesystem
+ ubd4=new_filesystem
@@ -2053,7 +1971,7 @@
etc), then get them into UML by way of the net or hostfs.
- Make the new filesystem on the device assigned to the new file:
+ Make the new filesystem on the device assigned to the new file::
host# mkreiserfs /dev/ubd/4
@@ -2077,7 +1995,7 @@
- Now, mount it:
+ Now, mount it::
UML#
@@ -2096,7 +2014,8 @@
- 9. Host file access
+9. Host file access
+====================
If you want to access files on the host machine from inside UML, you
@@ -2112,10 +2031,11 @@
files contained in it just as you would on the host.
- 9.1. Using hostfs
+9.1. Using hostfs
+------------------
To begin with, make sure that hostfs is available inside the virtual
- machine with
+ machine with::
UML# cat /proc/filesystems
@@ -2127,7 +2047,7 @@
module and available inside the virtual machine, and insmod it.
- Now all you need to do is run mount:
+ Now all you need to do is run mount::
UML# mount none /mnt/host -t hostfs
@@ -2139,7 +2059,7 @@
If you don't want to mount the host root directory, then you can
- specify a subdirectory to mount with the -o switch to mount:
+ specify a subdirectory to mount with the -o switch to mount::
UML# mount none /mnt/home -t hostfs -o /home
@@ -2151,13 +2071,14 @@
- 9.2. hostfs as the root filesystem
+9.2. hostfs as the root filesystem
+-----------------------------------
It's possible to boot from a directory hierarchy on the host using
hostfs rather than using the standard filesystem in a file.
To start, you need that hierarchy. The easiest way is to loop mount
- an existing root_fs file:
+ an existing root_fs file::
host# mount root_fs uml_root_dir -o loop
@@ -2166,15 +2087,15 @@
You need to change the filesystem type of / in etc/fstab to be
- 'hostfs', so that line looks like this:
+ 'hostfs', so that line looks like this::
- /dev/ubd/0 / hostfs defaults 1 1
+ /dev/ubd/0 / hostfs defaults 1 1
Then you need to chown to yourself all the files in that directory
- that are owned by root. This worked for me:
+ that are owned by root. This worked for me::
host# find . -uid 0 -exec chown jdike {} \;
@@ -2183,7 +2104,7 @@
Next, make sure that your UML kernel has hostfs compiled in, not as a
- module. Then run UML with the boot device pointing at that directory:
+ module. Then run UML with the boot device pointing at that directory::
ubd0=/path/to/uml/root/directory
@@ -2194,41 +2115,35 @@
UML should then boot as it does normally.
- 9.3. Building hostfs
+9.3. Building hostfs
+---------------------
If you need to build hostfs because it's not in your kernel, you have
two choices:
- o Compiling hostfs into the kernel:
+ - Compiling hostfs into the kernel:
Reconfigure the kernel and set the 'Host filesystem' option under
- o Compiling hostfs as a module:
+ - Compiling hostfs as a module:
Reconfigure the kernel and set the 'Host filesystem' option under
be in arch/um/fs/hostfs/hostfs.o. Install that in
- /lib/modules/`uname -r`/fs in the virtual machine, boot it up, and
+ ``/lib/modules/$(uname -r)/fs`` in the virtual machine, boot it up, and::
UML# insmod hostfs
+.. _The_Management_Console:
-
-
-
-
-
-
-
-
-
- 10. The Management Console
+10. The Management Console
+===========================
@@ -2240,15 +2155,15 @@
There are a number of things you can do with the mconsole interface:
- o get the kernel version
+ - get the kernel version
- o add and remove devices
+ - add and remove devices
- o halt or reboot the machine
+ - halt or reboot the machine
- o Send SysRq commands
+ - Send SysRq commands
- o Pause and resume the UML
+ - Pause and resume the UML
You need the mconsole client (uml_mconsole) which is present in CVS
@@ -2257,7 +2172,7 @@
You also need CONFIG_MCONSOLE (under 'General Setup') enabled in UML.
- When you boot UML, you'll see a line like:
+ When you boot UML, you'll see a line like::
mconsole initialized on /home/jdike/.uml/umlNJ32yL/mconsole
@@ -2265,7 +2180,7 @@
- If you specify a unique machine id one the UML command line, i.e.
+  If you specify a unique machine id on the UML command line, i.e.::
umid=debian
@@ -2273,7 +2188,7 @@
- you'll see this
+ you'll see this::
mconsole initialized on /home/jdike/.uml/debian/mconsole
@@ -2282,7 +2197,7 @@
That file is the socket that uml_mconsole will use to communicate with
- UML. Run it with either the umid or the full path as its argument:
+ UML. Run it with either the umid or the full path as its argument::
host% uml_mconsole debian
@@ -2290,7 +2205,7 @@
- or
+ or::
host% uml_mconsole /home/jdike/.uml/debian/mconsole
@@ -2300,30 +2215,31 @@
You'll get a prompt, at which you can run one of these commands:
- o version
+ - version
- o halt
+ - halt
- o reboot
+ - reboot
- o config
+ - config
- o remove
+ - remove
- o sysrq
+ - sysrq
- o help
+ - help
- o cad
+ - cad
- o stop
+ - stop
- o go
+ - go
- 10.1. version
+10.1. version
+--------------
- This takes no arguments. It prints the UML version.
+ This takes no arguments. It prints the UML version::
(mconsole) version
@@ -2342,11 +2258,12 @@
- 10.2. halt and reboot
+10.2. halt and reboot
+----------------------
These take no arguments. They shut the machine down immediately, with
no syncing of disks and no clean shutdown of userspace. So, they are
- pretty close to crashing the machine.
+ pretty close to crashing the machine::
(mconsole) halt
@@ -2357,34 +2274,36 @@
- 10.3. config
+10.3. config
+-------------
"config" adds a new device to the virtual machine. Currently the ubd
and network drivers support this. It takes one argument, which is the
- device to add, with the same syntax as the kernel command line.
+ device to add, with the same syntax as the kernel command line::
- (mconsole)
- config ubd3=/home/jdike/incoming/roots/root_fs_debian22
+ (mconsole)
+ config ubd3=/home/jdike/incoming/roots/root_fs_debian22
- OK
- (mconsole) config eth1=mcast
- OK
+ OK
+ (mconsole) config eth1=mcast
+ OK
- 10.4. remove
+10.4. remove
+-------------
"remove" deletes a device from the system. Its argument is just the
name of the device to be removed. The device must be idle in whatever
sense the driver considers necessary. In the case of the ubd driver,
the removed block device must not be mounted, swapped on, or otherwise
- open, and in the case of the network driver, the device must be down.
+ open, and in the case of the network driver, the device must be down::
(mconsole) remove ubd3
@@ -2397,7 +2316,8 @@
- 10.5. sysrq
+10.5. sysrq
+------------
This takes one argument, which is a single letter. It calls the
generic kernel's SysRq driver, which does whatever is called for by
@@ -2407,19 +2327,21 @@
- 10.6. help
+10.6. help
+-----------
"help" returns a string listing the valid commands and what each one
does.
- 10.7. cad
+10.7. cad
+----------
  This invokes the Ctrl-Alt-Del action on init.  What exactly this ends
up doing is up to /etc/inittab. Normally, it reboots the machine.
With UML, this is usually not desired, so if a halt would be better,
- then find the section of inittab that looks like this
+ then find the section of inittab that looks like this::
# What to do when CTRL-ALT-DEL is pressed.
@@ -2432,7 +2354,8 @@
- 10.8. stop
+10.8. stop
+-----------
This puts the UML in a loop reading mconsole requests until a 'go'
mconsole command is received. This is very useful for making backups
@@ -2448,7 +2371,8 @@
- 10.9. go
+10.9. go
+---------
This resumes a UML after being paused by a 'stop' command. Note that
when the UML has resumed, TCP connections may have timed out and if
@@ -2460,9 +2384,10 @@
+.. _Kernel_debugging:
-
- 11. Kernel debugging
+11. Kernel debugging
+=====================
Note: The interface that makes debugging, as described here, possible
@@ -2477,15 +2402,16 @@
In order to debug the kernel, you need build it from source. See
- ``Compiling the kernel and modules'' for information on doing that.
+ :ref:`Compiling_the_kernel_and_modules` for information on doing that.
Make sure that you enable CONFIG_DEBUGSYM and CONFIG_PT_PROXY during
- the config. These will compile the kernel with -g, and enable the
+ the config. These will compile the kernel with ``-g``, and enable the
ptrace proxy so that gdb works with UML, respectively.
- 11.1. Starting the kernel under gdb
+11.1. Starting the kernel under gdb
+------------------------------------
You can have the kernel running under the control of gdb from the
beginning by putting 'debug' on the command line. You will get an
@@ -2498,7 +2424,11 @@
  There is a transcript of a debugging session at debug-session.html,
  with breakpoints being set in the scheduler and in an
interrupt handler.
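  As a minimal sketch, a debug-enabled boot is an ordinary boot with
  ``debug`` added to the command line (the filesystem name is illustrative)::

    host% ./linux debug ubd0=root_fs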
- 11.2. Examining sleeping processes
+
+
+11.2. Examining sleeping processes
+-----------------------------------
+
Not every bug is evident in the currently running process. Sometimes,
processes hang in the kernel when they shouldn't because they've
@@ -2516,7 +2446,7 @@
Now what you do is this:
- o detach from the current thread
+ - detach from the current thread::
(UML gdb) det
@@ -2525,7 +2455,7 @@
- o attach to the thread you are interested in
+ - attach to the thread you are interested in::
(UML gdb) att <host pid>
@@ -2534,7 +2464,7 @@
- o look at its stack and anything else of interest
+ - look at its stack and anything else of interest::
(UML gdb) bt
@@ -2545,18 +2475,14 @@
Note that you can't do anything at this point that requires that a
   process execute, e.g. calling a function.
- o when you're done looking at that process, reattach to the current
- thread and continue it
+ - when you're done looking at that process, reattach to the current
+ thread and continue it::
(UML gdb)
att 1
-
-
-
-
(UML gdb)
c
@@ -2569,12 +2495,13 @@
- 11.3. Running ddd on UML
+11.3. Running ddd on UML
+-------------------------
ddd works on UML, but requires a special kludge. The process goes
like this:
- o Start ddd
+ - Start ddd::
host% ddd linux
@@ -2583,14 +2510,14 @@
- o With ps, get the pid of the gdb that ddd started. You can ask the
+ - With ps, get the pid of the gdb that ddd started. You can ask the
gdb to tell you, but for some reason that confuses things and
causes a hang.
- o run UML with 'debug=parent gdb-pid=<pid>' added to the command line
+ - run UML with 'debug=parent gdb-pid=<pid>' added to the command line
- it will just sit there after you hit return
- o type 'att 1' to the ddd gdb and you will see something like
+ - type 'att 1' to the ddd gdb and you will see something like::
0xa013dc51 in __kill ()
@@ -2602,12 +2529,14 @@
- o At this point, type 'c', UML will boot up, and you can use ddd just
+ - At this point, type 'c', UML will boot up, and you can use ddd just
as you do on any other process.
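+
+   Putting the steps together, a whole session might look like this
+   (a sketch; the pid will differ on your system)::
+
+       host% ddd linux
+       host% ps aux | grep gdb          # find the gdb that ddd started
+       host% ./linux debug=parent gdb-pid=<pid>
+       # then, in the ddd gdb window:
+       (gdb) att 1
+       (gdb) c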
- 11.4. Debugging modules
+11.4. Debugging modules
+------------------------
+
gdb has support for debugging code which is dynamically loaded into
the process. This support is what is needed to debug kernel modules
@@ -2629,7 +2558,8 @@
First, you must tell it where your modules are. There is a list in
- the script that looks like this:
+ the script that looks like this::
+
set MODULE_PATHS {
"fat" "/usr/src/uml/linux-2.4.18/fs/fat/fat.o"
"isofs" "/usr/src/uml/linux-2.4.18/fs/isofs/isofs.o"
@@ -2641,9 +2571,7 @@
You change that to list the names and paths of the modules that you
are going to debug. Then you run it from the toplevel directory of
- your UML pool and it basically tells you what to do:
-
-
+ your UML pool and it basically tells you what to do::
******** GDB pid is 21903 ********
@@ -2666,7 +2594,7 @@
After you run UML and it sits there doing nothing, you hit return at
- the 'att 1' and continue it:
+ the 'att 1' and continue it::
Attaching to program: /home/jdike/linux/2.4/um/./linux, process 1
@@ -2678,63 +2606,48 @@
At this point, you debug normally. When you insmod something, the
- expect magic will kick in and you'll see something like:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- *** Module hostfs loaded ***
- Breakpoint 1, sys_init_module (name_user=0x805abb0 "hostfs",
- mod_user=0x8070e00) at module.c:349
- 349 char *name, *n_name, *name_tmp = NULL;
- (UML gdb) finish
- Run till exit from #0 sys_init_module (name_user=0x805abb0 "hostfs",
- mod_user=0x8070e00) at module.c:349
- 0xa00e2e23 in execute_syscall (r=0xa8140284) at syscall_kern.c:411
- 411 else res = EXECUTE_SYSCALL(syscall, regs);
- Value returned is $1 = 0
- (UML gdb)
- p/x (int)module_list + module_list->size_of_struct
-
- $2 = 0xa9021054
- (UML gdb) symbol-file ./linux
- Load new symbol table from "./linux"? (y or n) y
- Reading symbols from ./linux...
- done.
- (UML gdb)
- add-symbol-file /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o 0xa9021054
-
- add symbol table from file "/home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o" at
- .text_addr = 0xa9021054
- (y or n) y
-
- Reading symbols from /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o...
- done.
- (UML gdb) p *module_list
- $1 = {size_of_struct = 84, next = 0xa0178720, name = 0xa9022de0 "hostfs",
- size = 9016, uc = {usecount = {counter = 0}, pad = 0}, flags = 1,
- nsyms = 57, ndeps = 0, syms = 0xa9023170, deps = 0x0, refs = 0x0,
- init = 0xa90221f0 <init_hostfs>, cleanup = 0xa902222c <exit_hostfs>,
- ex_table_start = 0x0, ex_table_end = 0x0, persist_start = 0x0,
- persist_end = 0x0, can_unload = 0, runsize = 0, kallsyms_start = 0x0,
- kallsyms_end = 0x0,
- archdata_start = 0x1b855 <Address 0x1b855 out of bounds>,
- archdata_end = 0xe5890000 <Address 0xe5890000 out of bounds>,
- kernel_data = 0xf689c35d <Address 0xf689c35d out of bounds>}
- >> Finished loading symbols for hostfs ...
+ expect magic will kick in and you'll see something like::
+
+
+ *** Module hostfs loaded ***
+ Breakpoint 1, sys_init_module (name_user=0x805abb0 "hostfs",
+ mod_user=0x8070e00) at module.c:349
+ 349 char *name, *n_name, *name_tmp = NULL;
+ (UML gdb) finish
+ Run till exit from #0 sys_init_module (name_user=0x805abb0 "hostfs",
+ mod_user=0x8070e00) at module.c:349
+ 0xa00e2e23 in execute_syscall (r=0xa8140284) at syscall_kern.c:411
+ 411 else res = EXECUTE_SYSCALL(syscall, regs);
+ Value returned is $1 = 0
+ (UML gdb)
+ p/x (int)module_list + module_list->size_of_struct
+
+ $2 = 0xa9021054
+ (UML gdb) symbol-file ./linux
+ Load new symbol table from "./linux"? (y or n) y
+ Reading symbols from ./linux...
+ done.
+ (UML gdb)
+ add-symbol-file /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o 0xa9021054
+
+ add symbol table from file "/home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o" at
+ .text_addr = 0xa9021054
+ (y or n) y
+
+ Reading symbols from /home/jdike/linux/2.4/um/arch/um/fs/hostfs/hostfs.o...
+ done.
+ (UML gdb) p *module_list
+ $1 = {size_of_struct = 84, next = 0xa0178720, name = 0xa9022de0 "hostfs",
+ size = 9016, uc = {usecount = {counter = 0}, pad = 0}, flags = 1,
+ nsyms = 57, ndeps = 0, syms = 0xa9023170, deps = 0x0, refs = 0x0,
+ init = 0xa90221f0 <init_hostfs>, cleanup = 0xa902222c <exit_hostfs>,
+ ex_table_start = 0x0, ex_table_end = 0x0, persist_start = 0x0,
+ persist_end = 0x0, can_unload = 0, runsize = 0, kallsyms_start = 0x0,
+ kallsyms_end = 0x0,
+ archdata_start = 0x1b855 <Address 0x1b855 out of bounds>,
+ archdata_end = 0xe5890000 <Address 0xe5890000 out of bounds>,
+ kernel_data = 0xf689c35d <Address 0xf689c35d out of bounds>}
+ >> Finished loading symbols for hostfs ...
@@ -2744,7 +2657,7 @@
Boot the kernel under the debugger and load the module with insmod or
- modprobe. With gdb, do:
+ modprobe. With gdb, do::
(UML gdb) p module_list
@@ -2758,12 +2671,12 @@
   the name fields until you find the module you want to debug. Take the
address of that structure, and add module.size_of_struct (which in
2.4.10 kernels is 96 (0x60)) to it. Gdb can make this hard addition
- for you :-):
+   for you :-)::
+
- (UML gdb)
- printf "%#x\n", (int)module_list module_list->size_of_struct
+ (UML gdb)
+       printf "%#x\n", (int)module_list + module_list->size_of_struct
@@ -2771,7 +2684,7 @@
The offset from the module start occasionally changes (before 2.4.0,
it was module.size_of_struct + 4), so it's a good idea to check the
   init and cleanup addresses once in a while, as described below. Now
- do:
+ do::
(UML gdb)
@@ -2786,7 +2699,7 @@
If there's any doubt that you got the offset right, like breakpoints
appear not to work, or they're appearing in the wrong place, you can
check it by looking at the module structure. The init and cleanup
- fields should look like:
+ fields should look like::
init = 0x588066b0 <init_hostfs>, cleanup = 0x588066c0 <exit_hostfs>
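+
+   One quick way to check them is to print the fields directly from
+   module_list, which should point at the most recently loaded module
+   (a sketch; the output will vary)::
+
+       (UML gdb) p module_list->init
+       (UML gdb) p module_list->cleanup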
@@ -2801,7 +2714,7 @@
When you want to load in a new version of the module, you need to get
gdb to forget about the old one. The only way I've found to do that
- is to tell gdb to forget about all symbols that it knows about:
+ is to tell gdb to forget about all symbols that it knows about::
(UML gdb) symbol-file
@@ -2809,7 +2722,7 @@
- Then reload the symbols from the kernel binary:
+ Then reload the symbols from the kernel binary::
(UML gdb) symbol-file /path/to/kernel
@@ -2823,17 +2736,19 @@
- 11.5. Attaching gdb to the kernel
+11.5. Attaching gdb to the kernel
+----------------------------------
If you don't have the kernel running under gdb, you can attach gdb to
it later by sending the tracing thread a SIGUSR1. The first line of
- the console output identifies its pid:
+ the console output identifies its pid::
+
tracing thread pid = 20093
- When you send it the signal:
+ When you send it the signal::
host% kill -USR1 20093
@@ -2845,7 +2760,7 @@
If you have the mconsole compiled into UML, then the mconsole client
- can be used to start gdb:
+ can be used to start gdb::
   (mconsole) config gdb=xterm
@@ -2857,7 +2772,8 @@
- 11.6. Using alternate debuggers
+11.6. Using alternate debuggers
+--------------------------------
UML has support for attaching to an already running debugger rather
than starting gdb itself. This is present in CVS as of 17 Apr 2001.
@@ -2886,7 +2802,7 @@
An example of an alternate debugger is strace. You can strace the
actual kernel as follows:
- o Run the following in a shell
+ - Run the following in a shell::
host%
@@ -2894,13 +2810,13 @@
- o Run UML with 'debug' and 'gdb-pid=<pid>' with the pid printed out
+ - Run UML with 'debug' and 'gdb-pid=<pid>' with the pid printed out
by the previous command
- o Hit return in the shell, and UML will start running, and strace
+ - Hit return in the shell, and UML will start running, and strace
output will start accumulating in the output file.
- Note that this is different from running
+ Note that this is different from running::
host% strace ./linux
@@ -2917,95 +2833,57 @@
- 12. Kernel debugging examples
+12. Kernel debugging examples
+==============================
- 12.1. The case of the hung fsck
+12.1. The case of the hung fsck
+--------------------------------
When booting up the kernel, fsck failed, and dropped me into a shell
- to fix things up. I ran fsck -y, which hung:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+   to fix things up. I ran fsck -y, which hung::
+
+ Setting hostname uml [ OK ]
+ Checking root filesystem
+ /dev/fhd0 was not cleanly unmounted, check forced.
+ Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780.
+ /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY.
+ (i.e., without -a or -p options)
+ [ FAILED ]
+ *** An error occurred during the file system check.
+ *** Dropping you to a shell; the system will reboot
+ *** when you leave the shell.
+ Give root password for maintenance
+ (or type Control-D for normal startup):
+ [root@uml /root]# fsck -y /dev/fhd0
+ fsck -y /dev/fhd0
+ Parallelizing fsck version 1.14 (9-Jan-1999)
+ e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09
+ /dev/fhd0 contains a file system with errors, check forced.
+ Pass 1: Checking inodes, blocks, and sizes
+ Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? yes
+ Inode 19780, i_blocks is 1548, should be 540. Fix? yes
+ Pass 2: Checking directory structure
+ Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes
+ Directory inode 11858, block 0, offset 0: directory corrupted
+ Salvage? yes
+ Missing '.' in directory inode 11858.
+ Fix? yes
-
-
- Setting hostname uml [ OK ]
- Checking root filesystem
- /dev/fhd0 was not cleanly unmounted, check forced.
- Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780.
-
- /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY.
- (i.e., without -a or -p options)
- [ FAILED ]
-
- *** An error occurred during the file system check.
- *** Dropping you to a shell; the system will reboot
- *** when you leave the shell.
- Give root password for maintenance
- (or type Control-D for normal startup):
-
- [root@uml /root]# fsck -y /dev/fhd0
- fsck -y /dev/fhd0
- Parallelizing fsck version 1.14 (9-Jan-1999)
- e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09
- /dev/fhd0 contains a file system with errors, check forced.
- Pass 1: Checking inodes, blocks, and sizes
- Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? yes
-
- Inode 19780, i_blocks is 1548, should be 540. Fix? yes
-
- Pass 2: Checking directory structure
- Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes
-
- Directory inode 11858, block 0, offset 0: directory corrupted
- Salvage? yes
-
- Missing '.' in directory inode 11858.
- Fix? yes
-
- Missing '..' in directory inode 11858.
- Fix? yes
-
-
-
+ Missing '..' in directory inode 11858.
+ Fix? yes
The standard drill in this sort of situation is to fire up gdb on the
signal thread, which, in this case, was pid 1935. In another window,
- I run gdb and attach pid 1935.
-
-
+   I run gdb and attach pid 1935::
+
~/linux/2.3.26/um 1016: gdb linux
@@ -3022,11 +2900,7 @@
0x100756d9 in __wait4 ()
-
-
-
-
- Let's see what's currently running:
+ Let's see what's currently running::
@@ -3041,7 +2915,7 @@
reason and never woke up.
- Let's guess that the last process in the process list is fsck:
+ Let's guess that the last process in the process list is fsck::
@@ -3052,7 +2926,7 @@
- It is, so let's see what it thinks it's up to:
+ It is, so let's see what it thinks it's up to::
@@ -3068,8 +2942,6 @@
-
-
   The interesting things here are that its .thread.syscall.id
is __NR_write (see the big switch in arch/um/kernel/syscall_kern.c or
the defines in include/asm-um/arch/unistd.h), and that it never
@@ -3081,30 +2953,20 @@
The fact that it never returned from write means that its stack should
be fairly interesting. Its pid is 1980 (.thread.extern_pid). That
process is being ptraced by the signal thread, so it must be detached
- before gdb can attach it:
-
-
-
-
-
-
+   before gdb can attach it::
+
+ (gdb) call detach(1980)
- (gdb) call detach(1980)
-
- Program received signal SIGSEGV, Segmentation fault.
- <function called from gdb>
- The program being debugged stopped while in a function called from GDB.
- When the function (detach) is done executing, GDB will silently
- stop (instead of continuing to evaluate the expression containing
- the function call).
- (gdb) call detach(1980)
- $15 = 0
-
-
-
+ Program received signal SIGSEGV, Segmentation fault.
+ <function called from gdb>
+ The program being debugged stopped while in a function called from GDB.
+ When the function (detach) is done executing, GDB will silently
+ stop (instead of continuing to evaluate the expression containing
+ the function call).
+ (gdb) call detach(1980)
+ $15 = 0
The first detach segfaults for some reason, and the second one
@@ -3112,7 +2974,7 @@
Now I detach from the signal thread, attach to the fsck thread, and
- look at its stack:
+ look at its stack::
(gdb) det
@@ -3152,14 +3014,14 @@
- The interesting things here are :
+ The interesting things here are:
- o There are two segfaults on this stack (frames 9 and 14)
+ - There are two segfaults on this stack (frames 9 and 14)
- o The first faulting address (frame 11) is 0x50000800
+   - The first faulting address (frame 11) is 0x50000800::
+
- (gdb) p (void *)1342179328
- $16 = (void *) 0x50000800
+ (gdb) p (void *)1342179328
+ $16 = (void *) 0x50000800
@@ -3175,7 +3037,7 @@
However, the more immediate problem is that second segfault and I'm
going to concentrate on that. First, I want to see where the fault
- happened, so I have to go look at the sigcontent struct in frame 8:
+   happened, so I have to go look at the sigcontext struct in frame 8::
@@ -3211,7 +3073,7 @@
- That's not very useful, so I'll try a more manual method:
+ That's not very useful, so I'll try a more manual method::
(gdb) p *((struct sigcontext *) (&sig + 1))
@@ -3224,7 +3086,7 @@
- The ip is in handle_mm_fault:
+ The ip is in handle_mm_fault::
(gdb) p (void *)268480945
@@ -3236,7 +3098,7 @@
- Specifically, it's in pte_alloc:
+ Specifically, it's in pte_alloc::
(gdb) i line *$20
@@ -3249,7 +3111,7 @@
To find where in handle_mm_fault this is, I'll jump forward in the
- code until I see an address in that procedure:
+ code until I see an address in that procedure::
@@ -3286,21 +3148,21 @@
Something is apparently wrong with the page tables or vma_structs, so
- lets go back to frame 11 and have a look at them:
+   let's go back to frame 11 and have a look at them::
+
- #11 0x1006c0aa in segv (address=1342179328, is_write=2) at trap_kern.c:50
- 50 handle_mm_fault(current, vma, address, is_write);
- (gdb) call pgd_offset_proc(vma->vm_mm, address)
- $22 = (pgd_t *) 0x80a548c
+ #11 0x1006c0aa in segv (address=1342179328, is_write=2) at trap_kern.c:50
+ 50 handle_mm_fault(current, vma, address, is_write);
+ (gdb) call pgd_offset_proc(vma->vm_mm, address)
+ $22 = (pgd_t *) 0x80a548c
That's pretty bogus. Page tables aren't supposed to be in process
- text or data areas. Let's see what's in the vma:
+ text or data areas. Let's see what's in the vma::
(gdb) p *vma
@@ -3325,12 +3187,9 @@
-
-
   This is also pretty bogus. With all of the 0x80xxxxx and 0xaffffxxx
addresses, this is looking like a stack was plonked down on top of
- these structures. Maybe it's a stack overflow from the next page:
-
+ these structures. Maybe it's a stack overflow from the next page::
(gdb) p vma
@@ -3338,52 +3197,36 @@
-
-
That's towards the lower quarter of the page, so that would have to
- have been pretty heavy stack overflow:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
- (gdb) x/100x $25
- 0x507d2434: 0x507d2434 0x00000000 0x08048000 0x080a4f8c
- 0x507d2444: 0x00000000 0x080a79e0 0x080a8c94 0x080d1000
- 0x507d2454: 0xaffffdb0 0xaffffe63 0xaffffe7a 0xaffffe7a
- 0x507d2464: 0xafffffec 0x00000062 0x0000008a 0x00000000
- 0x507d2474: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2484: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2494: 0x00000000 0x00000000 0x507d2fe0 0x00000000
- 0x507d24a4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d24b4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d24c4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d24d4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d24e4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d24f4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2504: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2514: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2524: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2534: 0x00000000 0x00000000 0x507d25dc 0x00000000
- 0x507d2544: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2554: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2564: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2574: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2584: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d2594: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d25a4: 0x00000000 0x00000000 0x00000000 0x00000000
- 0x507d25b4: 0x00000000 0x00000000 0x00000000 0x00000000
-
-
+ have been pretty heavy stack overflow::
+
+
+ (gdb) x/100x $25
+ 0x507d2434: 0x507d2434 0x00000000 0x08048000 0x080a4f8c
+ 0x507d2444: 0x00000000 0x080a79e0 0x080a8c94 0x080d1000
+ 0x507d2454: 0xaffffdb0 0xaffffe63 0xaffffe7a 0xaffffe7a
+ 0x507d2464: 0xafffffec 0x00000062 0x0000008a 0x00000000
+ 0x507d2474: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2484: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2494: 0x00000000 0x00000000 0x507d2fe0 0x00000000
+ 0x507d24a4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d24b4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d24c4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d24d4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d24e4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d24f4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2504: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2514: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2524: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2534: 0x00000000 0x00000000 0x507d25dc 0x00000000
+ 0x507d2544: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2554: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2564: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2574: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2584: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d2594: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d25a4: 0x00000000 0x00000000 0x00000000 0x00000000
+ 0x507d25b4: 0x00000000 0x00000000 0x00000000 0x00000000
@@ -3399,65 +3242,53 @@
on will be somewhat clearer.
- 12.2. Episode 2: The case of the hung fsck
+12.2. Episode 2: The case of the hung fsck
+-------------------------------------------
After setting a trap in the SEGV handler for accesses to the signal
thread's stack, I reran the kernel.
- fsck hung again, this time by hitting the trap:
-
-
+   fsck hung again, this time by hitting the trap::
+
+ Setting hostname uml [ OK ]
+ Checking root filesystem
+ /dev/fhd0 contains a file system with errors, check forced.
+ Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780.
+ /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY.
+ (i.e., without -a or -p options)
+ [ FAILED ]
+ *** An error occurred during the file system check.
+ *** Dropping you to a shell; the system will reboot
+ *** when you leave the shell.
+ Give root password for maintenance
+ (or type Control-D for normal startup):
+ [root@uml /root]# fsck -y /dev/fhd0
+ fsck -y /dev/fhd0
+ Parallelizing fsck version 1.14 (9-Jan-1999)
+ e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09
+ /dev/fhd0 contains a file system with errors, check forced.
+ Pass 1: Checking inodes, blocks, and sizes
+ Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? yes
+ Pass 2: Checking directory structure
+ Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes
+ Directory inode 11858, block 0, offset 0: directory corrupted
+ Salvage? yes
+ Missing '.' in directory inode 11858.
+ Fix? yes
+ Missing '..' in directory inode 11858.
+ Fix? yes
-
-
-
- Setting hostname uml [ OK ]
- Checking root filesystem
- /dev/fhd0 contains a file system with errors, check forced.
- Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780.
-
- /dev/fhd0: UNEXPECTED INCONSISTENCY; RUN fsck MANUALLY.
- (i.e., without -a or -p options)
- [ FAILED ]
-
- *** An error occurred during the file system check.
- *** Dropping you to a shell; the system will reboot
- *** when you leave the shell.
- Give root password for maintenance
- (or type Control-D for normal startup):
-
- [root@uml /root]# fsck -y /dev/fhd0
- fsck -y /dev/fhd0
- Parallelizing fsck version 1.14 (9-Jan-1999)
- e2fsck 1.14, 9-Jan-1999 for EXT2 FS 0.5b, 95/08/09
- /dev/fhd0 contains a file system with errors, check forced.
- Pass 1: Checking inodes, blocks, and sizes
- Error reading block 86894 (Attempt to read block from filesystem resulted in short read) while reading indirect blocks of inode 19780. Ignore error? yes
-
- Pass 2: Checking directory structure
- Error reading block 49405 (Attempt to read block from filesystem resulted in short read). Ignore error? yes
-
- Directory inode 11858, block 0, offset 0: directory corrupted
- Salvage? yes
-
- Missing '.' in directory inode 11858.
- Fix? yes
-
- Missing '..' in directory inode 11858.
- Fix? yes
-
- Untested (4127) [100fe44c]: trap_kern.c line 31
+ Untested (4127) [100fe44c]: trap_kern.c line 31
@@ -3465,7 +3296,7 @@
I need to get the signal thread to detach from pid 4127 so that I can
attach to it with gdb. This is done by sending it a SIGUSR1, which is
- caught by the signal thread, which detaches the process:
+ caught by the signal thread, which detaches the process::
kill -USR1 4127
@@ -3474,31 +3305,20 @@
- Now I can run gdb on it:
-
-
-
-
-
-
-
+   Now I can run gdb on it::
+
-
-
-
-
- ~/linux/2.3.26/um 1034: gdb linux
- GNU gdb 4.17.0.11 with Linux support
- Copyright 1998 Free Software Foundation, Inc.
- GDB is free software, covered by the GNU General Public License, and you are
- welcome to change it and/or distribute copies of it under certain conditions.
- Type "show copying" to see the conditions.
- There is absolutely no warranty for GDB. Type "show warranty" for details.
- This GDB was configured as "i386-redhat-linux"...
- (gdb) att 4127
- Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 4127
- 0x10075891 in __libc_nanosleep ()
+ ~/linux/2.3.26/um 1034: gdb linux
+ GNU gdb 4.17.0.11 with Linux support
+ Copyright 1998 Free Software Foundation, Inc.
+ GDB is free software, covered by the GNU General Public License, and you are
+ welcome to change it and/or distribute copies of it under certain conditions.
+ Type "show copying" to see the conditions.
+ There is absolutely no warranty for GDB. Type "show warranty" for details.
+ This GDB was configured as "i386-redhat-linux"...
+ (gdb) att 4127
+ Attaching to program `/home/dike/linux/2.3.26/um/linux', Pid 4127
+ 0x10075891 in __libc_nanosleep ()
@@ -3506,7 +3326,7 @@
The backtrace shows that it was in a write and that the fault address
(address in frame 3) is 0x50000800, which is right in the middle of
- the signal thread's stack page:
+ the signal thread's stack page::
(gdb) bt
@@ -3540,58 +3360,48 @@
-
-
Going up the stack to the segv_handler frame and looking at where in
the code the access happened shows that it happened near line 110 of
- block_dev.c:
-
-
-
-
-
-
-
-
-
- (gdb) up
- #1 0x1007584d in __sleep (seconds=1000000)
- at ../sysdeps/unix/sysv/linux/sleep.c:78
- ../sysdeps/unix/sysv/linux/sleep.c:78: No such file or directory.
- (gdb)
- #2 0x1006ce9a in stop () at user_util.c:191
- 191 while(1) sleep(1000000);
- (gdb)
- #3 0x1006bf88 in segv (address=1342179328, is_write=2) at trap_kern.c:31
- 31 KERN_UNTESTED();
- (gdb)
- #4 0x1006c628 in segv_handler (sc=0x5006eaf8) at trap_user.c:174
- 174 segv(sc->cr2, sc->err & 2);
- (gdb) p *sc
- $1 = {gs = 0, __gsh = 0, fs = 0, __fsh = 0, es = 43, __esh = 0, ds = 43,
- __dsh = 0, edi = 1342179328, esi = 134973440, ebp = 1342631484,
- esp = 1342630864, ebx = 256, edx = 0, ecx = 256, eax = 1024, trapno = 14,
- err = 6, eip = 268550834, cs = 35, __csh = 0, eflags = 66070,
- esp_at_signal = 1342630864, ss = 43, __ssh = 0, fpstate = 0x0, oldmask = 0,
- cr2 = 1342179328}
- (gdb) p (void *)268550834
- $2 = (void *) 0x1001c2b2
- (gdb) i sym $2
- block_write + 1090 in section .text
- (gdb) i line *$2
- Line 209 of "/home/dike/linux/2.3.26/um/include/asm/arch/string.h"
- starts at address 0x1001c2a1 <block_write+1073>
- and ends at 0x1001c2bf <block_write+1103>.
- (gdb) i line *0x1001c2c0
- Line 110 of "block_dev.c" starts at address 0x1001c2bf <block_write+1103>
- and ends at 0x1001c2e3 <block_write+1139>.
-
-
+ block_dev.c::
+
+
+
+ (gdb) up
+ #1 0x1007584d in __sleep (seconds=1000000)
+ at ../sysdeps/unix/sysv/linux/sleep.c:78
+ ../sysdeps/unix/sysv/linux/sleep.c:78: No such file or directory.
+ (gdb)
+ #2 0x1006ce9a in stop () at user_util.c:191
+ 191 while(1) sleep(1000000);
+ (gdb)
+ #3 0x1006bf88 in segv (address=1342179328, is_write=2) at trap_kern.c:31
+ 31 KERN_UNTESTED();
+ (gdb)
+ #4 0x1006c628 in segv_handler (sc=0x5006eaf8) at trap_user.c:174
+ 174 segv(sc->cr2, sc->err & 2);
+ (gdb) p *sc
+ $1 = {gs = 0, __gsh = 0, fs = 0, __fsh = 0, es = 43, __esh = 0, ds = 43,
+ __dsh = 0, edi = 1342179328, esi = 134973440, ebp = 1342631484,
+ esp = 1342630864, ebx = 256, edx = 0, ecx = 256, eax = 1024, trapno = 14,
+ err = 6, eip = 268550834, cs = 35, __csh = 0, eflags = 66070,
+ esp_at_signal = 1342630864, ss = 43, __ssh = 0, fpstate = 0x0, oldmask = 0,
+ cr2 = 1342179328}
+ (gdb) p (void *)268550834
+ $2 = (void *) 0x1001c2b2
+ (gdb) i sym $2
+ block_write + 1090 in section .text
+ (gdb) i line *$2
+ Line 209 of "/home/dike/linux/2.3.26/um/include/asm/arch/string.h"
+ starts at address 0x1001c2a1 <block_write+1073>
+ and ends at 0x1001c2bf <block_write+1103>.
+ (gdb) i line *0x1001c2c0
+ Line 110 of "block_dev.c" starts at address 0x1001c2bf <block_write+1103>
+ and ends at 0x1001c2e3 <block_write+1139>.
Looking at the source shows that the fault happened during a call to
- copy_from_user to copy the data into the kernel:
+ copy_from_user to copy the data into the kernel::
107 count -= chars;
@@ -3601,10 +3411,8 @@
-
-
p is the pointer which must contain 0x50000800, since buf contains
- 0x80b8800 (frame 8 above). It is defined as:
+ 0x80b8800 (frame 8 above). It is defined as::
p = offset + bh->b_data;
@@ -3615,24 +3423,22 @@
I need to figure out what bh is, and it just so happens that bh is
passed as an argument to mark_buffer_uptodate and mark_buffer_dirty a
- few lines later, so I do a little disassembly:
-
-
+   few lines later, so I do a little disassembly::
+
- (gdb) disas 0x1001c2bf 0x1001c2e0
- Dump of assembler code from 0x1001c2bf to 0x1001c2d0:
- 0x1001c2bf <block_write+1103>: addl %eax,0xc(%ebp)
- 0x1001c2c2 <block_write+1106>: movl 0xfffffdd4(%ebp),%edx
- 0x1001c2c8 <block_write+1112>: btsl $0x0,0x18(%edx)
- 0x1001c2cd <block_write+1117>: btsl $0x1,0x18(%edx)
- 0x1001c2d2 <block_write+1122>: sbbl %ecx,%ecx
- 0x1001c2d4 <block_write+1124>: testl %ecx,%ecx
- 0x1001c2d6 <block_write+1126>: jne 0x1001c2e3 <block_write+1139>
- 0x1001c2d8 <block_write+1128>: pushl $0x0
- 0x1001c2da <block_write+1130>: pushl %edx
- 0x1001c2db <block_write+1131>: call 0x1001819c <__mark_buffer_dirty>
- End of assembler dump.
+ (gdb) disas 0x1001c2bf 0x1001c2e0
+ Dump of assembler code from 0x1001c2bf to 0x1001c2d0:
+ 0x1001c2bf <block_write+1103>: addl %eax,0xc(%ebp)
+ 0x1001c2c2 <block_write+1106>: movl 0xfffffdd4(%ebp),%edx
+ 0x1001c2c8 <block_write+1112>: btsl $0x0,0x18(%edx)
+ 0x1001c2cd <block_write+1117>: btsl $0x1,0x18(%edx)
+ 0x1001c2d2 <block_write+1122>: sbbl %ecx,%ecx
+ 0x1001c2d4 <block_write+1124>: testl %ecx,%ecx
+ 0x1001c2d6 <block_write+1126>: jne 0x1001c2e3 <block_write+1139>
+ 0x1001c2d8 <block_write+1128>: pushl $0x0
+ 0x1001c2da <block_write+1130>: pushl %edx
+ 0x1001c2db <block_write+1131>: call 0x1001819c <__mark_buffer_dirty>
+ End of assembler dump.
@@ -3640,7 +3446,7 @@
At that point, bh is in %edx (address 0x1001c2da), which is calculated
at 0x1001c2c2 as %ebp + 0xfffffdd4, so I figure exactly what that is,
- taking %ebp from the sigcontext_struct above:
+ taking %ebp from the sigcontext_struct above::
(gdb) p (void *)1342631484
@@ -3657,7 +3463,7 @@
Now, I look at the structure to see what's in it, and particularly,
- what its b_data field contains:
+ what its b_data field contains::
(gdb) p *((struct buffer_head *)0x50100200)
@@ -3682,18 +3488,18 @@
The b_page field is a pointer to the page_struct representing the
0x50000000 page. Looking at it shows the kernel's idea of the state
- of that page:
+   of that page::
+
- (gdb) p *$13.b_page
- $17 = {list = {next = 0x50004a5c, prev = 0x100c5174}, mapping = 0x0,
- index = 0, next_hash = 0x0, count = {counter = 1}, flags = 132, lru = {
- next = 0x50008460, prev = 0x50019350}, wait = {
- lock = <optimized out or zero length>, task_list = {next = 0x50004024,
- prev = 0x50004024}, __magic = 1342193708, __creator = 0},
- pprev_hash = 0x0, buffers = 0x501002c0, virtual = 1342177280,
- zone = 0x100c5160}
+ (gdb) p *$13.b_page
+ $17 = {list = {next = 0x50004a5c, prev = 0x100c5174}, mapping = 0x0,
+ index = 0, next_hash = 0x0, count = {counter = 1}, flags = 132, lru = {
+ next = 0x50008460, prev = 0x50019350}, wait = {
+ lock = <optimized out or zero length>, task_list = {next = 0x50004024,
+ prev = 0x50004024}, __magic = 1342193708, __creator = 0},
+ pprev_hash = 0x0, buffers = 0x501002c0, virtual = 1342177280,
+ zone = 0x100c5160}
@@ -3702,7 +3508,7 @@
Some sanity-checking: the virtual field shows the "virtual" address of
this page, which in this kernel is the same as its "physical" address,
and the page_struct itself should be mem_map[0], since it represents
- the first page of memory:
+ the first page of memory::
@@ -3719,7 +3525,7 @@
Now to check out the page_struct itself. In particular, the flags
- field shows whether the page is considered free or not:
+ field shows whether the page is considered free or not::
(gdb) p (void *)132
@@ -3739,7 +3545,7 @@
In my setup_arch procedure, I have the following code which looks just
- fine:
+ fine::
@@ -3762,7 +3568,7 @@
Stepping into init_bootmem, and looking at bootmem_map before looking
- at what it contains shows the following:
+ at what it contains shows the following::
@@ -3788,18 +3594,20 @@
- 13. What to do when UML doesn't work
+13. What to do when UML doesn't work
+=====================================
- 13.1. Strange compilation errors when you build from source
+13.1. Strange compilation errors when you build from source
+------------------------------------------------------------
As of test11, it is necessary to have "ARCH=um" in the environment or
on the make command line for all steps in building UML, including
clean, distclean, or mrproper, config, menuconfig, or xconfig, dep,
and linux. If you forget for any of them, the i386 build seems to
- contaminate the UML build. If this happens, start from scratch with
+ contaminate the UML build. If this happens, start from scratch with::
host%
@@ -3811,7 +3619,7 @@
and repeat the build process with ARCH=um on all the steps.
- See ``Compiling the kernel and modules'' for more details.
+ See :ref:`Compiling_the_kernel_and_modules` for more details.
Another cause of strange compilation errors is building UML in
@@ -3824,11 +3632,11 @@
- 13.3. A variety of panics and hangs with /tmp on a reiserfs filesys-
- tem
+13.3. A variety of panics and hangs with /tmp on a reiserfs filesystem
+-----------------------------------------------------------------------
I saw this on reiserfs 3.5.21 and it seems to be fixed in 3.5.27.
- Panics preceded by
+ Panics preceded by::
Detaching pid nnnn
@@ -3854,17 +3662,19 @@
- 13.5. UML doesn't work when /tmp is an NFS filesystem
+13.5. UML doesn't work when /tmp is an NFS filesystem
+------------------------------------------------------
   This seems to be a similar situation to the ReiserFS problem above.
   Some versions of NFS seem not to handle mmap correctly, which UML
   depends on. The workaround is to have /tmp be a non-NFS directory.
- 13.6. UML hangs on boot when compiled with gprof support
+13.6. UML hangs on boot when compiled with gprof support
+---------------------------------------------------------
If you build UML with gprof support and, early in the boot, it does
- this
+ this::
kernel BUG at page_alloc.c:100!
@@ -3878,10 +3688,11 @@
- 13.7. syslogd dies with a SIGTERM on startup
+13.7. syslogd dies with a SIGTERM on startup
+---------------------------------------------
The exact boot error depends on the distribution that you're booting,
- but Debian produces this:
+ but Debian produces this::
/etc/rc2.d/S10sysklogd: line 49: 93 Terminated
@@ -3891,23 +3702,21 @@
This is a syslogd bug. There's a race between a parent process
- installing a signal handler and its child sending the signal. See
- this uml-devel post <http://www.geocrawler.com/lists/3/Source-
- Forge/709/0/6612801> for the details.
+ installing a signal handler and its child sending the signal.
- 13.8. TUN/TAP networking doesn't work on a 2.4 host
+13.8. TUN/TAP networking doesn't work on a 2.4 host
+----------------------------------------------------
- There are a couple of problems which were
- <http://www.geocrawler.com/lists/3/SourceForge/597/0/> name="pointed
- out"> by Tim Robinson <timro at trkr dot net>
+   There are a couple of problems which were reported by
+   Tim Robinson <timro at trkr dot net>:
- o It doesn't work on hosts running 2.4.7 (or thereabouts) or earlier.
+ - It doesn't work on hosts running 2.4.7 (or thereabouts) or earlier.
The fix is to upgrade to something more recent and then read the
next item.
- o If you see
+ - If you see::
File descriptor in bad state
@@ -3921,8 +3730,8 @@
- 13.9. You can network to the host but not to other machines on the
- net
+13.9. You can network to the host but not to other machines on the net
+------------------------------------------------------------------------
If you can connect to the host, and the host can connect to UML, but
you cannot connect to any other machines, then you may need to enable
@@ -3930,7 +3739,7 @@
using private IP addresses (192.168.x.x or 10.x.x.x) for host/UML
networking, rather than the public address space that your host is
connected to. UML does not enable IP Masquerading, so you will need
- to create a static rule to enable it:
+ to create a static rule to enable it::
host%
@@ -3944,11 +3753,11 @@
   Documentation on IP Masquerading and SNAT can be found at
- www.netfilter.org <http://www.netfilter.org> .
+ http://www.netfilter.org.
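+
+   A typical masquerading rule might look something like this (a sketch
+   only; the interface and subnet here are assumptions, so adjust them
+   to your own setup)::
+
+       host% iptables -t nat -A POSTROUTING -o eth0 -s 192.168.0.0/24 -j MASQUERADE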
If you can reach the local net, but not the outside Internet, then
- that is usually a routing problem. The UML needs a default route:
+ that is usually a routing problem. The UML needs a default route::
UML#
@@ -3972,7 +3781,8 @@
- 13.10. I have no root and I want to scream
+13.10. I have no root and I want to scream
+--------------------------------------------
Thanks to Birgit Wahlich for telling me about this strange one. It
turns out that there's a limit of six environment variables on the
@@ -3987,14 +3797,16 @@
- 13.11. UML build conflict between ptrace.h and ucontext.h
+13.11. UML build conflict between ptrace.h and ucontext.h
+-----------------------------------------------------------
On some older systems, /usr/include/asm/ptrace.h and
/usr/include/sys/ucontext.h define the same names. So, when they're
included together, the defines from one completely mess up the parsing
- of the other, producing errors like:
+ of the other, producing errors like::
+
/usr/include/sys/ucontext.h:47: parse error before
- `10'
+       `10'
@@ -4007,7 +3819,8 @@
- 13.12. The UML BogoMips is exactly half the host's BogoMips
+13.12. The UML BogoMips is exactly half the host's BogoMips
+------------------------------------------------------------
On i386 kernels, there are two ways of running the loop that is used
to calculate the BogoMips rating, using the TSC if it's there or using
@@ -4019,15 +3832,17 @@
- 13.13. When you run UML, it immediately segfaults
+13.13. When you run UML, it immediately segfaults
+--------------------------------------------------
If the host is configured with the 2G/2G address space split, that's
- why. See ``UML on 2G/2G hosts'' for the details on getting UML to
+   why. See :ref:`UML_on_2G/2G_hosts` for the details on getting UML to
run on your host.
- 13.14. xterms appear, then immediately disappear
+13.14. xterms appear, then immediately disappear
+-------------------------------------------------
   If you're running an up-to-date kernel with an old release of
uml_utilities, the port-helper program will not work properly, so
@@ -4039,7 +3854,8 @@
- 13.15. Any other panic, hang, or strange behavior
+13.15. Any other panic, hang, or strange behavior
+--------------------------------------------------
If you're seeing truly strange behavior, such as hangs or panics that
happen in random places, or you try running the debugger to see what's
@@ -4057,9 +3873,13 @@
it and that a fix is imminent.
- If you want to be super-helpful, read ``Diagnosing Problems'' and
+ If you want to be super-helpful, read :ref:`Diagnosing_Problems` and
follow the instructions contained therein.
- 14. Diagnosing Problems
+
+.. _Diagnosing_Problems:
+
+14. Diagnosing Problems
+========================
If you get UML to crash, hang, or otherwise misbehave, you should
@@ -4074,21 +3894,22 @@
For any diagnosis, you're going to need to build a debugging kernel.
The binaries from this site aren't debuggable. If you haven't done
- this before, read about ``Compiling the kernel and modules'' and
- ``Kernel debugging'' UML first.
+ this before, read about :ref:`Compiling_the_kernel_and_modules` and
+ :ref:`Kernel_debugging` UML first.
- 14.1. Case 1 : Normal kernel panics
+14.1. Case 1 : Normal kernel panics
+------------------------------------
The most common case is for a normal thread to panic. To debug this,
you will need to run it under the debugger (add 'debug' to the command
line). An xterm will start up with gdb running inside it. Continue
- it when it stops in start_kernel and make it crash. Now ^C gdb and
+   it when it stops in start_kernel and make it crash. Now ``^C`` gdb and
+   get a backtrace.
If the panic was a "Kernel mode fault", then there will be a segv
frame on the stack and I'm going to want some more information. The
- stack might look something like this:
+ stack might look something like this::
(UML gdb) backtrace
@@ -4107,7 +3928,7 @@
I'm going to want to see the symbol and line information for the value
- of ip in the segv frame. In this case, you would do the following:
+ of ip in the segv frame. In this case, you would do the following::
(UML gdb) i sym 268849158
@@ -4115,7 +3936,7 @@
- and
+ and::
(UML gdb) i line *268849158
@@ -4128,7 +3949,8 @@
to get that information from the faulting ip.
- 14.2. Case 2 : Tracing thread panics
+14.2. Case 2 : Tracing thread panics
+-------------------------------------
The less common and more painful case is when the tracing thread
panics. In this case, the kernel debugger will be useless because it
@@ -4136,7 +3958,7 @@
do is get a backtrace from the tracing thread. This is done by
figuring out what its pid is, firing up gdb, and attaching it to that
pid. You can figure out the tracing thread pid by looking at the
- first line of the console output, which will look like this:
+ first line of the console output, which will look like this::
tracing thread pid = 15851
@@ -4145,7 +3967,7 @@
or by running ps on the host and finding the line that looks like
- this:
+ this::
jdike 15851 4.5 0.4 132568 1104 pts/0 S 21:34 0:05 ./linux [(tracing thread)]
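+
+   Then run gdb and attach to that pid, for example (a sketch; use the
+   pid from your own output)::
+
+       host% gdb linux
+       (gdb) att 15851
+       (gdb) bt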
@@ -4164,7 +3986,7 @@
14.3. Case 3 : Tracing thread panics caused by other threads
However, there are cases where the misbehavior of another thread
- caused the problem. The most common panic of this type is:
+ caused the problem. The most common panic of this type is::
wait_for_stop failed to wait for <pid> to stop with <signal number>
@@ -4177,7 +3999,7 @@
debugger is defunct and without some fancy footwork, another gdb can't
attach to it. So, this is how the fancy footwork goes:
- In a shell:
+ In a shell::
host% kill -STOP pid
@@ -4185,7 +4007,7 @@
- Run gdb on the tracing thread as described in case 2 and do:
+ Run gdb on the tracing thread as described in case 2 and do::
(host gdb) call detach(pid)
@@ -4193,7 +4015,7 @@
If you get a segfault, do it again. It always works the second time.
- Detach from the tracing thread and attach to that other thread:
+ Detach from the tracing thread and attach to that other thread::
(host gdb) detach
@@ -4209,7 +4031,7 @@
If gdb hangs when attaching to that process, go back to a shell and
- do:
+ do::
host%
@@ -4218,7 +4040,7 @@
- And then get the backtrace:
+ And then get the backtrace::
(host gdb) backtrace
@@ -4227,13 +4049,14 @@
- 14.4. Case 4 : Hangs
+14.4. Case 4 : Hangs
+---------------------
Hangs seem to be fairly rare, but they sometimes happen. When a hang
happens, we need a backtrace from the offending process. Run the
kernel debugger as described in case 1 and get a backtrace. If the
current process is not the idle thread, then send in the backtrace.
- You can tell that it's the idle thread if the stack looks like this:
+ You can tell that it's the idle thread if the stack looks like this::
#0 0x100b1401 in __libc_nanosleep ()
@@ -4257,7 +4080,8 @@
- 15. Thanks
+15. Thanks
+===========
A number of people have helped this project in various ways, and this
@@ -4274,20 +4098,21 @@
bookkeeping lapses and I forget about contributions.
- 15.1. Code and Documentation
+15.1. Code and Documentation
+-----------------------------
Rusty Russell <rusty at linuxcare.com.au> -
- o wrote the HOWTO <http://user-mode-
- linux.sourceforge.net/UserModeLinux-HOWTO.html>
+ - wrote the HOWTO
+ http://user-mode-linux.sourceforge.net/old/UserModeLinux-HOWTO.html
- o prodded me into making this project official and putting it on
+ - prodded me into making this project official and putting it on
SourceForge
- o came up with the way cool UML logo <http://user-mode-
- linux.sourceforge.net/uml-small.png>
+ - came up with the way cool UML logo
+ http://user-mode-linux.sourceforge.net/uml-small.png
- o redid the config process
+ - redid the config process
Peter Moulder <reiter at netspace.net.au> - Fixed my config and build
@@ -4296,34 +4121,32 @@
Bill Stearns <wstearns at pobox.com> -
- o HOWTO updates
+ - HOWTO updates
- o lots of bug reports
+ - lots of bug reports
- o lots of testing
+ - lots of testing
- o dedicated a box (uml.ists.dartmouth.edu) to support UML development
+ - dedicated a box (uml.ists.dartmouth.edu) to support UML development
- o wrote the mkrootfs script, which allows bootable filesystems of
+ - wrote the mkrootfs script, which allows bootable filesystems of
RPM-based distributions to be cranked out
- o cranked out a large number of filesystems with said script
+ - cranked out a large number of filesystems with said script
Jim Leu <jleu at mindspring.com> - Wrote the virtual ethernet driver
and associated usermode tools
- Lars Brinkhoff <http://lars.nocrew.org/> - Contributed the ptrace
- proxy from his own project <http://a386.nocrew.org/> to allow easier
- kernel debugging
+ Lars Brinkhoff http://lars.nocrew.org/ - Contributed the ptrace
+ proxy from his own project to allow easier kernel debugging
Andrea Arcangeli <andrea at suse.de> - Redid some of the early boot
code so that it would work on machines with Large File Support
- Chris Emerson <http://www.chiark.greenend.org.uk/~cemerson/> - Did
- the first UML port to Linux/ppc
+ Chris Emerson - Did the first UML port to Linux/ppc
Harald Welte <laforge at gnumonks.org> - Wrote the multicast
@@ -4338,7 +4161,7 @@
wrote the iomem emulation support
- Henrik Nordstrom <http://hem.passagen.se/hno/> - Provided a variety
+ Henrik Nordstrom http://hem.passagen.se/hno/ - Provided a variety
of patches, fixes, and clues
@@ -4373,190 +4196,193 @@
submitted patches for the slip transport and lots of other things.
- David Coulson <http://davidcoulson.net> -
+ David Coulson http://davidcoulson.net -
- o Set up the usermodelinux.org <http://usermodelinux.org> site,
+ - Set up the http://usermodelinux.org site,
which is a great way of keeping the UML user community on top of
UML goings-on.
- o Site documentation and updates
+ - Site documentation and updates
- o Nifty little UML management daemon UMLd
- <http://uml.openconsultancy.com/umld/>
+ - Nifty little UML management daemon UMLd
- o Lots of testing and bug reports
+ - Lots of testing and bug reports
- 15.2. Flushing out bugs
+15.2. Flushing out bugs
+------------------------
- o Yuri Pudgorodsky
+ - Yuri Pudgorodsky
- o Gerald Britton
+ - Gerald Britton
- o Ian Wehrman
+ - Ian Wehrman
- o Gord Lamb
+ - Gord Lamb
- o Eugene Koontz
+ - Eugene Koontz
- o John H. Hartman
+ - John H. Hartman
- o Anders Karlsson
+ - Anders Karlsson
- o Daniel Phillips
+ - Daniel Phillips
- o John Fremlin
+ - John Fremlin
- o Rainer Burgstaller
+ - Rainer Burgstaller
- o James Stevenson
+ - James Stevenson
- o Matt Clay
+ - Matt Clay
- o Cliff Jefferies
+ - Cliff Jefferies
- o Geoff Hoff
+ - Geoff Hoff
- o Lennert Buytenhek
+ - Lennert Buytenhek
- o Al Viro
+ - Al Viro
- o Frank Klingenhoefer
+ - Frank Klingenhoefer
- o Livio Baldini Soares
+ - Livio Baldini Soares
- o Jon Burgess
+ - Jon Burgess
- o Petru Paler
+ - Petru Paler
- o Paul
+ - Paul
- o Chris Reahard
+ - Chris Reahard
- o Sverker Nilsson
+ - Sverker Nilsson
- o Gong Su
+ - Gong Su
- o johan verrept
+ - johan verrept
- o Bjorn Eriksson
+ - Bjorn Eriksson
- o Lorenzo Allegrucci
+ - Lorenzo Allegrucci
- o Muli Ben-Yehuda
+ - Muli Ben-Yehuda
- o David Mansfield
+ - David Mansfield
- o Howard Goff
+ - Howard Goff
- o Mike Anderson
+ - Mike Anderson
- o John Byrne
+ - John Byrne
- o Sapan J. Batia
+ - Sapan J. Batia
- o Iris Huang
+ - Iris Huang
- o Jan Hudec
+ - Jan Hudec
- o Voluspa
+ - Voluspa
- 15.3. Buglets and clean-ups
+15.3. Buglets and clean-ups
+----------------------------
- o Dave Zarzycki
+ - Dave Zarzycki
- o Adam Lazur
+ - Adam Lazur
- o Boria Feigin
+ - Boria Feigin
- o Brian J. Murrell
+ - Brian J. Murrell
- o JS
+ - JS
- o Roman Zippel
+ - Roman Zippel
- o Wil Cooley
+ - Wil Cooley
- o Ayelet Shemesh
+ - Ayelet Shemesh
- o Will Dyson
+ - Will Dyson
- o Sverker Nilsson
+ - Sverker Nilsson
- o dvorak
+ - dvorak
- o v.naga srinivas
+ - v.naga srinivas
- o Shlomi Fish
+ - Shlomi Fish
- o Roger Binns
+ - Roger Binns
- o johan verrept
+ - johan verrept
- o MrChuoi
+ - MrChuoi
- o Peter Cleve
+ - Peter Cleve
- o Vincent Guffens
+ - Vincent Guffens
- o Nathan Scott
+ - Nathan Scott
- o Patrick Caulfield
+ - Patrick Caulfield
- o jbearce
+ - jbearce
- o Catalin Marinas
+ - Catalin Marinas
- o Shane Spencer
+ - Shane Spencer
- o Zou Min
+ - Zou Min
- o Ryan Boder
+ - Ryan Boder
- o Lorenzo Colitti
+ - Lorenzo Colitti
- o Gwendal Grignou
+ - Gwendal Grignou
- o Andre' Breiler
+ - Andre' Breiler
- o Tsutomu Yasuda
+ - Tsutomu Yasuda
- 15.4. Case Studies
+15.4. Case Studies
+-------------------
- o Jon Wright
+ - Jon Wright
- o William McEwan
+ - William McEwan
- o Michael Richardson
+ - Michael Richardson
- 15.5. Other contributions
+15.5. Other contributions
+--------------------------
Bill Carr <Bill.Carr at compaq.com> made the Red Hat mkrootfs script
work with RH 6.2.
Michael Jennings <mikejen at hevanet.com> sent in some material which
- is now gracing the top of the index page <http://user-mode-
- linux.sourceforge.net/> of this site.
+ is now gracing the top of the index page
+ http://user-mode-linux.sourceforge.net/ of this site.
- SGI <http://www.sgi.com> (and more specifically Ralf Baechle <ralf at
- uni-koblenz.de> ) gave me an account on oss.sgi.com
- <http://www.oss.sgi.com> . The bandwidth there made it possible to
+ SGI (and more specifically Ralf Baechle <ralf at
+   uni-koblenz.de>) gave me an account on oss.sgi.com.
+ The bandwidth there made it possible to
produce most of the filesystems available on the project download
page.
@@ -4573,17 +4399,5 @@
Chris Reahard built a specialized root filesystem for running a DNS
server jailed inside UML. It's available from the download
- <http://user-mode-linux.sourceforge.net/dl-sf.html> page in the Jail
+ http://user-mode-linux.sourceforge.net/old/dl-sf.html page in the Jail
Filesystems section.
-
-
-
-
-
-
-
-
-
-
-
-
diff --git a/Documentation/x86/index.rst b/Documentation/x86/index.rst
index a8de2fbc1caa..265d9e9a093b 100644
--- a/Documentation/x86/index.rst
+++ b/Documentation/x86/index.rst
@@ -19,7 +19,6 @@ x86-specific Documentation
tlb
mtrr
pat
- intel_mpx
intel-iommu
intel_txt
amd-memory-encryption
diff --git a/MAINTAINERS b/MAINTAINERS
index dceaeebce52a..1a8935569017 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -693,7 +693,7 @@ ALLWINNER CPUFREQ DRIVER
M: Yangtao Li <[email protected]>
S: Maintained
-F: Documentation/devicetree/bindings/opp/sun50i-nvmem-cpufreq.txt
+F: Documentation/devicetree/bindings/opp/allwinner,sun50i-h6-operating-points.yaml
F: drivers/cpufreq/sun50i-cpufreq-nvmem.c
ALLWINNER CRYPTO DRIVERS
@@ -2796,11 +2796,11 @@ F: drivers/block/aoe/
ATHEROS 71XX/9XXX GPIO DRIVER
M: Alban Bedel <[email protected]>
+S: Maintained
W: https://github.com/AlbanBedel/linux
T: git git://github.com/AlbanBedel/linux
-S: Maintained
-F: drivers/gpio/gpio-ath79.c
F: Documentation/devicetree/bindings/gpio/gpio-ath79.txt
+F: drivers/gpio/gpio-ath79.c
ATHEROS 71XX/9XXX USB PHY DRIVER
M: Alban Bedel <[email protected]>
@@ -3422,8 +3422,8 @@ BROADCOM BRCMSTB GPIO DRIVER
M: Gregory Fong <[email protected]>
S: Supported
-F: drivers/gpio/gpio-brcmstb.c
F: Documentation/devicetree/bindings/gpio/brcm,brcmstb-gpio.txt
+F: drivers/gpio/gpio-brcmstb.c
BROADCOM BRCMSTB I2C DRIVER
M: Kamal Dasu <[email protected]>
@@ -3481,8 +3481,8 @@ BROADCOM KONA GPIO DRIVER
M: Ray Jui <[email protected]>
S: Supported
-F: drivers/gpio/gpio-bcm-kona.c
F: Documentation/devicetree/bindings/gpio/brcm,kona-gpio.txt
+F: drivers/gpio/gpio-bcm-kona.c
BROADCOM NETXTREME-E ROCE DRIVER
M: Selvin Xavier <[email protected]>
@@ -3597,8 +3597,8 @@ F: sound/pci/bt87x.c
BT8XXGPIO DRIVER
M: Michael Buesch <[email protected]>
-W: http://bu3sch.de/btgpio.php
S: Maintained
+W: http://bu3sch.de/btgpio.php
F: drivers/gpio/gpio-bt8xx.c
BTRFS FILE SYSTEM
@@ -3649,6 +3649,7 @@ F: sound/pci/oxygen/
C-SKY ARCHITECTURE
M: Guo Ren <[email protected]>
T: git https://github.com/c-sky/csky-linux.git
S: Supported
F: arch/csky/
@@ -3909,7 +3910,7 @@ S: Supported
F: Documentation/filesystems/ceph.txt
F: fs/ceph/
-CERTIFICATE HANDLING:
+CERTIFICATE HANDLING
M: David Howells <[email protected]>
M: David Woodhouse <[email protected]>
@@ -3919,7 +3920,7 @@ F: certs/
F: scripts/sign-file.c
F: scripts/extract-cert.c
-CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM:
+CERTIFIED WIRELESS USB (WUSB) SUBSYSTEM
S: Obsolete
F: drivers/staging/wusbcore/
@@ -4016,7 +4017,7 @@ M: Cheng-Yi Chiang <[email protected]>
S: Maintained
R: Enric Balletbo i Serra <[email protected]>
R: Guenter Roeck <[email protected]>
-F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.txt
+F: Documentation/devicetree/bindings/sound/google,cros-ec-codec.yaml
F: sound/soc/codecs/cros_ec_codec.*
CIRRUS LOGIC AUDIO CODEC DRIVERS
@@ -4474,7 +4475,7 @@ L: [email protected]
T: git git://linuxtv.org/media_tree.git
S: Maintained
F: drivers/media/platform/sunxi/sun6i-csi/
-F: Documentation/devicetree/bindings/media/sun6i-csi.txt
+F: Documentation/devicetree/bindings/media/allwinner,sun6i-a31-csi.yaml
CW1200 WLAN driver
M: Solomon Peachy <[email protected]>
@@ -5021,7 +5022,7 @@ L: [email protected]
L: [email protected] (moderated for non-subscribers)
F: drivers/dma-buf/
F: include/linux/dma-buf*
-F: include/linux/reservation.h
+F: include/linux/dma-resv.h
F: include/linux/*fence.h
F: Documentation/driver-api/dma-buf.rst
K: dma_(buf|fence|resv)
@@ -5262,6 +5263,12 @@ T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tve200/
+DRM DRIVER FOR FEIXIN K101 IM2BA02 MIPI-DSI LCD PANELS
+M: Icenowy Zheng <[email protected]>
+S: Maintained
+F: drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
+F: Documentation/devicetree/bindings/display/panel/feixin,k101-im2ba02.yaml
+
DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
M: Jagan Teki <[email protected]>
S: Maintained
@@ -5281,6 +5288,13 @@ S: Maintained
F: drivers/gpu/drm/tiny/ili9225.c
F: Documentation/devicetree/bindings/display/ilitek,ili9225.txt
+DRM DRIVER FOR ILITEK ILI9486 PANELS
+M: Kamlesh Gurudasani <[email protected]>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/tiny/ili9486.c
+F: Documentation/devicetree/bindings/display/ilitek,ili9486.yaml
+
DRM DRIVER FOR HX8357D PANELS
M: Eric Anholt <[email protected]>
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -5322,6 +5336,13 @@ F: drivers/gpu/drm/msm/
F: include/uapi/drm/msm_drm.h
F: Documentation/devicetree/bindings/display/msm/
+DRM DRIVER FOR NOVATEK NT35510 PANELS
+M: Linus Walleij <[email protected]>
+T: git git://anongit.freedesktop.org/drm/drm-misc
+S: Maintained
+F: drivers/gpu/drm/panel/panel-novatek-nt35510.c
+F: Documentation/devicetree/bindings/display/panel/novatek,nt35510.yaml
+
DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
M: Ben Skeggs <[email protected]>
@@ -5409,7 +5430,7 @@ M: David Lechner <[email protected]>
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/tiny/st7735r.c
-F: Documentation/devicetree/bindings/display/sitronix,st7735r.txt
+F: Documentation/devicetree/bindings/display/sitronix,st7735r.yaml
DRM DRIVER FOR SONY ACX424AKP PANELS
M: Linus Walleij <[email protected]>
@@ -5489,6 +5510,7 @@ F: include/linux/vga*
DRM DRIVERS AND MISC GPU PATCHES
M: Maarten Lankhorst <[email protected]>
M: Maxime Ripard <[email protected]>
+M: Thomas Zimmermann <[email protected]>
W: https://01.org/linuxgraphics/gfx-docs/maintainer-tools/drm-misc.html
S: Maintained
T: git git://anongit.freedesktop.org/drm/drm-misc
@@ -5568,7 +5590,6 @@ S: Supported
F: drivers/gpu/drm/fsl-dcu/
F: Documentation/devicetree/bindings/display/fsl,dcu.txt
F: Documentation/devicetree/bindings/display/fsl,tcon.txt
-F: Documentation/devicetree/bindings/display/panel/nec,nl4827hc19-05b.txt
T: git git://anongit.freedesktop.org/drm/drm-misc
DRM DRIVERS FOR FREESCALE IMX
@@ -5587,12 +5608,13 @@ S: Maintained
F: drivers/gpu/drm/gma500/
DRM DRIVERS FOR HISILICON
-M: Xinliang Liu <[email protected]>
+M: Xinliang Liu <[email protected]>
M: Rongrong Zou <[email protected]>
+R: John Stultz <[email protected]>
R: Xinwei Kong <[email protected]>
R: Chen Feng <[email protected]>
-T: git git://github.com/xin3liang/linux.git
+T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/hisilicon/
F: Documentation/devicetree/bindings/display/hisilicon/
@@ -5667,7 +5689,7 @@ L: [email protected]
T: git git://anongit.freedesktop.org/drm/drm-misc
S: Maintained
F: drivers/gpu/drm/stm
-F: Documentation/devicetree/bindings/display/st,stm32-ltdc.txt
+F: Documentation/devicetree/bindings/display/st,stm32-ltdc.yaml
DRM DRIVERS FOR TI LCDC
M: Jyri Sarha <[email protected]>
@@ -5684,6 +5706,17 @@ S: Maintained
F: drivers/gpu/drm/omapdrm/
F: Documentation/devicetree/bindings/display/ti/
+DRM DRIVERS FOR TI KEYSTONE
+M: Jyri Sarha <[email protected]>
+M: Tomi Valkeinen <[email protected]>
+S: Maintained
+F: drivers/gpu/drm/tidss/
+F: Documentation/devicetree/bindings/display/ti/ti,k2g-dss.yaml
+F: Documentation/devicetree/bindings/display/ti/ti,am65x-dss.yaml
+F: Documentation/devicetree/bindings/display/ti/ti,j721e-dss.yaml
+T: git git://anongit.freedesktop.org/drm/drm-misc
+
DRM DRIVERS FOR V3D
M: Eric Anholt <[email protected]>
S: Supported
@@ -5932,12 +5965,12 @@ S: Maintained
F: drivers/media/dvb-frontends/ec100*
ECRYPT FILE SYSTEM
-M: Tyler Hicks <[email protected]>
+M: Tyler Hicks <[email protected]>
W: http://ecryptfs.org
W: https://launchpad.net/ecryptfs
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tyhicks/ecryptfs.git
-S: Supported
+S: Odd Fixes
F: Documentation/filesystems/ecryptfs.txt
F: fs/ecryptfs/
@@ -7047,7 +7080,7 @@ L: [email protected]
S: Supported
F: drivers/uio/uio_pci_generic.c
-GENERIC VDSO LIBRARY:
+GENERIC VDSO LIBRARY
M: Andy Lutomirski <[email protected]>
M: Thomas Gleixner <[email protected]>
M: Vincenzo Frascino <[email protected]>
@@ -7143,18 +7176,18 @@ GPIO SUBSYSTEM
M: Linus Walleij <[email protected]>
M: Bartosz Golaszewski <[email protected]>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
+F: Documentation/ABI/obsolete/sysfs-gpio
+F: Documentation/ABI/testing/gpio-cdev
+F: Documentation/admin-guide/gpio/
F: Documentation/devicetree/bindings/gpio/
F: Documentation/driver-api/gpio/
-F: Documentation/admin-guide/gpio/
-F: Documentation/ABI/testing/gpio-cdev
-F: Documentation/ABI/obsolete/sysfs-gpio
F: drivers/gpio/
+F: include/asm-generic/gpio.h
F: include/linux/gpio/
F: include/linux/gpio.h
F: include/linux/of_gpio.h
-F: include/asm-generic/gpio.h
F: include/uapi/linux/gpio.h
F: tools/gpio/
@@ -7737,7 +7770,7 @@ Hyper-V CORE AND DRIVERS
M: "K. Y. Srinivasan" <[email protected]>
M: Haiyang Zhang <[email protected]>
M: Stephen Hemminger <[email protected]>
-M: Sasha Levin <[email protected]>
+M: Wei Liu <[email protected]>
T: git git://git.kernel.org/pub/scm/linux/kernel/git/hyperv/linux.git
S: Supported
@@ -8055,8 +8088,8 @@ F: drivers/scsi/ips.*
ICH LPC AND GPIO DRIVER
M: Peter Tyser <[email protected]>
S: Maintained
-F: drivers/mfd/lpc_ich.c
F: drivers/gpio/gpio-ich.c
+F: drivers/mfd/lpc_ich.c
ICY I2C DRIVER
M: Max Staudt <[email protected]>
@@ -8392,7 +8425,7 @@ M: Joonas Lahtinen <[email protected]>
M: Rodrigo Vivi <[email protected]>
W: https://01.org/linuxgraphics/
-B: https://01.org/linuxgraphics/documentation/how-report-bugs
+B: https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
C: irc://chat.freenode.net/intel-gfx
Q: http://patchwork.freedesktop.org/project/intel-gfx/
T: git git://anongit.freedesktop.org/drm-intel
@@ -9278,7 +9311,7 @@ F: include/keys/trusted-type.h
F: security/keys/trusted.c
F: include/keys/trusted.h
-KEYS/KEYRINGS:
+KEYS/KEYRINGS
M: David Howells <[email protected]>
M: Jarkko Sakkinen <[email protected]>
@@ -10163,7 +10196,7 @@ MAXBOTIX ULTRASONIC RANGER IIO DRIVER
M: Andreas Klinger <[email protected]>
S: Maintained
-F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
+F: Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.yaml
F: drivers/iio/proximity/mb1232.c
MAXIM MAX77650 PMIC MFD DRIVER
@@ -10466,7 +10499,7 @@ M: Hugues Fruchet <[email protected]>
T: git git://linuxtv.org/media_tree.git
S: Supported
-F: Documentation/devicetree/bindings/media/st,stm32-dcmi.txt
+F: Documentation/devicetree/bindings/media/st,stm32-dcmi.yaml
F: drivers/media/platform/stm32/stm32-dcmi.c
MEDIA DRIVERS FOR NVIDIA TEGRA - VDE
@@ -11114,14 +11147,12 @@ S: Maintained
F: drivers/usb/image/microtek.*
MIPS
-M: Ralf Baechle <[email protected]>
-M: Paul Burton <[email protected]>
+M: Thomas Bogendoerfer <[email protected]>
W: http://www.linux-mips.org/
-T: git git://git.linux-mips.org/pub/scm/ralf/linux.git
T: git git://git.kernel.org/pub/scm/linux/kernel/git/mips/linux.git
Q: http://patchwork.linux-mips.org/project/linux-mips/list/
-S: Supported
+S: Maintained
F: Documentation/devicetree/bindings/mips/
F: Documentation/mips/
F: arch/mips/
@@ -11484,7 +11515,7 @@ F: drivers/scsi/mac_scsi.*
F: drivers/scsi/sun3_scsi.*
F: drivers/scsi/sun3_scsi_vme.c
-NCSI LIBRARY:
+NCSI LIBRARY
M: Samuel Mendoza-Jonas <[email protected]>
S: Maintained
F: net/ncsi/
@@ -12740,7 +12771,7 @@ M: Tom Joseph <[email protected]>
S: Maintained
F: Documentation/devicetree/bindings/pci/cdns,*.txt
-F: drivers/pci/controller/pcie-cadence*
+F: drivers/pci/controller/cadence/
PCI DRIVER FOR FREESCALE LAYERSCAPE
M: Minghuan Lian <[email protected]>
@@ -12953,7 +12984,6 @@ M: Robert Richter <[email protected]>
L: [email protected] (moderated for non-subscribers)
S: Supported
-F: Documentation/devicetree/bindings/pci/pci-thunder-*
F: drivers/pci/controller/pci-thunder-*
PCIE DRIVER FOR HISILICON
@@ -13512,7 +13542,7 @@ L: [email protected]
S: Maintained
F: drivers/block/ps3vram.c
-PSAMPLE PACKET SAMPLING SUPPORT:
+PSAMPLE PACKET SAMPLING SUPPORT
M: Yotam Gigi <[email protected]>
S: Maintained
F: net/psample
@@ -14228,7 +14258,7 @@ F: include/dt-bindings/reset/
F: include/linux/reset.h
F: include/linux/reset/
F: include/linux/reset-controller.h
-K: \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
+K: \b(?:devm_|of_)?reset_control(?:ler_[a-z]+|_[a-z_]+)?\b
RESTARTABLE SEQUENCES SUPPORT
M: Mathieu Desnoyers <[email protected]>
@@ -14582,10 +14612,10 @@ F: drivers/media/pci/saa7146/
F: include/media/drv-intf/saa7146*
SAFESETID SECURITY MODULE
-M: Micah Morton <[email protected]>
-S: Supported
-F: security/safesetid/
-F: Documentation/admin-guide/LSM/SafeSetID.rst
+M: Micah Morton <[email protected]>
+S: Supported
+F: security/safesetid/
+F: Documentation/admin-guide/LSM/SafeSetID.rst
SAMSUNG AUDIO (ASoC) DRIVERS
M: Krzysztof Kozlowski <[email protected]>
@@ -15923,7 +15953,7 @@ F: drivers/*/stm32-*timer*
F: drivers/pwm/pwm-stm32*
F: include/linux/*/stm32-*tim*
F: Documentation/ABI/testing/*timer-stm32
-F: Documentation/devicetree/bindings/*/stm32-*timer*
+F: Documentation/devicetree/bindings/*/*stm32-*timer*
F: Documentation/devicetree/bindings/pwm/pwm-stm32*
STMMAC ETHERNET DRIVER
@@ -16075,20 +16105,22 @@ F: Documentation/devicetree/bindings/reset/snps,axs10x-reset.txt
SYNOPSYS CREG GPIO DRIVER
M: Eugeniy Paltsev <[email protected]>
S: Maintained
-F: drivers/gpio/gpio-creg-snps.c
F: Documentation/devicetree/bindings/gpio/snps,creg-gpio.txt
+F: drivers/gpio/gpio-creg-snps.c
SYNOPSYS DESIGNWARE 8250 UART DRIVER
R: Andy Shevchenko <[email protected]>
S: Maintained
F: drivers/tty/serial/8250/8250_dw.c
+F: drivers/tty/serial/8250/8250_dwlib.*
+F: drivers/tty/serial/8250/8250_lpss.c
SYNOPSYS DESIGNWARE APB GPIO DRIVER
M: Hoan Tran <[email protected]>
S: Maintained
-F: drivers/gpio/gpio-dwapb.c
F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
+F: drivers/gpio/gpio-dwapb.c
SYNOPSYS DESIGNWARE AXI DMAC DRIVER
M: Eugeniy Paltsev <[email protected]>
@@ -16552,8 +16584,8 @@ M: Michael Jamet <[email protected]>
M: Mika Westerberg <[email protected]>
M: Yehezkel Bernat <[email protected]>
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
S: Maintained
+T: git git://git.kernel.org/pub/scm/linux/kernel/git/westeri/thunderbolt.git
F: Documentation/admin-guide/thunderbolt.rst
F: drivers/thunderbolt/
F: include/linux/thunderbolt.h
@@ -17080,7 +17112,7 @@ S: Maintained
F: Documentation/admin-guide/ufs.rst
F: fs/ufs/
-UHID USERSPACE HID IO DRIVER:
+UHID USERSPACE HID IO DRIVER
M: David Herrmann <[email protected]>
S: Maintained
@@ -17094,18 +17126,18 @@ S: Maintained
F: drivers/usb/common/ulpi.c
F: include/linux/ulpi/
-ULTRA-WIDEBAND (UWB) SUBSYSTEM:
+ULTRA-WIDEBAND (UWB) SUBSYSTEM
S: Obsolete
F: drivers/staging/uwb/
-UNICODE SUBSYSTEM:
+UNICODE SUBSYSTEM
M: Gabriel Krisman Bertazi <[email protected]>
S: Supported
F: fs/unicode/
-UNICORE32 ARCHITECTURE:
+UNICORE32 ARCHITECTURE
M: Guan Xuetao <[email protected]>
W: http://mprc.pku.edu.cn/~guanxuetao/linux
S: Maintained
@@ -17392,11 +17424,14 @@ F: drivers/usb/
F: include/linux/usb.h
F: include/linux/usb/
-USB TYPEC PI3USB30532 MUX DRIVER
-M: Hans de Goede <[email protected]>
+USB TYPEC BUS FOR ALTERNATE MODES
+M: Heikki Krogerus <[email protected]>
S: Maintained
-F: drivers/usb/typec/mux/pi3usb30532.c
+F: Documentation/ABI/testing/sysfs-bus-typec
+F: Documentation/driver-api/usb/typec_bus.rst
+F: drivers/usb/typec/altmodes/
+F: include/linux/usb/typec_altmode.h
USB TYPEC CLASS
M: Heikki Krogerus <[email protected]>
@@ -17407,14 +17442,11 @@ F: Documentation/driver-api/usb/typec.rst
F: drivers/usb/typec/
F: include/linux/usb/typec.h
-USB TYPEC BUS FOR ALTERNATE MODES
-M: Heikki Krogerus <[email protected]>
+USB TYPEC PI3USB30532 MUX DRIVER
+M: Hans de Goede <[email protected]>
S: Maintained
-F: Documentation/ABI/testing/sysfs-bus-typec
-F: Documentation/driver-api/usb/typec_bus.rst
-F: drivers/usb/typec/altmodes/
-F: include/linux/usb/typec_altmode.h
+F: drivers/usb/typec/mux/pi3usb30532.c
USB TYPEC PORT CONTROLLER DRIVERS
M: Guenter Roeck <[email protected]>
@@ -17791,7 +17823,7 @@ F: include/linux/vbox_utils.h
F: include/uapi/linux/vbox*.h
F: drivers/virt/vboxguest/
-VIRTUAL BOX SHARED FOLDER VFS DRIVER:
+VIRTUAL BOX SHARED FOLDER VFS DRIVER
M: Hans de Goede <[email protected]>
S: Maintained
@@ -18414,8 +18446,8 @@ M: Nandor Han <[email protected]>
M: Semi Malinen <[email protected]>
S: Maintained
-F: drivers/gpio/gpio-xra1403.c
F: Documentation/devicetree/bindings/gpio/gpio-xra1403.txt
+F: drivers/gpio/gpio-xra1403.c
XTENSA XTFPGA PLATFORM SUPPORT
M: Max Filippov <[email protected]>
diff --git a/Makefile b/Makefile
index 84b71845c43f..e25db579ce74 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 5
PATCHLEVEL = 6
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc5
NAME = Kleptomaniac Octopus
# *DOCUMENTATION*
@@ -68,6 +68,7 @@ unexport GREP_OPTIONS
#
# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
# If KBUILD_VERBOSE equals 1 then the above command is displayed.
+# If KBUILD_VERBOSE equals 2 then give the reason why each target is rebuilt.
#
# To put more focus on warnings, be less verbose as default
# Use 'make V=1' to see the full commands
@@ -1238,7 +1239,7 @@ ifneq ($(dtstree),)
%.dtb: include/config/kernel.release scripts_dtc
$(Q)$(MAKE) $(build)=$(dtstree) $(dtstree)/$@
-PHONY += dtbs dtbs_install dt_binding_check
+PHONY += dtbs dtbs_install dtbs_check
dtbs dtbs_check: include/config/kernel.release scripts_dtc
$(Q)$(MAKE) $(build)=$(dtstree)
@@ -1258,6 +1259,7 @@ PHONY += scripts_dtc
scripts_dtc: scripts_basic
$(Q)$(MAKE) $(build)=scripts/dtc
+PHONY += dt_binding_check
dt_binding_check: scripts_dtc
$(Q)$(MAKE) $(build)=Documentation/devicetree/bindings
diff --git a/arch/Kconfig b/arch/Kconfig
index 98de654b79b3..17fe351cdde0 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -738,8 +738,9 @@ config HAVE_STACK_VALIDATION
config HAVE_RELIABLE_STACKTRACE
bool
help
- Architecture has a save_stack_trace_tsk_reliable() function which
- only returns a stack trace if it can guarantee the trace is reliable.
+ Architecture has either save_stack_trace_tsk_reliable() or
+ arch_stack_walk_reliable() function which only returns a stack trace
+ if it can guarantee the trace is reliable.
config HAVE_ARCH_HASH
bool
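
For context on the help text above: HAVE_RELIABLE_STACKTRACE gates livepatch's consistency model, which must refuse to transition any task whose stack cannot be fully verified. A minimal consumer-side sketch, assuming the generic stack_trace_save_tsk_reliable() wrapper that fronts whichever of the two arch hooks is implemented; this is illustrative, not the actual livepatch code:

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/stacktrace.h>

/*
 * Returns true only when every frame of @task's stack was verified;
 * a negative return from the reliable walker means the trace could
 * not be guaranteed (or the architecture lacks support entirely).
 */
static bool task_stack_is_reliable(struct task_struct *task)
{
	unsigned long entries[32];
	int nr = stack_trace_save_tsk_reliable(task, entries,
					       ARRAY_SIZE(entries));

	return nr >= 0;
}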
diff --git a/arch/arm/boot/dts/am437x-idk-evm.dts b/arch/arm/boot/dts/am437x-idk-evm.dts
index f3ced6df0c9b..9f66f96d09c9 100644
--- a/arch/arm/boot/dts/am437x-idk-evm.dts
+++ b/arch/arm/boot/dts/am437x-idk-evm.dts
@@ -526,11 +526,11 @@
* Supply voltage supervisor on board will not allow opp50 so
* disable it and set opp100 as suspend OPP.
*/
- opp50@300000000 {
+ opp50-300000000 {
status = "disabled";
};
- opp100@600000000 {
+ opp100-600000000 {
opp-suspend;
};
};
diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
index 1b5a835f66bd..efea891b1a76 100644
--- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
+++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts
@@ -21,6 +21,7 @@
aliases {
ethernet0 = &genet;
+ pcie0 = &pcie0;
};
leds {
@@ -31,6 +32,8 @@
pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
};
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
index 66ab35eccba7..28be0332c1c8 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-a-plus.dts
@@ -26,6 +26,8 @@
pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
};
};
};
diff --git a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
index 74ed6d047807..37343148643d 100644
--- a/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
+++ b/arch/arm/boot/dts/bcm2837-rpi-3-b-plus.dts
@@ -27,6 +27,8 @@
pwr {
label = "PWR";
gpios = <&expgpio 2 GPIO_ACTIVE_LOW>;
+ default-state = "keep";
+ linux,default-trigger = "default-on";
};
};
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index de7f85efaa51..af06a55d1c5c 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -61,10 +61,10 @@
regulator-max-microvolt = <1800000>;
};
- evm_3v3: fixedregulator-evm3v3 {
+ vsys_3v3: fixedregulator-vsys3v3 {
/* Output of Cntlr A of TPS43351-Q1 on dra7-evm */
compatible = "regulator-fixed";
- regulator-name = "evm_3v3";
+ regulator-name = "vsys_3v3";
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
vin-supply = <&evm_12v0>;
diff --git a/arch/arm/boot/dts/dra7-l4.dtsi b/arch/arm/boot/dts/dra7-l4.dtsi
index fc418834890d..2119a78e9c15 100644
--- a/arch/arm/boot/dts/dra7-l4.dtsi
+++ b/arch/arm/boot/dts/dra7-l4.dtsi
@@ -3474,6 +3474,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER13_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 339 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
@@ -3501,6 +3502,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER14_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 340 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
@@ -3528,6 +3530,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER15_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 341 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
@@ -3555,6 +3558,7 @@
clocks = <&l4per3_clkctrl DRA7_L4PER3_TIMER16_CLKCTRL 24>;
clock-names = "fck";
interrupts = <GIC_SPI 342 IRQ_TYPE_LEVEL_HIGH>;
+ ti,timer-pwm;
};
};
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index d78b684e7fca..4305051bb769 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -184,6 +184,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x20013000 0x13000 0 0xffed000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
@@ -238,6 +239,7 @@
device_type = "pci";
ranges = <0x81000000 0 0 0x03000 0 0x00010000
0x82000000 0 0x30013000 0x13000 0 0xffed000>;
+ dma-ranges = <0x02000000 0x0 0x00000000 0x00000000 0x1 0x00000000>;
bus-range = <0x00 0xff>;
#interrupt-cells = <1>;
num-lanes = <1>;
diff --git a/arch/arm/boot/dts/dra76x.dtsi b/arch/arm/boot/dts/dra76x.dtsi
index 2f7539afef2b..42b8a205b64f 100644
--- a/arch/arm/boot/dts/dra76x.dtsi
+++ b/arch/arm/boot/dts/dra76x.dtsi
@@ -128,3 +128,8 @@
&usb4_tm {
status = "disabled";
};
+
+&mmc3 {
+ /* dra76x is not affected by i887 */
+ max-frequency = <96000000>;
+};
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index 55cef4cac5f1..dc0a93bccbf1 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -796,16 +796,6 @@
clock-div = <1>;
};
- ipu1_gfclk_mux: ipu1_gfclk_mux@520 {
- #clock-cells = <0>;
- compatible = "ti,mux-clock";
- clocks = <&dpll_abe_m2x2_ck>, <&dpll_core_h22x2_ck>;
- ti,bit-shift = <24>;
- reg = <0x0520>;
- assigned-clocks = <&ipu1_gfclk_mux>;
- assigned-clock-parents = <&dpll_core_h22x2_ck>;
- };
-
dummy_ck: dummy_ck {
#clock-cells = <0>;
compatible = "fixed-clock";
@@ -1564,6 +1554,8 @@
compatible = "ti,clkctrl";
reg = <0x20 0x4>;
#clock-cells = <2>;
+ assigned-clocks = <&ipu1_clkctrl DRA7_IPU1_MMU_IPU1_CLKCTRL 24>;
+ assigned-clock-parents = <&dpll_core_h22x2_ck>;
};
ipu_clkctrl: ipu-clkctrl@50 {
diff --git a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
index cd075621de52..84fcc203a2e4 100644
--- a/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/imx6dl-colibri-eval-v3.dts
@@ -275,7 +275,7 @@
/* SRAM on Colibri nEXT_CS0 */
sram@0,0 {
- compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram";
+ compatible = "cypress,cy7c1019dv33-10zsxi", "mtd-ram";
reg = <0 0 0x00010000>;
#address-cells = <1>;
#size-cells = <1>;
@@ -286,7 +286,7 @@
/* SRAM on Colibri nEXT_CS1 */
sram@1,0 {
- compatible = "cypress,cy7c1019dv33-10zsxi, mtd-ram";
+ compatible = "cypress,cy7c1019dv33-10zsxi", "mtd-ram";
reg = <1 0 0x00010000>;
#address-cells = <1>;
#size-cells = <1>;
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
index 978dc1c2ff1b..4d18952658f8 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-phycore-som.dtsi
@@ -192,7 +192,6 @@
pinctrl-0 = <&pinctrl_usdhc4>;
bus-width = <8>;
non-removable;
- vmmc-supply = <&vdd_emmc_1p8>;
status = "disabled";
};
diff --git a/arch/arm/boot/dts/imx7-colibri.dtsi b/arch/arm/boot/dts/imx7-colibri.dtsi
index d05be3f0e2a7..04717cf69db0 100644
--- a/arch/arm/boot/dts/imx7-colibri.dtsi
+++ b/arch/arm/boot/dts/imx7-colibri.dtsi
@@ -336,7 +336,6 @@
assigned-clock-rates = <400000000>;
bus-width = <8>;
fsl,tuning-step = <2>;
- max-frequency = <100000000>;
vmmc-supply = <&reg_module_3v3>;
vqmmc-supply = <&reg_DCDC3>;
non-removable;
diff --git a/arch/arm/boot/dts/imx7d.dtsi b/arch/arm/boot/dts/imx7d.dtsi
index 92f6d0c2a74f..4c22828df55f 100644
--- a/arch/arm/boot/dts/imx7d.dtsi
+++ b/arch/arm/boot/dts/imx7d.dtsi
@@ -44,7 +44,7 @@
opp-hz = /bits/ 64 <792000000>;
opp-microvolt = <1000000>;
clock-latency-ns = <150000>;
- opp-supported-hw = <0xd>, <0xf>;
+ opp-supported-hw = <0xd>, <0x7>;
opp-suspend;
};
@@ -52,7 +52,7 @@
opp-hz = /bits/ 64 <996000000>;
opp-microvolt = <1100000>;
clock-latency-ns = <150000>;
- opp-supported-hw = <0xc>, <0xf>;
+ opp-supported-hw = <0xc>, <0x7>;
opp-suspend;
};
@@ -60,7 +60,7 @@
opp-hz = /bits/ 64 <1200000000>;
opp-microvolt = <1225000>;
clock-latency-ns = <150000>;
- opp-supported-hw = <0x8>, <0xf>;
+ opp-supported-hw = <0x8>, <0x3>;
opp-suspend;
};
};
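
The opp-supported-hw tightening above narrows which fused part variants may use each operating point: every cell is a bitmask that must intersect the version value the platform code registers for that level (on i.MX7D, a speed grade and a market segment read from fuses). A hedged sketch of the registration side using dev_pm_opp_set_supported_hw(); the two BIT() values are invented for illustration:

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/pm_opp.h>

/*
 * Register two version levels for @cpu_dev so the OPP core drops any
 * table entry whose opp-supported-hw masks do not cover these bits.
 */
static int imx7d_register_opp_versions(struct device *cpu_dev)
{
	u32 versions[2] = { BIT(2), BIT(0) };	/* speed grade, segment */
	struct opp_table *table;

	table = dev_pm_opp_set_supported_hw(cpu_dev, versions,
					    ARRAY_SIZE(versions));
	return PTR_ERR_OR_ZERO(table);
}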
diff --git a/arch/arm/boot/dts/ls1021a.dtsi b/arch/arm/boot/dts/ls1021a.dtsi
index 0855b1fe98e0..760a68c163c8 100644
--- a/arch/arm/boot/dts/ls1021a.dtsi
+++ b/arch/arm/boot/dts/ls1021a.dtsi
@@ -747,7 +747,7 @@
};
mdio0: mdio@2d24000 {
- compatible = "fsl,etsec2-mdio";
+ compatible = "gianfar";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;
@@ -756,7 +756,7 @@
};
mdio1: mdio@2d64000 {
- compatible = "fsl,etsec2-mdio";
+ compatible = "gianfar";
device_type = "mdio";
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/arm/boot/dts/motorola-mapphone-common.dtsi b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
index 85665506f4f8..b6e82b165f5c 100644
--- a/arch/arm/boot/dts/motorola-mapphone-common.dtsi
+++ b/arch/arm/boot/dts/motorola-mapphone-common.dtsi
@@ -182,6 +182,14 @@
pwm-names = "enable", "direction";
direction-duty-cycle-ns = <10000000>;
};
+
+ backlight: backlight {
+ compatible = "led-backlight";
+
+ leds = <&backlight_led>;
+ brightness-levels = <31 63 95 127 159 191 223 255>;
+ default-brightness-level = <6>;
+ };
};
&dss {
@@ -205,6 +213,8 @@
vddi-supply = <&lcd_regulator>;
reset-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; /* gpio101 */
+ backlight = <&backlight>;
+
width-mm = <50>;
height-mm = <89>;
@@ -393,12 +403,11 @@
ramp-up-us = <1024>;
ramp-down-us = <8193>;
- led@0 {
+ backlight_led: led@0 {
reg = <0>;
led-sources = <2>;
ti,led-mode = <0>;
label = ":backlight";
- linux,default-trigger = "backlight";
};
led@1 {
diff --git a/arch/arm/boot/dts/r8a7779.dtsi b/arch/arm/boot/dts/r8a7779.dtsi
index beb9885e6ffc..c0999e27e9b1 100644
--- a/arch/arm/boot/dts/r8a7779.dtsi
+++ b/arch/arm/boot/dts/r8a7779.dtsi
@@ -377,7 +377,7 @@
};
sata: sata@fc600000 {
- compatible = "renesas,sata-r8a7779", "renesas,rcar-sata";
+ compatible = "renesas,sata-r8a7779";
reg = <0xfc600000 0x200000>;
interrupts = <GIC_SPI 100 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&mstp1_clks R8A7779_CLK_SATA>;
diff --git a/arch/arm/boot/dts/stih410-b2260.dts b/arch/arm/boot/dts/stih410-b2260.dts
index 4fbd8e9eb5b7..e2bb59783146 100644
--- a/arch/arm/boot/dts/stih410-b2260.dts
+++ b/arch/arm/boot/dts/stih410-b2260.dts
@@ -178,9 +178,6 @@
phy-mode = "rgmii";
pinctrl-0 = <&pinctrl_rgmii1 &pinctrl_rgmii1_mdio_1>;
- snps,phy-bus-name = "stmmac";
- snps,phy-bus-id = <0>;
- snps,phy-addr = <0>;
snps,reset-gpio = <&pio0 7 0>;
snps,reset-active-low;
snps,reset-delays-us = <0 10000 1000000>;
diff --git a/arch/arm/boot/dts/stihxxx-b2120.dtsi b/arch/arm/boot/dts/stihxxx-b2120.dtsi
index 60e11045ad76..d051f080e52e 100644
--- a/arch/arm/boot/dts/stihxxx-b2120.dtsi
+++ b/arch/arm/boot/dts/stihxxx-b2120.dtsi
@@ -46,7 +46,7 @@
/* DAC */
format = "i2s";
mclk-fs = <256>;
- frame-inversion = <1>;
+ frame-inversion;
cpu {
sound-dai = <&sti_uni_player2>;
};
diff --git a/arch/arm/configs/am200epdkit_defconfig b/arch/arm/configs/am200epdkit_defconfig
index 622436f44783..f56ac394caf1 100644
--- a/arch/arm/configs/am200epdkit_defconfig
+++ b/arch/arm/configs/am200epdkit_defconfig
@@ -11,8 +11,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_GUMSTIX=y
CONFIG_PCCARD=y
diff --git a/arch/arm/configs/axm55xx_defconfig b/arch/arm/configs/axm55xx_defconfig
index f53634af014b..6ea7dafa4c9e 100644
--- a/arch/arm/configs/axm55xx_defconfig
+++ b/arch/arm/configs/axm55xx_defconfig
@@ -25,7 +25,6 @@ CONFIG_EMBEDDED=y
CONFIG_PROFILING=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_AXXIA=y
CONFIG_GPIO_PCA953X=y
CONFIG_ARM_LPAE=y
diff --git a/arch/arm/configs/bcm2835_defconfig b/arch/arm/configs/bcm2835_defconfig
index 519ff58e67b3..0afcae9f7cf8 100644
--- a/arch/arm/configs/bcm2835_defconfig
+++ b/arch/arm/configs/bcm2835_defconfig
@@ -178,6 +178,7 @@ CONFIG_SCHED_TRACER=y
CONFIG_STACK_TRACER=y
CONFIG_FUNCTION_PROFILER=y
CONFIG_TEST_KSTRTOX=y
+CONFIG_DEBUG_FS=y
CONFIG_KGDB=y
CONFIG_KGDB_KDB=y
CONFIG_STRICT_DEVMEM=y
diff --git a/arch/arm/configs/clps711x_defconfig b/arch/arm/configs/clps711x_defconfig
index c255dab36bde..63a153f5cf68 100644
--- a/arch/arm/configs/clps711x_defconfig
+++ b/arch/arm/configs/clps711x_defconfig
@@ -7,7 +7,6 @@ CONFIG_EMBEDDED=y
CONFIG_SLOB=y
CONFIG_JUMP_LABEL=y
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_CLPS711X=y
CONFIG_ARCH_AUTCPU12=y
CONFIG_ARCH_CDB89712=y
diff --git a/arch/arm/configs/cns3420vb_defconfig b/arch/arm/configs/cns3420vb_defconfig
index 89df0a55a065..66a80b46038d 100644
--- a/arch/arm/configs/cns3420vb_defconfig
+++ b/arch/arm/configs/cns3420vb_defconfig
@@ -17,7 +17,7 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
-CONFIG_IOSCHED_CFQ=m
+CONFIG_IOSCHED_BFQ=m
CONFIG_ARCH_MULTI_V6=y
#CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_CNS3XXX=y
diff --git a/arch/arm/configs/colibri_pxa300_defconfig b/arch/arm/configs/colibri_pxa300_defconfig
index 446134c70a33..0dae3b185284 100644
--- a/arch/arm/configs/colibri_pxa300_defconfig
+++ b/arch/arm/configs/colibri_pxa300_defconfig
@@ -43,7 +43,6 @@ CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_MON=y
CONFIG_USB_STORAGE=y
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_PXA=y
CONFIG_EXT3_FS=y
CONFIG_NFS_FS=y
diff --git a/arch/arm/configs/collie_defconfig b/arch/arm/configs/collie_defconfig
index e6df11e906ba..36384fd575f8 100644
--- a/arch/arm/configs/collie_defconfig
+++ b/arch/arm/configs/collie_defconfig
@@ -7,8 +7,6 @@ CONFIG_EXPERT=y
# CONFIG_BASE_FULL is not set
# CONFIG_EPOLL is not set
CONFIG_SLOB=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SA1100=y
CONFIG_SA1100_COLLIE=y
CONFIG_PCCARD=y
diff --git a/arch/arm/configs/davinci_all_defconfig b/arch/arm/configs/davinci_all_defconfig
index 231f8973bbb2..e849367c0566 100644
--- a/arch/arm/configs/davinci_all_defconfig
+++ b/arch/arm/configs/davinci_all_defconfig
@@ -15,8 +15,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MULTIPLATFORM=y
CONFIG_ARCH_MULTI_V7=n
CONFIG_ARCH_MULTI_V5=y
@@ -160,7 +158,7 @@ CONFIG_VIDEO_TVP514X=m
CONFIG_VIDEO_ADV7343=m
CONFIG_DRM=m
CONFIG_DRM_TILCDC=m
-CONFIG_DRM_DUMB_VGA_DAC=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_TINYDRM=m
CONFIG_TINYDRM_ST7586=m
CONFIG_FB=y
diff --git a/arch/arm/configs/efm32_defconfig b/arch/arm/configs/efm32_defconfig
index 10ea92513a69..46213f0530c4 100644
--- a/arch/arm/configs/efm32_defconfig
+++ b/arch/arm/configs/efm32_defconfig
@@ -12,8 +12,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_ARM_SINGLE_ARMV7M=y
CONFIG_ARCH_EFM32=y
diff --git a/arch/arm/configs/ep93xx_defconfig b/arch/arm/configs/ep93xx_defconfig
index ef2d2a820c30..cd16fb6eb8e6 100644
--- a/arch/arm/configs/ep93xx_defconfig
+++ b/arch/arm/configs/ep93xx_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_EP93XX=y
CONFIG_CRUNCH=y
CONFIG_MACH_ADSSPHERE=y
diff --git a/arch/arm/configs/eseries_pxa_defconfig b/arch/arm/configs/eseries_pxa_defconfig
index 56452fa03d56..046f4dc2e18e 100644
--- a/arch/arm/configs/eseries_pxa_defconfig
+++ b/arch/arm/configs/eseries_pxa_defconfig
@@ -9,8 +9,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_ESERIES=y
# CONFIG_ARM_THUMB is not set
diff --git a/arch/arm/configs/ezx_defconfig b/arch/arm/configs/ezx_defconfig
index 4e28771beecd..bd7b7f945e01 100644
--- a/arch/arm/configs/ezx_defconfig
+++ b/arch/arm/configs/ezx_defconfig
@@ -14,7 +14,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_PXA_EZX=y
CONFIG_NO_HZ=y
diff --git a/arch/arm/configs/h3600_defconfig b/arch/arm/configs/h3600_defconfig
index 4d91e41cb628..c02b3e409610 100644
--- a/arch/arm/configs/h3600_defconfig
+++ b/arch/arm/configs/h3600_defconfig
@@ -5,8 +5,6 @@ CONFIG_LOG_BUF_SHIFT=14
CONFIG_BLK_DEV_INITRD=y
CONFIG_MODULES=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SA1100=y
CONFIG_SA1100_H3600=y
CONFIG_PCCARD=y
diff --git a/arch/arm/configs/h5000_defconfig b/arch/arm/configs/h5000_defconfig
index 3946c6087327..f5a338fefda8 100644
--- a/arch/arm/configs/h5000_defconfig
+++ b/arch/arm/configs/h5000_defconfig
@@ -10,7 +10,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_H5000=y
CONFIG_AEABI=y
diff --git a/arch/arm/configs/imote2_defconfig b/arch/arm/configs/imote2_defconfig
index 770469f61c3e..05c5515fa871 100644
--- a/arch/arm/configs/imote2_defconfig
+++ b/arch/arm/configs/imote2_defconfig
@@ -13,7 +13,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_INTELMOTE2=y
CONFIG_NO_HZ=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 2b2d617e279d..3df90fc38398 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -32,8 +32,6 @@ CONFIG_KPROBES=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/integrator_defconfig b/arch/arm/configs/integrator_defconfig
index 2f0a762dc3a0..a9755c501bec 100644
--- a/arch/arm/configs/integrator_defconfig
+++ b/arch/arm/configs/integrator_defconfig
@@ -55,7 +55,7 @@ CONFIG_SMC91X=y
# CONFIG_KEYBOARD_ATKBD is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_DRM=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_MATROX=y
diff --git a/arch/arm/configs/lpc18xx_defconfig b/arch/arm/configs/lpc18xx_defconfig
index e518168a0627..be882ea0eee4 100644
--- a/arch/arm/configs/lpc18xx_defconfig
+++ b/arch/arm/configs/lpc18xx_defconfig
@@ -1,4 +1,3 @@
-CONFIG_CROSS_COMPILE="arm-linux-gnueabihf-"
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT=y
CONFIG_BLK_DEV_INITRD=y
@@ -28,10 +27,7 @@ CONFIG_FLASH_SIZE=0x00080000
CONFIG_ZBOOT_ROM_TEXT=0x0
CONFIG_ZBOOT_ROM_BSS=0x0
CONFIG_ARM_APPENDED_DTB=y
-# CONFIG_LBDAF is not set
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_BINFMT_FLAT=y
CONFIG_BINFMT_ZFLAT=y
CONFIG_BINFMT_SHARED_FLAT=y
diff --git a/arch/arm/configs/magician_defconfig b/arch/arm/configs/magician_defconfig
index e6486c959220..d2e684f6565a 100644
--- a/arch/arm/configs/magician_defconfig
+++ b/arch/arm/configs/magician_defconfig
@@ -9,8 +9,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_H4700=y
CONFIG_MACH_MAGICIAN=y
diff --git a/arch/arm/configs/moxart_defconfig b/arch/arm/configs/moxart_defconfig
index 45d27190c9c9..6834e97af348 100644
--- a/arch/arm/configs/moxart_defconfig
+++ b/arch/arm/configs/moxart_defconfig
@@ -15,7 +15,6 @@ CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_MULTI_V4=y
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_MOXART=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 017d65f86eba..0b020863abdb 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -670,11 +670,11 @@ CONFIG_DRM_PANEL_ORISETECH_OTM8009A=m
CONFIG_DRM_PANEL_RAYDIUM_RM68200=m
CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03=m
CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0=m
-CONFIG_DRM_DUMB_VGA_DAC=m
CONFIG_DRM_NXP_PTN3460=m
CONFIG_DRM_PARADE_PS8622=m
CONFIG_DRM_SII902X=m
CONFIG_DRM_SII9234=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_TOSHIBA_TC358764=m
CONFIG_DRM_I2C_ADV7511=m
CONFIG_DRM_I2C_ADV7511_AUDIO=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 2773899c21b3..a9c6f32a9b1c 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -25,8 +25,6 @@ CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_BLK_DEV_INTEGRITY=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
diff --git a/arch/arm/configs/omap1_defconfig b/arch/arm/configs/omap1_defconfig
index 0c43c589f191..3b6e7452609b 100644
--- a/arch/arm/configs/omap1_defconfig
+++ b/arch/arm/configs/omap1_defconfig
@@ -18,8 +18,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_OMAP=y
CONFIG_ARCH_OMAP1=y
CONFIG_OMAP_RESET_CLOCKS=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index c32c338f7704..54f1a21de7e0 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -350,14 +350,13 @@ CONFIG_DRM_OMAP=m
CONFIG_OMAP5_DSS_HDMI=y
CONFIG_OMAP2_DSS_SDI=y
CONFIG_OMAP2_DSS_DSI=y
-CONFIG_DRM_OMAP_ENCODER_OPA362=m
-CONFIG_DRM_OMAP_ENCODER_TPD12S015=m
-CONFIG_DRM_OMAP_CONNECTOR_HDMI=m
-CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV=m
CONFIG_DRM_OMAP_PANEL_DSI_CM=m
CONFIG_DRM_TILCDC=m
CONFIG_DRM_PANEL_SIMPLE=m
+CONFIG_DRM_DISPLAY_CONNECTOR=m
+CONFIG_DRM_SIMPLE_BRIDGE=m
CONFIG_DRM_TI_TFP410=m
+CONFIG_DRM_TI_TPD12S015=m
CONFIG_DRM_PANEL_LG_LB035Q02=m
CONFIG_DRM_PANEL_NEC_NL8048HL11=m
CONFIG_DRM_PANEL_SHARP_LS037V7DW01=m
@@ -375,6 +374,7 @@ CONFIG_BACKLIGHT_GENERIC=m
CONFIG_BACKLIGHT_PWM=m
CONFIG_BACKLIGHT_PANDORA=m
CONFIG_BACKLIGHT_GPIO=m
+CONFIG_BACKLIGHT_LED=m
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y
CONFIG_LOGO=y
diff --git a/arch/arm/configs/palmz72_defconfig b/arch/arm/configs/palmz72_defconfig
index 4a3fd82c2a0c..b47c8abe85bc 100644
--- a/arch/arm/configs/palmz72_defconfig
+++ b/arch/arm/configs/palmz72_defconfig
@@ -7,8 +7,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_PXA_PALM=y
# CONFIG_MACH_PALMTX is not set
diff --git a/arch/arm/configs/pcm027_defconfig b/arch/arm/configs/pcm027_defconfig
index a8c53228b0c1..e97a158081fc 100644
--- a/arch/arm/configs/pcm027_defconfig
+++ b/arch/arm/configs/pcm027_defconfig
@@ -13,8 +13,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_PCM027=y
CONFIG_MACH_PCM990_BASEBOARD=y
diff --git a/arch/arm/configs/pleb_defconfig b/arch/arm/configs/pleb_defconfig
index f0541b060cfa..2170148b975c 100644
--- a/arch/arm/configs/pleb_defconfig
+++ b/arch/arm/configs/pleb_defconfig
@@ -6,8 +6,6 @@ CONFIG_EXPERT=y
# CONFIG_HOTPLUG is not set
# CONFIG_SHMEM is not set
CONFIG_MODULES=y
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_SA1100=y
CONFIG_SA1100_PLEB=y
CONFIG_ZBOOT_ROM_TEXT=0x0
diff --git a/arch/arm/configs/realview_defconfig b/arch/arm/configs/realview_defconfig
index 8a056cc0c1ec..70e2c74a9f32 100644
--- a/arch/arm/configs/realview_defconfig
+++ b/arch/arm/configs/realview_defconfig
@@ -8,7 +8,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_MULTI_V6=y
CONFIG_ARCH_REALVIEW=y
CONFIG_MACH_REALVIEW_EB=y
diff --git a/arch/arm/configs/sama5_defconfig b/arch/arm/configs/sama5_defconfig
index 27f6135c4ee7..bab7861443dc 100644
--- a/arch/arm/configs/sama5_defconfig
+++ b/arch/arm/configs/sama5_defconfig
@@ -14,8 +14,6 @@ CONFIG_MODULE_FORCE_LOAD=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_AT91=y
CONFIG_SOC_SAMA5D2=y
CONFIG_SOC_SAMA5D3=y
@@ -182,7 +180,6 @@ CONFIG_USB_GADGET=y
CONFIG_USB_ATMEL_USBA=y
CONFIG_USB_G_SERIAL=y
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
CONFIG_MMC_SDHCI_OF_AT91=y
diff --git a/arch/arm/configs/shmobile_defconfig b/arch/arm/configs/shmobile_defconfig
index 64fa849f8bbe..838307a9bb92 100644
--- a/arch/arm/configs/shmobile_defconfig
+++ b/arch/arm/configs/shmobile_defconfig
@@ -125,9 +125,9 @@ CONFIG_VIDEO_ML86V7667=y
CONFIG_DRM=y
CONFIG_DRM_RCAR_DU=y
CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_DUMB_VGA_DAC=y
CONFIG_DRM_LVDS_CODEC=y
CONFIG_DRM_SII902X=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_I2C_ADV7511=y
CONFIG_DRM_I2C_ADV7511_AUDIO=y
CONFIG_FB_SH_MOBILE_LCDC=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index fe2e1e82e233..e73c97b0f5b0 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -157,6 +157,7 @@ CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_FS=y
CONFIG_DETECT_HUNG_TASK=y
# CONFIG_SCHED_DEBUG is not set
CONFIG_FUNCTION_TRACER=y
diff --git a/arch/arm/configs/stm32_defconfig b/arch/arm/configs/stm32_defconfig
index 152321d2893e..551db328009d 100644
--- a/arch/arm/configs/stm32_defconfig
+++ b/arch/arm/configs/stm32_defconfig
@@ -14,8 +14,6 @@ CONFIG_EMBEDDED=y
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_SLUB_DEBUG is not set
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
# CONFIG_MMU is not set
CONFIG_ARCH_STM32=y
CONFIG_CPU_V7M_NUM_IRQ=240
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 3f5d727efc41..61b8be19e527 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -85,6 +85,7 @@ CONFIG_BATTERY_AXP20X=y
CONFIG_AXP20X_POWER=y
CONFIG_THERMAL=y
CONFIG_CPU_THERMAL=y
+CONFIG_SUN8I_THERMAL=y
CONFIG_WATCHDOG=y
CONFIG_SUNXI_WATCHDOG=y
CONFIG_MFD_AC100=y
@@ -100,7 +101,7 @@ CONFIG_RC_DEVICES=y
CONFIG_IR_SUNXI=y
CONFIG_DRM=y
CONFIG_DRM_SUN4I=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_FB_SIMPLE=y
CONFIG_SOUND=y
CONFIG_SND=y
diff --git a/arch/arm/configs/u300_defconfig b/arch/arm/configs/u300_defconfig
index 8223397db047..543f07338100 100644
--- a/arch/arm/configs/u300_defconfig
+++ b/arch/arm/configs/u300_defconfig
@@ -11,7 +11,6 @@ CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_CFQ is not set
# CONFIG_ARCH_MULTI_V7 is not set
CONFIG_ARCH_U300=y
CONFIG_MACH_U300_SPIDUMMY=y
@@ -46,7 +45,6 @@ CONFIG_FB=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
# CONFIG_USB_SUPPORT is not set
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_ARMMMCI=y
CONFIG_RTC_CLASS=y
# CONFIG_RTC_HCTOSYS is not set
diff --git a/arch/arm/configs/versatile_defconfig b/arch/arm/configs/versatile_defconfig
index fe4d4b596585..767935337413 100644
--- a/arch/arm/configs/versatile_defconfig
+++ b/arch/arm/configs/versatile_defconfig
@@ -59,7 +59,7 @@ CONFIG_GPIO_PL061=y
CONFIG_DRM=y
CONFIG_DRM_PANEL_ARM_VERSATILE=y
CONFIG_DRM_PANEL_SIMPLE=y
-CONFIG_DRM_DUMB_VGA_DAC=y
+CONFIG_DRM_SIMPLE_BRIDGE=y
CONFIG_DRM_PL111=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
diff --git a/arch/arm/configs/vexpress_defconfig b/arch/arm/configs/vexpress_defconfig
index 25753552277a..c01baf7d6e37 100644
--- a/arch/arm/configs/vexpress_defconfig
+++ b/arch/arm/configs/vexpress_defconfig
@@ -15,8 +15,6 @@ CONFIG_OPROFILE=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_VEXPRESS=y
CONFIG_ARCH_VEXPRESS_DCSCB=y
CONFIG_ARCH_VEXPRESS_TC2_PM=y
diff --git a/arch/arm/configs/viper_defconfig b/arch/arm/configs/viper_defconfig
index 2ff16168d9c2..989599ce5300 100644
--- a/arch/arm/configs/viper_defconfig
+++ b/arch/arm/configs/viper_defconfig
@@ -9,7 +9,6 @@ CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_ARCH_VIPER=y
CONFIG_IWMMXT=y
diff --git a/arch/arm/configs/zeus_defconfig b/arch/arm/configs/zeus_defconfig
index aa3023c9a011..d3b98c4d225b 100644
--- a/arch/arm/configs/zeus_defconfig
+++ b/arch/arm/configs/zeus_defconfig
@@ -4,7 +4,6 @@ CONFIG_LOG_BUF_SHIFT=13
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_PXA=y
CONFIG_MACH_ARCOM_ZEUS=y
CONFIG_PCCARD=m
@@ -137,7 +136,6 @@ CONFIG_USB_MASS_STORAGE=m
CONFIG_USB_G_SERIAL=m
CONFIG_USB_G_PRINTER=m
CONFIG_MMC=y
-# CONFIG_MMC_BLOCK_BOUNCE is not set
CONFIG_MMC_PXA=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=m
diff --git a/arch/arm/configs/zx_defconfig b/arch/arm/configs/zx_defconfig
index 4d2ef785ed34..a046a492bfa7 100644
--- a/arch/arm/configs/zx_defconfig
+++ b/arch/arm/configs/zx_defconfig
@@ -16,7 +16,6 @@ CONFIG_EMBEDDED=y
CONFIG_PERF_EVENTS=y
CONFIG_SLAB=y
# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARCH_ZX=y
CONFIG_SOC_ZX296702=y
# CONFIG_SWP_EMULATE is not set
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index c3314b286a61..a827b4d60d38 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -392,9 +392,6 @@ static inline void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
-static inline void kvm_arm_vhe_guest_enter(void) {}
-static inline void kvm_arm_vhe_guest_exit(void) {}
-
#define KVM_BP_HARDEN_UNKNOWN -1
#define KVM_BP_HARDEN_WA_NEEDED 0
#define KVM_BP_HARDEN_NOT_REQUIRED 1
diff --git a/arch/arm/kernel/ftrace.c b/arch/arm/kernel/ftrace.c
index 2a5ff69c28e6..10499d44964a 100644
--- a/arch/arm/kernel/ftrace.c
+++ b/arch/arm/kernel/ftrace.c
@@ -78,13 +78,10 @@ static int ftrace_modify_code(unsigned long pc, unsigned long old,
{
unsigned long replaced;
- if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
+ if (IS_ENABLED(CONFIG_THUMB2_KERNEL))
old = __opcode_to_mem_thumb32(old);
- new = __opcode_to_mem_thumb32(new);
- } else {
+ else
old = __opcode_to_mem_arm(old);
- new = __opcode_to_mem_arm(new);
- }
if (validate) {
if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
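
A note on the helpers in this hunk: __opcode_to_mem_arm()/__opcode_to_mem_thumb32() translate an instruction from its canonical CPU encoding into the byte order it occupies in memory, a no-op on little-endian kernels but a swap on BE8, where instructions stay little-endian while data is big-endian. Only old, the value compared against what probe_kernel_read() fetches, still needs pre-conversion here; a hedged reading is that new is now handed on unconverted because the write path converts at poke time, as the patch.c helpers further down visibly do. A toy model of why converting new twice would corrupt it on a big-endian kernel (swab32() stands in for the real helper):

#include <linux/swab.h>
#include <linux/types.h>

static u32 to_mem_order(u32 insn)
{
	return swab32(insn);	/* BE8 stand-in for __opcode_to_mem_arm() */
}

static u32 buggy_double_convert(u32 insn)
{
	/* The two swaps cancel: CPU-order bytes would land in memory. */
	return to_mem_order(to_mem_order(insn));
}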
diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
index d0a05a3bdb96..e9e828b6bb30 100644
--- a/arch/arm/kernel/patch.c
+++ b/arch/arm/kernel/patch.c
@@ -16,10 +16,10 @@ struct patch {
unsigned int insn;
};
+#ifdef CONFIG_MMU
static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
- __acquires(&patch_lock)
{
unsigned int uintaddr = (uintptr_t) addr;
bool module = !core_kernel_text(uintaddr);
@@ -34,8 +34,6 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
if (flags)
raw_spin_lock_irqsave(&patch_lock, *flags);
- else
- __acquire(&patch_lock);
set_fixmap(fixmap, page_to_phys(page));
@@ -43,15 +41,19 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
}
static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
- __releases(&patch_lock)
{
clear_fixmap(fixmap);
if (flags)
raw_spin_unlock_irqrestore(&patch_lock, *flags);
- else
- __release(&patch_lock);
}
+#else
+static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
+{
+ return addr;
+}
+static void __kprobes patch_unmap(int fixmap, unsigned long *flags) { }
+#endif
void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
{
@@ -64,8 +66,6 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
if (remap)
waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);
- else
- __acquire(&patch_lock);
if (thumb2 && __opcode_is_thumb16(insn)) {
*(u16 *)waddr = __opcode_to_mem_thumb16(insn);
@@ -102,8 +102,7 @@ void __kprobes __patch_text_real(void *addr, unsigned int insn, bool remap)
if (waddr != addr) {
flush_kernel_vmap_range(waddr, twopage ? size / 2 : size);
patch_unmap(FIX_TEXT_POKE0, &flags);
- } else
- __release(&patch_lock);
+ }
flush_icache_range((uintptr_t)(addr),
(uintptr_t)(addr) + size);
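
The patch.c change trades the sparse __acquire()/__release() bookkeeping for a real #ifdef split: with an MMU the poked page is aliased writable through a fixmap slot, and without one patch_map() degenerates to returning the address untouched, so a single caller works for both. A condensed sketch of the resulting call shape, simplified from the function above (the real code also handles Thumb-2 halfword splits and the non-remap path):

/*
 * Poke one 32-bit instruction at @addr via the fixmap alias, then
 * make the new bytes visible to the instruction stream.
 */
static void patch_one_insn(void *addr, u32 insn)
{
	unsigned long flags;
	void *waddr = patch_map(addr, FIX_TEXT_POKE0, &flags);

	*(u32 *)waddr = insn;
	if (waddr != addr) {
		flush_kernel_vmap_range(waddr, sizeof(insn));
		patch_unmap(FIX_TEXT_POKE0, &flags);
	}
	flush_icache_range((uintptr_t)addr, (uintptr_t)addr + sizeof(insn));
}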
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 35ff620537e6..03506ce46149 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -91,6 +91,8 @@ AFLAGS_suspend-imx6.o :=-Wa,-march=armv7-a
obj-$(CONFIG_SOC_IMX6) += suspend-imx6.o
obj-$(CONFIG_SOC_IMX53) += suspend-imx53.o
endif
+AFLAGS_resume-imx6.o :=-Wa,-march=armv7-a
+obj-$(CONFIG_SOC_IMX6) += resume-imx6.o
obj-$(CONFIG_SOC_IMX6) += pm-imx6.o
obj-$(CONFIG_SOC_IMX1) += mach-imx1.o
diff --git a/arch/arm/mach-imx/common.h b/arch/arm/mach-imx/common.h
index 912aeceb4ff8..5aa5796cff0e 100644
--- a/arch/arm/mach-imx/common.h
+++ b/arch/arm/mach-imx/common.h
@@ -109,17 +109,17 @@ void imx_cpu_die(unsigned int cpu);
int imx_cpu_kill(unsigned int cpu);
#ifdef CONFIG_SUSPEND
-void v7_cpu_resume(void);
void imx53_suspend(void __iomem *ocram_vbase);
extern const u32 imx53_suspend_sz;
void imx6_suspend(void __iomem *ocram_vbase);
#else
-static inline void v7_cpu_resume(void) {}
static inline void imx53_suspend(void __iomem *ocram_vbase) {}
static const u32 imx53_suspend_sz;
static inline void imx6_suspend(void __iomem *ocram_vbase) {}
#endif
+void v7_cpu_resume(void);
+
void imx6_pm_ccm_init(const char *ccm_compat);
void imx6q_pm_init(void);
void imx6dl_pm_init(void);
diff --git a/arch/arm/mach-imx/resume-imx6.S b/arch/arm/mach-imx/resume-imx6.S
new file mode 100644
index 000000000000..5bd1ba7ef15b
--- /dev/null
+++ b/arch/arm/mach-imx/resume-imx6.S
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright 2014 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/hardware/cache-l2x0.h>
+#include "hardware.h"
+
+/*
+ * The following code must assume it is running from physical address
+ * where absolute virtual addresses to the data section have to be
+ * turned into relative ones.
+ */
+
+ENTRY(v7_cpu_resume)
+ bl v7_invalidate_l1
+#ifdef CONFIG_CACHE_L2X0
+ bl l2c310_early_resume
+#endif
+ b cpu_resume
+ENDPROC(v7_cpu_resume)
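
Together with the common.h hunk above, moving v7_cpu_resume into its own object makes it build whenever CONFIG_SOC_IMX6 is set rather than only under CONFIG_SUSPEND: the resume vector is also needed when cores are powered down from deep idle. A hypothetical caller sketch; imx_set_cpu_jump() is the existing mach-imx helper that stashes a core's physical resume address, and the function below is invented purely for illustration:

#include "common.h"

/* Arm the resume vector before entering a power-gated idle state. */
static void imx6_arm_resume_vector(int cpu)
{
	imx_set_cpu_jump(cpu, v7_cpu_resume);
}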
diff --git a/arch/arm/mach-imx/suspend-imx6.S b/arch/arm/mach-imx/suspend-imx6.S
index 062391ff13da..1eabf2d2834b 100644
--- a/arch/arm/mach-imx/suspend-imx6.S
+++ b/arch/arm/mach-imx/suspend-imx6.S
@@ -327,17 +327,3 @@ resume:
ret lr
ENDPROC(imx6_suspend)
-
-/*
- * The following code must assume it is running from physical address
- * where absolute virtual addresses to the data section have to be
- * turned into relative ones.
- */
-
-ENTRY(v7_cpu_resume)
- bl v7_invalidate_l1
-#ifdef CONFIG_CACHE_L2X0
- bl l2c310_early_resume
-#endif
- b cpu_resume
-ENDPROC(v7_cpu_resume)
diff --git a/arch/arm/mach-meson/Kconfig b/arch/arm/mach-meson/Kconfig
index 01f0f4b765e0..75034fe197e3 100644
--- a/arch/arm/mach-meson/Kconfig
+++ b/arch/arm/mach-meson/Kconfig
@@ -9,7 +9,6 @@ menuconfig ARCH_MESON
select CACHE_L2X0
select PINCTRL
select PINCTRL_MESON
- select COMMON_CLK
select HAVE_ARM_SCU if SMP
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-npcm/Kconfig b/arch/arm/mach-npcm/Kconfig
index 880bc2a5cada..7f7002dc2b21 100644
--- a/arch/arm/mach-npcm/Kconfig
+++ b/arch/arm/mach-npcm/Kconfig
@@ -11,7 +11,7 @@ config ARCH_NPCM7XX
depends on ARCH_MULTI_V7
select PINCTRL_NPCM7XX
select NPCM7XX_TIMER
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select CACHE_L2X0
select ARM_GIC
select HAVE_ARM_TWD if SMP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index e1135b9d67c6..5017a3be0ff0 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -16,7 +16,7 @@ hwmod-common = omap_hwmod.o omap_hwmod_reset.o \
clock-common = clock.o
secure-common = omap-smc.o omap-secure.o
-obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
+obj-$(CONFIG_ARCH_OMAP2) += $(omap-2-3-common) $(hwmod-common)
obj-$(CONFIG_ARCH_OMAP3) += $(omap-2-3-common) $(hwmod-common) $(secure-common)
obj-$(CONFIG_ARCH_OMAP4) += $(hwmod-common) $(secure-common)
obj-$(CONFIG_SOC_AM33XX) += $(hwmod-common) $(secure-common)
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index f28047233665..27608d1026cb 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -431,7 +431,6 @@ void __init omap2420_init_early(void)
omap_hwmod_init_postsetup();
omap_clk_soc_init = omap2420_dt_clk_init;
rate_table = omap2420_rate_table;
- omap_secure_init();
}
void __init omap2420_init_late(void)
@@ -456,7 +455,6 @@ void __init omap2430_init_early(void)
omap_hwmod_init_postsetup();
omap_clk_soc_init = omap2430_dt_clk_init;
rate_table = omap2430_rate_table;
- omap_secure_init();
}
void __init omap2430_init_late(void)
diff --git a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
index f82f25c1a5f9..d5dc12878dfe 100644
--- a/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-gxm-khadas-vim2.dts
@@ -327,7 +327,7 @@
#size-cells = <0>;
bus-width = <4>;
- max-frequency = <50000000>;
+ max-frequency = <60000000>;
non-removable;
disable-wp;
diff --git a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
index a8bb3fa9fec9..cb1b48f5b8b1 100644
--- a/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
+++ b/arch/arm64/boot/dts/amlogic/meson-sm1-sei610.dts
@@ -593,6 +593,7 @@
compatible = "brcm,bcm43438-bt";
interrupt-parent = <&gpio_intc>;
interrupts = <95 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "host-wakeup";
shutdown-gpios = <&gpio GPIOX_17 GPIO_ACTIVE_HIGH>;
max-speed = <2000000>;
clocks = <&wifi32k>;
diff --git a/arch/arm64/boot/dts/arm/fvp-base-revc.dts b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
index 62ab0d54ff71..335fff762451 100644
--- a/arch/arm64/boot/dts/arm/fvp-base-revc.dts
+++ b/arch/arm64/boot/dts/arm/fvp-base-revc.dts
@@ -161,10 +161,10 @@
bus-range = <0x0 0x1>;
reg = <0x0 0x40000000 0x0 0x10000000>;
ranges = <0x2000000 0x0 0x50000000 0x0 0x50000000 0x0 0x10000000>;
- interrupt-map = <0 0 0 1 &gic GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 2 &gic GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 3 &gic GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
- <0 0 0 4 &gic GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-map = <0 0 0 1 &gic 0 0 GIC_SPI 168 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 2 &gic 0 0 GIC_SPI 169 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 3 &gic 0 0 GIC_SPI 170 IRQ_TYPE_LEVEL_HIGH>,
+ <0 0 0 4 &gic 0 0 GIC_SPI 171 IRQ_TYPE_LEVEL_HIGH>;
interrupt-map-mask = <0x0 0x0 0x0 0x7>;
msi-map = <0x0 &its 0x0 0x10000>;
iommu-map = <0x0 &smmu 0x0 0x10000>;
diff --git a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
index d3d26cca7d52..13460a360c6a 100644
--- a/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
+++ b/arch/arm64/boot/dts/freescale/imx8qxp-mek.dts
@@ -52,11 +52,6 @@
compatible = "ethernet-phy-ieee802.3-c22";
reg = <0>;
};
-
- ethphy1: ethernet-phy@1 {
- compatible = "ethernet-phy-ieee802.3-c22";
- reg = <1>;
- };
};
};
diff --git a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
index e1d357eaad7c..d8c44d3ca15a 100644
--- a/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
+++ b/arch/arm64/boot/dts/intel/socfpga_agilex.dtsi
@@ -102,7 +102,7 @@
};
gmac0: ethernet@ff800000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff800000 0x2000>;
interrupts = <0 90 4>;
interrupt-names = "macirq";
@@ -118,7 +118,7 @@
};
gmac1: ethernet@ff802000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff802000 0x2000>;
interrupts = <0 91 4>;
interrupt-names = "macirq";
@@ -134,7 +134,7 @@
};
gmac2: ethernet@ff804000 {
- compatible = "altr,socfpga-stmmac", "snps,dwmac-3.74a", "snps,dwmac";
+ compatible = "altr,socfpga-stmmac-a10-s10", "snps,dwmac-3.74a", "snps,dwmac";
reg = <0xff804000 0x2000>;
interrupts = <0 92 4>;
interrupt-names = "macirq";
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 0f212889c931..4db223dbc549 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -452,6 +452,7 @@ CONFIG_THERMAL_GOV_POWER_ALLOCATOR=y
CONFIG_CPU_THERMAL=y
CONFIG_THERMAL_EMULATION=y
CONFIG_QORIQ_THERMAL=m
+CONFIG_SUN8I_THERMAL=y
CONFIG_ROCKCHIP_THERMAL=m
CONFIG_RCAR_THERMAL=y
CONFIG_RCAR_GEN3_THERMAL=y
@@ -547,6 +548,7 @@ CONFIG_ROCKCHIP_DW_MIPI_DSI=y
CONFIG_ROCKCHIP_INNO_HDMI=y
CONFIG_DRM_RCAR_DU=m
CONFIG_DRM_SUN4I=m
+CONFIG_DRM_SUN6I_DSI=m
CONFIG_DRM_SUN8I_DW_HDMI=m
CONFIG_DRM_SUN8I_MIXER=m
CONFIG_DRM_MSM=m
@@ -681,7 +683,7 @@ CONFIG_RTC_DRV_SNVS=m
CONFIG_RTC_DRV_IMX_SC=m
CONFIG_RTC_DRV_XGENE=y
CONFIG_DMADEVICES=y
-CONFIG_DMA_BCM2835=m
+CONFIG_DMA_BCM2835=y
CONFIG_DMA_SUN6I=m
CONFIG_FSL_EDMA=y
CONFIG_IMX_SDMA=y
@@ -771,7 +773,7 @@ CONFIG_ARCH_R8A774A1=y
CONFIG_ARCH_R8A774B1=y
CONFIG_ARCH_R8A774C0=y
CONFIG_ARCH_R8A7795=y
-CONFIG_ARCH_R8A7796=y
+CONFIG_ARCH_R8A77960=y
CONFIG_ARCH_R8A77961=y
CONFIG_ARCH_R8A77965=y
CONFIG_ARCH_R8A77970=y
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 25fec4bde43a..a358e97572c1 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -32,7 +32,7 @@ static inline void gic_write_eoir(u32 irq)
isb();
}
-static inline void gic_write_dir(u32 irq)
+static __always_inline void gic_write_dir(u32 irq)
{
write_sysreg_s(irq, SYS_ICC_DIR_EL1);
isb();
diff --git a/arch/arm64/include/asm/cache.h b/arch/arm64/include/asm/cache.h
index 806e9dc2a852..a4d1b5f771f6 100644
--- a/arch/arm64/include/asm/cache.h
+++ b/arch/arm64/include/asm/cache.h
@@ -69,7 +69,7 @@ static inline int icache_is_aliasing(void)
return test_bit(ICACHEF_ALIASING, &__icache_flags);
}
-static inline int icache_is_vpipt(void)
+static __always_inline int icache_is_vpipt(void)
{
return test_bit(ICACHEF_VPIPT, &__icache_flags);
}
diff --git a/arch/arm64/include/asm/cacheflush.h b/arch/arm64/include/asm/cacheflush.h
index 665c78e0665a..e6cca3d4acf7 100644
--- a/arch/arm64/include/asm/cacheflush.h
+++ b/arch/arm64/include/asm/cacheflush.h
@@ -145,7 +145,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
extern void flush_dcache_page(struct page *);
-static inline void __flush_icache_all(void)
+static __always_inline void __flush_icache_all(void)
{
if (cpus_have_const_cap(ARM64_HAS_CACHE_DIC))
return;
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 92ef9539874a..2a746b99e937 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -435,13 +435,13 @@ cpuid_feature_extract_signed_field(u64 features, int field)
return cpuid_feature_extract_signed_field_width(features, field, 4);
}
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field_width(u64 features, int field, int width)
{
return (u64)(features << (64 - width - field)) >> (64 - width);
}
-static inline unsigned int __attribute_const__
+static __always_inline unsigned int __attribute_const__
cpuid_feature_extract_unsigned_field(u64 features, int field)
{
return cpuid_feature_extract_unsigned_field_width(features, field, 4);
@@ -564,7 +564,7 @@ static inline bool system_supports_mixed_endian(void)
return val == 0x1;
}
-static inline bool system_supports_fpsimd(void)
+static __always_inline bool system_supports_fpsimd(void)
{
return !cpus_have_const_cap(ARM64_HAS_NO_FPSIMD);
}
@@ -575,13 +575,13 @@ static inline bool system_uses_ttbr0_pan(void)
!cpus_have_const_cap(ARM64_HAS_PAN);
}
-static inline bool system_supports_sve(void)
+static __always_inline bool system_supports_sve(void)
{
return IS_ENABLED(CONFIG_ARM64_SVE) &&
cpus_have_const_cap(ARM64_SVE);
}
-static inline bool system_supports_cnp(void)
+static __always_inline bool system_supports_cnp(void)
{
return IS_ENABLED(CONFIG_ARM64_CNP) &&
cpus_have_const_cap(ARM64_HAS_CNP);
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index b87c6e276ab1..7a6e81ca23a8 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -33,7 +33,6 @@ static inline u32 disr_to_esr(u64 disr)
asmlinkage void enter_from_user_mode(void);
void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
-void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs);
void do_undefinstr(struct pt_regs *regs);
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr);
void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr,
@@ -47,7 +46,4 @@ void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr);
void do_cp15instr(unsigned int esr, struct pt_regs *regs);
void do_el0_svc(struct pt_regs *regs);
void do_el0_svc_compat(struct pt_regs *regs);
-void do_el0_ia_bp_hardening(unsigned long addr, unsigned int esr,
- struct pt_regs *regs);
-
#endif /* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h
index 4e531f57147d..6facd1308e7c 100644
--- a/arch/arm64/include/asm/io.h
+++ b/arch/arm64/include/asm/io.h
@@ -34,7 +34,7 @@ static inline void __raw_writew(u16 val, volatile void __iomem *addr)
}
#define __raw_writel __raw_writel
-static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+static __always_inline void __raw_writel(u32 val, volatile void __iomem *addr)
{
asm volatile("str %w0, [%1]" : : "rZ" (val), "r" (addr));
}
@@ -69,7 +69,7 @@ static inline u16 __raw_readw(const volatile void __iomem *addr)
}
#define __raw_readl __raw_readl
-static inline u32 __raw_readl(const volatile void __iomem *addr)
+static __always_inline u32 __raw_readl(const volatile void __iomem *addr)
{
u32 val;
asm volatile(ALTERNATIVE("ldr %w0, [%1]",
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 688c63412cc2..f658dda12364 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -36,7 +36,7 @@ void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
-static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
return !(vcpu->arch.hcr_el2 & HCR_RW);
}
@@ -127,7 +127,7 @@ static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
vcpu->arch.vsesr_el2 = vsesr;
}
-static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}
@@ -153,17 +153,17 @@ static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long
*__vcpu_elr_el1(vcpu) = v;
}
-static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}
-static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}
-static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
if (vcpu_mode_is_32bit(vcpu))
return kvm_condition_valid32(vcpu);
@@ -181,13 +181,13 @@ static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
* coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
* AArch32 with banked registers.
*/
-static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
+static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
u8 reg_num)
{
return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}
-static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
+static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
unsigned long val)
{
if (reg_num != 31)
@@ -264,12 +264,12 @@ static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
return mode != PSR_MODE_EL0t;
}
-static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
+static __always_inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.esr_el2;
}
-static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
@@ -279,12 +279,12 @@ static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
return -1;
}
-static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
return vcpu->arch.fault.far_el2;
}
-static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
@@ -299,7 +299,7 @@ static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}
-static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}
@@ -319,17 +319,17 @@ static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SF);
}
-static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}
-static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
-static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
@@ -340,18 +340,18 @@ static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}
-static inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
+static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
/* This one is not specific to Data Abort */
-static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}
-static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}
@@ -361,17 +361,17 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}
-static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}
-static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
+static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}
-static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
+static __always_inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
switch (kvm_vcpu_trap_get_fault(vcpu)) {
case FSC_SEA:
@@ -390,7 +390,7 @@ static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
}
}
-static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
+static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
u32 esr = kvm_vcpu_get_hsr(vcpu);
return ESR_ELx_SYS64_ISS_RT(esr);
@@ -504,7 +504,7 @@ static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
return data; /* Leave LE untouched */
}
-static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
+static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
if (vcpu_mode_is_32bit(vcpu))
kvm_skip_instr32(vcpu, is_wide_instr);
@@ -519,7 +519,7 @@ static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
* Skip an instruction which has been emulated at hyp while most guest sysregs
* are live.
*/
-static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
+static __always_inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(SYS_SPSR);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index d87aa609d2b6..57fd46acd058 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -626,38 +626,6 @@ static inline void kvm_set_pmu_events(u32 set, struct perf_event_attr *attr) {}
static inline void kvm_clr_pmu_events(u32 clr) {}
#endif
-static inline void kvm_arm_vhe_guest_enter(void)
-{
- local_daif_mask();
-
- /*
- * Having IRQs masked via PMR when entering the guest means the GIC
- * will not signal the CPU of interrupts of lower priority, and the
- * only way to get out will be via guest exceptions.
- * Naturally, we want to avoid this.
- *
- * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
- * dsb to ensure the redistributor is forwards EL2 IRQs to the CPU.
- */
- pmr_sync();
-}
-
-static inline void kvm_arm_vhe_guest_exit(void)
-{
- /*
- * local_daif_restore() takes care to properly restore PSTATE.DAIF
- * and the GIC PMR if the host is using IRQ priorities.
- */
- local_daif_restore(DAIF_PROCCTX_NOIRQ);
-
- /*
- * When we exit from the guest we change a number of CPU configuration
- * parameters, such as traps. Make sure these changes take effect
- * before running the host or additional guests.
- */
- isb();
-}
-
#define KVM_BP_HARDEN_UNKNOWN -1
#define KVM_BP_HARDEN_WA_NEEDED 0
#define KVM_BP_HARDEN_NOT_REQUIRED 1
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
index a3a6a2ba9a63..fe57f60f06a8 100644
--- a/arch/arm64/include/asm/kvm_hyp.h
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -47,6 +47,13 @@
#define read_sysreg_el2(r) read_sysreg_elx(r, _EL2, _EL1)
#define write_sysreg_el2(v,r) write_sysreg_elx(v, r, _EL2, _EL1)
+/*
+ * Without an __arch_swab32(), we fall back to ___constant_swab32(), but the
+ * static inline can allow the compiler to out-of-line this. KVM always wants
+ * the macro version as it's always inlined.
+ */
+#define __kvm_swab32(x) ___constant_swab32(x)
+
int __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu);
void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
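The comment above is worth unpacking: a plain swab32() may resolve to a static inline that the compiler is free to emit as an out-of-line call, which __hyp_text cannot tolerate. For reference, a sketch of what the macro version expands to, modeled on ___constant_swab32() in include/uapi/linux/swab.h:

	/*
	 * Pure macro: every use is expanded in place, so it can never
	 * become an out-of-line call from __hyp_text.
	 */
	#define ___constant_swab32(x) ((__u32)(				\
		(((__u32)(x) & (__u32)0x000000ffUL) << 24) |		\
		(((__u32)(x) & (__u32)0x0000ff00UL) <<  8) |		\
		(((__u32)(x) & (__u32)0x00ff0000UL) >>  8) |		\
		(((__u32)(x) & (__u32)0xff000000UL) >> 24)))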
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 53d846f1bfe7..785762860c63 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -93,7 +93,7 @@ void kvm_update_va_mask(struct alt_instr *alt,
__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
-static inline unsigned long __kern_hyp_va(unsigned long v)
+static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"
"ror %0, %0, #1\n"
@@ -473,6 +473,7 @@ static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
extern void *__kvm_bp_vect_base;
extern int __kvm_harden_el2_vector_slot;
+/* This is only called on a VHE system */
static inline void *kvm_get_hyp_vector(void)
{
struct bp_hardening_data *data = arm64_get_bp_hardening_data();
diff --git a/arch/arm64/include/asm/lse.h b/arch/arm64/include/asm/lse.h
index d429f7701c36..5d10051c3e62 100644
--- a/arch/arm64/include/asm/lse.h
+++ b/arch/arm64/include/asm/lse.h
@@ -6,7 +6,7 @@
#ifdef CONFIG_ARM64_LSE_ATOMICS
-#define __LSE_PREAMBLE ".arch armv8-a+lse\n"
+#define __LSE_PREAMBLE ".arch_extension lse\n"
#include <linux/compiler_types.h>
#include <linux/export.h>
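For context on the preamble change (this reading follows from the assembler directive semantics rather than from the patch itself): ".arch armv8-a+lse" resets the whole target architecture, silently discarding any other extensions already enabled, whereas ".arch_extension lse" only adds LSE on top of the current target. A minimal sketch of how the preamble is consumed, mirroring the pattern in atomic_lse.h (my_stadd is an invented name):

	static inline void my_stadd(int i, int *v)
	{
		asm volatile(__LSE_PREAMBLE
			     "	stadd	%w[i], %[v]\n"
			     : [v] "+Q" (*v)
			     : [i] "r" (i));
	}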
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index a4f9ca5479b0..4d94676e5a8b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -213,7 +213,7 @@ static inline unsigned long kaslr_offset(void)
((__force __typeof__(addr))sign_extend64((__force u64)(addr), 55))
#define untagged_addr(addr) ({ \
- u64 __addr = (__force u64)addr; \
+ u64 __addr = (__force u64)(addr); \
__addr &= __untagged_addr(__addr); \
(__force __typeof__(addr))__addr; \
})
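The added parentheses around addr are a standard macro-hygiene fix: a cast binds tighter than binary operators, so without them an expression argument is only partially cast. A minimal illustration (macro names invented):

	#define CAST_BAD(addr)	((__force u64)addr)
	#define CAST_GOOD(addr)	((__force u64)(addr))

	/* CAST_BAD(base + off)  -> ((u64)base + off)   casts base only */
	/* CAST_GOOD(base + off) -> ((u64)(base + off)) casts the sum   */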
diff --git a/arch/arm64/include/asm/spinlock.h b/arch/arm64/include/asm/spinlock.h
index 102404dc1e13..9083d6992603 100644
--- a/arch/arm64/include/asm/spinlock.h
+++ b/arch/arm64/include/asm/spinlock.h
@@ -18,6 +18,10 @@
* See:
* https://lore.kernel.org/lkml/[email protected]
*/
-#define vcpu_is_preempted(cpu) false
+#define vcpu_is_preempted vcpu_is_preempted
+static inline bool vcpu_is_preempted(int cpu)
+{
+ return false;
+}
#endif /* __ASM_SPINLOCK_H */
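Replacing the bare false macro with a typed stub keeps the constant folding while giving callers real type checking and a genuinely "used" cpu argument (my reading of the motivation; the lore thread above has the full discussion):

	/* before: any argument at all compiled silently            */
	#define vcpu_is_preempted(cpu)	false

	/* after: cpu must be an int, and the result is still a
	 * compile-time constant the optimizer can eliminate        */
	static inline bool vcpu_is_preempted(int cpu)
	{
		return false;
	}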
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 0958ed6191aa..61fd26752adc 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -83,7 +83,7 @@ static inline bool is_kernel_in_hyp_mode(void)
return read_sysreg(CurrentEL) == CurrentEL_EL2;
}
-static inline bool has_vhe(void)
+static __always_inline bool has_vhe(void)
{
if (cpus_have_const_cap(ARM64_HAS_VIRT_HOST_EXTN))
return true;
diff --git a/arch/arm64/kernel/kaslr.c b/arch/arm64/kernel/kaslr.c
index 53b8a4ee64ff..91a83104c6e8 100644
--- a/arch/arm64/kernel/kaslr.c
+++ b/arch/arm64/kernel/kaslr.c
@@ -11,6 +11,7 @@
#include <linux/sched.h>
#include <linux/types.h>
+#include <asm/archrandom.h>
#include <asm/cacheflush.h>
#include <asm/fixmap.h>
#include <asm/kernel-pgtable.h>
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index bbb0f0c145f6..00626057a384 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -466,6 +466,13 @@ static void ssbs_thread_switch(struct task_struct *next)
if (unlikely(next->flags & PF_KTHREAD))
return;
+ /*
+ * If all CPUs implement the SSBS extension, then we just need to
+ * context-switch the PSTATE field.
+ */
+ if (cpu_have_feature(cpu_feature(SSBS)))
+ return;
+
/* If the mitigation is enabled, then we leave SSBS clear. */
if ((arm64_get_ssbd_state() == ARM64_SSBD_FORCE_ENABLE) ||
test_tsk_thread_flag(next, TIF_SSBD))
@@ -608,8 +615,6 @@ long get_tagged_addr_ctrl(void)
* only prevents the tagged address ABI enabling via prctl() and does not
* disable it for tasks that already opted in to the relaxed ABI.
*/
-static int zero;
-static int one = 1;
static struct ctl_table tagged_addr_sysctl_table[] = {
{
@@ -618,8 +623,8 @@ static struct ctl_table tagged_addr_sysctl_table[] = {
.data = &tagged_addr_disabled,
.maxlen = sizeof(int),
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
- .extra2 = &one,
+ .extra1 = SYSCTL_ZERO,
+ .extra2 = SYSCTL_ONE,
},
{ }
};
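SYSCTL_ZERO and SYSCTL_ONE are the shared bound constants from include/linux/sysctl.h, which is what lets the per-file static zero/one pair above be deleted. For orientation, proc_dointvec_minmax() rejects writes outside [*extra1, *extra2], so the entry accepts only 0 or 1 (sketch; the procname and mode fields are assumed, as the hunk does not show them):

	{
		.procname	= "tagged_addr_disabled",
		.data		= &tagged_addr_disabled,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,	/* shared const: 0 */
		.extra2		= SYSCTL_ONE,	/* shared const: 1 */
	},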
diff --git a/arch/arm64/kernel/time.c b/arch/arm64/kernel/time.c
index 73f06d4b3aae..eebbc8d7123e 100644
--- a/arch/arm64/kernel/time.c
+++ b/arch/arm64/kernel/time.c
@@ -23,7 +23,7 @@
#include <linux/irq.h>
#include <linux/delay.h>
#include <linux/clocksource.h>
-#include <linux/clk-provider.h>
+#include <linux/of_clk.h>
#include <linux/acpi.h>
#include <clocksource/arm_arch_timer.h>
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index dfe8dd172512..925086b46136 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -625,7 +625,7 @@ static void __hyp_text __pmu_switch_to_host(struct kvm_cpu_context *host_ctxt)
}
/* Switch to the guest for VHE systems running in EL2 */
-int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
struct kvm_cpu_context *guest_ctxt;
@@ -678,7 +678,42 @@ int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
return exit_code;
}
-NOKPROBE_SYMBOL(kvm_vcpu_run_vhe);
+NOKPROBE_SYMBOL(__kvm_vcpu_run_vhe);
+
+int kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
+{
+ int ret;
+
+ local_daif_mask();
+
+ /*
+ * Having IRQs masked via PMR when entering the guest means the GIC
+ * will not signal the CPU of interrupts of lower priority, and the
+ * only way to get out will be via guest exceptions.
+ * Naturally, we want to avoid this.
+ *
+ * local_daif_mask() already sets GIC_PRIO_PSR_I_SET, we just need a
+ * dsb to ensure the redistributor forwards EL2 IRQs to the CPU.
+ */
+ pmr_sync();
+
+ ret = __kvm_vcpu_run_vhe(vcpu);
+
+ /*
+ * local_daif_restore() takes care to properly restore PSTATE.DAIF
+ * and the GIC PMR if the host is using IRQ priorities.
+ */
+ local_daif_restore(DAIF_PROCCTX_NOIRQ);
+
+ /*
+ * When we exit from the guest we change a number of CPU configuration
+ * parameters, such as traps. Make sure these changes take effect
+ * before running the host or additional guests.
+ */
+ isb();
+
+ return ret;
+}
/* Switch to the guest for legacy non-VHE systems */
int __hyp_text __kvm_vcpu_run_nvhe(struct kvm_vcpu *vcpu)
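Net effect of this hunk: the enter/exit bookkeeping deleted from kvm_host.h earlier in the patch now brackets the renamed __kvm_vcpu_run_vhe(), which can be static and kprobe-blacklisted as a unit. Rough call flow (a structural sketch, not verbatim source):

	kvm_vcpu_run_vhe(vcpu)			/* regular kernel text	*/
	    local_daif_mask();			/* mask DAIF, raise PMR	*/
	    pmr_sync();				/* GIC forwards IRQs	*/
	    ret = __kvm_vcpu_run_vhe(vcpu);	/* the world switch	*/
	    local_daif_restore(DAIF_PROCCTX_NOIRQ);
	    isb();				/* config changes land	*/
	    return ret;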
diff --git a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
index 29ee1feba4eb..4f3a087e36d5 100644
--- a/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
+++ b/arch/arm64/kvm/hyp/vgic-v2-cpuif-proxy.c
@@ -69,14 +69,14 @@ int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
u32 data = vcpu_get_reg(vcpu, rd);
if (__is_be(vcpu)) {
/* guest pre-swabbed data, undo this for writel() */
- data = swab32(data);
+ data = __kvm_swab32(data);
}
writel_relaxed(data, addr);
} else {
u32 data = readl_relaxed(addr);
if (__is_be(vcpu)) {
/* guest expects swabbed data */
- data = swab32(data);
+ data = __kvm_swab32(data);
}
vcpu_set_reg(vcpu, rd, data);
}
diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c
index 8ef73e89d514..d89bb22589f6 100644
--- a/arch/arm64/mm/context.c
+++ b/arch/arm64/mm/context.c
@@ -260,14 +260,26 @@ asmlinkage void post_ttbr_update_workaround(void)
CONFIG_CAVIUM_ERRATUM_27456));
}
-static int asids_init(void)
+static int asids_update_limit(void)
{
- asid_bits = get_cpu_asid_bits();
+ unsigned long num_available_asids = NUM_USER_ASIDS;
+
+ if (arm64_kernel_unmapped_at_el0())
+ num_available_asids /= 2;
/*
* Expect allocation after rollover to fail if we don't have at least
* one more ASID than CPUs. ASID #0 is reserved for init_mm.
*/
- WARN_ON(NUM_USER_ASIDS - 1 <= num_possible_cpus());
+ WARN_ON(num_available_asids - 1 <= num_possible_cpus());
+ pr_info("ASID allocator initialised with %lu entries\n",
+ num_available_asids);
+ return 0;
+}
+arch_initcall(asids_update_limit);
+
+static int asids_init(void)
+{
+ asid_bits = get_cpu_asid_bits();
atomic64_set(&asid_generation, ASID_FIRST_VERSION);
asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map),
GFP_KERNEL);
@@ -282,8 +294,6 @@ static int asids_init(void)
*/
if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0))
set_kpti_asid_bits();
-
- pr_info("ASID allocator initialised with %lu entries\n", NUM_USER_ASIDS);
return 0;
}
early_initcall(asids_init);
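Splitting asids_update_limit() out to an arch_initcall makes sense because whether the kernel is unmapped at EL0 is only settled once boot-time CPU features are finalized (my inference from the ordering; early_initcall runs well before that). A worked example with assumed numbers:

	/* asid_bits = 16 (assumed)				*/
	NUM_USER_ASIDS	= 1 << 16 = 65536
	with KPTI	: 65536 / 2 = 32768	/* ASID pairs	*/
	without KPTI	: 65536

	/* the WARN_ON then demands num_possible_cpus() < available - 1,
	 * i.e. a spare ASID beyond one per CPU after rollover		*/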
diff --git a/arch/csky/Kconfig b/arch/csky/Kconfig
index da09c884cc30..047427f71d83 100644
--- a/arch/csky/Kconfig
+++ b/arch/csky/Kconfig
@@ -9,7 +9,6 @@ config CSKY
select ARCH_USE_QUEUED_RWLOCKS if NR_CPUS>2
select COMMON_CLK
select CLKSRC_MMIO
- select CLKSRC_OF
select CSKY_MPINTC if CPU_CK860
select CSKY_MP_TIMER if CPU_CK860
select CSKY_APB_INTC
@@ -37,6 +36,7 @@ config CSKY
select GX6605S_TIMER if CPU_CK610
select HAVE_ARCH_TRACEHOOK
select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_COPY_THREAD_TLS
select HAVE_DYNAMIC_FTRACE
select HAVE_FUNCTION_TRACER
select HAVE_FUNCTION_GRAPH_TRACER
@@ -47,8 +47,8 @@ config CSKY
select HAVE_PERF_EVENTS
select HAVE_PERF_REGS
select HAVE_PERF_USER_STACK_DUMP
- select HAVE_DMA_API_DEBUG
select HAVE_DMA_CONTIGUOUS
+ select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select MAY_HAVE_SPARSE_IRQ
select MODULES_USE_ELF_RELA if MODULES
@@ -59,6 +59,11 @@ config CSKY
select TIMER_OF
select USB_ARCH_HAS_EHCI
select USB_ARCH_HAS_OHCI
+ select GENERIC_PCI_IOMAP
+ select HAVE_PCI
+ select PCI_DOMAINS_GENERIC if PCI
+ select PCI_SYSCALL if PCI
+ select PCI_MSI if PCI
config CPU_HAS_CACHEV2
bool
@@ -75,7 +80,7 @@ config CPU_HAS_TLBI
config CPU_HAS_LDSTEX
bool
help
- For SMP, CPU needs "ldex&stex" instrcutions to atomic operations.
+ For SMP, CPU needs "ldex&stex" instructions for atomic operations.
config CPU_NEED_TLBSYNC
bool
@@ -188,6 +193,40 @@ config CPU_PM_STOP
bool "stop"
endchoice
+menuconfig HAVE_TCM
+ bool "Tightly-Coupled/Sram Memory"
+ select GENERIC_ALLOCATOR
+ help
+ The implementation is not only used by TCM (Tightly-Coupled Memory)
+ but also by SRAM on the SoC bus. It follows the existing Linux TCM
+ software interface, so that old TCM application code can be
+ re-used directly.
+
+if HAVE_TCM
+config ITCM_RAM_BASE
+ hex "ITCM ram base"
+ default 0xffffffff
+
+config ITCM_NR_PAGES
+ int "Page count of ITCM size: NR*4KB"
+ range 1 256
+ default 32
+
+config HAVE_DTCM
+ bool "DTCM Support"
+
+config DTCM_RAM_BASE
+ hex "DTCM ram base"
+ depends on HAVE_DTCM
+ default 0xffffffff
+
+config DTCM_NR_PAGES
+ int "Page count of DTCM size: NR*4KB"
+ depends on HAVE_DTCM
+ range 1 256
+ default 32
+endif
+
config CPU_HAS_VDSP
bool "CPU has VDSP coprocessor"
depends on CPU_HAS_FPU && CPU_HAS_FPUV2
@@ -196,6 +235,10 @@ config CPU_HAS_FPU
bool "CPU has FPU coprocessor"
depends on CPU_CK807 || CPU_CK810 || CPU_CK860
+config CPU_HAS_ICACHE_INS
+ bool "CPU has Icache invalidate instructions"
+ depends on CPU_HAS_CACHEV2
+
config CPU_HAS_TEE
bool "CPU has Trusted Execution Environment"
depends on CPU_CK810
@@ -235,4 +278,6 @@ config HOTPLUG_CPU
Say N if you want to disable CPU hotplug.
endmenu
+source "arch/csky/Kconfig.platforms"
+
source "kernel/Kconfig.hz"
diff --git a/arch/csky/Kconfig.platforms b/arch/csky/Kconfig.platforms
new file mode 100644
index 000000000000..639e17f4eacb
--- /dev/null
+++ b/arch/csky/Kconfig.platforms
@@ -0,0 +1,9 @@
+menu "Platform drivers selection"
+
+config ARCH_CSKY_DW_APB_ICTL
+ bool "Select dw-apb interrupt controller"
+ select DW_APB_ICTL
+ default y
+ help
+ This enables support for the Synopsys DesignWare APB interrupt controller (snps,dw-apb-ictl).
+endmenu
diff --git a/arch/csky/abiv1/inc/abi/cacheflush.h b/arch/csky/abiv1/inc/abi/cacheflush.h
index 79ef9e8c1afd..d3e04208d53c 100644
--- a/arch/csky/abiv1/inc/abi/cacheflush.h
+++ b/arch/csky/abiv1/inc/abi/cacheflush.h
@@ -48,9 +48,8 @@ extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, u
#define flush_icache_page(vma, page) do {} while (0);
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
-
-#define flush_icache_user_range(vma,page,addr,len) \
- flush_dcache_page(page)
+#define flush_icache_mm_range(mm, start, end) cache_wbinv_range(start, end)
+#define flush_icache_deferred(mm) do {} while (0);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
diff --git a/arch/csky/abiv1/inc/abi/entry.h b/arch/csky/abiv1/inc/abi/entry.h
index 7ab78bd0f3b1..f35a9f3315ee 100644
--- a/arch/csky/abiv1/inc/abi/entry.h
+++ b/arch/csky/abiv1/inc/abi/entry.h
@@ -16,14 +16,16 @@
#define LSAVE_A4 40
#define LSAVE_A5 44
+#define usp ss1
+
.macro USPTOKSP
- mtcr sp, ss1
+ mtcr sp, usp
mfcr sp, ss0
.endm
.macro KSPTOUSP
mtcr sp, ss0
- mfcr sp, ss1
+ mfcr sp, usp
.endm
.macro SAVE_ALL epc_inc
@@ -45,7 +47,13 @@
add lr, r13
stw lr, (sp, 8)
+ mov lr, sp
+ addi lr, 32
+ addi lr, 32
+ addi lr, 16
+ bt 2f
mfcr lr, ss1
+2:
stw lr, (sp, 16)
stw a0, (sp, 20)
@@ -79,9 +87,10 @@
ldw a0, (sp, 12)
mtcr a0, epsr
btsti a0, 31
+ bt 1f
ldw a0, (sp, 16)
mtcr a0, ss1
-
+1:
ldw a0, (sp, 24)
ldw a1, (sp, 28)
ldw a2, (sp, 32)
@@ -102,9 +111,9 @@
addi sp, 32
addi sp, 8
- bt 1f
+ bt 2f
KSPTOUSP
-1:
+2:
rte
.endm
diff --git a/arch/csky/abiv2/cacheflush.c b/arch/csky/abiv2/cacheflush.c
index 5bb887b275e1..790f1ebfba44 100644
--- a/arch/csky/abiv2/cacheflush.c
+++ b/arch/csky/abiv2/cacheflush.c
@@ -6,46 +6,80 @@
#include <linux/mm.h>
#include <asm/cache.h>
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *pte)
{
- unsigned long start;
+ unsigned long addr;
+ struct page *page;
- start = (unsigned long) kmap_atomic(page);
+ page = pfn_to_page(pte_pfn(*pte));
+ if (page == ZERO_PAGE(0))
+ return;
- cache_wbinv_range(start, start + PAGE_SIZE);
+ if (test_and_set_bit(PG_dcache_clean, &page->flags))
+ return;
- kunmap_atomic((void *)start);
-}
+ addr = (unsigned long) kmap_atomic(page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
- unsigned long vaddr, int len)
-{
- unsigned long kaddr;
+ dcache_wb_range(addr, addr + PAGE_SIZE);
- kaddr = (unsigned long) kmap_atomic(page) + (vaddr & ~PAGE_MASK);
+ if (vma->vm_flags & VM_EXEC)
+ icache_inv_range(addr, addr + PAGE_SIZE);
+
+ kunmap_atomic((void *) addr);
+}
- cache_wbinv_range(kaddr, kaddr + len);
+void flush_icache_deferred(struct mm_struct *mm)
+{
+ unsigned int cpu = smp_processor_id();
+ cpumask_t *mask = &mm->context.icache_stale_mask;
- kunmap_atomic((void *)kaddr);
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ /*
+ * Ensure the remote hart's writes are visible to this hart.
+ * This pairs with a barrier in flush_icache_mm.
+ */
+ smp_mb();
+ local_icache_inv_all(NULL);
+ }
}
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t *pte)
+void flush_icache_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
{
- unsigned long addr, pfn;
- struct page *page;
+ unsigned int cpu;
+ cpumask_t others, *mask;
- pfn = pte_pfn(*pte);
- if (unlikely(!pfn_valid(pfn)))
- return;
+ preempt_disable();
- page = pfn_to_page(pfn);
- if (page == ZERO_PAGE(0))
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
+ if (mm == current->mm) {
+ icache_inv_range(start, end);
+ preempt_enable();
return;
+ }
+#endif
- addr = (unsigned long) kmap_atomic(page);
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
- cache_wbinv_range(addr, addr + PAGE_SIZE);
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_icache_inv_all(NULL);
- kunmap_atomic((void *) addr);
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+
+ if (mm != current->active_mm || !cpumask_empty(&others)) {
+ on_each_cpu_mask(&others, local_icache_inv_all, NULL, 1);
+ cpumask_clear(mask);
+ }
+
+ preempt_enable();
}
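The rewrite above replaces unconditional broadcast invalidation with a deferred, per-CPU protocol built on icache_stale_mask. One possible interleaving (a sketch; "hart" follows the comments' wording):

	CPU0 (writes code)			CPU1 (runs the mm later)
	----------------------------		----------------------------
	cpumask_setall(stale_mask)
	local_icache_inv_all()
	IPI CPUs currently in the mm		...
						switch_mm(next)
						  flush_icache_deferred(next)
						    bit set in stale_mask?
						    smp_mb();	/* pairs with
						       the writer's barrier */
						    local_icache_inv_all()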
diff --git a/arch/csky/abiv2/inc/abi/cacheflush.h b/arch/csky/abiv2/inc/abi/cacheflush.h
index b8db5e0b2fe3..a565e00c3f70 100644
--- a/arch/csky/abiv2/inc/abi/cacheflush.h
+++ b/arch/csky/abiv2/inc/abi/cacheflush.h
@@ -13,24 +13,27 @@
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_dup_mm(mm) do { } while (0)
+#define flush_cache_range(vma, start, end) do { } while (0)
+#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define flush_cache_range(vma, start, end) \
- do { \
- if (vma->vm_flags & VM_EXEC) \
- icache_inv_all(); \
- } while (0)
+#define PG_dcache_clean PG_arch_1
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
-#define flush_cache_page(vma, vmaddr, pfn) do { } while (0)
-#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
-#define flush_dcache_page(page) do { } while (0)
#define flush_dcache_mmap_lock(mapping) do { } while (0)
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+#define flush_icache_page(vma, page) do { } while (0)
#define flush_icache_range(start, end) cache_wbinv_range(start, end)
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
- unsigned long vaddr, int len);
+void flush_icache_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end);
+void flush_icache_deferred(struct mm_struct *mm);
#define flush_cache_vmap(start, end) do { } while (0)
#define flush_cache_vunmap(start, end) do { } while (0)
@@ -38,7 +41,13 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
- cache_wbinv_range((unsigned long)dst, (unsigned long)dst + len); \
+ if (vma->vm_flags & VM_EXEC) { \
+ dcache_wb_range((unsigned long)dst, \
+ (unsigned long)dst + len); \
+ flush_icache_mm_range(current->mm, \
+ (unsigned long)dst, \
+ (unsigned long)dst + len); \
+ } \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
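With this change, flush_dcache_page() is pure bookkeeping: it clears PG_dcache_clean to mark the page as needing maintenance, and update_mmu_cache() (in abiv2/cacheflush.c above) does the actual work at most once per dirtying. Lifecycle sketch, inferred from the two functions:

	flush_dcache_page(page)			/* kernel dirtied the page */
	    clear PG_dcache_clean

	update_mmu_cache(vma, addr, pte)	/* page faulted into a mm  */
	    test_and_set_bit(PG_dcache_clean) already set? -> nothing
	    else: dcache_wb_range(page)
	          icache_inv_range(page) if vma->vm_flags & VM_EXEC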
diff --git a/arch/csky/abiv2/inc/abi/entry.h b/arch/csky/abiv2/inc/abi/entry.h
index 9897a16b45e5..94a7a58765df 100644
--- a/arch/csky/abiv2/inc/abi/entry.h
+++ b/arch/csky/abiv2/inc/abi/entry.h
@@ -31,7 +31,13 @@
mfcr lr, epsr
stw lr, (sp, 12)
+ btsti lr, 31
+ bf 1f
+ addi lr, sp, 152
+ br 2f
+1:
mfcr lr, usp
+2:
stw lr, (sp, 16)
stw a0, (sp, 20)
@@ -64,8 +70,10 @@
mtcr a0, epc
ldw a0, (sp, 12)
mtcr a0, epsr
+ btsti a0, 31
ldw a0, (sp, 16)
mtcr a0, usp
+ mtcr a0, ss0
#ifdef CONFIG_CPU_HAS_HILO
ldw a0, (sp, 140)
@@ -86,6 +94,9 @@
addi sp, 40
ldm r16-r30, (sp)
addi sp, 72
+ bf 1f
+ mfcr sp, ss0
+1:
rte
.endm
diff --git a/arch/csky/configs/defconfig b/arch/csky/configs/defconfig
index 7ef42895dfb0..af722e4dfb47 100644
--- a/arch/csky/configs/defconfig
+++ b/arch/csky/configs/defconfig
@@ -10,9 +10,6 @@ CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
-CONFIG_DEFAULT_DEADLINE=y
-CONFIG_CPU_CK807=y
-CONFIG_CPU_HAS_FPU=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -27,10 +24,7 @@ CONFIG_SERIAL_NONSTANDARD=y
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
-CONFIG_TTY_PRINTK=y
# CONFIG_VGA_CONSOLE is not set
-CONFIG_CSKY_MPTIMER=y
-CONFIG_GX6605S_TIMER=y
CONFIG_PM_DEVFREQ=y
CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND=y
CONFIG_DEVFREQ_GOV_PERFORMANCE=y
@@ -56,6 +50,4 @@ CONFIG_CRAMFS=y
CONFIG_ROMFS_FS=y
CONFIG_NFS_FS=y
CONFIG_PRINTK_TIME=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
diff --git a/arch/csky/include/asm/Kbuild b/arch/csky/include/asm/Kbuild
index bc15a26c782f..4130e3eaa766 100644
--- a/arch/csky/include/asm/Kbuild
+++ b/arch/csky/include/asm/Kbuild
@@ -28,7 +28,6 @@ generic-y += local64.h
generic-y += mm-arch-hooks.h
generic-y += mmiowb.h
generic-y += module.h
-generic-y += pci.h
generic-y += percpu.h
generic-y += preempt.h
generic-y += qrwlock.h
diff --git a/arch/csky/include/asm/cache.h b/arch/csky/include/asm/cache.h
index 1d5fc2f78fd7..4b5c09bf1d25 100644
--- a/arch/csky/include/asm/cache.h
+++ b/arch/csky/include/asm/cache.h
@@ -16,6 +16,7 @@ void dcache_wb_line(unsigned long start);
void icache_inv_range(unsigned long start, unsigned long end);
void icache_inv_all(void);
+void local_icache_inv_all(void *priv);
void dcache_wb_range(unsigned long start, unsigned long end);
void dcache_wbinv_all(void);
diff --git a/arch/csky/include/asm/cacheflush.h b/arch/csky/include/asm/cacheflush.h
index a96da67261ae..f0b8f25429a2 100644
--- a/arch/csky/include/asm/cacheflush.h
+++ b/arch/csky/include/asm/cacheflush.h
@@ -4,6 +4,7 @@
#ifndef __ASM_CSKY_CACHEFLUSH_H
#define __ASM_CSKY_CACHEFLUSH_H
+#include <linux/mm.h>
#include <abi/cacheflush.h>
#endif /* __ASM_CSKY_CACHEFLUSH_H */
diff --git a/arch/csky/include/asm/fixmap.h b/arch/csky/include/asm/fixmap.h
index 380ff0a307df..81f9477d5330 100644
--- a/arch/csky/include/asm/fixmap.h
+++ b/arch/csky/include/asm/fixmap.h
@@ -5,12 +5,16 @@
#define __ASM_CSKY_FIXMAP_H
#include <asm/page.h>
+#include <asm/memory.h>
#ifdef CONFIG_HIGHMEM
#include <linux/threads.h>
#include <asm/kmap_types.h>
#endif
enum fixed_addresses {
+#ifdef CONFIG_HAVE_TCM
+ FIX_TCM = TCM_NR_PAGES,
+#endif
#ifdef CONFIG_HIGHMEM
FIX_KMAP_BEGIN,
FIX_KMAP_END = FIX_KMAP_BEGIN + (KM_TYPE_NR * NR_CPUS) - 1,
@@ -18,10 +22,13 @@ enum fixed_addresses {
__end_of_fixed_addresses
};
-#define FIXADDR_TOP 0xffffc000
#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
#include <asm-generic/fixmap.h>
+extern void fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base);
+extern void __init fixaddr_init(void);
+
#endif /* __ASM_CSKY_FIXMAP_H */
diff --git a/arch/csky/include/asm/memory.h b/arch/csky/include/asm/memory.h
new file mode 100644
index 000000000000..a65c6759f537
--- /dev/null
+++ b/arch/csky/include/asm/memory.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_MEMORY_H
+#define __ASM_CSKY_MEMORY_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+#include <linux/sizes.h>
+
+#define FIXADDR_TOP _AC(0xffffc000, UL)
+#define PKMAP_BASE _AC(0xff800000, UL)
+#define VMALLOC_START _AC(0xc0008000, UL)
+#define VMALLOC_END (PKMAP_BASE - (PAGE_SIZE * 2))
+
+#ifdef CONFIG_HAVE_TCM
+#ifdef CONFIG_HAVE_DTCM
+#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES + CONFIG_DTCM_NR_PAGES)
+#else
+#define TCM_NR_PAGES (CONFIG_ITCM_NR_PAGES)
+#endif
+#define FIXADDR_TCM _AC(FIXADDR_TOP - (TCM_NR_PAGES * PAGE_SIZE), UL)
+#endif
+
+#endif
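Worked example of the new constants (defaults assumed: CONFIG_ITCM_NR_PAGES=32, no DTCM, 4 KiB pages):

	TCM_NR_PAGES	= 32
	FIXADDR_TCM	= 0xffffc000 - 32 * 0x1000
			= 0xffffc000 - 0x20000
			= 0xfffdc000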
diff --git a/arch/csky/include/asm/mmu.h b/arch/csky/include/asm/mmu.h
index b382a14ea4ec..26fbb1d15df0 100644
--- a/arch/csky/include/asm/mmu.h
+++ b/arch/csky/include/asm/mmu.h
@@ -7,6 +7,7 @@
typedef struct {
atomic64_t asid;
void *vdso;
+ cpumask_t icache_stale_mask;
} mm_context_t;
#endif /* __ASM_CSKY_MMU_H */
diff --git a/arch/csky/include/asm/mmu_context.h b/arch/csky/include/asm/mmu_context.h
index 0285b0ad18b6..abdf1f1cb6ec 100644
--- a/arch/csky/include/asm/mmu_context.h
+++ b/arch/csky/include/asm/mmu_context.h
@@ -43,5 +43,7 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
write_mmu_entryhi(next->context.asid.counter);
+
+ flush_icache_deferred(next);
}
#endif /* __ASM_CSKY_MMU_CONTEXT_H */
diff --git a/arch/csky/include/asm/pci.h b/arch/csky/include/asm/pci.h
new file mode 100644
index 000000000000..ebc765b1f78b
--- /dev/null
+++ b/arch/csky/include/asm/pci.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_CSKY_PCI_H
+#define __ASM_CSKY_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 0
+#define PCIBIOS_MIN_MEM 0
+
+/* C-SKY shim does not initialize PCI bus */
+#define pcibios_assign_all_busses() 1
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQ on csky */
+ return -ENODEV;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ /* always show the domain in /proc */
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* __ASM_CSKY_PCI_H */
diff --git a/arch/csky/include/asm/pgtable.h b/arch/csky/include/asm/pgtable.h
index 4b2a41e15f2e..9b7764cb7645 100644
--- a/arch/csky/include/asm/pgtable.h
+++ b/arch/csky/include/asm/pgtable.h
@@ -5,6 +5,7 @@
#define __ASM_CSKY_PGTABLE_H
#include <asm/fixmap.h>
+#include <asm/memory.h>
#include <asm/addrspace.h>
#include <abi/pgtable-bits.h>
#include <asm-generic/pgtable-nopmd.h>
@@ -16,11 +17,6 @@
#define USER_PTRS_PER_PGD (0x80000000UL/PGDIR_SIZE)
#define FIRST_USER_ADDRESS 0UL
-#define PKMAP_BASE (0xff800000)
-
-#define VMALLOC_START (0xc0008000)
-#define VMALLOC_END (PKMAP_BASE - 2*PAGE_SIZE)
-
/*
* C-SKY is two-level paging structure:
*/
diff --git a/arch/csky/include/asm/stackprotector.h b/arch/csky/include/asm/stackprotector.h
new file mode 100644
index 000000000000..d7cd4e51edd9
--- /dev/null
+++ b/arch/csky/include/asm/stackprotector.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_STACKPROTECTOR_H
+#define _ASM_STACKPROTECTOR_H 1
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+
+#endif /* _ASM_STACKPROTECTOR_H */
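Since C-SKY (like most 32-bit ports) uses a single global guard rather than a per-task register, boot_init_stack_canary() must publish the value through __stack_chk_guard, which kernel/process.c defines later in this patch. For orientation, the shape of what -fstack-protector emits around a protected function (an illustration of compiler output, not kernel source):

	int f(void)
	{
		unsigned long __c = __stack_chk_guard;	/* prologue copy   */
		char buf[64];
		/* ... function body ... */
		if (__c != __stack_chk_guard)		/* epilogue check  */
			__stack_chk_fail();		/* does not return */
		return 0;
	}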
diff --git a/arch/csky/include/asm/tcm.h b/arch/csky/include/asm/tcm.h
new file mode 100644
index 000000000000..2b135cefb73f
--- /dev/null
+++ b/arch/csky/include/asm/tcm.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __ASM_CSKY_TCM_H
+#define __ASM_CSKY_TCM_H
+
+#ifndef CONFIG_HAVE_TCM
+#error "You should not be including tcm.h unless you have a TCM!"
+#endif
+
+#include <linux/compiler.h>
+
+/* Tag variables with this */
+#define __tcmdata __section(.tcm.data)
+/* Tag constants with this */
+#define __tcmconst __section(.tcm.rodata)
+/* Tag functions inside TCM called from outside TCM with this */
+#define __tcmfunc __section(.tcm.text) noinline
+/* Tag function inside TCM called from inside TCM with this */
+#define __tcmlocalfunc __section(.tcm.text)
+
+void *tcm_alloc(size_t len);
+void tcm_free(void *addr, size_t len);
+
+#endif
diff --git a/arch/csky/include/uapi/asm/unistd.h b/arch/csky/include/uapi/asm/unistd.h
index 211c983c7282..ba4018929733 100644
--- a/arch/csky/include/uapi/asm/unistd.h
+++ b/arch/csky/include/uapi/asm/unistd.h
@@ -1,7 +1,10 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
+#define __ARCH_WANT_STAT64
+#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SYS_CLONE
+#define __ARCH_WANT_SYS_CLONE3
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
#include <asm-generic/unistd.h>
diff --git a/arch/csky/kernel/atomic.S b/arch/csky/kernel/atomic.S
index 5b84f11485ae..3821ef9b7567 100644
--- a/arch/csky/kernel/atomic.S
+++ b/arch/csky/kernel/atomic.S
@@ -17,10 +17,12 @@ ENTRY(csky_cmpxchg)
mfcr a3, epc
addi a3, TRAP0_SIZE
- subi sp, 8
+ subi sp, 16
stw a3, (sp, 0)
mfcr a3, epsr
stw a3, (sp, 4)
+ mfcr a3, usp
+ stw a3, (sp, 8)
psrset ee
#ifdef CONFIG_CPU_HAS_LDSTEX
@@ -47,7 +49,9 @@ ENTRY(csky_cmpxchg)
mtcr a3, epc
ldw a3, (sp, 4)
mtcr a3, epsr
- addi sp, 8
+ ldw a3, (sp, 8)
+ mtcr a3, usp
+ addi sp, 16
KSPTOUSP
rte
END(csky_cmpxchg)
diff --git a/arch/csky/kernel/process.c b/arch/csky/kernel/process.c
index f320d9248a22..f7b231ca269a 100644
--- a/arch/csky/kernel/process.c
+++ b/arch/csky/kernel/process.c
@@ -16,6 +16,12 @@
struct cpuinfo_csky cpu_data[NR_CPUS];
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
asmlinkage void ret_from_fork(void);
asmlinkage void ret_from_kernel_thread(void);
@@ -34,10 +40,11 @@ unsigned long thread_saved_pc(struct task_struct *tsk)
return sw->r15;
}
-int copy_thread(unsigned long clone_flags,
+int copy_thread_tls(unsigned long clone_flags,
unsigned long usp,
unsigned long kthread_arg,
- struct task_struct *p)
+ struct task_struct *p,
+ unsigned long tls)
{
struct switch_stack *childstack;
struct pt_regs *childregs = task_pt_regs(p);
@@ -64,7 +71,7 @@ int copy_thread(unsigned long clone_flags,
childregs->usp = usp;
if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = childregs->tls
- = childregs->regs[0];
+ = tls;
childregs->a0 = 0;
childstack->r15 = (unsigned long) ret_from_fork;
diff --git a/arch/csky/kernel/setup.c b/arch/csky/kernel/setup.c
index 52eaf31ba27f..3821e55742f4 100644
--- a/arch/csky/kernel/setup.c
+++ b/arch/csky/kernel/setup.c
@@ -47,9 +47,6 @@ static void __init csky_memblock_init(void)
signed long size;
memblock_reserve(__pa(_stext), _end - _stext);
-#ifdef CONFIG_BLK_DEV_INITRD
- memblock_reserve(__pa(initrd_start), initrd_end - initrd_start);
-#endif
early_init_fdt_reserve_self();
early_init_fdt_scan_reserved_mem();
@@ -133,6 +130,8 @@ void __init setup_arch(char **cmdline_p)
sparse_init();
+ fixaddr_init();
+
#ifdef CONFIG_HIGHMEM
kmap_init();
#endif
diff --git a/arch/csky/kernel/smp.c b/arch/csky/kernel/smp.c
index b753d382e4ce..0bb0954d5570 100644
--- a/arch/csky/kernel/smp.c
+++ b/arch/csky/kernel/smp.c
@@ -120,7 +120,7 @@ void __init setup_smp_ipi(void)
int rc;
if (ipi_irq == 0)
- panic("%s IRQ mapping failed\n", __func__);
+ return;
rc = request_percpu_irq(ipi_irq, handle_ipi, "IPI Interrupt",
&ipi_dummy_dev);
diff --git a/arch/csky/kernel/time.c b/arch/csky/kernel/time.c
index b5fc9447d93f..52379d866fe4 100644
--- a/arch/csky/kernel/time.c
+++ b/arch/csky/kernel/time.c
@@ -1,8 +1,8 @@
// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2018 Hangzhou C-SKY Microsystems co.,ltd.
-#include <linux/clk-provider.h>
#include <linux/clocksource.h>
+#include <linux/of_clk.h>
void __init time_init(void)
{
diff --git a/arch/csky/kernel/vmlinux.lds.S b/arch/csky/kernel/vmlinux.lds.S
index 2ff37beaf2bf..f05b413df328 100644
--- a/arch/csky/kernel/vmlinux.lds.S
+++ b/arch/csky/kernel/vmlinux.lds.S
@@ -2,6 +2,7 @@
#include <asm/vmlinux.lds.h>
#include <asm/page.h>
+#include <asm/memory.h>
OUTPUT_ARCH(csky)
ENTRY(_start)
@@ -53,6 +54,54 @@ SECTIONS
RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
_edata = .;
+#ifdef CONFIG_HAVE_TCM
+ .tcm_start : {
+ . = ALIGN(PAGE_SIZE);
+ __tcm_start = .;
+ }
+
+ .text_data_tcm FIXADDR_TCM : AT(__tcm_start)
+ {
+ . = ALIGN(4);
+ __stcm_text_data = .;
+ *(.tcm.text)
+ *(.tcm.rodata)
+#ifndef CONFIG_HAVE_DTCM
+ *(.tcm.data)
+#endif
+ . = ALIGN(4);
+ __etcm_text_data = .;
+ }
+
+ . = ADDR(.tcm_start) + SIZEOF(.tcm_start) + SIZEOF(.text_data_tcm);
+
+#ifdef CONFIG_HAVE_DTCM
+ #define ITCM_SIZE CONFIG_ITCM_NR_PAGES * PAGE_SIZE
+
+ .dtcm_start : {
+ __dtcm_start = .;
+ }
+
+ .data_tcm FIXADDR_TCM + ITCM_SIZE : AT(__dtcm_start)
+ {
+ . = ALIGN(4);
+ __stcm_data = .;
+ *(.tcm.data)
+ . = ALIGN(4);
+ __etcm_data = .;
+ }
+
+ . = ADDR(.dtcm_start) + SIZEOF(.data_tcm);
+
+ .tcm_end : AT(ADDR(.dtcm_start) + SIZEOF(.data_tcm)) {
+#else
+ .tcm_end : AT(ADDR(.tcm_start) + SIZEOF(.text_data_tcm)) {
+#endif
+ . = ALIGN(PAGE_SIZE);
+ __tcm_end = .;
+ }
+#endif
+
EXCEPTION_TABLE(L1_CACHE_BYTES)
BSS_SECTION(L1_CACHE_BYTES, PAGE_SIZE, L1_CACHE_BYTES)
VBR_BASE
diff --git a/arch/csky/mm/Makefile b/arch/csky/mm/Makefile
index c94ef6481098..6e7696e55f71 100644
--- a/arch/csky/mm/Makefile
+++ b/arch/csky/mm/Makefile
@@ -1,8 +1,10 @@
# SPDX-License-Identifier: GPL-2.0-only
ifeq ($(CONFIG_CPU_HAS_CACHEV2),y)
obj-y += cachev2.o
+CFLAGS_REMOVE_cachev2.o = $(CC_FLAGS_FTRACE)
else
obj-y += cachev1.o
+CFLAGS_REMOVE_cachev1.o = $(CC_FLAGS_FTRACE)
endif
obj-y += dma-mapping.o
@@ -14,3 +16,4 @@ obj-y += syscache.o
obj-y += tlb.o
obj-y += asid.o
obj-y += context.o
+obj-$(CONFIG_HAVE_TCM) += tcm.o
diff --git a/arch/csky/mm/cachev1.c b/arch/csky/mm/cachev1.c
index 494ec912abff..5a5a9804a0e3 100644
--- a/arch/csky/mm/cachev1.c
+++ b/arch/csky/mm/cachev1.c
@@ -94,6 +94,11 @@ void icache_inv_all(void)
cache_op_all(INS_CACHE|CACHE_INV, 0);
}
+void local_icache_inv_all(void *priv)
+{
+ cache_op_all(INS_CACHE|CACHE_INV, 0);
+}
+
void dcache_wb_range(unsigned long start, unsigned long end)
{
cache_op_range(start, end, DATA_CACHE|CACHE_CLR, 0);
diff --git a/arch/csky/mm/cachev2.c b/arch/csky/mm/cachev2.c
index b61be6518e21..bc419f8039d3 100644
--- a/arch/csky/mm/cachev2.c
+++ b/arch/csky/mm/cachev2.c
@@ -3,15 +3,25 @@
#include <linux/spinlock.h>
#include <linux/smp.h>
+#include <linux/mm.h>
#include <asm/cache.h>
#include <asm/barrier.h>
-inline void dcache_wb_line(unsigned long start)
+#define INS_CACHE (1 << 0)
+#define CACHE_INV (1 << 4)
+
+void local_icache_inv_all(void *priv)
{
- asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
+ mtcr("cr17", INS_CACHE|CACHE_INV);
sync_is();
}
+void icache_inv_all(void)
+{
+ on_each_cpu(local_icache_inv_all, NULL, 1);
+}
+
+#ifdef CONFIG_CPU_HAS_ICACHE_INS
void icache_inv_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
@@ -20,43 +30,32 @@ void icache_inv_range(unsigned long start, unsigned long end)
asm volatile("icache.iva %0\n"::"r"(i):"memory");
sync_is();
}
-
-void icache_inv_all(void)
+#else
+void icache_inv_range(unsigned long start, unsigned long end)
{
- asm volatile("icache.ialls\n":::"memory");
- sync_is();
+ icache_inv_all();
}
+#endif
-void dcache_wb_range(unsigned long start, unsigned long end)
+inline void dcache_wb_line(unsigned long start)
{
- unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
- for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
+ asm volatile("dcache.cval1 %0\n"::"r"(start):"memory");
sync_is();
}
-void dcache_inv_range(unsigned long start, unsigned long end)
+void dcache_wb_range(unsigned long start, unsigned long end)
{
unsigned long i = start & ~(L1_CACHE_BYTES - 1);
for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("dcache.civa %0\n"::"r"(i):"memory");
+ asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
sync_is();
}
void cache_wbinv_range(unsigned long start, unsigned long end)
{
- unsigned long i = start & ~(L1_CACHE_BYTES - 1);
-
- for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("dcache.cval1 %0\n"::"r"(i):"memory");
- sync_is();
-
- i = start & ~(L1_CACHE_BYTES - 1);
- for (; i < end; i += L1_CACHE_BYTES)
- asm volatile("icache.iva %0\n"::"r"(i):"memory");
- sync_is();
+ dcache_wb_range(start, end);
+ icache_inv_range(start, end);
}
EXPORT_SYMBOL(cache_wbinv_range);
diff --git a/arch/csky/mm/highmem.c b/arch/csky/mm/highmem.c
index 3317b774f6dc..813129145f3d 100644
--- a/arch/csky/mm/highmem.c
+++ b/arch/csky/mm/highmem.c
@@ -117,85 +117,29 @@ struct page *kmap_atomic_to_page(void *ptr)
return pte_page(*pte);
}
-static void __init fixrange_init(unsigned long start, unsigned long end,
- pgd_t *pgd_base)
+static void __init kmap_pages_init(void)
{
-#ifdef CONFIG_HIGHMEM
- pgd_t *pgd;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- int i, j, k;
unsigned long vaddr;
-
- vaddr = start;
- i = __pgd_offset(vaddr);
- j = __pud_offset(vaddr);
- k = __pmd_offset(vaddr);
- pgd = pgd_base + i;
-
- for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
- pud = (pud_t *)pgd;
- for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
- pmd = (pmd_t *)pud;
- for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
- if (pmd_none(*pmd)) {
- pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
- if (!pte)
- panic("%s: Failed to allocate %lu bytes align=%lx\n",
- __func__, PAGE_SIZE,
- PAGE_SIZE);
-
- set_pmd(pmd, __pmd(__pa(pte)));
- BUG_ON(pte != pte_offset_kernel(pmd, 0));
- }
- vaddr += PMD_SIZE;
- }
- k = 0;
- }
- j = 0;
- }
-#endif
-}
-
-void __init fixaddr_kmap_pages_init(void)
-{
- unsigned long vaddr;
- pgd_t *pgd_base;
-#ifdef CONFIG_HIGHMEM
pgd_t *pgd;
pmd_t *pmd;
pud_t *pud;
pte_t *pte;
-#endif
- pgd_base = swapper_pg_dir;
-
- /*
- * Fixed mappings:
- */
- vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
- fixrange_init(vaddr, 0, pgd_base);
-
-#ifdef CONFIG_HIGHMEM
- /*
- * Permanent kmaps:
- */
+
vaddr = PKMAP_BASE;
- fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
+ fixrange_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, swapper_pg_dir);
pgd = swapper_pg_dir + __pgd_offset(vaddr);
pud = (pud_t *)pgd;
pmd = pmd_offset(pud, vaddr);
pte = pte_offset_kernel(pmd, vaddr);
pkmap_page_table = pte;
-#endif
}
void __init kmap_init(void)
{
unsigned long vaddr;
- fixaddr_kmap_pages_init();
+ kmap_pages_init();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN);
diff --git a/arch/csky/mm/init.c b/arch/csky/mm/init.c
index d4c2292ea46b..cb64d8647a78 100644
--- a/arch/csky/mm/init.c
+++ b/arch/csky/mm/init.c
@@ -19,6 +19,7 @@
#include <linux/swap.h>
#include <linux/proc_fs.h>
#include <linux/pfn.h>
+#include <linux/initrd.h>
#include <asm/setup.h>
#include <asm/cachectl.h>
@@ -31,10 +32,50 @@
pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
__page_aligned_bss;
EXPORT_SYMBOL(empty_zero_page);
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ unsigned long size;
+
+ if (initrd_start >= initrd_end) {
+ pr_err("initrd not found or empty");
+ goto disable;
+ }
+
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ pr_err("initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ size = initrd_end - initrd_start;
+
+ if (memblock_is_region_reserved(__pa(initrd_start), size)) {
+ pr_err("INITRD: 0x%08lx+0x%08lx overlaps in-use memory region",
+ __pa(initrd_start), size);
+ goto disable;
+ }
+
+ memblock_reserve(__pa(initrd_start), size);
+
+ pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+
+ initrd_below_start_ok = 1;
+
+ return;
+
+disable:
+ initrd_start = initrd_end = 0;
+
+ pr_err(" - disabling initrd\n");
+}
+#endif
+
void __init mem_init(void)
{
#ifdef CONFIG_HIGHMEM
@@ -46,6 +87,10 @@ void __init mem_init(void)
#endif
high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif
+
memblock_free_all();
#ifdef CONFIG_HIGHMEM
@@ -101,3 +146,50 @@ void __init pre_mmu_init(void)
/* Setup page mask to 4k */
write_mmu_pagemask(0);
}
+
+void __init fixrange_init(unsigned long start, unsigned long end,
+ pgd_t *pgd_base)
+{
+ pgd_t *pgd;
+ pud_t *pud;
+ pmd_t *pmd;
+ pte_t *pte;
+ int i, j, k;
+ unsigned long vaddr;
+
+ vaddr = start;
+ i = __pgd_offset(vaddr);
+ j = __pud_offset(vaddr);
+ k = __pmd_offset(vaddr);
+ pgd = pgd_base + i;
+
+ for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
+ pud = (pud_t *)pgd;
+ for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
+ pmd = (pmd_t *)pud;
+ for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
+ if (pmd_none(*pmd)) {
+ pte = (pte_t *) memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate %lu bytes align=%lx\n",
+ __func__, PAGE_SIZE,
+ PAGE_SIZE);
+
+ set_pmd(pmd, __pmd(__pa(pte)));
+ BUG_ON(pte != pte_offset_kernel(pmd, 0));
+ }
+ vaddr += PMD_SIZE;
+ }
+ k = 0;
+ }
+ j = 0;
+ }
+}
+
+void __init fixaddr_init(void)
+{
+ unsigned long vaddr;
+
+ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
+ fixrange_init(vaddr, vaddr + PMD_SIZE, swapper_pg_dir);
+}
diff --git a/arch/csky/mm/syscache.c b/arch/csky/mm/syscache.c
index c4645e4e97f4..ffade2f9a4c8 100644
--- a/arch/csky/mm/syscache.c
+++ b/arch/csky/mm/syscache.c
@@ -3,7 +3,7 @@
#include <linux/syscalls.h>
#include <asm/page.h>
-#include <asm/cache.h>
+#include <asm/cacheflush.h>
#include <asm/cachectl.h>
SYSCALL_DEFINE3(cacheflush,
@@ -13,17 +13,14 @@ SYSCALL_DEFINE3(cacheflush,
{
switch (cache) {
case ICACHE:
- icache_inv_range((unsigned long)addr,
- (unsigned long)addr + bytes);
- break;
+ case BCACHE:
+ flush_icache_mm_range(current->mm,
+ (unsigned long)addr,
+ (unsigned long)addr + bytes);
case DCACHE:
dcache_wb_range((unsigned long)addr,
(unsigned long)addr + bytes);
break;
- case BCACHE:
- cache_wbinv_range((unsigned long)addr,
- (unsigned long)addr + bytes);
- break;
default:
return -EINVAL;
}
diff --git a/arch/csky/mm/tcm.c b/arch/csky/mm/tcm.c
new file mode 100644
index 000000000000..ddeb36328819
--- /dev/null
+++ b/arch/csky/mm/tcm.c
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/highmem.h>
+#include <linux/genalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+#if (CONFIG_ITCM_RAM_BASE == 0xffffffff)
+#error "You should define ITCM_RAM_BASE"
+#endif
+
+#ifdef CONFIG_HAVE_DTCM
+#if (CONFIG_DTCM_RAM_BASE == 0xffffffff)
+#error "You should define DTCM_RAM_BASE"
+#endif
+
+#if (CONFIG_DTCM_RAM_BASE == CONFIG_ITCM_RAM_BASE)
+#error "You should define correct DTCM_RAM_BASE"
+#endif
+#endif
+
+extern char __tcm_start, __tcm_end, __dtcm_start;
+
+static struct gen_pool *tcm_pool;
+
+static void __init tcm_mapping_init(void)
+{
+ pte_t *tcm_pte;
+ unsigned long vaddr, paddr;
+ int i;
+
+ paddr = CONFIG_ITCM_RAM_BASE;
+
+ if (pfn_valid(PFN_DOWN(CONFIG_ITCM_RAM_BASE)))
+ goto panic;
+
+#ifndef CONFIG_HAVE_DTCM
+ for (i = 0; i < TCM_NR_PAGES; i++) {
+#else
+ for (i = 0; i < CONFIG_ITCM_NR_PAGES; i++) {
+#endif
+ vaddr = __fix_to_virt(FIX_TCM - i);
+
+ tcm_pte =
+ pte_offset_kernel((pmd_t *)pgd_offset_k(vaddr), vaddr);
+
+ set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+ flush_tlb_one(vaddr);
+
+ paddr = paddr + PAGE_SIZE;
+ }
+
+#ifdef CONFIG_HAVE_DTCM
+ if (pfn_valid(PFN_DOWN(CONFIG_DTCM_RAM_BASE)))
+ goto panic;
+
+ paddr = CONFIG_DTCM_RAM_BASE;
+
+ for (i = 0; i < CONFIG_DTCM_NR_PAGES; i++) {
+ vaddr = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES - i);
+
+ tcm_pte =
+ pte_offset_kernel((pmd_t *) pgd_offset_k(vaddr), vaddr);
+
+ set_pte(tcm_pte, pfn_pte(__phys_to_pfn(paddr), PAGE_KERNEL));
+
+ flush_tlb_one(vaddr);
+
+ paddr = paddr + PAGE_SIZE;
+ }
+#endif
+
+#ifndef CONFIG_HAVE_DTCM
+ memcpy((void *)__fix_to_virt(FIX_TCM),
+ &__tcm_start, &__tcm_end - &__tcm_start);
+
+ pr_info("%s: mapping tcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+ pr_info("%s: __tcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__tcm_start, &__tcm_end - &__tcm_start);
+#else
+ memcpy((void *)__fix_to_virt(FIX_TCM),
+ &__tcm_start, &__dtcm_start - &__tcm_start);
+
+ pr_info("%s: mapping itcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM), CONFIG_ITCM_RAM_BASE);
+
+ pr_info("%s: __itcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__tcm_start, &__dtcm_start - &__tcm_start);
+
+ memcpy((void *)__fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+ &__dtcm_start, &__tcm_end - &__dtcm_start);
+
+ pr_info("%s: mapping dtcm va:0x%08lx to pa:0x%08x\n",
+ __func__, __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES),
+ CONFIG_DTCM_RAM_BASE);
+
+ pr_info("%s: __dtcm_start va:0x%08lx size:%d\n",
+ __func__, (unsigned long)&__dtcm_start, &__tcm_end - &__dtcm_start);
+
+#endif
+ return;
+panic:
+ panic("TCM init error");
+}
+
+void *tcm_alloc(size_t len)
+{
+ unsigned long vaddr;
+
+ if (!tcm_pool)
+ return NULL;
+
+ vaddr = gen_pool_alloc(tcm_pool, len);
+ if (!vaddr)
+ return NULL;
+
+ return (void *) vaddr;
+}
+EXPORT_SYMBOL(tcm_alloc);
+
+void tcm_free(void *addr, size_t len)
+{
+ gen_pool_free(tcm_pool, (unsigned long) addr, len);
+}
+EXPORT_SYMBOL(tcm_free);
+
+static int __init tcm_setup_pool(void)
+{
+#ifndef CONFIG_HAVE_DTCM
+ u32 pool_size = (u32) (TCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__tcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM)
+ + (u32) (&__tcm_end - &__tcm_start);
+#else
+ u32 pool_size = (u32) (CONFIG_DTCM_NR_PAGES * PAGE_SIZE)
+ - (u32) (&__tcm_end - &__dtcm_start);
+
+ u32 tcm_pool_start = __fix_to_virt(FIX_TCM - CONFIG_ITCM_NR_PAGES)
+ + (u32) (&__tcm_end - &__dtcm_start);
+#endif
+ int ret;
+
+ tcm_pool = gen_pool_create(2, -1);
+
+ ret = gen_pool_add(tcm_pool, tcm_pool_start, pool_size, -1);
+ if (ret) {
+ pr_err("%s: gen_pool add failed!\n", __func__);
+ return ret;
+ }
+
+ pr_info("%s: Added %d bytes @ 0x%08x to memory pool\n",
+ __func__, pool_size, tcm_pool_start);
+
+ return 0;
+}
+
+static int __init tcm_init(void)
+{
+ tcm_mapping_init();
+
+ tcm_setup_pool();
+
+ return 0;
+}
+arch_initcall(tcm_init);
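Putting the pieces together, a hypothetical in-kernel user of the TCM support added by this patch (all names below are invented for the example):

	#include <linux/init.h>
	#include <linux/errno.h>
	#include <asm/tcm.h>

	static int __tcmdata hot_counter;	/* placed in .tcm.data	*/

	int __tcmfunc hot_add(int x)		/* executes from ITCM	*/
	{
		return hot_counter += x;
	}

	static int __init tcm_demo_init(void)
	{
		void *buf = tcm_alloc(64);	/* from the gen_pool	*/

		if (!buf)
			return -ENOMEM;
		/* ... use buf as low-latency scratch memory ... */
		tcm_free(buf, 64);
		return 0;
	}
	late_initcall(tcm_demo_init);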
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 5accda2767be..a3301bab9231 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/clock/jz4740-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
/ {
#address-cells = <1>;
@@ -45,14 +46,6 @@
#clock-cells = <1>;
};
- watchdog: watchdog@10002000 {
- compatible = "ingenic,jz4740-watchdog";
- reg = <0x10002000 0x10>;
-
- clocks = <&cgu JZ4740_CLK_RTC>;
- clock-names = "rtc";
- };
-
tcu: timer@10002000 {
compatible = "ingenic,jz4740-tcu", "simple-mfd";
reg = <0x10002000 0x1000>;
@@ -73,6 +66,14 @@
interrupt-parent = <&intc>;
interrupts = <23 22 21>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4740-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
};
rtc_dev: rtc@10003000 {
diff --git a/arch/mips/boot/dts/ingenic/jz4780.dtsi b/arch/mips/boot/dts/ingenic/jz4780.dtsi
index f928329b034b..bb89653d16a3 100644
--- a/arch/mips/boot/dts/ingenic/jz4780.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4780.dtsi
@@ -1,5 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
#include <dt-bindings/clock/jz4780-cgu.h>
+#include <dt-bindings/clock/ingenic,tcu.h>
#include <dt-bindings/dma/jz4780-dma.h>
/ {
@@ -67,6 +68,14 @@
interrupt-parent = <&intc>;
interrupts = <27 26 25>;
+
+ watchdog: watchdog@0 {
+ compatible = "ingenic,jz4780-watchdog";
+ reg = <0x0 0xc>;
+
+ clocks = <&tcu TCU_CLK_WDT>;
+ clock-names = "wdt";
+ };
};
rtc_dev: rtc@10003000 {
@@ -348,14 +357,6 @@
status = "disabled";
};
- watchdog: watchdog@10002000 {
- compatible = "ingenic,jz4780-watchdog";
- reg = <0x10002000 0x10>;
-
- clocks = <&cgu JZ4780_CLK_RTCLK>;
- clock-names = "rtc";
- };
-
nemc: nemc@13410000 {
compatible = "ingenic,jz4780-nemc";
reg = <0x13410000 0x10000>;
diff --git a/arch/mips/boot/dts/ingenic/x1000.dtsi b/arch/mips/boot/dts/ingenic/x1000.dtsi
index 4994c695a1a7..147f7d5c243a 100644
--- a/arch/mips/boot/dts/ingenic/x1000.dtsi
+++ b/arch/mips/boot/dts/ingenic/x1000.dtsi
@@ -1,4 +1,5 @@
// SPDX-License-Identifier: GPL-2.0
+#include <dt-bindings/clock/ingenic,tcu.h>
#include <dt-bindings/clock/x1000-cgu.h>
#include <dt-bindings/dma/x1000-dma.h>
@@ -72,7 +73,7 @@
compatible = "ingenic,x1000-watchdog", "ingenic,jz4780-watchdog";
reg = <0x0 0x10>;
- clocks = <&cgu X1000_CLK_RTCLK>;
+ clocks = <&tcu TCU_CLK_WDT>;
clock-names = "wdt";
};
};
@@ -158,7 +159,6 @@
i2c0: i2c-controller@10050000 {
compatible = "ingenic,x1000-i2c";
reg = <0x10050000 0x1000>;
-
#address-cells = <1>;
#size-cells = <0>;
@@ -173,7 +173,6 @@
i2c1: i2c-controller@10051000 {
compatible = "ingenic,x1000-i2c";
reg = <0x10051000 0x1000>;
-
#address-cells = <1>;
#size-cells = <0>;
@@ -188,7 +187,6 @@
i2c2: i2c-controller@10052000 {
compatible = "ingenic,x1000-i2c";
reg = <0x10052000 0x1000>;
-
#address-cells = <1>;
#size-cells = <0>;
diff --git a/arch/mips/include/asm/sync.h b/arch/mips/include/asm/sync.h
index 7c6a1095f556..aabd097933fe 100644
--- a/arch/mips/include/asm/sync.h
+++ b/arch/mips/include/asm/sync.h
@@ -155,9 +155,11 @@
* effective barrier as noted by commit 6b07d38aaa52 ("MIPS: Octeon: Use
* optimized memory barrier primitives."). Here we specify that the affected
* sync instructions should be emitted twice.
+ * Note that this expression is evaluated by the assembler (not the compiler),
+ * and that the assembler evaluates '==' as 0 or -1, not 0 or 1.
*/
#ifdef CONFIG_CPU_CAVIUM_OCTEON
-# define __SYNC_rpt(type) (1 + (type == __SYNC_wmb))
+# define __SYNC_rpt(type) (1 - (type == __SYNC_wmb))
#else
# define __SYNC_rpt(type) 1
#endif
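
The subtlety the new comment documents is easy to miss, so a worked expansion may help (a sketch of the assembler arithmetic only, not kernel code): GNU as evaluates a true comparison to -1, not 1.

```c
/*
 * With the fixed macro, on Octeon:
 *   __SYNC_rpt(__SYNC_wmb) -> 1 - (-1) == 2   sync emitted twice
 *   __SYNC_rpt(__SYNC_mb)  -> 1 -  (0) == 1   single sync
 *
 * The previous "1 + (type == __SYNC_wmb)" expanded to 1 + (-1) == 0
 * for wmb, so the assembler emitted the barrier zero times.
 */
```
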
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
index 6176b9acba95..d0d832ab3d3b 100644
--- a/arch/mips/kernel/vpe.c
+++ b/arch/mips/kernel/vpe.c
@@ -134,7 +134,7 @@ void release_vpe(struct vpe *v)
{
list_del(&v->list);
if (v->load_addr)
- release_progmem(v);
+ release_progmem(v->load_addr);
kfree(v);
}
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index aa89a41dc5dd..d7fe8408603e 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -33,6 +33,7 @@ endif
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
-O3 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+ -mrelax-pic-calls $(call cc-option, -mexplicit-relocs) \
-fno-stack-protector -fno-jump-tables -DDISABLE_BRANCH_PROFILING \
$(call cc-option, -fno-asynchronous-unwind-tables) \
$(call cc-option, -fno-stack-protector)
@@ -51,6 +52,8 @@ endif
CFLAGS_REMOVE_vgettimeofday.o = -pg
+DISABLE_VDSO := n
+
#
# For the pre-R6 code in arch/mips/vdso/vdso.h for locating
# the base address of VDSO, the linker will emit a R_MIPS_PC32
@@ -64,11 +67,24 @@ CFLAGS_REMOVE_vgettimeofday.o = -pg
ifndef CONFIG_CPU_MIPSR6
ifeq ($(call ld-ifversion, -lt, 225000000, y),y)
$(warning MIPS VDSO requires binutils >= 2.25)
- obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
- ccflags-vdso += -DDISABLE_MIPS_VDSO
+ DISABLE_VDSO := y
endif
endif
+#
+# GCC (at least up to version 9.2) appears to emit function calls that make use
+# of the GOT when targeting microMIPS, which we can't use in the VDSO due to
+# the lack of relocations. As such, we disable the VDSO for microMIPS builds.
+#
+ifdef CONFIG_CPU_MICROMIPS
+ DISABLE_VDSO := y
+endif
+
+ifeq ($(DISABLE_VDSO),y)
+ obj-vdso-y := $(filter-out vgettimeofday.o, $(obj-vdso-y))
+ ccflags-vdso += -DDISABLE_MIPS_VDSO
+endif
+
# VDSO linker flags.
VDSO_LDFLAGS := \
-Wl,-Bsymbolic -Wl,--no-undefined -Wl,-soname=linux-vdso.so.1 \
@@ -81,12 +97,18 @@ GCOV_PROFILE := n
UBSAN_SANITIZE := n
KCOV_INSTRUMENT := n
+# Check that we don't have PIC 'jalr t9' calls left
+quiet_cmd_vdso_mips_check = VDSOCHK $@
+ cmd_vdso_mips_check = if $(OBJDUMP) --disassemble $@ | egrep -h "jalr.*t9" > /dev/null; \
+ then (echo >&2 "$@: PIC 'jalr t9' calls are not supported"; \
+ rm -f $@; /bin/false); fi
+
#
# Shared build commands.
#
quiet_cmd_vdsold_and_vdso_check = LD $@
- cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check)
+ cmd_vdsold_and_vdso_check = $(cmd_vdsold); $(cmd_vdso_check); $(cmd_vdso_mips_check)
quiet_cmd_vdsold = VDSO $@
cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
index 86332080399a..080a0bf8e54b 100644
--- a/arch/powerpc/include/asm/page.h
+++ b/arch/powerpc/include/asm/page.h
@@ -295,8 +295,13 @@ static inline bool pfn_valid(unsigned long pfn)
/*
* Some number of bits at the level of the page table that points to
* a hugepte are used to encode the size. This masks those bits.
+ * On 8xx, HW assistance requires 4k alignment for the hugepte.
*/
+#ifdef CONFIG_PPC_8xx
+#define HUGEPD_SHIFT_MASK 0xfff
+#else
#define HUGEPD_SHIFT_MASK 0x3f
+#endif
#ifndef __ASSEMBLY__
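
For context, the mask covers the low bits of a hugepd entry, which carry the page-size shift alongside the pointer to the hugepte table; widening it to 0xfff on 8xx reflects the 4k alignment the hardware tablewalk assistance demands. A minimal sketch assuming the generic hugepd layout (hugepd_page_shift is illustrative, not an existing kernel helper):

```c
/* The low HUGEPD_SHIFT_MASK bits of a hugepd entry encode the page
 * shift, so the hugepte table itself must be aligned to at least
 * HUGEPD_SHIFT_MASK + 1 bytes: 64 normally, 4096 on 8xx. */
static inline unsigned long hugepd_page_shift(unsigned long hpd_val)
{
	return hpd_val & HUGEPD_SHIFT_MASK;
}
```
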
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 8387698bd5b6..eedcbfb9a6ff 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -168,6 +168,10 @@ struct thread_struct {
unsigned long srr1;
unsigned long dar;
unsigned long dsisr;
+#ifdef CONFIG_PPC_BOOK3S_32
+ unsigned long r0, r3, r4, r5, r6, r8, r9, r11;
+ unsigned long lr, ctr;
+#endif
#endif
/* Debug Registers */
struct debug_reg debug;
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index c25e562f1cd9..fcf24a365fc0 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -132,6 +132,18 @@ int main(void)
OFFSET(SRR1, thread_struct, srr1);
OFFSET(DAR, thread_struct, dar);
OFFSET(DSISR, thread_struct, dsisr);
+#ifdef CONFIG_PPC_BOOK3S_32
+ OFFSET(THR0, thread_struct, r0);
+ OFFSET(THR3, thread_struct, r3);
+ OFFSET(THR4, thread_struct, r4);
+ OFFSET(THR5, thread_struct, r5);
+ OFFSET(THR6, thread_struct, r6);
+ OFFSET(THR8, thread_struct, r8);
+ OFFSET(THR9, thread_struct, r9);
+ OFFSET(THR11, thread_struct, r11);
+ OFFSET(THLR, thread_struct, lr);
+ OFFSET(THCTR, thread_struct, ctr);
+#endif
#endif
#ifdef CONFIG_SPE
OFFSET(THREAD_EVR0, thread_struct, evr[0]);
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index e745abc5457a..245be4fafe13 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -2193,11 +2193,13 @@ static struct cpu_spec * __init setup_cpu_spec(unsigned long offset,
* oprofile_cpu_type already has a value, then we are
* possibly overriding a real PVR with a logical one,
* and, in that case, keep the current value for
- * oprofile_cpu_type.
+ * oprofile_cpu_type. Furthermore, let's ensure that the
+ * fix for the PMAO bug is enabled in compatibility mode.
*/
if (old.oprofile_cpu_type != NULL) {
t->oprofile_cpu_type = old.oprofile_cpu_type;
t->oprofile_type = old.oprofile_type;
+ t->cpu_features |= old.cpu_features & CPU_FTR_PMAO_BUG;
}
}
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index a1eaffe868de..7b048cee767c 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -1184,6 +1184,17 @@ void eeh_handle_special_event(void)
eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
eeh_handle_normal_event(pe);
} else {
+ eeh_for_each_pe(pe, tmp_pe)
+ eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
+ edev->mode &= ~EEH_DEV_NO_HANDLER;
+
+ /* Notify all devices to be down */
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
+ eeh_set_channel_state(pe, pci_channel_io_perm_failure);
+ eeh_pe_report(
+ "error_detected(permanent failure)", pe,
+ eeh_report_failure, NULL);
+
pci_lock_rescan_remove();
list_for_each_entry(hose, &hose_list, list_node) {
phb_pe = eeh_phb_pe_get(hose);
@@ -1192,16 +1203,6 @@ void eeh_handle_special_event(void)
(phb_pe->state & EEH_PE_RECOVERING))
continue;
- eeh_for_each_pe(pe, tmp_pe)
- eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
- edev->mode &= ~EEH_DEV_NO_HANDLER;
-
- /* Notify all devices to be down */
- eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
- eeh_set_channel_state(pe, pci_channel_io_perm_failure);
- eeh_pe_report(
- "error_detected(permanent failure)", pe,
- eeh_report_failure, NULL);
bus = eeh_pe_bus_get(phb_pe);
if (!bus) {
pr_err("%s: Cannot find PCI bus for "
diff --git a/arch/powerpc/kernel/entry_32.S b/arch/powerpc/kernel/entry_32.S
index 0713daa651d9..16af0d8d90a8 100644
--- a/arch/powerpc/kernel/entry_32.S
+++ b/arch/powerpc/kernel/entry_32.S
@@ -783,7 +783,7 @@ fast_exception_return:
1: lis r3,exc_exit_restart_end@ha
addi r3,r3,exc_exit_restart_end@l
cmplw r12,r3
-#if CONFIG_PPC_BOOK3S_601
+#ifdef CONFIG_PPC_BOOK3S_601
bge 2b
#else
bge 3f
@@ -791,7 +791,7 @@ fast_exception_return:
lis r4,exc_exit_restart@ha
addi r4,r4,exc_exit_restart@l
cmplw r12,r4
-#if CONFIG_PPC_BOOK3S_601
+#ifdef CONFIG_PPC_BOOK3S_601
blt 2b
#else
blt 3f
@@ -1354,12 +1354,17 @@ _GLOBAL(enter_rtas)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
RFI
-1: tophys(r9,r1)
+1: tophys_novmstack r9, r1
+#ifdef CONFIG_VMAP_STACK
+ li r0, MSR_KERNEL & ~MSR_IR /* can take DTLB miss */
+ mtmsr r0
+ isync
+#endif
lwz r8,INT_FRAME_SIZE+4(r9) /* get return address */
lwz r9,8(r9) /* original msr value */
addi r1,r1,INT_FRAME_SIZE
li r0,0
- tophys(r7, r2)
+ tophys_novmstack r7, r2
stw r0, THREAD + RTAS_SP(r7)
mtspr SPRN_SRR0,r8
mtspr SPRN_SRR1,r9
diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S
index 0493fcac6409..97c887950c3c 100644
--- a/arch/powerpc/kernel/head_32.S
+++ b/arch/powerpc/kernel/head_32.S
@@ -290,17 +290,55 @@ MachineCheck:
7: EXCEPTION_PROLOG_2
addi r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_CHRP
- bne cr1,1f
+#ifdef CONFIG_VMAP_STACK
+ mfspr r4, SPRN_SPRG_THREAD
+ tovirt(r4, r4)
+ lwz r4, RTAS_SP(r4)
+ cmpwi cr1, r4, 0
#endif
- EXC_XFER_STD(0x200, machine_check_exception)
-#ifdef CONFIG_PPC_CHRP
-1: b machine_check_in_rtas
+ beq cr1, machine_check_tramp
+ b machine_check_in_rtas
+#else
+ b machine_check_tramp
#endif
/* Data access exception. */
. = 0x300
DO_KVM 0x300
DataAccess:
+#ifdef CONFIG_VMAP_STACK
+ mtspr SPRN_SPRG_SCRATCH0,r10
+ mfspr r10, SPRN_SPRG_THREAD
+BEGIN_MMU_FTR_SECTION
+ stw r11, THR11(r10)
+ mfspr r10, SPRN_DSISR
+ mfcr r11
+#ifdef CONFIG_PPC_KUAP
+ andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH | DSISR_PROTFAULT)@h
+#else
+ andis. r10, r10, (DSISR_BAD_FAULT_32S | DSISR_DABRMATCH)@h
+#endif
+ mfspr r10, SPRN_SPRG_THREAD
+ beq hash_page_dsi
+.Lhash_page_dsi_cont:
+ mtcr r11
+ lwz r11, THR11(r10)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+ mtspr SPRN_SPRG_SCRATCH1,r11
+ mfspr r11, SPRN_DAR
+ stw r11, DAR(r10)
+ mfspr r11, SPRN_DSISR
+ stw r11, DSISR(r10)
+ mfspr r11, SPRN_SRR0
+ stw r11, SRR0(r10)
+ mfspr r11, SPRN_SRR1 /* check whether user or kernel */
+ stw r11, SRR1(r10)
+ mfcr r10
+ andi. r11, r11, MSR_PR
+
+ EXCEPTION_PROLOG_1
+ b handle_page_fault_tramp_1
+#else /* CONFIG_VMAP_STACK */
EXCEPTION_PROLOG handle_dar_dsisr=1
get_and_save_dar_dsisr_on_stack r4, r5, r11
BEGIN_MMU_FTR_SECTION
@@ -316,11 +354,32 @@ BEGIN_MMU_FTR_SECTION
FTR_SECTION_ELSE
b handle_page_fault_tramp_2
ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
+#endif /* CONFIG_VMAP_STACK */
/* Instruction access exception. */
. = 0x400
DO_KVM 0x400
InstructionAccess:
+#ifdef CONFIG_VMAP_STACK
+ mtspr SPRN_SPRG_SCRATCH0,r10
+ mtspr SPRN_SPRG_SCRATCH1,r11
+ mfspr r10, SPRN_SPRG_THREAD
+ mfspr r11, SPRN_SRR0
+ stw r11, SRR0(r10)
+ mfspr r11, SPRN_SRR1 /* check whether user or kernel */
+ stw r11, SRR1(r10)
+ mfcr r10
+BEGIN_MMU_FTR_SECTION
+ andis. r11, r11, SRR1_ISI_NOPT@h /* no pte found? */
+ bne hash_page_isi
+.Lhash_page_isi_cont:
+ mfspr r11, SPRN_SRR1 /* check whether user or kernel */
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+ andi. r11, r11, MSR_PR
+
+ EXCEPTION_PROLOG_1
+ EXCEPTION_PROLOG_2
+#else /* CONFIG_VMAP_STACK */
EXCEPTION_PROLOG
andis. r0,r9,SRR1_ISI_NOPT@h /* no pte found? */
beq 1f /* if so, try to put a PTE */
@@ -329,6 +388,7 @@ InstructionAccess:
BEGIN_MMU_FTR_SECTION
bl hash_page
END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif /* CONFIG_VMAP_STACK */
1: mr r4,r12
andis. r5,r9,DSISR_SRR1_MATCH_32S@h /* Filter relevant SRR1 bits */
stw r4, _DAR(r11)
@@ -344,7 +404,7 @@ Alignment:
EXCEPTION_PROLOG handle_dar_dsisr=1
save_dar_dsisr_on_stack r4, r5, r11
addi r3,r1,STACK_FRAME_OVERHEAD
- EXC_XFER_STD(0x600, alignment_exception)
+ b alignment_exception_tramp
/* Program check exception */
EXCEPTION(0x700, ProgramCheck, program_check_exception, EXC_XFER_STD)
@@ -645,15 +705,100 @@ END_MMU_FTR_SECTION_IFSET(MMU_FTR_NEED_DTLB_SW_LRU)
. = 0x3000
+machine_check_tramp:
+ EXC_XFER_STD(0x200, machine_check_exception)
+
+alignment_exception_tramp:
+ EXC_XFER_STD(0x600, alignment_exception)
+
handle_page_fault_tramp_1:
+#ifdef CONFIG_VMAP_STACK
+ EXCEPTION_PROLOG_2 handle_dar_dsisr=1
+#endif
lwz r4, _DAR(r11)
lwz r5, _DSISR(r11)
/* fall through */
handle_page_fault_tramp_2:
EXC_XFER_LITE(0x300, handle_page_fault)
+#ifdef CONFIG_VMAP_STACK
+.macro save_regs_thread thread
+ stw r0, THR0(\thread)
+ stw r3, THR3(\thread)
+ stw r4, THR4(\thread)
+ stw r5, THR5(\thread)
+ stw r6, THR6(\thread)
+ stw r8, THR8(\thread)
+ stw r9, THR9(\thread)
+ mflr r0
+ stw r0, THLR(\thread)
+ mfctr r0
+ stw r0, THCTR(\thread)
+.endm
+
+.macro restore_regs_thread thread
+ lwz r0, THLR(\thread)
+ mtlr r0
+ lwz r0, THCTR(\thread)
+ mtctr r0
+ lwz r0, THR0(\thread)
+ lwz r3, THR3(\thread)
+ lwz r4, THR4(\thread)
+ lwz r5, THR5(\thread)
+ lwz r6, THR6(\thread)
+ lwz r8, THR8(\thread)
+ lwz r9, THR9(\thread)
+.endm
+
+hash_page_dsi:
+ save_regs_thread r10
+ mfdsisr r3
+ mfdar r4
+ mfsrr0 r5
+ mfsrr1 r9
+ rlwinm r3, r3, 32 - 15, _PAGE_RW /* DSISR_STORE -> _PAGE_RW */
+ bl hash_page
+ mfspr r10, SPRN_SPRG_THREAD
+ restore_regs_thread r10
+ b .Lhash_page_dsi_cont
+
+hash_page_isi:
+ mr r11, r10
+ mfspr r10, SPRN_SPRG_THREAD
+ save_regs_thread r10
+ li r3, 0
+ lwz r4, SRR0(r10)
+ lwz r9, SRR1(r10)
+ bl hash_page
+ mfspr r10, SPRN_SPRG_THREAD
+ restore_regs_thread r10
+ mr r10, r11
+ b .Lhash_page_isi_cont
+
+ .globl fast_hash_page_return
+fast_hash_page_return:
+ andis. r10, r9, SRR1_ISI_NOPT@h /* Set on ISI, cleared on DSI */
+ mfspr r10, SPRN_SPRG_THREAD
+ restore_regs_thread r10
+ bne 1f
+
+ /* DSI */
+ mtcr r11
+ lwz r11, THR11(r10)
+ mfspr r10, SPRN_SPRG_SCRATCH0
+ SYNC
+ RFI
+
+1: /* ISI */
+ mtcr r11
+ mfspr r11, SPRN_SPRG_SCRATCH1
+ mfspr r10, SPRN_SPRG_SCRATCH0
+ SYNC
+ RFI
+
stack_overflow:
vmap_stack_overflow_exception
+#endif
AltiVecUnavailable:
EXCEPTION_PROLOG
diff --git a/arch/powerpc/kernel/head_32.h b/arch/powerpc/kernel/head_32.h
index a6a5fbbf8504..9db162f79fe6 100644
--- a/arch/powerpc/kernel/head_32.h
+++ b/arch/powerpc/kernel/head_32.h
@@ -64,11 +64,25 @@
.endm
.macro EXCEPTION_PROLOG_2 handle_dar_dsisr=0
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+BEGIN_MMU_FTR_SECTION
+ mtcr r10
+FTR_SECTION_ELSE
+ stw r10, _CCR(r11)
+ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_HPTE_TABLE)
+#else
stw r10,_CCR(r11) /* save registers */
+#endif
+ mfspr r10, SPRN_SPRG_SCRATCH0
stw r12,GPR12(r11)
stw r9,GPR9(r11)
- mfspr r10,SPRN_SPRG_SCRATCH0
stw r10,GPR10(r11)
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+BEGIN_MMU_FTR_SECTION
+ mfcr r10
+ stw r10, _CCR(r11)
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
mfspr r12,SPRN_SPRG_SCRATCH1
stw r12,GPR11(r11)
mflr r10
@@ -83,6 +97,11 @@
stw r10, _DSISR(r11)
.endif
lwz r9, SRR1(r12)
+#if defined(CONFIG_VMAP_STACK) && defined(CONFIG_PPC_BOOK3S)
+BEGIN_MMU_FTR_SECTION
+ andi. r10, r9, MSR_PR
+END_MMU_FTR_SECTION_IFSET(MMU_FTR_HPTE_TABLE)
+#endif
lwz r12, SRR0(r12)
#else
mfspr r12,SPRN_SRR0
diff --git a/arch/powerpc/kernel/head_8xx.S b/arch/powerpc/kernel/head_8xx.S
index 9922306ae512..073a651787df 100644
--- a/arch/powerpc/kernel/head_8xx.S
+++ b/arch/powerpc/kernel/head_8xx.S
@@ -256,7 +256,7 @@ InstructionTLBMiss:
* set. All other Linux PTE bits control the behavior
* of the MMU.
*/
- rlwimi r10, r10, 0, 0x0f00 /* Clear bits 20-23 */
+ rlwinm r10, r10, 0, ~0x0f00 /* Clear bits 20-23 */
rlwimi r10, r10, 4, 0x0400 /* Copy _PAGE_EXEC into bit 21 */
ori r10, r10, RPN_PATTERN | 0x200 /* Set 22 and 24-27 */
mtspr SPRN_MI_RPN, r10 /* Update TLB entry */
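
The one-mnemonic change above is a real bug fix, since the two instructions differ fundamentally; in C terms (a sketch of the instruction semantics, not kernel code):

```c
/*
 * rlwimi rD, rS, 0, mask :  rD = (rS & mask) | (rD & ~mask)  -- insert
 * rlwinm rD, rS, 0, mask :  rD = rS & mask                   -- and
 *
 * With rD == rS == r10 and a shift of 0, the old rlwimi reduced to
 * r10 = r10, a no-op that left bits 20-23 set.  rlwinm with the
 * inverted mask (~0x0f00) actually clears them.
 */
```
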
diff --git a/arch/powerpc/kernel/hw_breakpoint.c b/arch/powerpc/kernel/hw_breakpoint.c
index 2462cd7c565c..d0854320bb50 100644
--- a/arch/powerpc/kernel/hw_breakpoint.c
+++ b/arch/powerpc/kernel/hw_breakpoint.c
@@ -331,11 +331,13 @@ int hw_breakpoint_handler(struct die_args *args)
}
info->type &= ~HW_BRK_TYPE_EXTRANEOUS_IRQ;
- if (!dar_within_range(regs->dar, info))
- info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
-
- if (!IS_ENABLED(CONFIG_PPC_8xx) && !stepping_handler(regs, bp, info))
- goto out;
+ if (IS_ENABLED(CONFIG_PPC_8xx)) {
+ if (!dar_within_range(regs->dar, info))
+ info->type |= HW_BRK_TYPE_EXTRANEOUS_IRQ;
+ } else {
+ if (!stepping_handler(regs, bp, info))
+ goto out;
+ }
/*
* As a policy, the callback is invoked in a 'trigger-after-execute'
diff --git a/arch/powerpc/kernel/idle_6xx.S b/arch/powerpc/kernel/idle_6xx.S
index 0ffdd18b9f26..433d97bea1f3 100644
--- a/arch/powerpc/kernel/idle_6xx.S
+++ b/arch/powerpc/kernel/idle_6xx.S
@@ -166,7 +166,11 @@ BEGIN_FTR_SECTION
mfspr r9,SPRN_HID0
andis. r9,r9,HID0_NAP@h
beq 1f
+#ifdef CONFIG_VMAP_STACK
+ addis r9, r11, nap_save_msscr0@ha
+#else
addis r9,r11,(nap_save_msscr0-KERNELBASE)@ha
+#endif
lwz r9,nap_save_msscr0@l(r9)
mtspr SPRN_MSSCR0, r9
sync
@@ -174,7 +178,11 @@ BEGIN_FTR_SECTION
1:
END_FTR_SECTION_IFSET(CPU_FTR_NAP_DISABLE_L2_PR)
BEGIN_FTR_SECTION
+#ifdef CONFIG_VMAP_STACK
+ addis r9, r11, nap_save_hid1@ha
+#else
addis r9,r11,(nap_save_hid1-KERNELBASE)@ha
+#endif
lwz r9,nap_save_hid1@l(r9)
mtspr SPRN_HID1, r9
END_FTR_SECTION_IFSET(CPU_FTR_DUAL_PLL_750FX)
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index e6c30cee6abf..d215f9554553 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -200,14 +200,27 @@ unsigned long get_tm_stackpointer(struct task_struct *tsk)
* normal/non-checkpointed stack pointer.
*/
+ unsigned long ret = tsk->thread.regs->gpr[1];
+
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BUG_ON(tsk != current);
if (MSR_TM_ACTIVE(tsk->thread.regs->msr)) {
+ preempt_disable();
tm_reclaim_current(TM_CAUSE_SIGNAL);
if (MSR_TM_TRANSACTIONAL(tsk->thread.regs->msr))
- return tsk->thread.ckpt_regs.gpr[1];
+ ret = tsk->thread.ckpt_regs.gpr[1];
+
+ /*
+ * If we treclaim, we must clear the current thread's TM bits
+ * before re-enabling preemption. Otherwise we might be
+ * preempted and have the live MSR[TS] changed behind our back
+ * (tm_recheckpoint_new_task() would recheckpoint). Besides, we
+ * enter the signal handler in non-transactional state.
+ */
+ tsk->thread.regs->msr &= ~MSR_TS_MASK;
+ preempt_enable();
}
#endif
- return tsk->thread.regs->gpr[1];
+ return ret;
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 98600b276f76..1b090a76b444 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -489,19 +489,11 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
*/
static int save_tm_user_regs(struct pt_regs *regs,
struct mcontext __user *frame,
- struct mcontext __user *tm_frame, int sigret)
+ struct mcontext __user *tm_frame, int sigret,
+ unsigned long msr)
{
- unsigned long msr = regs->msr;
-
WARN_ON(tm_suspend_disabled);
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state. This also ensures
- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
- */
- regs->msr &= ~MSR_TS_MASK;
-
/* Save both sets of general registers */
if (save_general_regs(&current->thread.ckpt_regs, frame)
|| save_general_regs(regs, tm_frame))
@@ -912,6 +904,10 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
int sigret;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Save the thread's msr before get_tm_stackpointer() changes it */
+ unsigned long msr = regs->msr;
+#endif
BUG_ON(tsk != current);
@@ -944,13 +940,13 @@ int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_frame = &rt_sf->uc_transact.uc_mcontext;
- if (MSR_TM_ACTIVE(regs->msr)) {
+ if (MSR_TM_ACTIVE(msr)) {
if (__put_user((unsigned long)&rt_sf->uc_transact,
&rt_sf->uc.uc_link) ||
__put_user((unsigned long)tm_frame,
&rt_sf->uc_transact.uc_regs))
goto badframe;
- if (save_tm_user_regs(regs, frame, tm_frame, sigret))
+ if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
goto badframe;
}
else
@@ -1369,6 +1365,10 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
int sigret;
unsigned long tramp;
struct pt_regs *regs = tsk->thread.regs;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Save the thread's msr before get_tm_stackpointer() changes it */
+ unsigned long msr = regs->msr;
+#endif
BUG_ON(tsk != current);
@@ -1402,9 +1402,9 @@ int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_mctx = &frame->mctx_transact;
- if (MSR_TM_ACTIVE(regs->msr)) {
+ if (MSR_TM_ACTIVE(msr)) {
if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
- sigret))
+ sigret, msr))
goto badframe;
}
else
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 117515564ec7..84ed2e77ef9c 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -192,7 +192,8 @@ static long setup_sigcontext(struct sigcontext __user *sc,
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
struct sigcontext __user *tm_sc,
struct task_struct *tsk,
- int signr, sigset_t *set, unsigned long handler)
+ int signr, sigset_t *set, unsigned long handler,
+ unsigned long msr)
{
/* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
* process never used altivec yet (MSR_VEC is zero in pt_regs of
@@ -207,12 +208,11 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
struct pt_regs *regs = tsk->thread.regs;
- unsigned long msr = tsk->thread.regs->msr;
long err = 0;
BUG_ON(tsk != current);
- BUG_ON(!MSR_TM_ACTIVE(regs->msr));
+ BUG_ON(!MSR_TM_ACTIVE(msr));
WARN_ON(tm_suspend_disabled);
@@ -222,13 +222,6 @@ static long setup_tm_sigcontexts(struct sigcontext __user *sc,
*/
msr |= tsk->thread.ckpt_regs.msr & (MSR_FP | MSR_VEC | MSR_VSX);
- /* Remove TM bits from thread's MSR. The MSR in the sigcontext
- * just indicates to userland that we were doing a transaction, but we
- * don't want to return in transactional state. This also ensures
- * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
- */
- regs->msr &= ~MSR_TS_MASK;
-
#ifdef CONFIG_ALTIVEC
err |= __put_user(v_regs, &sc->v_regs);
err |= __put_user(tm_v_regs, &tm_sc->v_regs);
@@ -824,6 +817,10 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
unsigned long newsp = 0;
long err = 0;
struct pt_regs *regs = tsk->thread.regs;
+#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
+ /* Save the thread's msr before get_tm_stackpointer() changes it */
+ unsigned long msr = regs->msr;
+#endif
BUG_ON(tsk != current);
@@ -841,7 +838,7 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
err |= __put_user(0, &frame->uc.uc_flags);
err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
- if (MSR_TM_ACTIVE(regs->msr)) {
+ if (MSR_TM_ACTIVE(msr)) {
/* The ucontext_t passed to userland points to the second
* ucontext_t (for transactional state) with its uc_link ptr.
*/
@@ -849,7 +846,8 @@ int handle_rt_signal64(struct ksignal *ksig, sigset_t *set,
err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
&frame->uc_transact.uc_mcontext,
tsk, ksig->sig, NULL,
- (unsigned long)ksig->ka.sa.sa_handler);
+ (unsigned long)ksig->ka.sa.sa_handler,
+ msr);
} else
#endif
{
diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S
index b4c89a1acebb..a32d478a7f41 100644
--- a/arch/powerpc/kernel/vmlinux.lds.S
+++ b/arch/powerpc/kernel/vmlinux.lds.S
@@ -303,6 +303,12 @@ SECTIONS
*(.branch_lt)
}
+#ifdef CONFIG_DEBUG_INFO_BTF
+ .BTF : AT(ADDR(.BTF) - LOAD_OFFSET) {
+ *(.BTF)
+ }
+#endif
+
.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
__start_opd = .;
KEEP(*(.opd))
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
index c11b0a005196..2015c4f96238 100644
--- a/arch/powerpc/mm/book3s32/hash_low.S
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -25,12 +25,6 @@
#include <asm/feature-fixups.h>
#include <asm/code-patching-asm.h>
-#ifdef CONFIG_VMAP_STACK
-#define ADDR_OFFSET 0
-#else
-#define ADDR_OFFSET PAGE_OFFSET
-#endif
-
#ifdef CONFIG_SMP
.section .bss
.align 2
@@ -53,8 +47,8 @@ mmu_hash_lock:
.text
_GLOBAL(hash_page)
#ifdef CONFIG_SMP
- lis r8, (mmu_hash_lock - ADDR_OFFSET)@h
- ori r8, r8, (mmu_hash_lock - ADDR_OFFSET)@l
+ lis r8, (mmu_hash_lock - PAGE_OFFSET)@h
+ ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
lis r0,0x0fff
b 10f
11: lwz r6,0(r8)
@@ -72,12 +66,9 @@ _GLOBAL(hash_page)
cmplw 0,r4,r0
ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
-#ifdef CONFIG_VMAP_STACK
- tovirt(r5, r5)
-#endif
blt+ 112f /* assume user more likely */
- lis r5, (swapper_pg_dir - ADDR_OFFSET)@ha /* if kernel address, use */
- addi r5 ,r5 ,(swapper_pg_dir - ADDR_OFFSET)@l /* kernel page table */
+ lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
112:
#ifndef CONFIG_PTE_64BIT
@@ -89,9 +80,6 @@ _GLOBAL(hash_page)
lwzx r8,r8,r5 /* Get L1 entry */
rlwinm. r8,r8,0,0,20 /* extract pt base address */
#endif
-#ifdef CONFIG_VMAP_STACK
- tovirt(r8, r8)
-#endif
#ifdef CONFIG_SMP
beq- hash_page_out /* return if no mapping */
#else
@@ -143,30 +131,36 @@ retry:
bne- retry /* retry if someone got there first */
mfsrin r3,r4 /* get segment reg for segment */
+#ifndef CONFIG_VMAP_STACK
mfctr r0
stw r0,_CTR(r11)
+#endif
bl create_hpte /* add the hash table entry */
#ifdef CONFIG_SMP
eieio
- lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha
+ lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0
- stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
+ stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
#endif
+#ifdef CONFIG_VMAP_STACK
+ b fast_hash_page_return
+#else
/* Return from the exception */
lwz r5,_CTR(r11)
mtctr r5
lwz r0,GPR0(r11)
lwz r8,GPR8(r11)
b fast_exception_return
+#endif
#ifdef CONFIG_SMP
hash_page_out:
eieio
- lis r8, (mmu_hash_lock - ADDR_OFFSET)@ha
+ lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
li r0,0
- stw r0, (mmu_hash_lock - ADDR_OFFSET)@l(r8)
+ stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
blr
#endif /* CONFIG_SMP */
@@ -341,7 +335,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
patch_site 1f, patch__hash_page_A1
patch_site 2f, patch__hash_page_A2
/* Get the address of the primary PTE group in the hash table (r3) */
-0: lis r0, (Hash_base - ADDR_OFFSET)@h /* base address of hash table */
+0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
xor r3,r3,r0 /* make primary hash */
@@ -355,10 +349,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ 10f /* no PTE: go look for an empty slot */
tlbie r4
- lis r4, (htab_hash_searches - ADDR_OFFSET)@ha
- lwz r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
+ lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
+ lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
addi r6,r6,1 /* count how many searches we do */
- stw r6, (htab_hash_searches - ADDR_OFFSET)@l(r4)
+ stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
/* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
mtctr r0
@@ -390,10 +384,10 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
beq+ found_empty
/* update counter of times that the primary PTEG is full */
- lis r4, (primary_pteg_full - ADDR_OFFSET)@ha
- lwz r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
+ lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
+ lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
addi r6,r6,1
- stw r6, (primary_pteg_full - ADDR_OFFSET)@l(r4)
+ stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
patch_site 0f, patch__hash_page_C
/* Search the secondary PTEG for an empty slot */
@@ -427,8 +421,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
* lockup here but that shouldn't happen
*/
-1: lis r4, (next_slot - ADDR_OFFSET)@ha /* get next evict slot */
- lwz r6, (next_slot - ADDR_OFFSET)@l(r4)
+1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
+ lwz r6, (next_slot - PAGE_OFFSET)@l(r4)
addi r6,r6,HPTE_SIZE /* search for candidate */
andi. r6,r6,7*HPTE_SIZE
stw r6,next_slot@l(r4)
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 0a1c65a2c565..f888cbb109b9 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -413,7 +413,7 @@ void __init MMU_init_hw(void)
void __init MMU_init_hw_patch(void)
{
unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
- unsigned int hash;
+ unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;
if (ppc_md.progress)
ppc_md.progress("hash:patch", 0x345);
@@ -425,11 +425,6 @@ void __init MMU_init_hw_patch(void)
/*
* Patch up the instructions in hashtable.S:create_hpte
*/
- if (IS_ENABLED(CONFIG_VMAP_STACK))
- hash = (unsigned int)Hash;
- else
- hash = (unsigned int)Hash - PAGE_OFFSET;
-
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
@@ -439,8 +434,7 @@ void __init MMU_init_hw_patch(void)
/*
* Patch up the instructions in hashtable.S:flush_hash_page
*/
- modify_instruction_site(&patch__flush_hash_A0, 0xffff,
- ((unsigned int)Hash - PAGE_OFFSET) >> 16);
+ modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 73d4873fc7f8..33b3461d91e8 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -53,20 +53,24 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (pshift >= pdshift) {
cachep = PGT_CACHE(PTE_T_ORDER);
num_hugepd = 1 << (pshift - pdshift);
+ new = NULL;
} else if (IS_ENABLED(CONFIG_PPC_8xx)) {
- cachep = PGT_CACHE(PTE_INDEX_SIZE);
+ cachep = NULL;
num_hugepd = 1;
+ new = pte_alloc_one(mm);
} else {
cachep = PGT_CACHE(pdshift - pshift);
num_hugepd = 1;
+ new = NULL;
}
- if (!cachep) {
+ if (!cachep && !new) {
WARN_ONCE(1, "No page table cache created for hugetlb tables");
return -ENOMEM;
}
- new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
+ if (cachep)
+ new = kmem_cache_alloc(cachep, pgtable_gfp_flags(mm, GFP_KERNEL));
BUG_ON(pshift > HUGEPD_SHIFT_MASK);
BUG_ON((unsigned long)new & HUGEPD_SHIFT_MASK);
@@ -97,7 +101,10 @@ static int __hugepte_alloc(struct mm_struct *mm, hugepd_t *hpdp,
if (i < num_hugepd) {
for (i = i - 1 ; i >= 0; i--, hpdp--)
*hpdp = __hugepd(0);
- kmem_cache_free(cachep, new);
+ if (cachep)
+ kmem_cache_free(cachep, new);
+ else
+ pte_free(mm, new);
} else {
kmemleak_ignore(new);
}
@@ -324,8 +331,7 @@ static void free_hugepd_range(struct mmu_gather *tlb, hugepd_t *hpdp, int pdshif
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
else if (IS_ENABLED(CONFIG_PPC_8xx))
- pgtable_free_tlb(tlb, hugepte,
- get_hugepd_cache_index(PTE_INDEX_SIZE));
+ pgtable_free_tlb(tlb, hugepte, 0);
else
pgtable_free_tlb(tlb, hugepte,
get_hugepd_cache_index(pdshift - shift));
@@ -639,12 +645,13 @@ static int __init hugetlbpage_init(void)
* if pdshift and shift are the same, we don't
* use the pgtable cache for hugepd.
*/
- if (pdshift > shift && IS_ENABLED(CONFIG_PPC_8xx))
- pgtable_cache_add(PTE_INDEX_SIZE);
- else if (pdshift > shift)
- pgtable_cache_add(pdshift - shift);
- else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) || IS_ENABLED(CONFIG_PPC_8xx))
+ if (pdshift > shift) {
+ if (!IS_ENABLED(CONFIG_PPC_8xx))
+ pgtable_cache_add(pdshift - shift);
+ } else if (IS_ENABLED(CONFIG_PPC_FSL_BOOK3E) ||
+ IS_ENABLED(CONFIG_PPC_8xx)) {
pgtable_cache_add(PTE_T_ORDER);
+ }
configured = true;
}
diff --git a/arch/powerpc/mm/kasan/kasan_init_32.c b/arch/powerpc/mm/kasan/kasan_init_32.c
index 16dd95bd0749..db5664dde5ff 100644
--- a/arch/powerpc/mm/kasan/kasan_init_32.c
+++ b/arch/powerpc/mm/kasan/kasan_init_32.c
@@ -185,8 +185,7 @@ u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};
static void __init kasan_early_hash_table(void)
{
- unsigned int hash = IS_ENABLED(CONFIG_VMAP_STACK) ? (unsigned int)early_hash :
- __pa(early_hash);
+ unsigned int hash = __pa(early_hash);
modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index ef7b1119b2e2..1c07d5a3f543 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -373,7 +373,9 @@ static inline bool flush_coherent_icache(unsigned long addr)
*/
if (cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
mb(); /* sync */
+ allow_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
icbi((void *)addr);
+ prevent_read_from_user((const void __user *)addr, L1_CACHE_BYTES);
mb(); /* sync */
isync();
return true;
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index e8c84d265602..0ec9640335bb 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -3435,6 +3435,11 @@ getstring(char *s, int size)
int c;
c = skipbl();
+ if (c == '\n') {
+ *s = 0;
+ return;
+ }
+
do {
if( size > 1 ){
*s++ = c;
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 73f029eae0cc..1a3b5a5276be 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -121,6 +121,7 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
+ depends on MMU
select SPARSEMEM_VMEMMAP_ENABLE
config ARCH_SELECT_MEMORY_MODEL
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
index d325b67d00df..3078b2de0b2d 100644
--- a/arch/riscv/Kconfig.socs
+++ b/arch/riscv/Kconfig.socs
@@ -10,4 +10,28 @@ config SOC_SIFIVE
help
This enables support for SiFive SoC platform hardware.
+config SOC_VIRT
+ bool "QEMU Virt Machine"
+ select VIRTIO_PCI
+ select VIRTIO_BALLOON
+ select VIRTIO_MMIO
+ select VIRTIO_CONSOLE
+ select VIRTIO_NET
+ select NET_9P_VIRTIO
+ select VIRTIO_BLK
+ select SCSI_VIRTIO
+ select DRM_VIRTIO_GPU
+ select HW_RANDOM_VIRTIO
+ select RPMSG_CHAR
+ select RPMSG_VIRTIO
+ select CRYPTO_DEV_VIRTIO
+ select VIRTIO_INPUT
+ select POWER_RESET_SYSCON
+ select POWER_RESET_SYSCON_POWEROFF
+ select GOLDFISH
+ select RTC_DRV_GOLDFISH
+ select SIFIVE_PLIC
+ help
+ This enables support for the QEMU Virt Machine.
+
endmenu
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
index b9009a2fbaf5..259cb53d7f20 100644
--- a/arch/riscv/Makefile
+++ b/arch/riscv/Makefile
@@ -13,8 +13,10 @@ LDFLAGS_vmlinux :=
ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
LDFLAGS_vmlinux := --no-relax
endif
-KBUILD_AFLAGS_MODULE += -fPIC
-KBUILD_CFLAGS_MODULE += -fPIC
+
+ifeq ($(CONFIG_64BIT)$(CONFIG_CMODEL_MEDLOW),yy)
+KBUILD_CFLAGS_MODULE += -mcmodel=medany
+endif
export BITS
ifeq ($(CONFIG_ARCH_RV64I),y)
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
index 8dab0bb6ae66..8a45a37d2af4 100644
--- a/arch/riscv/boot/.gitignore
+++ b/arch/riscv/boot/.gitignore
@@ -1,2 +1,4 @@
Image
Image.gz
+loader
+loader.lds
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
index 609198cb1163..4a2729f5ca3f 100644
--- a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -2,6 +2,7 @@
/* Copyright (c) 2018-2019 SiFive, Inc */
#include "fu540-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
#define RTCCLK_FREQ 1000000
@@ -41,6 +42,10 @@
clock-frequency = <RTCCLK_FREQ>;
clock-output-names = "rtcclk";
};
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
};
&uart0 {
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
index e2ff95cb3390..c8f084203067 100644
--- a/arch/riscv/configs/defconfig
+++ b/arch/riscv/configs/defconfig
@@ -15,6 +15,7 @@ CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
CONFIG_SOC_SIFIVE=y
+CONFIG_SOC_VIRT=y
CONFIG_SMP=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
@@ -30,7 +31,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@@ -38,15 +38,12 @@ CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
@@ -57,15 +54,13 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
-CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
CONFIG_SPI=y
CONFIG_SPI_SIFIVE=y
# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_POWER_RESET=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -78,12 +73,7 @@ CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
CONFIG_MMC=y
CONFIG_MMC_SPI=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_VIRTIO=y
+CONFIG_RTC_CLASS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -98,7 +88,6 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_PAGEALLOC=y
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
index eb519407c841..a844920a261f 100644
--- a/arch/riscv/configs/rv32_defconfig
+++ b/arch/riscv/configs/rv32_defconfig
@@ -14,6 +14,7 @@ CONFIG_CHECKPOINT_RESTORE=y
CONFIG_BLK_DEV_INITRD=y
CONFIG_EXPERT=y
CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_VIRT=y
CONFIG_ARCH_RV32I=y
CONFIG_SMP=y
CONFIG_MODULES=y
@@ -30,7 +31,6 @@ CONFIG_IP_PNP_BOOTP=y
CONFIG_IP_PNP_RARP=y
CONFIG_NETLINK_DIAG=y
CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_PCI=y
CONFIG_PCIEPORTBUS=y
CONFIG_PCI_HOST_GENERIC=y
@@ -38,15 +38,12 @@ CONFIG_PCIE_XILINX=y
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
CONFIG_BLK_DEV_LOOP=y
-CONFIG_VIRTIO_BLK=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
-CONFIG_SCSI_VIRTIO=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
CONFIG_SATA_AHCI_PLATFORM=y
CONFIG_NETDEVICES=y
-CONFIG_VIRTIO_NET=y
CONFIG_MACB=y
CONFIG_E1000E=y
CONFIG_R8169=y
@@ -57,13 +54,11 @@ CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
CONFIG_HVC_RISCV_SBI=y
-CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
-CONFIG_HW_RANDOM_VIRTIO=y
# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_POWER_RESET=y
CONFIG_DRM=y
CONFIG_DRM_RADEON=y
-CONFIG_DRM_VIRTIO_GPU=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
@@ -74,13 +69,7 @@ CONFIG_USB_OHCI_HCD=y
CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_UAS=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_INPUT=y
-CONFIG_VIRTIO_MMIO=y
-CONFIG_RPMSG_CHAR=y
-CONFIG_RPMSG_VIRTIO=y
-CONFIG_SIFIVE_PLIC=y
+CONFIG_RTC_CLASS=y
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
CONFIG_AUTOFS4_FS=y
@@ -95,7 +84,6 @@ CONFIG_NFS_V4_2=y
CONFIG_ROOT_NFS=y
CONFIG_9P_FS=y
CONFIG_CRYPTO_USER_API_HASH=y
-CONFIG_CRYPTO_DEV_VIRTIO=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_PAGEALLOC=y
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 435b65532e29..8e18d2c64399 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -72,6 +72,16 @@
#define EXC_LOAD_PAGE_FAULT 13
#define EXC_STORE_PAGE_FAULT 15
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
/* symbolic CSR names: */
#define CSR_CYCLE 0xc00
#define CSR_TIME 0xc01
@@ -100,6 +110,8 @@
#define CSR_MCAUSE 0x342
#define CSR_MTVAL 0x343
#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
#define CSR_MHARTID 0xf14
#ifdef CONFIG_RISCV_M_MODE
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
index 42347d0981e7..49350c8bd7b0 100644
--- a/arch/riscv/include/asm/syscall.h
+++ b/arch/riscv/include/asm/syscall.h
@@ -28,13 +28,6 @@ static inline int syscall_get_nr(struct task_struct *task,
return regs->a7;
}
-static inline void syscall_set_nr(struct task_struct *task,
- struct pt_regs *regs,
- int sysno)
-{
- regs->a7 = sysno;
-}
-
static inline void syscall_rollback(struct task_struct *task,
struct pt_regs *regs)
{
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index bad4d85b5e91..208702d8c18e 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -229,19 +229,12 @@ check_syscall_nr:
li t0, __NR_syscalls
la s0, sys_ni_syscall
/*
- * The tracer can change syscall number to valid/invalid value.
- * We use syscall_set_nr helper in syscall_trace_enter thus we
- * cannot trust the current value in a7 and have to reload from
- * the current task pt_regs.
- */
- REG_L a7, PT_A7(sp)
- /*
* Syscall number held in a7.
* If syscall number is above allowed value, redirect to ni_syscall.
*/
bge a7, t0, 1f
/*
- * Check if syscall is rejected by tracer or seccomp, i.e., a7 == -1.
+ * Check if syscall is rejected by tracer, i.e., a7 == -1.
* If yes, we pretend it was executed.
*/
li t1, -1
@@ -334,6 +327,7 @@ work_resched:
handle_syscall_trace_enter:
move a0, sp
call do_syscall_trace_enter
+ move t0, a0
REG_L a0, PT_A0(sp)
REG_L a1, PT_A1(sp)
REG_L a2, PT_A2(sp)
@@ -342,6 +336,7 @@ handle_syscall_trace_enter:
REG_L a5, PT_A5(sp)
REG_L a6, PT_A6(sp)
REG_L a7, PT_A7(sp)
+ bnez t0, ret_from_syscall_rejected
j check_syscall_nr
handle_syscall_trace_exit:
move a0, sp
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 271860fc2c3f..85f2073e7fe4 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -58,6 +58,12 @@ _start_kernel:
/* Reset all registers except ra, a0, a1 */
call reset_regs
+ /* Set up a PMP to permit access to all of memory. */
+ li a0, -1
+ csrw CSR_PMPADDR0, a0
+ li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+ csrw CSR_PMPCFG0, a0
+
/*
* The hartid in a0 is expected later on, and we have no firmware
* to hand it to us.
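
The two CSR writes added above compose an all-permissive PMP entry, presumably for M-mode (CONFIG_RISCV_M_MODE) boots where no firmware has configured the PMPs yet. A sketch of the configuration byte, using the PMP_* values this series adds to csr.h:

```c
#include <stdint.h>

/* pmpcfg0 byte written in head.S: NAPOT match, read/write/execute.
 * With pmpaddr0 == -1 the NAPOT region spans all of memory. */
static inline uint8_t pmp_allow_all(void)
{
	return 0x18 /* PMP_A_NAPOT */ |
	       0x04 /* PMP_X */ |
	       0x02 /* PMP_W */ |
	       0x01 /* PMP_R */;	/* == 0x1f */
}
```
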
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index b7401858d872..8bbe5dbe1341 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -8,6 +8,10 @@
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <asm/pgtable.h>
+#include <asm/sections.h>
static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
{
@@ -386,3 +390,15 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
return 0;
}
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+#define VMALLOC_MODULE_START \
+ max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
+ VMALLOC_END, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+#endif
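
module_alloc() is overridden so modules land within 2 GiB of the kernel image, keeping medany's PC-relative (auipc-based) addressing of kernel symbols in range. A sketch of the bound with the kernel macros unfolded; the helper name and the 4 KiB page size are illustrative, and it assumes the kernel sits above the 2 GiB mark:

```c
#include <stdint.h>

#define PAGE_SZ	4096ul			/* stand-in for PAGE_SIZE */
#define SZ_2G	0x80000000ul

/* Mirrors VMALLOC_MODULE_START: round (_end - 2G) up to a page
 * boundary, then clamp it to the bottom of the vmalloc area. */
static uintptr_t module_area_start(uintptr_t kernel_end,
				   uintptr_t vmalloc_start)
{
	uintptr_t lo = (kernel_end - SZ_2G + PAGE_SZ - 1) & ~(PAGE_SZ - 1);

	return lo > vmalloc_start ? lo : vmalloc_start;
}
```
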
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
index 407464201b91..444dc7b0fd78 100644
--- a/arch/riscv/kernel/ptrace.c
+++ b/arch/riscv/kernel/ptrace.c
@@ -148,21 +148,19 @@ long arch_ptrace(struct task_struct *child, long request,
* Allows PTRACE_SYSCALL to work. These are called from entry.S in
* {handle,ret_from}_syscall.
*/
-__visible void do_syscall_trace_enter(struct pt_regs *regs)
+__visible int do_syscall_trace_enter(struct pt_regs *regs)
{
if (test_thread_flag(TIF_SYSCALL_TRACE))
if (tracehook_report_syscall_entry(regs))
- syscall_set_nr(current, regs, -1);
+ return -1;
/*
* Do the secure computing after ptrace; failures should be fast.
* If this fails we might have return value in a0 from seccomp
* (via SECCOMP_RET_ERRNO/TRACE).
*/
- if (secure_computing() == -1) {
- syscall_set_nr(current, regs, -1);
- return;
- }
+ if (secure_computing() == -1)
+ return -1;
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
@@ -170,6 +168,7 @@ __visible void do_syscall_trace_enter(struct pt_regs *regs)
#endif
audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
+ return 0;
}
__visible void do_syscall_trace_exit(struct pt_regs *regs)
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index f4cad5163bf2..ffb3d94bf0cc 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -156,6 +156,6 @@ void __init trap_init(void)
csr_write(CSR_SCRATCH, 0);
/* Set the exception vector address */
csr_write(CSR_TVEC, &handle_exception);
- /* Enable all interrupts */
- csr_write(CSR_IE, -1);
+ /* Enable interrupts */
+ csr_write(CSR_IE, IE_SIE | IE_EIE);
}
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 965a8cf4829c..fab855963c73 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -131,7 +131,7 @@ void __init setup_bootmem(void)
for_each_memblock(memory, reg) {
phys_addr_t end = reg->base + reg->size;
- if (reg->base <= vmlinux_end && vmlinux_end <= end) {
+ if (reg->base <= vmlinux_start && vmlinux_end <= end) {
mem_size = min(reg->size, (phys_addr_t)-PAGE_OFFSET);
/*
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index f0cc86040587..ec0ca90dd900 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -19,18 +19,20 @@ asmlinkage void __init kasan_early_init(void)
for (i = 0; i < PTRS_PER_PTE; ++i)
set_pte(kasan_early_shadow_pte + i,
mk_pte(virt_to_page(kasan_early_shadow_page),
- PAGE_KERNEL));
+ PAGE_KERNEL));
for (i = 0; i < PTRS_PER_PMD; ++i)
set_pmd(kasan_early_shadow_pmd + i,
- pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
- __pgprot(_PAGE_TABLE)));
+ pfn_pmd(PFN_DOWN
+ (__pa((uintptr_t) kasan_early_shadow_pte)),
+ __pgprot(_PAGE_TABLE)));
for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
i += PGDIR_SIZE, ++pgd)
set_pgd(pgd,
- pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
- __pgprot(_PAGE_TABLE)));
+ pfn_pgd(PFN_DOWN
+ (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
/* init for swapper_pg_dir */
pgd = pgd_offset_k(KASAN_SHADOW_START);
@@ -38,37 +40,43 @@ asmlinkage void __init kasan_early_init(void)
for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
i += PGDIR_SIZE, ++pgd)
set_pgd(pgd,
- pfn_pgd(PFN_DOWN(__pa(((uintptr_t)kasan_early_shadow_pmd))),
- __pgprot(_PAGE_TABLE)));
+ pfn_pgd(PFN_DOWN
+ (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
flush_tlb_all();
}
static void __init populate(void *start, void *end)
{
- unsigned long i;
+ unsigned long i, offset;
unsigned long vaddr = (unsigned long)start & PAGE_MASK;
unsigned long vend = PAGE_ALIGN((unsigned long)end);
unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
+ unsigned long n_ptes =
+ ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
unsigned long n_pmds =
- (n_pages % PTRS_PER_PTE) ? n_pages / PTRS_PER_PTE + 1 :
- n_pages / PTRS_PER_PTE;
+ ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
+
+ pte_t *pte =
+ memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ pmd_t *pmd =
+ memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
pgd_t *pgd = pgd_offset_k(vaddr);
- pmd_t *pmd = memblock_alloc(n_pmds * sizeof(pmd_t), PAGE_SIZE);
- pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE);
for (i = 0; i < n_pages; i++) {
phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
-
- set_pte(pte + i, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
}
- for (i = 0; i < n_pmds; ++pgd, i += PTRS_PER_PMD)
- set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(((uintptr_t)(pmd + i)))),
+ for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+ set_pmd(&pmd[i],
+ pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
__pgprot(_PAGE_TABLE)));
- for (i = 0; i < n_pages; ++pmd, i += PTRS_PER_PTE)
- set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa((uintptr_t)(pte + i))),
+ for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+ set_pgd(&pgd[i],
+ pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
__pgprot(_PAGE_TABLE)));
flush_tlb_all();
@@ -81,7 +89,8 @@ void __init kasan_init(void)
unsigned long i;
kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
- (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+ (void *)kasan_mem_to_shadow((void *)
+ VMALLOC_END));
for_each_memblock(memory, reg) {
void *start = (void *)__va(reg->base);
@@ -90,14 +99,14 @@ void __init kasan_init(void)
if (start >= end)
break;
- populate(kasan_mem_to_shadow(start),
- kasan_mem_to_shadow(end));
+ populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
};
for (i = 0; i < PTRS_PER_PTE; i++)
set_pte(&kasan_early_shadow_pte[i],
mk_pte(virt_to_page(kasan_early_shadow_page),
- __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_ACCESSED)));
+ __pgprot(_PAGE_PRESENT | _PAGE_READ |
+ _PAGE_ACCESSED)));
memset(kasan_early_shadow_page, 0, PAGE_SIZE);
init_task.kasan_depth = 0;
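
The new n_ptes/n_pmds arithmetic uses the `(n + unit) & -unit` round-up idiom so whole page-table pages can be allocated up front. A small sketch of the idiom (the helper name is illustrative):

```c
/* Rounds n up to a multiple of unit (a power of two) and returns the
 * group count; note it reserves one extra group when n is already an
 * exact multiple, a harmless over-allocation here. */
static unsigned long n_groups(unsigned long n, unsigned long unit)
{
	return ((n + unit) & -unit) / unit;	/* n_groups(513, 512) == 2 */
}
```
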
diff --git a/arch/s390/Makefile b/arch/s390/Makefile
index e0e3a465bbfd..8dfa2cf1f05c 100644
--- a/arch/s390/Makefile
+++ b/arch/s390/Makefile
@@ -146,7 +146,7 @@ all: bzImage
#KBUILD_IMAGE is necessary for packaging targets like rpm-pkg, deb-pkg...
KBUILD_IMAGE := $(boot)/bzImage
-install: vmlinux
+install:
$(Q)$(MAKE) $(build)=$(boot) $@
bzImage: vmlinux
diff --git a/arch/s390/boot/Makefile b/arch/s390/boot/Makefile
index e2c47d3a1c89..0ff9261c915e 100644
--- a/arch/s390/boot/Makefile
+++ b/arch/s390/boot/Makefile
@@ -70,7 +70,7 @@ $(obj)/compressed/vmlinux: $(obj)/startup.a FORCE
$(obj)/startup.a: $(OBJECTS) FORCE
$(call if_changed,ar)
-install: $(CONFIGURE) $(obj)/bzImage
+install:
sh -x $(srctree)/$(obj)/install.sh $(KERNELRELEASE) $(obj)/bzImage \
System.map "$(INSTALL_PATH)"
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 5d12352545c5..5591243d673e 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -75,7 +75,7 @@ static unsigned long get_random(unsigned long limit)
*(unsigned long *) prng.parm_block ^= seed;
for (i = 0; i < 16; i++) {
cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
- (char *) entropy, (char *) entropy,
+ (u8 *) entropy, (u8 *) entropy,
sizeof(entropy));
memcpy(prng.parm_block, entropy, sizeof(entropy));
}
diff --git a/arch/s390/boot/uv.c b/arch/s390/boot/uv.c
index ed007f4a6444..3f501159ee9f 100644
--- a/arch/s390/boot/uv.c
+++ b/arch/s390/boot/uv.c
@@ -15,7 +15,8 @@ void uv_query_info(void)
if (!test_facility(158))
return;
- if (uv_call(0, (uint64_t)&uvcb))
+ /* rc==0x100 means that there is additional data we do not process */
+ if (uv_call(0, (uint64_t)&uvcb) && uvcb.header.rc != 0x100)
return;
if (test_bit_inv(BIT_UVC_CMD_SET_SHARED_ACCESS, (unsigned long *)uvcb.inst_calls_list) &&
diff --git a/arch/s390/configs/debug_defconfig b/arch/s390/configs/debug_defconfig
index 2e60c80395ab..0c86ba19fa2b 100644
--- a/arch/s390/configs/debug_defconfig
+++ b/arch/s390/configs/debug_defconfig
@@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
+CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
@@ -474,7 +475,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -684,7 +684,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_XXHASH=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
@@ -748,7 +747,6 @@ CONFIG_DEBUG_INFO_DWARF4=y
CONFIG_GDB_SCRIPTS=y
CONFIG_FRAME_WARN=1024
CONFIG_HEADERS_INSTALL=y
-CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_PAGEALLOC=y
@@ -772,9 +770,9 @@ CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
CONFIG_DEBUG_PER_CPU_MAPS=y
CONFIG_DEBUG_SHIRQ=y
+CONFIG_PANIC_ON_OOPS=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_WQ_WATCHDOG=y
-CONFIG_PANIC_ON_OOPS=y
CONFIG_DEBUG_TIMEKEEPING=y
CONFIG_PROVE_LOCKING=y
CONFIG_LOCK_STAT=y
@@ -783,9 +781,20 @@ CONFIG_DEBUG_ATOMIC_SLEEP=y
CONFIG_DEBUG_LOCKING_API_SELFTESTS=y
CONFIG_DEBUG_SG=y
CONFIG_DEBUG_NOTIFIERS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_DEBUG_CREDENTIALS=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=300
+CONFIG_LATENCYTOP=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_STACK_TRACER=y
+CONFIG_IRQSOFF_TRACER=y
+CONFIG_PREEMPT_TRACER=y
+CONFIG_SCHED_TRACER=y
+CONFIG_FTRACE_SYSCALLS=y
+CONFIG_BLK_DEV_IO_TRACE=y
+CONFIG_HIST_TRIGGERS=y
+CONFIG_S390_PTDUMP=y
CONFIG_NOTIFIER_ERROR_INJECTION=m
CONFIG_NETDEV_NOTIFIER_ERROR_INJECT=m
CONFIG_FAULT_INJECTION=y
@@ -796,15 +805,6 @@ CONFIG_FAIL_IO_TIMEOUT=y
CONFIG_FAIL_FUTEX=y
CONFIG_FAULT_INJECTION_DEBUG_FS=y
CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
-CONFIG_LATENCYTOP=y
-CONFIG_IRQSOFF_TRACER=y
-CONFIG_PREEMPT_TRACER=y
-CONFIG_SCHED_TRACER=y
-CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
-CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
-CONFIG_HIST_TRIGGERS=y
CONFIG_LKDTM=m
CONFIG_TEST_LIST_SORT=y
CONFIG_TEST_SORT=y
@@ -814,5 +814,3 @@ CONFIG_INTERVAL_TREE_TEST=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/configs/defconfig b/arch/s390/configs/defconfig
index 25f799849582..6b27d861a9a3 100644
--- a/arch/s390/configs/defconfig
+++ b/arch/s390/configs/defconfig
@@ -53,6 +53,7 @@ CONFIG_VFIO_AP=m
CONFIG_CRASH_DUMP=y
CONFIG_HIBERNATION=y
CONFIG_PM_DEBUG=y
+CONFIG_PROTECTED_VIRTUALIZATION_GUEST=y
CONFIG_CMM=m
CONFIG_APPLDATA_BASE=y
CONFIG_KVM=m
@@ -470,7 +471,6 @@ CONFIG_NLMON=m
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EZCHIP is not set
# CONFIG_NET_VENDOR_GOOGLE is not set
-# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_HUAWEI is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
@@ -677,7 +677,6 @@ CONFIG_CRYPTO_ADIANTUM=m
CONFIG_CRYPTO_XCBC=m
CONFIG_CRYPTO_VMAC=m
CONFIG_CRYPTO_CRC32=m
-CONFIG_CRYPTO_XXHASH=m
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_RMD128=m
CONFIG_CRYPTO_RMD160=m
@@ -739,18 +738,18 @@ CONFIG_DEBUG_SECTION_MISMATCH=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_DEBUG_MEMORY_INIT=y
CONFIG_PANIC_ON_OOPS=y
+CONFIG_BUG_ON_DATA_CORRUPTION=y
CONFIG_RCU_TORTURE_TEST=m
CONFIG_RCU_CPU_STALL_TIMEOUT=60
CONFIG_LATENCYTOP=y
+CONFIG_FUNCTION_PROFILER=y
+CONFIG_STACK_TRACER=y
CONFIG_SCHED_TRACER=y
CONFIG_FTRACE_SYSCALLS=y
-CONFIG_STACK_TRACER=y
CONFIG_BLK_DEV_IO_TRACE=y
-CONFIG_FUNCTION_PROFILER=y
CONFIG_HIST_TRIGGERS=y
+CONFIG_S390_PTDUMP=y
CONFIG_LKDTM=m
CONFIG_PERCPU_TEST=m
CONFIG_ATOMIC64_SELFTEST=y
CONFIG_TEST_BPF=m
-CONFIG_BUG_ON_DATA_CORRUPTION=y
-CONFIG_S390_PTDUMP=y
diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
index 85e944f04c70..1019efd85b9d 100644
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -42,7 +42,7 @@ void __storage_key_init_range(unsigned long start, unsigned long end);
static inline void storage_key_init_range(unsigned long start, unsigned long end)
{
- if (PAGE_DEFAULT_KEY)
+ if (PAGE_DEFAULT_KEY != 0)
__storage_key_init_range(start, end);
}
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 137a3920ca36..6d7c3b7e9281 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -752,6 +752,12 @@ static inline int pmd_write(pmd_t pmd)
return (pmd_val(pmd) & _SEGMENT_ENTRY_WRITE) != 0;
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return (pud_val(pud) & _REGION3_ENTRY_WRITE) != 0;
+}
+
static inline int pmd_dirty(pmd_t pmd)
{
return (pmd_val(pmd) & _SEGMENT_ENTRY_DIRTY) != 0;
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 361ef5eda468..aadb3d0e2adc 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -84,7 +84,6 @@ void s390_update_cpu_mhz(void);
void cpu_detect_mhz_feature(void);
extern const struct seq_operations cpuinfo_op;
-extern int sysctl_ieee_emulation_warnings;
extern void execve_tail(void);
extern void __bpon(void);
diff --git a/arch/s390/include/asm/qdio.h b/arch/s390/include/asm/qdio.h
index 71e3f0146cda..1e3517b0518b 100644
--- a/arch/s390/include/asm/qdio.h
+++ b/arch/s390/include/asm/qdio.h
@@ -201,7 +201,7 @@ struct slib {
* @scount: SBAL count
* @sflags: whole SBAL flags
* @length: length
- * @addr: address
+ * @addr: absolute data address
*/
struct qdio_buffer_element {
u8 eflags;
@@ -211,7 +211,7 @@ struct qdio_buffer_element {
u8 scount;
u8 sflags;
u32 length;
- void *addr;
+ u64 addr;
} __attribute__ ((packed, aligned(16)));
/**
@@ -227,7 +227,7 @@ struct qdio_buffer {
* @sbal: absolute SBAL address
*/
struct sl_element {
- unsigned long sbal;
+ u64 sbal;
} __attribute__ ((packed));
/**
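
With addr now a u64 carrying an absolute address, callers can no longer store
kernel pointers in an SBAL entry directly. A minimal sketch of the conversion
drivers would perform (the helper name is hypothetical; only the u64 addr
field comes from this patch):

	static void qdio_set_sbale_addr(struct qdio_buffer_element *sbale,
					void *data)
	{
		/* store the absolute (physical) address as a u64 */
		sbale->addr = virt_to_phys(data);
	}
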
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 670f14a228e5..6bf3a45ccfec 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -155,7 +155,7 @@ static inline void get_tod_clock_ext(char *clk)
static inline unsigned long long get_tod_clock(void)
{
- unsigned char clk[STORE_CLOCK_EXT_SIZE];
+ char clk[STORE_CLOCK_EXT_SIZE];
get_tod_clock_ext(clk);
return *((unsigned long long *)&clk[1]);
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
index bc61ea18e88d..60716d18ce5a 100644
--- a/arch/s390/pci/pci.c
+++ b/arch/s390/pci/pci.c
@@ -424,7 +424,7 @@ static void zpci_map_resources(struct pci_dev *pdev)
if (zpci_use_mio(zdev))
pdev->resource[i].start =
- (resource_size_t __force) zdev->bars[i].mio_wb;
+ (resource_size_t __force) zdev->bars[i].mio_wt;
else
pdev->resource[i].start = (resource_size_t __force)
pci_iomap_range_fh(pdev, i, 0, 0);
@@ -531,7 +531,7 @@ static int zpci_setup_bus_resources(struct zpci_dev *zdev,
flags |= IORESOURCE_MEM_64;
if (zpci_use_mio(zdev))
- addr = (unsigned long) zdev->bars[i].mio_wb;
+ addr = (unsigned long) zdev->bars[i].mio_wt;
else
addr = ZPCI_ADDR(entry);
size = 1UL << zdev->bars[i].size;
diff --git a/arch/x86/boot/compressed/kaslr_64.c b/arch/x86/boot/compressed/kaslr_64.c
index 748456c365f4..9557c5a15b91 100644
--- a/arch/x86/boot/compressed/kaslr_64.c
+++ b/arch/x86/boot/compressed/kaslr_64.c
@@ -29,9 +29,6 @@
#define __PAGE_OFFSET __PAGE_OFFSET_BASE
#include "../../mm/ident_map.c"
-/* Used by pgtable.h asm code to force instruction serialization. */
-unsigned long __force_order;
-
/* Used to track our page table allocation area. */
struct alloc_pgt_data {
unsigned char *pgt_buf;
diff --git a/arch/x86/events/amd/core.c b/arch/x86/events/amd/core.c
index 1f22b6bbda68..39eb276d0277 100644
--- a/arch/x86/events/amd/core.c
+++ b/arch/x86/events/amd/core.c
@@ -250,6 +250,7 @@ static const u64 amd_f17h_perfmon_event_map[PERF_COUNT_HW_MAX] =
[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
[PERF_COUNT_HW_CACHE_REFERENCES] = 0xff60,
+ [PERF_COUNT_HW_CACHE_MISSES] = 0x0964,
[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c2,
[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c3,
[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = 0x0287,
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 3be51aa06e67..dff6623804c2 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4765,6 +4765,7 @@ __init int intel_pmu_init(void)
break;
case INTEL_FAM6_ATOM_TREMONT_D:
+ case INTEL_FAM6_ATOM_TREMONT:
x86_pmu.late_ack = true;
memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
sizeof(hw_cache_event_ids));
diff --git a/arch/x86/events/intel/cstate.c b/arch/x86/events/intel/cstate.c
index e1daf4151e11..4814c964692c 100644
--- a/arch/x86/events/intel/cstate.c
+++ b/arch/x86/events/intel/cstate.c
@@ -40,17 +40,18 @@
* Model specific counters:
* MSR_CORE_C1_RES: CORE C1 Residency Counter
* perf code: 0x00
- * Available model: SLM,AMT,GLM,CNL
+ * Available model: SLM,AMT,GLM,CNL,TNT
* Scope: Core (each processor core has a MSR)
* MSR_CORE_C3_RESIDENCY: CORE C3 Residency Counter
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,GLM,
- * CNL,KBL,CML
+ * CNL,KBL,CML,TNT
* Scope: Core
* MSR_CORE_C6_RESIDENCY: CORE C6 Residency Counter
* perf code: 0x02
* Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
+ * TNT
* Scope: Core
* MSR_CORE_C7_RESIDENCY: CORE C7 Residency Counter
* perf code: 0x03
@@ -60,17 +61,18 @@
* MSR_PKG_C2_RESIDENCY: Package C2 Residency Counter.
* perf code: 0x00
* Available model: SNB,IVB,HSW,BDW,SKL,KNL,GLM,CNL,
- * KBL,CML,ICL,TGL
+ * KBL,CML,ICL,TGL,TNT
* Scope: Package (physical package)
* MSR_PKG_C3_RESIDENCY: Package C3 Residency Counter.
* perf code: 0x01
* Available model: NHM,WSM,SNB,IVB,HSW,BDW,SKL,KNL,
- * GLM,CNL,KBL,CML,ICL,TGL
+ * GLM,CNL,KBL,CML,ICL,TGL,TNT
* Scope: Package (physical package)
* MSR_PKG_C6_RESIDENCY: Package C6 Residency Counter.
* perf code: 0x02
- * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW
- * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL
+ * Available model: SLM,AMT,NHM,WSM,SNB,IVB,HSW,BDW,
+ * SKL,KNL,GLM,CNL,KBL,CML,ICL,TGL,
+ * TNT
* Scope: Package (physical package)
* MSR_PKG_C7_RESIDENCY: Package C7 Residency Counter.
* perf code: 0x03
@@ -87,7 +89,8 @@
* Scope: Package (physical package)
* MSR_PKG_C10_RESIDENCY: Package C10 Residency Counter.
* perf code: 0x06
- * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL
+ * Available model: HSW ULT,KBL,GLM,CNL,CML,ICL,TGL,
+ * TNT
* Scope: Package (physical package)
*
*/
@@ -640,8 +643,9 @@ static const struct x86_cpu_id intel_cstates_match[] __initconst = {
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_D, glm_cstates),
-
X86_CSTATES_MODEL(INTEL_FAM6_ATOM_GOLDMONT_PLUS, glm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT_D, glm_cstates),
+ X86_CSTATES_MODEL(INTEL_FAM6_ATOM_TREMONT, glm_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE_L, icl_cstates),
X86_CSTATES_MODEL(INTEL_FAM6_ICELAKE, icl_cstates),
diff --git a/arch/x86/events/intel/ds.c b/arch/x86/events/intel/ds.c
index 4b94ae4ae369..dc43cc124e09 100644
--- a/arch/x86/events/intel/ds.c
+++ b/arch/x86/events/intel/ds.c
@@ -1714,6 +1714,8 @@ intel_pmu_save_and_restart_reload(struct perf_event *event, int count)
old = ((s64)(prev_raw_count << shift) >> shift);
local64_add(new - old + count * period, &event->count);
+ local64_set(&hwc->period_left, -new);
+
perf_event_update_userpage(event);
return 0;
diff --git a/arch/x86/events/msr.c b/arch/x86/events/msr.c
index 6f86650b3f77..a949f6f55991 100644
--- a/arch/x86/events/msr.c
+++ b/arch/x86/events/msr.c
@@ -75,8 +75,9 @@ static bool test_intel(int idx, void *data)
case INTEL_FAM6_ATOM_GOLDMONT:
case INTEL_FAM6_ATOM_GOLDMONT_D:
-
case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
+ case INTEL_FAM6_ATOM_TREMONT_D:
+ case INTEL_FAM6_ATOM_TREMONT:
case INTEL_FAM6_XEON_PHI_KNL:
case INTEL_FAM6_XEON_PHI_KNM:
diff --git a/arch/x86/include/asm/io_bitmap.h b/arch/x86/include/asm/io_bitmap.h
index 02c6ef8f7667..07344d82e88e 100644
--- a/arch/x86/include/asm/io_bitmap.h
+++ b/arch/x86/include/asm/io_bitmap.h
@@ -19,7 +19,14 @@ struct task_struct;
void io_bitmap_share(struct task_struct *tsk);
void io_bitmap_exit(void);
-void tss_update_io_bitmap(void);
+void native_tss_update_io_bitmap(void);
+
+#ifdef CONFIG_PARAVIRT_XXL
+#include <asm/paravirt.h>
+#else
+#define tss_update_io_bitmap native_tss_update_io_bitmap
+#endif
+
#else
static inline void io_bitmap_share(struct task_struct *tsk) { }
static inline void io_bitmap_exit(void) { }
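
The indirection above lets a paravirtualized guest replace the native TSS
bitmap update with its own implementation. A sketch of how a guest kernel
might hook the new pv_ops slot (xen_update_io_bitmap is a hypothetical name
here, not part of this patch):

	#ifdef CONFIG_X86_IOPL_IOPERM
	static void xen_update_io_bitmap(void)
	{
		/* forward the I/O bitmap update to the hypervisor
		 * instead of writing the hardware TSS directly */
	}
	#endif

	/* assigned during the guest's paravirt setup: */
	pv_ops.cpu.update_io_bitmap = xen_update_io_bitmap;
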
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 03946eb3e2b9..2a8f2bd2e5cf 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -292,6 +292,14 @@ enum x86emul_mode {
#define X86EMUL_SMM_MASK (1 << 6)
#define X86EMUL_SMM_INSIDE_NMI_MASK (1 << 7)
+/*
+ * fastop functions are declared as taking a never-defined fastop parameter,
+ * so they can't be called from C directly.
+ */
+struct fastop;
+
+typedef void (*fastop_t)(struct fastop *);
+
struct x86_emulate_ctxt {
const struct x86_emulate_ops *ops;
@@ -324,7 +332,10 @@ struct x86_emulate_ctxt {
struct operand src;
struct operand src2;
struct operand dst;
- int (*execute)(struct x86_emulate_ctxt *ctxt);
+ union {
+ int (*execute)(struct x86_emulate_ctxt *ctxt);
+ fastop_t fop;
+ };
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
/*
* The following six fields are cleared together,
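
The union's payoff shows up in the emulate.c hunk below: dispatching a fastop
no longer needs a cast between incompatible function-pointer types, which
compilers flag (e.g. via -Wcast-function-type) and which the new -Werror
build would turn fatal. In short:

	rc = fastop(ctxt, (fastop_t)ctxt->execute);	/* old: cast */
	rc = fastop(ctxt, ctxt->fop);			/* new: union member */
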
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4dffbc10d3f8..98959e8cd448 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -781,9 +781,19 @@ struct kvm_vcpu_arch {
u64 msr_kvm_poll_control;
/*
- * Indicate whether the access faults on its page table in guest
- * which is set when fix page fault and used to detect unhandeable
- * instruction.
+ * Indicates the guest is trying to write a gfn that contains one or
+ * more of the PTEs used to translate the write itself, i.e. the access
+ * is changing its own translation in the guest page tables. KVM exits
+ * to userspace if emulation of the faulting instruction fails and this
+ * flag is set, as KVM cannot make forward progress.
+ *
+ * If emulation fails for a write to guest page tables, KVM unprotects
+ * (zaps) the shadow page for the target gfn and resumes the guest to
+ * retry the non-emulatable instruction (on hardware). Unprotecting the
+ * gfn doesn't allow forward progress for a self-changing access because
+ * doing so also zaps the translation for the gfn, i.e. retrying the
+ * instruction will hit a !PRESENT fault, which results in a new shadow
+ * page and sends KVM back to square one.
*/
bool write_fault_to_shadow_pgtable;
@@ -1112,6 +1122,7 @@ struct kvm_x86_ops {
int (*handle_exit)(struct kvm_vcpu *vcpu,
enum exit_fastpath_completion exit_fastpath);
int (*skip_emulated_instruction)(struct kvm_vcpu *vcpu);
+ void (*update_emulated_instruction)(struct kvm_vcpu *vcpu);
void (*set_interrupt_shadow)(struct kvm_vcpu *vcpu, int mask);
u32 (*get_interrupt_shadow)(struct kvm_vcpu *vcpu);
void (*patch_hypercall)(struct kvm_vcpu *vcpu,
@@ -1136,7 +1147,7 @@ struct kvm_x86_ops {
void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu);
void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa);
- void (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
+ int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector);
int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu);
int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
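
Changing deliver_posted_interrupt() to return int lets the local APIC code
fall back to the legacy path when posted delivery is unavailable; the
convention, visible in the lapic.c hunk further down, is 0 for delivered and
non-zero for "caller must fall back":

	if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
		/* fallback: latch the vector in IRR and kick the vCPU */
		kvm_lapic_set_irr(vector, apic);
		kvm_make_request(KVM_REQ_EVENT, vcpu);
		kvm_vcpu_kick(vcpu);
	}
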
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index ebe1685e92dd..d5e517d1c3dd 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -512,6 +512,8 @@
#define MSR_K7_HWCR 0xc0010015
#define MSR_K7_HWCR_SMMLOCK_BIT 0
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+#define MSR_K7_HWCR_IRPERF_EN_BIT 30
+#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 86e7317eb31f..694d8daf4983 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -295,6 +295,13 @@ static inline void write_idt_entry(gate_desc *dt, int entry, const gate_desc *g)
PVOP_VCALL3(cpu.write_idt_entry, dt, entry, g);
}
+#ifdef CONFIG_X86_IOPL_IOPERM
+static inline void tss_update_io_bitmap(void)
+{
+ PVOP_VCALL0(cpu.update_io_bitmap);
+}
+#endif
+
static inline void paravirt_activate_mm(struct mm_struct *prev,
struct mm_struct *next)
{
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 84812964d3dd..732f62e04ddb 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -140,6 +140,10 @@ struct pv_cpu_ops {
void (*load_sp0)(unsigned long sp0);
+#ifdef CONFIG_X86_IOPL_IOPERM
+ void (*update_io_bitmap)(void);
+#endif
+
void (*wbinvd)(void);
/* cpuid emulation, mostly so that caps bits can be disabled */
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 2a85287b3685..8521af3fef27 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -72,7 +72,7 @@
#define SECONDARY_EXEC_MODE_BASED_EPT_EXEC VMCS_CONTROL_BIT(MODE_BASED_EPT_EXEC)
#define SECONDARY_EXEC_PT_USE_GPA VMCS_CONTROL_BIT(PT_USE_GPA)
#define SECONDARY_EXEC_TSC_SCALING VMCS_CONTROL_BIT(TSC_SCALING)
-#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE 0x04000000
+#define SECONDARY_EXEC_ENABLE_USR_WAIT_PAUSE VMCS_CONTROL_BIT(USR_WAIT_PAUSE)
#define PIN_BASED_EXT_INTR_MASK VMCS_CONTROL_BIT(INTR_EXITING)
#define PIN_BASED_NMI_EXITING VMCS_CONTROL_BIT(NMI_EXITING)
diff --git a/arch/x86/include/asm/vmxfeatures.h b/arch/x86/include/asm/vmxfeatures.h
index a50e4a0de315..9915990fd8cf 100644
--- a/arch/x86/include/asm/vmxfeatures.h
+++ b/arch/x86/include/asm/vmxfeatures.h
@@ -81,6 +81,7 @@
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */
#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */
+#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */
#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
#endif /* _ASM_X86_VMXFEATURES_H */
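
The vmx.h change above is value-preserving: VMX_FEATURE_USR_WAIT_PAUSE sits
at bit 26 of the secondary controls word (2*32 + 26), and VMCS_CONTROL_BIT()
masks the word index away, so the macro expands to BIT(26) == 0x04000000,
the old literal. A compile-time check along these lines would confirm it
(sketch):

	BUILD_BUG_ON(VMCS_CONTROL_BIT(USR_WAIT_PAUSE) != 0x04000000);
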
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 503d3f42da16..3f3f780c8c65 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -390,6 +390,7 @@ struct kvm_sync_regs {
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
+#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index ac83a0fef628..1f875fbe1384 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -28,6 +28,7 @@
static const int amd_erratum_383[];
static const int amd_erratum_400[];
+static const int amd_erratum_1054[];
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum);
/*
@@ -972,6 +973,15 @@ static void init_amd(struct cpuinfo_x86 *c)
/* AMD CPUs don't reset SS attributes on SYSRET, Xen does. */
if (!cpu_has(c, X86_FEATURE_XENPV))
set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);
+
+ /*
+ * Turn on the Instructions Retired free counter on machines not
+ * susceptible to erratum #1054 "Instructions Retired Performance
+ * Counter May Be Inaccurate".
+ */
+ if (cpu_has(c, X86_FEATURE_IRPERF) &&
+ !cpu_has_amd_erratum(c, amd_erratum_1054))
+ msr_set_bit(MSR_K7_HWCR, MSR_K7_HWCR_IRPERF_EN_BIT);
}
#ifdef CONFIG_X86_32
@@ -1099,6 +1109,10 @@ static const int amd_erratum_400[] =
static const int amd_erratum_383[] =
AMD_OSVW_ERRATUM(3, AMD_MODEL_RANGE(0x10, 0, 0, 0xff, 0xf));
+/* #1054: Instructions Retired Performance Counter May Be Inaccurate */
+static const int amd_erratum_1054[] =
+ AMD_OSVW_ERRATUM(0, AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf));
+
static bool cpu_has_amd_erratum(struct cpuinfo_x86 *cpu, const int *erratum)
{
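
For reference, the erratum table entries pack an OSVW id plus
family/model/stepping ranges; the macros, defined earlier in amd.c (not shown
in this hunk), look like this:

	#define AMD_OSVW_ERRATUM(osvw_id, ...)	{ osvw_id, __VA_ARGS__, 0 }
	#define AMD_MODEL_RANGE(f, m_start, s_start, m_end, s_end) \
		((f << 24) | (m_start << 16) | (s_start << 12) | \
		 (m_end << 4) | (s_end))

So AMD_MODEL_RANGE(0x17, 0, 0, 0x2f, 0xf) covers family 17h, models 00h
through 2Fh, at any stepping.
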
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 52c9bfbbdb2a..4cdb123ff66a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -445,7 +445,7 @@ static __always_inline void setup_pku(struct cpuinfo_x86 *c)
* cpuid bit to be set. We need to ensure that we
* update that bit in this CPU's "cpu_info".
*/
- get_cpu_cap(c);
+ set_cpu_cap(c, X86_FEATURE_OSPKE);
}
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
diff --git a/arch/x86/kernel/cpu/mce/amd.c b/arch/x86/kernel/cpu/mce/amd.c
index b3a50d962851..52de616a8065 100644
--- a/arch/x86/kernel/cpu/mce/amd.c
+++ b/arch/x86/kernel/cpu/mce/amd.c
@@ -1163,9 +1163,12 @@ static const struct sysfs_ops threshold_ops = {
.store = store,
};
+static void threshold_block_release(struct kobject *kobj);
+
static struct kobj_type threshold_ktype = {
.sysfs_ops = &threshold_ops,
.default_attrs = default_attrs,
+ .release = threshold_block_release,
};
static const char *get_name(unsigned int bank, struct threshold_block *b)
@@ -1198,8 +1201,9 @@ static const char *get_name(unsigned int bank, struct threshold_block *b)
return buf_mcatype;
}
-static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
- unsigned int block, u32 address)
+static int allocate_threshold_blocks(unsigned int cpu, struct threshold_bank *tb,
+ unsigned int bank, unsigned int block,
+ u32 address)
{
struct threshold_block *b = NULL;
u32 low, high;
@@ -1243,16 +1247,12 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
INIT_LIST_HEAD(&b->miscj);
- if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
- list_add(&b->miscj,
- &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
- } else {
- per_cpu(threshold_banks, cpu)[bank]->blocks = b;
- }
+ if (tb->blocks)
+ list_add(&b->miscj, &tb->blocks->miscj);
+ else
+ tb->blocks = b;
- err = kobject_init_and_add(&b->kobj, &threshold_ktype,
- per_cpu(threshold_banks, cpu)[bank]->kobj,
- get_name(bank, b));
+ err = kobject_init_and_add(&b->kobj, &threshold_ktype, tb->kobj, get_name(bank, b));
if (err)
goto out_free;
recurse:
@@ -1260,7 +1260,7 @@ recurse:
if (!address)
return 0;
- err = allocate_threshold_blocks(cpu, bank, block, address);
+ err = allocate_threshold_blocks(cpu, tb, bank, block, address);
if (err)
goto out_free;
@@ -1345,8 +1345,6 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
goto out_free;
}
- per_cpu(threshold_banks, cpu)[bank] = b;
-
if (is_shared_bank(bank)) {
refcount_set(&b->cpus, 1);
@@ -1357,9 +1355,13 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
}
}
- err = allocate_threshold_blocks(cpu, bank, 0, msr_ops.misc(bank));
- if (!err)
- goto out;
+ err = allocate_threshold_blocks(cpu, b, bank, 0, msr_ops.misc(bank));
+ if (err)
+ goto out_free;
+
+ per_cpu(threshold_banks, cpu)[bank] = b;
+
+ return 0;
out_free:
kfree(b);
@@ -1368,8 +1370,12 @@ static int threshold_create_bank(unsigned int cpu, unsigned int bank)
return err;
}
-static void deallocate_threshold_block(unsigned int cpu,
- unsigned int bank)
+static void threshold_block_release(struct kobject *kobj)
+{
+ kfree(to_block(kobj));
+}
+
+static void deallocate_threshold_block(unsigned int cpu, unsigned int bank)
{
struct threshold_block *pos = NULL;
struct threshold_block *tmp = NULL;
@@ -1379,13 +1385,11 @@ static void deallocate_threshold_block(unsigned int cpu,
return;
list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
- kobject_put(&pos->kobj);
list_del(&pos->miscj);
- kfree(pos);
+ kobject_put(&pos->kobj);
}
- kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
- per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
+ kobject_put(&head->blocks->kobj);
}
static void __threshold_remove_blocks(struct threshold_bank *b)
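
Moving the kfree() into the kobject ->release() callback ties the block's
lifetime to its reference count, so sysfs users still holding a reference can
no longer race with teardown. to_block() is not shown in this hunk;
presumably it is the usual container_of() helper:

	static inline struct threshold_block *to_block(struct kobject *kobj)
	{
		return container_of(kobj, struct threshold_block, kobj);
	}
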
diff --git a/arch/x86/kernel/ima_arch.c b/arch/x86/kernel/ima_arch.c
index 4d4f5d9faac3..23054909c8dd 100644
--- a/arch/x86/kernel/ima_arch.c
+++ b/arch/x86/kernel/ima_arch.c
@@ -10,8 +10,6 @@ extern struct boot_params boot_params;
static enum efi_secureboot_mode get_sb_mode(void)
{
- efi_char16_t efi_SecureBoot_name[] = L"SecureBoot";
- efi_char16_t efi_SetupMode_name[] = L"SecureBoot";
efi_guid_t efi_variable_guid = EFI_GLOBAL_VARIABLE_GUID;
efi_status_t status;
unsigned long size;
@@ -25,7 +23,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
}
/* Get variable contents into buffer */
- status = efi.get_variable(efi_SecureBoot_name, &efi_variable_guid,
+ status = efi.get_variable(L"SecureBoot", &efi_variable_guid,
NULL, &size, &secboot);
if (status == EFI_NOT_FOUND) {
pr_info("ima: secureboot mode disabled\n");
@@ -38,7 +36,7 @@ static enum efi_secureboot_mode get_sb_mode(void)
}
size = sizeof(setupmode);
- status = efi.get_variable(efi_SetupMode_name, &efi_variable_guid,
+ status = efi.get_variable(L"SetupMode", &efi_variable_guid,
NULL, &size, &setupmode);
if (status != EFI_SUCCESS) /* ignore unknown SetupMode */
diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index d817f255aed8..6efe0410fb72 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -425,7 +425,29 @@ static void __init sev_map_percpu_data(void)
}
}
+static bool pv_tlb_flush_supported(void)
+{
+ return (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
+ !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
+static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);
+
#ifdef CONFIG_SMP
+
+static bool pv_ipi_supported(void)
+{
+ return kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI);
+}
+
+static bool pv_sched_yield_supported(void)
+{
+ return (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
+ !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
+ kvm_para_has_feature(KVM_FEATURE_STEAL_TIME));
+}
+
#define KVM_IPI_CLUSTER_SIZE (2 * BITS_PER_LONG)
static void __send_ipi_mask(const struct cpumask *mask, int vector)
@@ -490,12 +512,12 @@ static void kvm_send_ipi_mask(const struct cpumask *mask, int vector)
static void kvm_send_ipi_mask_allbutself(const struct cpumask *mask, int vector)
{
unsigned int this_cpu = smp_processor_id();
- struct cpumask new_mask;
+ struct cpumask *new_mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
const struct cpumask *local_mask;
- cpumask_copy(&new_mask, mask);
- cpumask_clear_cpu(this_cpu, &new_mask);
- local_mask = &new_mask;
+ cpumask_copy(new_mask, mask);
+ cpumask_clear_cpu(this_cpu, new_mask);
+ local_mask = new_mask;
__send_ipi_mask(local_mask, vector);
}
@@ -575,7 +597,6 @@ static void __init kvm_apf_trap_init(void)
update_intr_gate(X86_TRAP_PF, async_page_fault);
}
-static DEFINE_PER_CPU(cpumask_var_t, __pv_tlb_mask);
static void kvm_flush_tlb_others(const struct cpumask *cpumask,
const struct flush_tlb_info *info)
@@ -583,7 +604,7 @@ static void kvm_flush_tlb_others(const struct cpumask *cpumask,
u8 state;
int cpu;
struct kvm_steal_time *src;
- struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_tlb_mask);
+ struct cpumask *flushmask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
cpumask_copy(flushmask, cpumask);
/*
@@ -619,11 +640,10 @@ static void __init kvm_guest_init(void)
pv_ops.time.steal_clock = kvm_steal_clock;
}
- if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
- !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ if (pv_tlb_flush_supported()) {
pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others;
pv_ops.mmu.tlb_remove_table = tlb_remove_table;
+ pr_info("KVM setup pv remote TLB flush\n");
}
if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
@@ -632,9 +652,7 @@ static void __init kvm_guest_init(void)
#ifdef CONFIG_SMP
smp_ops.smp_prepare_cpus = kvm_smp_prepare_cpus;
smp_ops.smp_prepare_boot_cpu = kvm_smp_prepare_boot_cpu;
- if (kvm_para_has_feature(KVM_FEATURE_PV_SCHED_YIELD) &&
- !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ if (pv_sched_yield_supported()) {
smp_ops.send_call_func_ipi = kvm_smp_send_call_func_ipi;
pr_info("KVM setup pv sched yield\n");
}
@@ -700,7 +718,7 @@ static uint32_t __init kvm_detect(void)
static void __init kvm_apic_init(void)
{
#if defined(CONFIG_SMP)
- if (kvm_para_has_feature(KVM_FEATURE_PV_SEND_IPI))
+ if (pv_ipi_supported())
kvm_setup_pv_ipi();
#endif
}
@@ -732,26 +750,31 @@ static __init int activate_jump_labels(void)
}
arch_initcall(activate_jump_labels);
-static __init int kvm_setup_pv_tlb_flush(void)
+static __init int kvm_alloc_cpumask(void)
{
int cpu;
+ bool alloc = false;
if (!kvm_para_available() || nopv)
return 0;
- if (kvm_para_has_feature(KVM_FEATURE_PV_TLB_FLUSH) &&
- !kvm_para_has_hint(KVM_HINTS_REALTIME) &&
- kvm_para_has_feature(KVM_FEATURE_STEAL_TIME)) {
+ if (pv_tlb_flush_supported())
+ alloc = true;
+
+#if defined(CONFIG_SMP)
+ if (pv_ipi_supported())
+ alloc = true;
+#endif
+
+ if (alloc)
for_each_possible_cpu(cpu) {
- zalloc_cpumask_var_node(per_cpu_ptr(&__pv_tlb_mask, cpu),
+ zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
GFP_KERNEL, cpu_to_node(cpu));
}
- pr_info("KVM setup pv remote TLB flush\n");
- }
return 0;
}
-arch_initcall(kvm_setup_pv_tlb_flush);
+arch_initcall(kvm_alloc_cpumask);
#ifdef CONFIG_PARAVIRT_SPINLOCKS
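
Replacing the on-stack struct cpumask with a preallocated per-CPU mask
matters once CONFIG_CPUMASK_OFFSTACK=y, where struct cpumask spans NR_CPUS
bits and cpumask_var_t becomes a pointer that must be allocated. The access
pattern, reduced to a sketch of what the patch does:

	static DEFINE_PER_CPU(cpumask_var_t, __pv_cpu_mask);

	/* once per possible CPU, at init (see kvm_alloc_cpumask above) */
	zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu),
				GFP_KERNEL, cpu_to_node(cpu));

	/* at use sites: no NR_CPUS-sized copy on the stack */
	struct cpumask *mask = this_cpu_cpumask_var_ptr(__pv_cpu_mask);
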
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index 789f5e4f89de..c131ba4e70ef 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -30,6 +30,7 @@
#include <asm/timer.h>
#include <asm/special_insns.h>
#include <asm/tlb.h>
+#include <asm/io_bitmap.h>
/*
* nop stub, which must not clobber anything *including the stack* to
@@ -341,6 +342,10 @@ struct paravirt_patch_template pv_ops = {
.cpu.iret = native_iret,
.cpu.swapgs = native_swapgs,
+#ifdef CONFIG_X86_IOPL_IOPERM
+ .cpu.update_io_bitmap = native_tss_update_io_bitmap,
+#endif
+
.cpu.start_context_switch = paravirt_nop,
.cpu.end_context_switch = paravirt_nop,
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index 839b5244e3b7..3053c85e0e42 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -374,7 +374,7 @@ static void tss_copy_io_bitmap(struct tss_struct *tss, struct io_bitmap *iobm)
/**
* tss_update_io_bitmap - Update I/O bitmap before exiting to usermode
*/
-void tss_update_io_bitmap(void)
+void native_tss_update_io_bitmap(void)
{
struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
struct thread_struct *t = &current->thread;
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 991019d5eee1..1bb4927030af 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -59,6 +59,19 @@ config KVM
If unsure, say N.
+config KVM_WERROR
+ bool "Compile KVM with -Werror"
+ # KASAN may cause the build to fail due to larger frames
+ default y if X86_64 && !KASAN
+	# The !COMPILE_TEST dependency keeps this from being enabled
+	# blindly in allmodconfig or allyesconfig configurations
+ depends on (X86_64 && !KASAN) || !COMPILE_TEST
+ depends on EXPERT
+ help
+	  Add -Werror to the build flags for (and only for) KVM.
+
+ If in doubt, say "N".
+
config KVM_INTEL
tristate "KVM for Intel (and compatible) processors support"
depends on KVM && IA32_FEAT_CTL
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index b19ef421084d..e553f0fdd87d 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y += -Iarch/x86/kvm
+ccflags-$(CONFIG_KVM_WERROR) += -Werror
KVM := ../../../virt/kvm
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index ddbc61984227..dd19fb3539e0 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -191,25 +191,6 @@
#define NR_FASTOP (ilog2(sizeof(ulong)) + 1)
#define FASTOP_SIZE 8
-/*
- * fastop functions have a special calling convention:
- *
- * dst: rax (in/out)
- * src: rdx (in/out)
- * src2: rcx (in)
- * flags: rflags (in/out)
- * ex: rsi (in:fastop pointer, out:zero if exception)
- *
- * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
- * different operand sizes can be reached by calculation, rather than a jump
- * table (which would be bigger than the code).
- *
- * fastop functions are declared as taking a never-defined fastop parameter,
- * so they can't be called from C directly.
- */
-
-struct fastop;
-
struct opcode {
u64 flags : 56;
u64 intercept : 8;
@@ -311,8 +292,19 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
#define ON64(x)
#endif
-typedef void (*fastop_t)(struct fastop *);
-
+/*
+ * fastop functions have a special calling convention:
+ *
+ * dst: rax (in/out)
+ * src: rdx (in/out)
+ * src2: rcx (in)
+ * flags: rflags (in/out)
+ * ex: rsi (in:fastop pointer, out:zero if exception)
+ *
+ * Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
+ * different operand sizes can be reached by calculation, rather than a jump
+ * table (which would be bigger than the code).
+ */
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
#define __FOP_FUNC(name) \
@@ -5683,7 +5675,7 @@ special_insn:
if (ctxt->execute) {
if (ctxt->d & Fastop)
- rc = fastop(ctxt, (fastop_t)ctxt->execute);
+ rc = fastop(ctxt, ctxt->fop);
else
rc = ctxt->execute(ctxt);
if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 79afa0bb5f41..c47d2acec529 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -417,7 +417,7 @@ void kvm_scan_ioapic_routes(struct kvm_vcpu *vcpu,
kvm_set_msi_irq(vcpu->kvm, entry, &irq);
- if (irq.level &&
+ if (irq.trig_mode &&
kvm_apic_match_dest(vcpu, NULL, APIC_DEST_NOSHORT,
irq.dest_id, irq.dest_mode))
__set_bit(irq.vector, ioapic_handled_vectors);
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index eafc631d305c..e3099c642fec 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -627,9 +627,11 @@ static inline bool pv_eoi_enabled(struct kvm_vcpu *vcpu)
static bool pv_eoi_get_pending(struct kvm_vcpu *vcpu)
{
u8 val;
- if (pv_eoi_get_user(vcpu, &val) < 0)
+ if (pv_eoi_get_user(vcpu, &val) < 0) {
printk(KERN_WARNING "Can't read EOI MSR value: 0x%llx\n",
(unsigned long long)vcpu->arch.pv_eoi.msr_val);
+ return false;
+ }
return val & 0x1;
}
@@ -1046,11 +1048,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
apic->regs + APIC_TMR);
}
- if (vcpu->arch.apicv_active)
- kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
- else {
+ if (kvm_x86_ops->deliver_posted_interrupt(vcpu, vector)) {
kvm_lapic_set_irr(vector, apic);
-
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
@@ -1080,9 +1079,6 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
result = 1;
/* assumes that there are only KVM_APIC_INIT/SIPI */
apic->pending_events = (1UL << KVM_APIC_INIT);
- /* make sure pending_events is visible before sending
- * the request */
- smp_wmb();
kvm_make_request(KVM_REQ_EVENT, vcpu);
kvm_vcpu_kick(vcpu);
}
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index d55674f44a18..a647601c9e1c 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -102,6 +102,19 @@ static inline void kvm_mmu_load_cr3(struct kvm_vcpu *vcpu)
kvm_get_active_pcid(vcpu));
}
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+ bool prefault);
+
+static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ u32 err, bool prefault)
+{
+#ifdef CONFIG_RETPOLINE
+ if (likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault))
+ return kvm_tdp_page_fault(vcpu, cr2_or_gpa, err, prefault);
+#endif
+ return vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa, err, prefault);
+}
+
/*
* Currently, we have two sorts of write-protection, a) the first one
* write-protects guest page to sync the guest modification, b) another one is
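
kvm_mmu_do_page_fault() is a devirtualization shim: with CONFIG_RETPOLINE,
every indirect call goes through a thunk, so comparing mmu->page_fault
against the common TDP handler and calling it directly keeps the hot path
cheap. The generic shape of the pattern, as a sketch with illustrative names:

	typedef int (*handler_t)(int arg);

	static int common_handler(int arg)
	{
		return arg;	/* stands in for the hot-path implementation */
	}

	static inline int call_handler(handler_t fp, int arg)
	{
	#ifdef CONFIG_RETPOLINE
		if (likely(fp == common_handler))
			return common_handler(arg);	/* direct call, no thunk */
	#endif
		return fp(arg);				/* indirect call */
	}
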
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 7011a4e54866..87e9ba27ada1 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4219,8 +4219,8 @@ int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
}
EXPORT_SYMBOL_GPL(kvm_handle_page_fault);
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
- bool prefault)
+int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
+ bool prefault)
{
int max_level;
@@ -4925,7 +4925,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
return;
context->mmu_role.as_u64 = new_role.as_u64;
- context->page_fault = tdp_page_fault;
+ context->page_fault = kvm_tdp_page_fault;
context->sync_page = nonpaging_sync_page;
context->invlpg = nonpaging_invlpg;
context->update_pte = nonpaging_update_pte;
@@ -5436,9 +5436,8 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa, u64 error_code,
}
if (r == RET_PF_INVALID) {
- r = vcpu->arch.mmu->page_fault(vcpu, cr2_or_gpa,
- lower_32_bits(error_code),
- false);
+ r = kvm_mmu_do_page_fault(vcpu, cr2_or_gpa,
+ lower_32_bits(error_code), false);
WARN_ON(r == RET_PF_INVALID);
}
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index 4e1ef0473663..e4c8a4cbf407 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -33,7 +33,7 @@
#define PT_GUEST_ACCESSED_SHIFT PT_ACCESSED_SHIFT
#define PT_HAVE_ACCESSED_DIRTY(mmu) true
#ifdef CONFIG_X86_64
- #define PT_MAX_FULL_LEVELS 4
+ #define PT_MAX_FULL_LEVELS PT64_ROOT_MAX_LEVEL
#define CMPXCHG cmpxchg
#else
#define CMPXCHG cmpxchg64
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index 3c6522b84ff1..ffcd96fc02d0 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -339,7 +339,7 @@ TRACE_EVENT(
/* These depend on page entry type, so compute them now. */
__field(bool, r)
__field(bool, x)
- __field(u8, u)
+ __field(signed char, u)
),
TP_fast_assign(
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a3e32d61d60c..24c0b2ba8fb9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -57,11 +57,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+#ifdef MODULE
static const struct x86_cpu_id svm_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_SVM),
{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
+#endif
#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1
@@ -1005,33 +1007,32 @@ static void svm_cpu_uninit(int cpu)
static int svm_cpu_init(int cpu)
{
struct svm_cpu_data *sd;
- int r;
sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
if (!sd)
return -ENOMEM;
sd->cpu = cpu;
- r = -ENOMEM;
sd->save_area = alloc_page(GFP_KERNEL);
if (!sd->save_area)
- goto err_1;
+ goto free_cpu_data;
if (svm_sev_enabled()) {
- r = -ENOMEM;
sd->sev_vmcbs = kmalloc_array(max_sev_asid + 1,
sizeof(void *),
GFP_KERNEL);
if (!sd->sev_vmcbs)
- goto err_1;
+ goto free_save_area;
}
per_cpu(svm_data, cpu) = sd;
return 0;
-err_1:
+free_save_area:
+ __free_page(sd->save_area);
+free_cpu_data:
kfree(sd);
- return r;
+ return -ENOMEM;
}
@@ -1350,6 +1351,24 @@ static __init void svm_adjust_mmio_mask(void)
kvm_mmu_set_mmio_spte_mask(mask, mask, PT_WRITABLE_MASK | PT_USER_MASK);
}
+static void svm_hardware_teardown(void)
+{
+ int cpu;
+
+ if (svm_sev_enabled()) {
+ bitmap_free(sev_asid_bitmap);
+ bitmap_free(sev_reclaim_asid_bitmap);
+
+ sev_flush_asids();
+ }
+
+ for_each_possible_cpu(cpu)
+ svm_cpu_uninit(cpu);
+
+ __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
+ iopm_base = 0;
+}
+
static __init int svm_hardware_setup(void)
{
int cpu;
@@ -1463,29 +1482,10 @@ static __init int svm_hardware_setup(void)
return 0;
err:
- __free_pages(iopm_pages, IOPM_ALLOC_ORDER);
- iopm_base = 0;
+ svm_hardware_teardown();
return r;
}
-static __exit void svm_hardware_unsetup(void)
-{
- int cpu;
-
- if (svm_sev_enabled()) {
- bitmap_free(sev_asid_bitmap);
- bitmap_free(sev_reclaim_asid_bitmap);
-
- sev_flush_asids();
- }
-
- for_each_possible_cpu(cpu)
- svm_cpu_uninit(cpu);
-
- __free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
- iopm_base = 0;
-}
-
static void init_seg(struct vmcb_seg *seg)
{
seg->selector = 0;
@@ -2175,7 +2175,6 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
u32 dummy;
u32 eax = 1;
- vcpu->arch.microcode_version = 0x01000065;
svm->spec_ctrl = 0;
svm->virt_spec_ctrl = 0;
@@ -2197,8 +2196,9 @@ static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
static int avic_init_vcpu(struct vcpu_svm *svm)
{
int ret;
+ struct kvm_vcpu *vcpu = &svm->vcpu;
- if (!kvm_vcpu_apicv_active(&svm->vcpu))
+ if (!avic || !irqchip_in_kernel(vcpu->kvm))
return 0;
ret = avic_init_backing_page(&svm->vcpu);
@@ -2266,6 +2266,7 @@ static int svm_create_vcpu(struct kvm_vcpu *vcpu)
init_vmcb(svm);
svm_init_osvw(vcpu);
+ vcpu->arch.microcode_version = 0x01000065;
return 0;
@@ -5232,6 +5233,9 @@ static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
struct vmcb *vmcb = svm->vmcb;
bool activated = kvm_vcpu_apicv_active(vcpu);
+ if (!avic)
+ return;
+
if (activated) {
/**
* During AVIC temporary deactivation, guest could update
@@ -5255,8 +5259,11 @@ static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
return;
}
-static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
+static int svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
+ if (!vcpu->arch.apicv_active)
+ return -1;
+
kvm_lapic_set_irr(vec, vcpu->arch.apic);
smp_mb__after_atomic();
@@ -5268,6 +5275,8 @@ static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
put_cpu();
} else
kvm_vcpu_wake_up(vcpu);
+
+ return 0;
}
static bool svm_dy_apicv_has_pending_interrupt(struct kvm_vcpu *vcpu)
@@ -7378,7 +7387,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.cpu_has_kvm_support = has_svm,
.disabled_by_bios = is_disabled,
.hardware_setup = svm_hardware_setup,
- .hardware_unsetup = svm_hardware_unsetup,
+ .hardware_unsetup = svm_hardware_teardown,
.check_processor_compatibility = svm_check_processor_compat,
.hardware_enable = svm_hardware_enable,
.hardware_disable = svm_hardware_disable,
@@ -7433,6 +7442,7 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
.run = svm_vcpu_run,
.handle_exit = handle_exit,
.skip_emulated_instruction = skip_emulated_instruction,
+ .update_emulated_instruction = NULL,
.set_interrupt_shadow = svm_set_interrupt_shadow,
.get_interrupt_shadow = svm_get_interrupt_shadow,
.patch_hypercall = svm_patch_hypercall,
diff --git a/arch/x86/kvm/vmx/capabilities.h b/arch/x86/kvm/vmx/capabilities.h
index 283bdb7071af..f486e2606247 100644
--- a/arch/x86/kvm/vmx/capabilities.h
+++ b/arch/x86/kvm/vmx/capabilities.h
@@ -12,6 +12,7 @@ extern bool __read_mostly enable_ept;
extern bool __read_mostly enable_unrestricted_guest;
extern bool __read_mostly enable_ept_ad_bits;
extern bool __read_mostly enable_pml;
+extern bool __read_mostly enable_apicv;
extern int __read_mostly pt_mode;
#define PT_MODE_SYSTEM 0
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 657c2eda357c..e920d7834d73 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -544,7 +544,8 @@ static void nested_vmx_disable_intercept_for_msr(unsigned long *msr_bitmap_l1,
}
}
-static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap) {
+static inline void enable_x2apic_msr_intercepts(unsigned long *msr_bitmap)
+{
int msr;
for (msr = 0x800; msr <= 0x8ff; msr += BITS_PER_LONG) {
@@ -1981,7 +1982,7 @@ static int nested_vmx_handle_enlightened_vmptrld(struct kvm_vcpu *vcpu,
}
/*
- * Clean fields data can't de used on VMLAUNCH and when we switch
+ * Clean fields data can't be used on VMLAUNCH and when we switch
* between different L2 guests as KVM keeps a single VMCS12 per L1.
*/
if (from_launch || evmcs_gpa_changed)
@@ -3160,10 +3161,10 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
* or KVM_SET_NESTED_STATE). Otherwise it's called from vmlaunch/vmresume.
*
* Returns:
- * NVMX_ENTRY_SUCCESS: Entered VMX non-root mode
- * NVMX_ENTRY_VMFAIL: Consistency check VMFail
- * NVMX_ENTRY_VMEXIT: Consistency check VMExit
- * NVMX_ENTRY_KVM_INTERNAL_ERROR: KVM internal error
+ * NVMX_VMENTRY_SUCCESS: Entered VMX non-root mode
+ * NVMX_VMENTRY_VMFAIL: Consistency check VMFail
+ * NVMX_VMENTRY_VMEXIT: Consistency check VMExit
+ * NVMX_VMENTRY_KVM_INTERNAL_ERROR: KVM internal error
*/
enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
bool from_vmentry)
@@ -3575,25 +3576,80 @@ static void nested_vmx_inject_exception_vmexit(struct kvm_vcpu *vcpu,
nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI, intr_info, exit_qual);
}
+/*
+ * Returns true if a debug trap is pending delivery.
+ *
+ * In KVM, debug traps bear an exception payload. As such, the class of a #DB
+ * exception may be inferred from the presence of an exception payload.
+ */
+static inline bool vmx_pending_dbg_trap(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.exception.pending &&
+ vcpu->arch.exception.nr == DB_VECTOR &&
+ vcpu->arch.exception.payload;
+}
+
+/*
+ * Certain VM-exits set the 'pending debug exceptions' field to indicate a
+ * recognized #DB (data or single-step) that has yet to be delivered. Since KVM
+ * represents these debug traps with a payload that is said to be compatible
+ * with the 'pending debug exceptions' field, write the payload to the VMCS
+ * field if a VM-exit is delivered before the debug trap.
+ */
+static void nested_vmx_update_pending_dbg(struct kvm_vcpu *vcpu)
+{
+ if (vmx_pending_dbg_trap(vcpu))
+ vmcs_writel(GUEST_PENDING_DBG_EXCEPTIONS,
+ vcpu->arch.exception.payload);
+}
+
static int vmx_check_nested_events(struct kvm_vcpu *vcpu, bool external_intr)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
unsigned long exit_qual;
bool block_nested_events =
vmx->nested.nested_run_pending || kvm_event_needs_reinjection(vcpu);
+ bool mtf_pending = vmx->nested.mtf_pending;
struct kvm_lapic *apic = vcpu->arch.apic;
+ /*
+ * Clear the MTF state. If a higher priority VM-exit is delivered first,
+ * this state is discarded.
+ */
+ vmx->nested.mtf_pending = false;
+
if (lapic_in_kernel(vcpu) &&
test_bit(KVM_APIC_INIT, &apic->pending_events)) {
if (block_nested_events)
return -EBUSY;
+ nested_vmx_update_pending_dbg(vcpu);
clear_bit(KVM_APIC_INIT, &apic->pending_events);
nested_vmx_vmexit(vcpu, EXIT_REASON_INIT_SIGNAL, 0, 0);
return 0;
}
+ /*
+ * Process any exceptions that are not debug traps before MTF.
+ */
+ if (vcpu->arch.exception.pending &&
+ !vmx_pending_dbg_trap(vcpu) &&
+ nested_vmx_check_exception(vcpu, &exit_qual)) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
+ return 0;
+ }
+
+ if (mtf_pending) {
+ if (block_nested_events)
+ return -EBUSY;
+ nested_vmx_update_pending_dbg(vcpu);
+ nested_vmx_vmexit(vcpu, EXIT_REASON_MONITOR_TRAP_FLAG, 0, 0);
+ return 0;
+ }
+
if (vcpu->arch.exception.pending &&
- nested_vmx_check_exception(vcpu, &exit_qual)) {
+ nested_vmx_check_exception(vcpu, &exit_qual)) {
if (block_nested_events)
return -EBUSY;
nested_vmx_inject_exception_vmexit(vcpu, exit_qual);
@@ -5256,24 +5312,17 @@ fail:
return 1;
}
-
-static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
- struct vmcs12 *vmcs12)
+/*
+ * Return true if an IO instruction with the specified port and size should cause
+ * a VM-exit into L1.
+ */
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+ int size)
{
- unsigned long exit_qualification;
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
gpa_t bitmap, last_bitmap;
- unsigned int port;
- int size;
u8 b;
- if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
- return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
-
- exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-
- port = exit_qualification >> 16;
- size = (exit_qualification & 7) + 1;
-
last_bitmap = (gpa_t)-1;
b = -1;
@@ -5300,8 +5349,26 @@ static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
return false;
}
+static bool nested_vmx_exit_handled_io(struct kvm_vcpu *vcpu,
+ struct vmcs12 *vmcs12)
+{
+ unsigned long exit_qualification;
+ unsigned short port;
+ int size;
+
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ return nested_cpu_has(vmcs12, CPU_BASED_UNCOND_IO_EXITING);
+
+ exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
+
+ port = exit_qualification >> 16;
+ size = (exit_qualification & 7) + 1;
+
+ return nested_vmx_check_io_bitmaps(vcpu, port, size);
+}
+
/*
- * Return 1 if we should exit from L2 to L1 to handle an MSR access access,
+ * Return 1 if we should exit from L2 to L1 to handle an MSR access,
* rather than handle it ourselves in L0. I.e., check whether L1 expressed
* disinterest in the current event (read or write a specific MSR) by using an
* MSR bitmap. This may be the case even when L0 doesn't use MSR bitmaps.
@@ -5683,6 +5750,9 @@ static int vmx_get_nested_state(struct kvm_vcpu *vcpu,
if (vmx->nested.nested_run_pending)
kvm_state.flags |= KVM_STATE_NESTED_RUN_PENDING;
+
+ if (vmx->nested.mtf_pending)
+ kvm_state.flags |= KVM_STATE_NESTED_MTF_PENDING;
}
}
@@ -5863,6 +5933,9 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
vmx->nested.nested_run_pending =
!!(kvm_state->flags & KVM_STATE_NESTED_RUN_PENDING);
+ vmx->nested.mtf_pending =
+ !!(kvm_state->flags & KVM_STATE_NESTED_MTF_PENDING);
+
ret = -EINVAL;
if (nested_cpu_has_shadow_vmcs(vmcs12) &&
vmcs12->vmcs_link_pointer != -1ull) {
@@ -5920,8 +5993,7 @@ void nested_vmx_set_vmcs_shadowing_bitmap(void)
* bit in the high half is on if the corresponding bit in the control field
* may be on. See also vmx_control_verify().
*/
-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
- bool apicv)
+void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps)
{
/*
* Note that as a general rule, the high half of the MSRs (bits in
@@ -5948,7 +6020,7 @@ void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
PIN_BASED_EXT_INTR_MASK |
PIN_BASED_NMI_EXITING |
PIN_BASED_VIRTUAL_NMIS |
- (apicv ? PIN_BASED_POSTED_INTR : 0);
+ (enable_apicv ? PIN_BASED_POSTED_INTR : 0);
msrs->pinbased_ctls_high |=
PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
PIN_BASED_VMX_PREEMPTION_TIMER;
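
KVM_STATE_NESTED_MTF_PENDING makes the pending-MTF bit migratable: it simply
rides along in the flags word of KVM_GET/SET_NESTED_STATE. A hedged userspace
sketch (vcpu_fd is assumed; real callers must size the buffer for the vmcs12
data and handle errors):

	struct kvm_nested_state state = { .size = sizeof(state) };

	if (ioctl(vcpu_fd, KVM_GET_NESTED_STATE, &state) == 0 &&
	    (state.flags & KVM_STATE_NESTED_MTF_PENDING)) {
		/* an MTF VM-exit into L1 is pending; hand the same flags
		 * back via KVM_SET_NESTED_STATE on the destination */
	}
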
diff --git a/arch/x86/kvm/vmx/nested.h b/arch/x86/kvm/vmx/nested.h
index fc874d4ead0f..9aeda46f473e 100644
--- a/arch/x86/kvm/vmx/nested.h
+++ b/arch/x86/kvm/vmx/nested.h
@@ -17,8 +17,7 @@ enum nvmx_vmentry_status {
};
void vmx_leave_nested(struct kvm_vcpu *vcpu);
-void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps,
- bool apicv);
+void nested_vmx_setup_ctls_msrs(struct nested_vmx_msrs *msrs, u32 ept_caps);
void nested_vmx_hardware_unsetup(void);
__init int nested_vmx_hardware_setup(int (*exit_handlers[])(struct kvm_vcpu *));
void nested_vmx_set_vmcs_shadowing_bitmap(void);
@@ -34,6 +33,8 @@ int vmx_get_vmx_msr(struct nested_vmx_msrs *msrs, u32 msr_index, u64 *pdata);
int get_vmx_mem_address(struct kvm_vcpu *vcpu, unsigned long exit_qualification,
u32 vmx_instruction_info, bool wr, int len, gva_t *ret);
void nested_vmx_pmu_entry_exit_ctls_update(struct kvm_vcpu *vcpu);
+bool nested_vmx_check_io_bitmaps(struct kvm_vcpu *vcpu, unsigned int port,
+ int size);
static inline struct vmcs12 *get_vmcs12(struct kvm_vcpu *vcpu)
{
@@ -175,6 +176,11 @@ static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12)
return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
}
+static inline int nested_cpu_has_mtf(struct vmcs12 *vmcs12)
+{
+ return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_TRAP_FLAG);
+}
+
static inline int nested_cpu_has_ept(struct vmcs12 *vmcs12)
{
return nested_cpu_has2(vmcs12, SECONDARY_EXEC_ENABLE_EPT);
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 9a6664886f2e..40b1e6138cd5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -64,11 +64,13 @@
MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");
+#ifdef MODULE
static const struct x86_cpu_id vmx_cpu_id[] = {
X86_FEATURE_MATCH(X86_FEATURE_VMX),
{}
};
MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
+#endif
bool __read_mostly enable_vpid = 1;
module_param_named(vpid, enable_vpid, bool, 0444);
@@ -95,7 +97,7 @@ module_param(emulate_invalid_guest_state, bool, S_IRUGO);
static bool __read_mostly fasteoi = 1;
module_param(fasteoi, bool, S_IRUGO);
-static bool __read_mostly enable_apicv = 1;
+bool __read_mostly enable_apicv = 1;
module_param(enable_apicv, bool, S_IRUGO);
/*
@@ -1175,6 +1177,10 @@ void vmx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
vmx->guest_msrs[i].mask);
}
+
+ if (vmx->nested.need_vmcs12_to_shadow_sync)
+ nested_sync_vmcs12_to_shadow(vcpu);
+
if (vmx->guest_state_loaded)
return;
@@ -1599,6 +1605,40 @@ static int skip_emulated_instruction(struct kvm_vcpu *vcpu)
return 1;
}
+
+/*
+ * Recognizes a pending MTF VM-exit and records the nested state for later
+ * delivery.
+ */
+static void vmx_update_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+ if (!is_guest_mode(vcpu))
+ return;
+
+ /*
+ * Per the SDM, MTF takes priority over debug-trap exceptions besides
+ * T-bit traps. As instruction emulation is completed (i.e. at the
+ * instruction boundary), any #DB exception pending delivery must be a
+ * debug-trap. Record the pending MTF state to be delivered in
+ * vmx_check_nested_events().
+ */
+ if (nested_cpu_has_mtf(vmcs12) &&
+ (!vcpu->arch.exception.pending ||
+ vcpu->arch.exception.nr == DB_VECTOR))
+ vmx->nested.mtf_pending = true;
+ else
+ vmx->nested.mtf_pending = false;
+}
+
+static int vmx_skip_emulated_instruction(struct kvm_vcpu *vcpu)
+{
+ vmx_update_emulated_instruction(vcpu);
+ return skip_emulated_instruction(vcpu);
+}
+
static void vmx_clear_hlt(struct kvm_vcpu *vcpu)
{
/*
@@ -2947,6 +2987,9 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
static int get_ept_level(struct kvm_vcpu *vcpu)
{
+ /* Nested EPT currently only supports 4-level walks. */
+ if (is_guest_mode(vcpu) && nested_cpu_has_ept(get_vmcs12(vcpu)))
+ return 4;
if (cpu_has_vmx_ept_5levels() && (cpuid_maxphyaddr(vcpu) > 48))
return 5;
return 4;
@@ -3815,24 +3858,29 @@ static int vmx_deliver_nested_posted_interrupt(struct kvm_vcpu *vcpu,
* 2. If target vcpu isn't running(root mode), kick it to pick up the
* interrupt from PIR in next vmentry.
*/
-static void vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
+static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector)
{
struct vcpu_vmx *vmx = to_vmx(vcpu);
int r;
r = vmx_deliver_nested_posted_interrupt(vcpu, vector);
if (!r)
- return;
+ return 0;
+
+ if (!vcpu->arch.apicv_active)
+ return -1;
if (pi_test_and_set_pir(vector, &vmx->pi_desc))
- return;
+ return 0;
/* If a previous notification has sent the IPI, nothing to do. */
if (pi_test_and_set_on(&vmx->pi_desc))
- return;
+ return 0;
if (!kvm_vcpu_trigger_posted_interrupt(vcpu, false))
kvm_vcpu_kick(vcpu);
+
+ return 0;
}
/*
@@ -4238,7 +4286,6 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
vmx->msr_ia32_umwait_control = 0;
- vcpu->arch.microcode_version = 0x100000000ULL;
vmx->vcpu.arch.regs[VCPU_REGS_RDX] = get_rdx_init_val();
vmx->hv_deadline_tsc = -1;
kvm_set_cr8(vcpu, 0);
@@ -6480,8 +6527,11 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu)
vmcs_write32(PLE_WINDOW, vmx->ple_window);
}
- if (vmx->nested.need_vmcs12_to_shadow_sync)
- nested_sync_vmcs12_to_shadow(vcpu);
+ /*
+	 * The shadow sync was already done in vmx_prepare_switch_to_guest(),
+	 * because it needs to run inside the srcu read-side critical section.
+ */
+ WARN_ON_ONCE(vmx->nested.need_vmcs12_to_shadow_sync);
if (kvm_register_is_dirty(vcpu, VCPU_REGS_RSP))
vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
@@ -6755,14 +6805,14 @@ static int vmx_create_vcpu(struct kvm_vcpu *vcpu)
if (nested)
nested_vmx_setup_ctls_msrs(&vmx->nested.msrs,
- vmx_capability.ept,
- kvm_vcpu_apicv_active(vcpu));
+ vmx_capability.ept);
else
memset(&vmx->nested.msrs, 0, sizeof(vmx->nested.msrs));
vmx->nested.posted_intr_nv = -1;
vmx->nested.current_vmptr = -1ull;
+ vcpu->arch.microcode_version = 0x100000000ULL;
vmx->msr_ia32_feature_control_valid_bits = FEAT_CTL_LOCKED;
/*
@@ -6836,8 +6886,7 @@ static int __init vmx_check_processor_compat(void)
if (setup_vmcs_config(&vmcs_conf, &vmx_cap) < 0)
return -EIO;
if (nested)
- nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept,
- enable_apicv);
+ nested_vmx_setup_ctls_msrs(&vmcs_conf.nested, vmx_cap.ept);
if (memcmp(&vmcs_config, &vmcs_conf, sizeof(struct vmcs_config)) != 0) {
printk(KERN_ERR "kvm: CPU %d feature inconsistency!\n",
smp_processor_id());
@@ -7098,6 +7147,40 @@ static void vmx_request_immediate_exit(struct kvm_vcpu *vcpu)
to_vmx(vcpu)->req_immediate_exit = true;
}
+static int vmx_check_intercept_io(struct kvm_vcpu *vcpu,
+ struct x86_instruction_info *info)
+{
+ struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+ unsigned short port;
+ bool intercept;
+ int size;
+
+ if (info->intercept == x86_intercept_in ||
+ info->intercept == x86_intercept_ins) {
+ port = info->src_val;
+ size = info->dst_bytes;
+ } else {
+ port = info->dst_val;
+ size = info->src_bytes;
+ }
+
+ /*
+ * If the 'use IO bitmaps' VM-execution control is 0, IO instruction
+ * VM-exits depend on the 'unconditional IO exiting' VM-execution
+ * control.
+ *
+ * Otherwise, IO instruction VM-exits are controlled by the IO bitmaps.
+ */
+ if (!nested_cpu_has(vmcs12, CPU_BASED_USE_IO_BITMAPS))
+ intercept = nested_cpu_has(vmcs12,
+ CPU_BASED_UNCOND_IO_EXITING);
+ else
+ intercept = nested_vmx_check_io_bitmaps(vcpu, port, size);
+
+ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+ return intercept ? X86EMUL_UNHANDLEABLE : X86EMUL_CONTINUE;
+}
+
static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct x86_instruction_info *info,
enum x86_intercept_stage stage)
@@ -7105,19 +7188,45 @@ static int vmx_check_intercept(struct kvm_vcpu *vcpu,
struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+ switch (info->intercept) {
/*
* RDPID causes #UD if disabled through secondary execution controls.
* Because it is marked as EmulateOnUD, we need to intercept it here.
*/
- if (info->intercept == x86_intercept_rdtscp &&
- !nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
- ctxt->exception.vector = UD_VECTOR;
- ctxt->exception.error_code_valid = false;
- return X86EMUL_PROPAGATE_FAULT;
- }
+ case x86_intercept_rdtscp:
+ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_RDTSCP)) {
+ ctxt->exception.vector = UD_VECTOR;
+ ctxt->exception.error_code_valid = false;
+ return X86EMUL_PROPAGATE_FAULT;
+ }
+ break;
+
+ case x86_intercept_in:
+ case x86_intercept_ins:
+ case x86_intercept_out:
+ case x86_intercept_outs:
+ return vmx_check_intercept_io(vcpu, info);
+
+ case x86_intercept_lgdt:
+ case x86_intercept_lidt:
+ case x86_intercept_lldt:
+ case x86_intercept_ltr:
+ case x86_intercept_sgdt:
+ case x86_intercept_sidt:
+ case x86_intercept_sldt:
+ case x86_intercept_str:
+ if (!nested_cpu_has2(vmcs12, SECONDARY_EXEC_DESC))
+ return X86EMUL_CONTINUE;
+
+ /* FIXME: produce nested vmexit and return X86EMUL_INTERCEPTED. */
+ break;
/* TODO: check more intercepts... */
- return X86EMUL_CONTINUE;
+ default:
+ break;
+ }
+
+ return X86EMUL_UNHANDLEABLE;
}
#ifdef CONFIG_X86_64
@@ -7699,7 +7808,7 @@ static __init int hardware_setup(void)
if (nested) {
nested_vmx_setup_ctls_msrs(&vmcs_config.nested,
- vmx_capability.ept, enable_apicv);
+ vmx_capability.ept);
r = nested_vmx_hardware_setup(kvm_vmx_exit_handlers);
if (r)
@@ -7783,7 +7892,8 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = {
.run = vmx_vcpu_run,
.handle_exit = vmx_handle_exit,
- .skip_emulated_instruction = skip_emulated_instruction,
+ .skip_emulated_instruction = vmx_skip_emulated_instruction,
+ .update_emulated_instruction = vmx_update_emulated_instruction,
.set_interrupt_shadow = vmx_set_interrupt_shadow,
.get_interrupt_shadow = vmx_get_interrupt_shadow,
.patch_hypercall = vmx_patch_hypercall,
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index 7f42cf3dcd70..e64da06c7009 100644
--- a/arch/x86/kvm/vmx/vmx.h
+++ b/arch/x86/kvm/vmx/vmx.h
@@ -150,6 +150,9 @@ struct nested_vmx {
/* L2 must run next, and mustn't decide to exit to L1. */
bool nested_run_pending;
+ /* Pending MTF VM-exit into L1. */
+ bool mtf_pending;
+
struct loaded_vmcs vmcs02;
/*
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fbabb2f06273..5de200663f51 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -438,6 +438,14 @@ void kvm_deliver_exception_payload(struct kvm_vcpu *vcpu)
* for #DB exceptions under VMX.
*/
vcpu->arch.dr6 ^= payload & DR6_RTM;
+
+ /*
+ * The #DB payload is defined as compatible with the 'pending
+ * debug exceptions' field under VMX, not DR6. While bit 12 is
+ * defined in the 'pending debug exceptions' field (enabled
+ * breakpoint), it is reserved and must be zero in DR6.
+ */
+ vcpu->arch.dr6 &= ~BIT(12);
break;
case PF_VECTOR:
vcpu->arch.cr2 = payload;
@@ -490,19 +498,7 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
vcpu->arch.exception.error_code = error_code;
vcpu->arch.exception.has_payload = has_payload;
vcpu->arch.exception.payload = payload;
- /*
- * In guest mode, payload delivery should be deferred,
- * so that the L1 hypervisor can intercept #PF before
- * CR2 is modified (or intercept #DB before DR6 is
- * modified under nVMX). However, for ABI
- * compatibility with KVM_GET_VCPU_EVENTS and
- * KVM_SET_VCPU_EVENTS, we can't delay payload
- * delivery unless userspace has enabled this
- * functionality via the per-VM capability,
- * KVM_CAP_EXCEPTION_PAYLOAD.
- */
- if (!vcpu->kvm->arch.exception_payload_enabled ||
- !is_guest_mode(vcpu))
+ if (!is_guest_mode(vcpu))
kvm_deliver_exception_payload(vcpu);
return;
}
@@ -2448,7 +2444,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
vcpu->hv_clock.tsc_timestamp = tsc_timestamp;
vcpu->hv_clock.system_time = kernel_ns + v->kvm->arch.kvmclock_offset;
vcpu->last_guest_tsc = tsc_timestamp;
- WARN_ON(vcpu->hv_clock.system_time < 0);
+ WARN_ON((s64)vcpu->hv_clock.system_time < 0);
/* If the host uses TSC clocksource, then it is stable */
pvclock_flags = 0;
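
The cast added to the WARN_ON matters because hv_clock.system_time is an unsigned 64-bit field: an unsigned value compared against 0 with < is always false, so the old assertion could never fire. Casting to s64 tests the sign bit instead, as this standalone illustration shows:

    u64 t = (u64)-5LL;      /* a "negative" time stored in an unsigned field */
    WARN_ON(t < 0);         /* never triggers: unsigned compare is constant-false */
    WARN_ON((s64)t < 0);    /* triggers: the sign bit really is set */
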
@@ -3796,6 +3792,21 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
process_nmi(vcpu);
/*
+ * In guest mode, payload delivery should be deferred so that the
+ * L1 hypervisor can intercept #PF before CR2 is modified (or
+ * intercept #DB before DR6 is modified under nVMX). However,
+ * unless the per-VM capability KVM_CAP_EXCEPTION_PAYLOAD is set,
+ * a payload may not be left deferred across a KVM_GET_VCPU_EVENTS.
+ * Since payloads are only deferred opportunistically, deliver any
+ * pending one here if the capability hasn't been requested.
+ */
+ if (!vcpu->kvm->arch.exception_payload_enabled &&
+ vcpu->arch.exception.pending && vcpu->arch.exception.has_payload)
+ kvm_deliver_exception_payload(vcpu);
+
+ /*
* The API doesn't provide the instruction length for software
* exceptions, so don't report them. As long as the guest RIP
* isn't advanced, we should expect to encounter the exception
@@ -6880,6 +6891,8 @@ restart:
kvm_rip_write(vcpu, ctxt->eip);
if (r && ctxt->tf)
r = kvm_vcpu_do_singlestep(vcpu);
+ if (kvm_x86_ops->update_emulated_instruction)
+ kvm_x86_ops->update_emulated_instruction(vcpu);
__kvm_set_rflags(vcpu, ctxt->eflags);
}
@@ -7177,15 +7190,15 @@ static void kvm_timer_init(void)
if (!boot_cpu_has(X86_FEATURE_CONSTANT_TSC)) {
#ifdef CONFIG_CPU_FREQ
- struct cpufreq_policy policy;
+ struct cpufreq_policy *policy;
int cpu;
- memset(&policy, 0, sizeof(policy));
cpu = get_cpu();
- cpufreq_get_policy(&policy, cpu);
- if (policy.cpuinfo.max_freq)
- max_tsc_khz = policy.cpuinfo.max_freq;
+ policy = cpufreq_cpu_get(cpu);
+ if (policy && policy->cpuinfo.max_freq)
+ max_tsc_khz = policy->cpuinfo.max_freq;
put_cpu();
+ cpufreq_cpu_put(policy);
#endif
cpufreq_register_notifier(&kvmclock_cpufreq_notifier_block,
CPUFREQ_TRANSITION_NOTIFIER);
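
The rewrite also avoids copying an entire struct cpufreq_policy onto the stack: cpufreq_cpu_get() hands back a reference-counted pointer to the live policy, which must be balanced with cpufreq_cpu_put() once the caller is done. A minimal sketch of the pattern (hypothetical helper name):

    static unsigned int read_max_freq(int cpu)
    {
            struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
            unsigned int max = 0;

            if (policy) {
                    max = policy->cpuinfo.max_freq;
                    cpufreq_cpu_put(policy);        /* drop the reference */
            }
            return max;
    }
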
@@ -7295,12 +7308,12 @@ int kvm_arch_init(void *opaque)
}
if (!ops->cpu_has_kvm_support()) {
- printk(KERN_ERR "kvm: no hardware support\n");
+ pr_err_ratelimited("kvm: no hardware support\n");
r = -EOPNOTSUPP;
goto out;
}
if (ops->disabled_by_bios()) {
- printk(KERN_ERR "kvm: disabled by bios\n");
+ pr_err_ratelimited("kvm: disabled by bios\n");
r = -EOPNOTSUPP;
goto out;
}
@@ -8942,7 +8955,6 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
kvm_rip_write(vcpu, ctxt->eip);
kvm_set_rflags(vcpu, ctxt->eflags);
- kvm_make_request(KVM_REQ_EVENT, vcpu);
return 1;
}
EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -10182,7 +10194,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
work->arch.cr3 != vcpu->arch.mmu->get_cr3(vcpu))
return;
- vcpu->arch.mmu->page_fault(vcpu, work->cr2_or_gpa, 0, true);
+ kvm_mmu_do_page_fault(vcpu, work->cr2_or_gpa, 0, true);
}
static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
diff --git a/arch/x86/mm/dump_pagetables.c b/arch/x86/mm/dump_pagetables.c
index 64229dad7eab..69309cd56fdf 100644
--- a/arch/x86/mm/dump_pagetables.c
+++ b/arch/x86/mm/dump_pagetables.c
@@ -363,13 +363,8 @@ static void ptdump_walk_pgd_level_core(struct seq_file *m,
{
const struct ptdump_range ptdump_ranges[] = {
#ifdef CONFIG_X86_64
-
-#define normalize_addr_shift (64 - (__VIRTUAL_MASK_SHIFT + 1))
-#define normalize_addr(u) ((signed long)((u) << normalize_addr_shift) >> \
- normalize_addr_shift)
-
{0, PTRS_PER_PGD * PGD_LEVEL_MULT / 2},
- {normalize_addr(PTRS_PER_PGD * PGD_LEVEL_MULT / 2), ~0UL},
+ {GUARD_HOLE_END_ADDR, ~0UL},
#else
{0, ~0UL},
#endif
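
The deleted normalize_addr() macro was a sign-extension trick: shifting a truncated virtual address left so its top valid bit lands in bit 63 and then arithmetic-shifting back down replicates that bit through the upper half, yielding a canonical address. The replacement simply uses the precomputed GUARD_HOLE_END_ADDR constant. For reference, the trick in isolation (a generic sketch, relying on the kernel's assumption of arithmetic right shift for signed types):

    /* Sign-extend the low `bits` bits of x into a full 64-bit value. */
    static inline long sign_extend_bits(unsigned long x, int bits)
    {
            int shift = 64 - bits;

            return (long)(x << shift) >> shift;
    }
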
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index fa8506e76bbe..d19a2edd63cb 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -180,7 +180,7 @@ void efi_sync_low_kernel_mappings(void)
static inline phys_addr_t
virt_to_phys_or_null_size(void *va, unsigned long size)
{
- bool bad_size;
+ phys_addr_t pa;
if (!va)
return 0;
@@ -188,16 +188,13 @@ virt_to_phys_or_null_size(void *va, unsigned long size)
if (virt_addr_valid(va))
return virt_to_phys(va);
- /*
- * A fully aligned variable on the stack is guaranteed not to
- * cross a page bounary. Try to catch strings on the stack by
- * checking that 'size' is a power of two.
- */
- bad_size = size > PAGE_SIZE || !is_power_of_2(size);
+ pa = slow_virt_to_phys(va);
- WARN_ON(!IS_ALIGNED((unsigned long)va, size) || bad_size);
+ /* check if the object crosses a page boundary */
+ if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
+ return 0;
- return slow_virt_to_phys(va);
+ return pa;
}
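
The new check exploits the fact that two byte addresses lie on the same page exactly when they agree in every bit selected by PAGE_MASK, so XOR-ing the first and last byte of the object and masking is non-zero precisely when the object straddles a page boundary. Standalone (hypothetical helper name):

    /* True if the byte range [pa, pa + size) crosses a page boundary. */
    static inline bool crosses_page(phys_addr_t pa, unsigned long size)
    {
            return ((pa ^ (pa + size - 1)) & PAGE_MASK) != 0;
    }
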
#define virt_to_phys_or_null(addr) \
@@ -568,85 +565,25 @@ efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
{
- efi_status_t status;
- u32 phys_tm, phys_tc;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_tm = virt_to_phys_or_null(tm);
- phys_tc = virt_to_phys_or_null(tc);
-
- status = efi_thunk(get_time, phys_tm, phys_tc);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static efi_status_t efi_thunk_set_time(efi_time_t *tm)
{
- efi_status_t status;
- u32 phys_tm;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_tm = virt_to_phys_or_null(tm);
-
- status = efi_thunk(set_time, phys_tm);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
efi_time_t *tm)
{
- efi_status_t status;
- u32 phys_enabled, phys_pending, phys_tm;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_enabled = virt_to_phys_or_null(enabled);
- phys_pending = virt_to_phys_or_null(pending);
- phys_tm = virt_to_phys_or_null(tm);
-
- status = efi_thunk(get_wakeup_time, phys_enabled,
- phys_pending, phys_tm);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static efi_status_t
efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
{
- efi_status_t status;
- u32 phys_tm;
- unsigned long flags;
-
- spin_lock(&rtc_lock);
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_tm = virt_to_phys_or_null(tm);
-
- status = efi_thunk(set_wakeup_time, enabled, phys_tm);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
- spin_unlock(&rtc_lock);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static unsigned long efi_name_size(efi_char16_t *name)
@@ -658,6 +595,8 @@ static efi_status_t
efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 *attr, unsigned long *data_size, void *data)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
efi_status_t status;
u32 phys_name, phys_vendor, phys_attr;
u32 phys_data_size, phys_data;
@@ -665,14 +604,19 @@ efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
spin_lock_irqsave(&efi_runtime_lock, flags);
+ *vnd = *vendor;
+
phys_data_size = virt_to_phys_or_null(data_size);
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
phys_attr = virt_to_phys_or_null(attr);
phys_data = virt_to_phys_or_null_size(data, *data_size);
- status = efi_thunk(get_variable, phys_name, phys_vendor,
- phys_attr, phys_data_size, phys_data);
+ if (!phys_name || (data && !phys_data))
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(get_variable, phys_name, phys_vendor,
+ phys_attr, phys_data_size, phys_data);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
@@ -683,19 +627,25 @@ static efi_status_t
efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size, void *data)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
unsigned long flags;
spin_lock_irqsave(&efi_runtime_lock, flags);
+ *vnd = *vendor;
+
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
- /* If data_size is > sizeof(u32) we've got problems */
- status = efi_thunk(set_variable, phys_name, phys_vendor,
- attr, data_size, phys_data);
+ if (!phys_name || !phys_data)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
@@ -707,6 +657,8 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
u32 attr, unsigned long data_size,
void *data)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
u32 phys_name, phys_vendor, phys_data;
efi_status_t status;
unsigned long flags;
@@ -714,13 +666,17 @@ efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
return EFI_NOT_READY;
+ *vnd = *vendor;
+
phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_data = virt_to_phys_or_null_size(data, data_size);
- /* If data_size is > sizeof(u32) we've got problems */
- status = efi_thunk(set_variable, phys_name, phys_vendor,
- attr, data_size, phys_data);
+ if (!phys_name || !phys_data)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
@@ -732,39 +688,36 @@ efi_thunk_get_next_variable(unsigned long *name_size,
efi_char16_t *name,
efi_guid_t *vendor)
{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
efi_status_t status;
u32 phys_name_size, phys_name, phys_vendor;
unsigned long flags;
spin_lock_irqsave(&efi_runtime_lock, flags);
+ *vnd = *vendor;
+
phys_name_size = virt_to_phys_or_null(name_size);
- phys_vendor = virt_to_phys_or_null(vendor);
+ phys_vendor = virt_to_phys_or_null(vnd);
phys_name = virt_to_phys_or_null_size(name, *name_size);
- status = efi_thunk(get_next_variable, phys_name_size,
- phys_name, phys_vendor);
+ if (!phys_name)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(get_next_variable, phys_name_size,
+ phys_name, phys_vendor);
spin_unlock_irqrestore(&efi_runtime_lock, flags);
+ *vendor = *vnd;
return status;
}
static efi_status_t
efi_thunk_get_next_high_mono_count(u32 *count)
{
- efi_status_t status;
- u32 phys_count;
- unsigned long flags;
-
- spin_lock_irqsave(&efi_runtime_lock, flags);
-
- phys_count = virt_to_phys_or_null(count);
- status = efi_thunk(get_next_high_mono_count, phys_count);
-
- spin_unlock_irqrestore(&efi_runtime_lock, flags);
-
- return status;
+ return EFI_UNSUPPORTED;
}
static void
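
All the variable-service thunks above now share one idiom: the caller's GUID may sit anywhere (possibly crossing a page), so it is copied into a small 8-byte-aligned on-stack bounce buffer whose physical address is safe to hand to the 32-bit EFI runtime, and copied back for calls that return data. Reduced to its skeleton:

    u8 buf[24] __aligned(8);
    efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));

    *vnd = *vendor;        /* copy in: the thunk sees the bounce buffer's address */
    /* ... efi_thunk(..., virt_to_phys_or_null(vnd), ...) ... */
    *vendor = *vnd;        /* copy out for calls that modify the GUID */
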
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 1f756ffffe8b..507f4fb88fa7 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -72,6 +72,9 @@
#include <asm/mwait.h>
#include <asm/pci_x86.h>
#include <asm/cpu.h>
+#ifdef CONFIG_X86_IOPL_IOPERM
+#include <asm/io_bitmap.h>
+#endif
#ifdef CONFIG_ACPI
#include <linux/acpi.h>
@@ -837,6 +840,25 @@ static void xen_load_sp0(unsigned long sp0)
this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0);
}
+#ifdef CONFIG_X86_IOPL_IOPERM
+static void xen_update_io_bitmap(void)
+{
+ struct physdev_set_iobitmap iobitmap;
+ struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw);
+
+ native_tss_update_io_bitmap();
+
+ iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) +
+ tss->x86_tss.io_bitmap_base;
+ if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID)
+ iobitmap.nr_ports = 0;
+ else
+ iobitmap.nr_ports = IO_BITMAP_BITS;
+
+ HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap);
+}
+#endif
+
static void xen_io_delay(void)
{
}
@@ -896,14 +918,15 @@ static u64 xen_read_msr_safe(unsigned int msr, int *err)
static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high)
{
int ret;
+#ifdef CONFIG_X86_64
+ unsigned int which;
+ u64 base;
+#endif
ret = 0;
switch (msr) {
#ifdef CONFIG_X86_64
- unsigned which;
- u64 base;
-
case MSR_FS_BASE: which = SEGBASE_FS; goto set;
case MSR_KERNEL_GS_BASE: which = SEGBASE_GS_USER; goto set;
case MSR_GS_BASE: which = SEGBASE_GS_KERNEL; goto set;
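
The MSR hunk fixes a C scoping subtlety: declarations placed between `switch (...) {` and the first case label are in scope for the whole switch body, but control flow always jumps past them, so any initializer would never execute and some compilers warn about the jump. Hoisting the declarations above the switch is the standard fix. In miniature:

    static int classify(int msr)
    {
            switch (msr) {
                    int which;      /* in scope, but an initializer here would never run */
            case 1:
                    which = 10;     /* must be assigned inside a case */
                    return which;
            default:
                    return 0;
            }
    }
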
@@ -1046,6 +1069,9 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
.write_idt_entry = xen_write_idt_entry,
.load_sp0 = xen_load_sp0,
+#ifdef CONFIG_X86_IOPL_IOPERM
+ .update_io_bitmap = xen_update_io_bitmap,
+#endif
.io_delay = xen_io_delay,
/* Xen takes care of %gs when switching to usermode for us */
diff --git a/block/bfq-cgroup.c b/block/bfq-cgroup.c
index 09b69a3ed490..f0ff6654af28 100644
--- a/block/bfq-cgroup.c
+++ b/block/bfq-cgroup.c
@@ -610,12 +610,13 @@ struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
*/
entity = &bfqg->entity;
for_each_entity(entity) {
- bfqg = container_of(entity, struct bfq_group, entity);
- if (bfqg != bfqd->root_group) {
- parent = bfqg_parent(bfqg);
+ struct bfq_group *curr_bfqg = container_of(entity,
+ struct bfq_group, entity);
+ if (curr_bfqg != bfqd->root_group) {
+ parent = bfqg_parent(curr_bfqg);
if (!parent)
parent = bfqd->root_group;
- bfq_group_set_parent(bfqg, parent);
+ bfq_group_set_parent(curr_bfqg, parent);
}
}
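
The bfq bug was self-clobbering rather than shadowing: bfqg doubles as the group being configured and was reused as the cursor of the hierarchy walk, so by the end of the loop the function was operating on an ancestor instead of the group it started with. Iterating with a separate loop-local pointer preserves the original. Generic shape (hypothetical names):

    struct node *n = lookup(key);          /* result that must survive */
    struct node *cur;

    for (cur = n; cur; cur = cur->parent)
            fixup(cur);                    /* walk with a dedicated cursor */

    return n;                              /* the original pointer is intact */
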
diff --git a/block/blk-core.c b/block/blk-core.c
index 089e890ab208..60dc9552ef8d 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1663,12 +1663,6 @@ int kblockd_schedule_work(struct work_struct *work)
}
EXPORT_SYMBOL(kblockd_schedule_work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work)
-{
- return queue_work_on(cpu, kblockd_workqueue, work);
-}
-EXPORT_SYMBOL(kblockd_schedule_work_on);
-
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork,
unsigned long delay)
{
diff --git a/block/blk-flush.c b/block/blk-flush.c
index 3f977c517960..5cc775bdb06a 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -412,7 +412,7 @@ void blk_insert_flush(struct request *rq)
*/
if ((policy & REQ_FSEQ_DATA) &&
!(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, false, false);
return;
}
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c
index ca22afd47b3d..856356b1619e 100644
--- a/block/blk-mq-sched.c
+++ b/block/blk-mq-sched.c
@@ -361,13 +361,19 @@ static bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx,
bool has_sched,
struct request *rq)
{
- /* dispatch flush rq directly */
- if (rq->rq_flags & RQF_FLUSH_SEQ) {
- spin_lock(&hctx->lock);
- list_add(&rq->queuelist, &hctx->dispatch);
- spin_unlock(&hctx->lock);
+ /*
+ * Dispatch flush and passthrough requests directly.
+ *
+ * A passthrough request has to go to hctx->dispatch directly: the
+ * device may be in a state where it cannot handle FS requests, so
+ * STS_RESOURCE is always returned and FS requests pile up in
+ * hctx->dispatch. A passthrough request may be exactly what is
+ * needed to get out of that state, and if it were queued to the
+ * scheduler it would never get a chance to run, since requests in
+ * hctx->dispatch are dispatched first.
+ */
+ if ((rq->rq_flags & RQF_FLUSH_SEQ) || blk_rq_is_passthrough(rq))
return true;
- }
if (has_sched)
rq->rq_flags |= RQF_SORTED;
@@ -391,8 +397,10 @@ void blk_mq_sched_insert_request(struct request *rq, bool at_head,
WARN_ON(e && (rq->tag != -1));
- if (blk_mq_sched_bypass_insert(hctx, !!e, rq))
+ if (blk_mq_sched_bypass_insert(hctx, !!e, rq)) {
+ blk_mq_request_bypass_insert(rq, at_head, false);
goto run;
+ }
if (e && e->type->ops.insert_requests) {
LIST_HEAD(list);
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c
index fbacde454718..586c9d6e904a 100644
--- a/block/blk-mq-tag.c
+++ b/block/blk-mq-tag.c
@@ -183,8 +183,8 @@ found_tag:
return tag + tag_offset;
}
-void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
- struct blk_mq_ctx *ctx, unsigned int tag)
+void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+ unsigned int tag)
{
if (!blk_mq_tag_is_reserved(tags, tag)) {
const int real_tag = tag - tags->nr_reserved_tags;
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h
index 15bc74acb57e..2b8321efb682 100644
--- a/block/blk-mq-tag.h
+++ b/block/blk-mq-tag.h
@@ -26,8 +26,8 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
extern void blk_mq_free_tags(struct blk_mq_tags *tags);
extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
- struct blk_mq_ctx *ctx, unsigned int tag);
+extern void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
+ unsigned int tag);
extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
struct blk_mq_tags **tags,
unsigned int depth, bool can_grow);
diff --git a/block/blk-mq.c b/block/blk-mq.c
index a12b1763508d..d92088dec6c3 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -477,9 +477,9 @@ static void __blk_mq_free_request(struct request *rq)
blk_pm_mark_last_busy(rq);
rq->mq_hctx = NULL;
if (rq->tag != -1)
- blk_mq_put_tag(hctx, hctx->tags, ctx, rq->tag);
+ blk_mq_put_tag(hctx->tags, ctx, rq->tag);
if (sched_tag != -1)
- blk_mq_put_tag(hctx, hctx->sched_tags, ctx, sched_tag);
+ blk_mq_put_tag(hctx->sched_tags, ctx, sched_tag);
blk_mq_sched_restart(hctx);
blk_queue_exit(q);
}
@@ -735,7 +735,7 @@ static void blk_mq_requeue_work(struct work_struct *work)
* merge.
*/
if (rq->rq_flags & RQF_DONTPREP)
- blk_mq_request_bypass_insert(rq, false);
+ blk_mq_request_bypass_insert(rq, false, false);
else
blk_mq_sched_insert_request(rq, true, false, false);
}
@@ -1286,7 +1286,7 @@ bool blk_mq_dispatch_rq_list(struct request_queue *q, struct list_head *list,
q->mq_ops->commit_rqs(hctx);
spin_lock(&hctx->lock);
- list_splice_init(list, &hctx->dispatch);
+ list_splice_tail_init(list, &hctx->dispatch);
spin_unlock(&hctx->lock);
/*
@@ -1677,12 +1677,16 @@ void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
* Should only be used carefully, when the caller knows we want to
* bypass a potential IO scheduler on the target device.
*/
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue)
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+ bool run_queue)
{
struct blk_mq_hw_ctx *hctx = rq->mq_hctx;
spin_lock(&hctx->lock);
- list_add_tail(&rq->queuelist, &hctx->dispatch);
+ if (at_head)
+ list_add(&rq->queuelist, &hctx->dispatch);
+ else
+ list_add_tail(&rq->queuelist, &hctx->dispatch);
spin_unlock(&hctx->lock);
if (run_queue)
@@ -1849,7 +1853,7 @@ insert:
if (bypass_insert)
return BLK_STS_RESOURCE;
- blk_mq_request_bypass_insert(rq, run_queue);
+ blk_mq_request_bypass_insert(rq, false, run_queue);
return BLK_STS_OK;
}
@@ -1876,7 +1880,7 @@ static void blk_mq_try_issue_directly(struct blk_mq_hw_ctx *hctx,
ret = __blk_mq_try_issue_directly(hctx, rq, cookie, false, true);
if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE)
- blk_mq_request_bypass_insert(rq, true);
+ blk_mq_request_bypass_insert(rq, false, true);
else if (ret != BLK_STS_OK)
blk_mq_end_request(rq, ret);
@@ -1910,7 +1914,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx,
if (ret != BLK_STS_OK) {
if (ret == BLK_STS_RESOURCE ||
ret == BLK_STS_DEV_RESOURCE) {
- blk_mq_request_bypass_insert(rq,
+ blk_mq_request_bypass_insert(rq, false,
list_empty(list));
break;
}
@@ -3398,7 +3402,6 @@ static void blk_mq_poll_stats_fn(struct blk_stat_callback *cb)
}
static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
unsigned long ret = 0;
@@ -3431,7 +3434,6 @@ static unsigned long blk_mq_poll_nsecs(struct request_queue *q,
}
static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
- struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
struct hrtimer_sleeper hs;
@@ -3451,7 +3453,7 @@ static bool blk_mq_poll_hybrid_sleep(struct request_queue *q,
if (q->poll_nsec > 0)
nsecs = q->poll_nsec;
else
- nsecs = blk_mq_poll_nsecs(q, hctx, rq);
+ nsecs = blk_mq_poll_nsecs(q, rq);
if (!nsecs)
return false;
@@ -3506,7 +3508,7 @@ static bool blk_mq_poll_hybrid(struct request_queue *q,
return false;
}
- return blk_mq_poll_hybrid_sleep(q, hctx, rq);
+ return blk_mq_poll_hybrid_sleep(q, rq);
}
/**
diff --git a/block/blk-mq.h b/block/blk-mq.h
index eaaca8fc1c28..10bfdfb494fa 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -66,7 +66,8 @@ int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
*/
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head);
-void blk_mq_request_bypass_insert(struct request *rq, bool run_queue);
+void blk_mq_request_bypass_insert(struct request *rq, bool at_head,
+ bool run_queue);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
@@ -199,7 +200,7 @@ static inline bool blk_mq_get_dispatch_budget(struct blk_mq_hw_ctx *hctx)
static inline void __blk_mq_put_driver_tag(struct blk_mq_hw_ctx *hctx,
struct request *rq)
{
- blk_mq_put_tag(hctx, hctx->tags, rq->mq_ctx, rq->tag);
+ blk_mq_put_tag(hctx->tags, rq->mq_ctx, rq->tag);
rq->tag = -1;
if (rq->rq_flags & RQF_MQ_INFLIGHT) {
diff --git a/crypto/Kconfig b/crypto/Kconfig
index cdb51d4272d0..c24a47406f8f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -136,8 +136,6 @@ config CRYPTO_USER
Userspace configuration for cryptographic instantiations such as
cbc(aes).
-if CRYPTO_MANAGER2
-
config CRYPTO_MANAGER_DISABLE_TESTS
bool "Disable run-time self tests"
default y
@@ -155,8 +153,6 @@ config CRYPTO_MANAGER_EXTRA_TESTS
This is intended for developer use only, as these tests take much
longer to run than the normal self tests.
-endif # if CRYPTO_MANAGER2
-
config CRYPTO_GF128MUL
tristate
diff --git a/crypto/hash_info.c b/crypto/hash_info.c
index c754cb75dd1a..a49ff96bde77 100644
--- a/crypto/hash_info.c
+++ b/crypto/hash_info.c
@@ -26,7 +26,7 @@ const char *const hash_algo_name[HASH_ALGO__LAST] = {
[HASH_ALGO_TGR_128] = "tgr128",
[HASH_ALGO_TGR_160] = "tgr160",
[HASH_ALGO_TGR_192] = "tgr192",
- [HASH_ALGO_SM3_256] = "sm3-256",
+ [HASH_ALGO_SM3_256] = "sm3",
[HASH_ALGO_STREEBOG_256] = "streebog256",
[HASH_ALGO_STREEBOG_512] = "streebog512",
};
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index 88f33c0efb23..ccb3d60729fc 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -4436,6 +4436,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tf_cbc_tv_template)
},
}, {
+#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
+ .alg = "cbc-paes-s390",
+ .fips_allowed = 1,
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(aes_cbc_tv_template)
+ }
+ }, {
+#endif
.alg = "cbcmac(aes)",
.fips_allowed = 1,
.test = alg_test_hash,
@@ -4587,6 +4596,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tf_ctr_tv_template)
}
}, {
+#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
+ .alg = "ctr-paes-s390",
+ .fips_allowed = 1,
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(aes_ctr_tv_template)
+ }
+ }, {
+#endif
.alg = "cts(cbc(aes))",
.test = alg_test_skcipher,
.fips_allowed = 1,
@@ -4879,6 +4897,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(xtea_tv_template)
}
}, {
+#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
+ .alg = "ecb-paes-s390",
+ .fips_allowed = 1,
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(aes_tv_template)
+ }
+ }, {
+#endif
.alg = "ecdh",
.test = alg_test_kpp,
.fips_allowed = 1,
@@ -5465,6 +5492,15 @@ static const struct alg_test_desc alg_test_descs[] = {
.cipher = __VECS(tf_xts_tv_template)
}
}, {
+#if IS_ENABLED(CONFIG_CRYPTO_PAES_S390)
+ .alg = "xts-paes-s390",
+ .fips_allowed = 1,
+ .test = alg_test_skcipher,
+ .suite = {
+ .cipher = __VECS(aes_xts_tv_template)
+ }
+ }, {
+#endif
.alg = "xts4096(paes)",
.test = alg_test_null,
.fips_allowed = 1,
diff --git a/drivers/acpi/acpi_watchdog.c b/drivers/acpi/acpi_watchdog.c
index b5516b04ffc0..6e9ec6e3fe47 100644
--- a/drivers/acpi/acpi_watchdog.c
+++ b/drivers/acpi/acpi_watchdog.c
@@ -55,12 +55,14 @@ static bool acpi_watchdog_uses_rtc(const struct acpi_table_wdat *wdat)
}
#endif
+static bool acpi_no_watchdog;
+
static const struct acpi_table_wdat *acpi_watchdog_get_wdat(void)
{
const struct acpi_table_wdat *wdat = NULL;
acpi_status status;
- if (acpi_disabled)
+ if (acpi_disabled || acpi_no_watchdog)
return NULL;
status = acpi_get_table(ACPI_SIG_WDAT, 0,
@@ -88,6 +90,14 @@ bool acpi_has_watchdog(void)
}
EXPORT_SYMBOL_GPL(acpi_has_watchdog);
+/* ACPI watchdog can be disabled on boot command line */
+static int __init disable_acpi_watchdog(char *str)
+{
+ acpi_no_watchdog = true;
+ return 1;
+}
+__setup("acpi_no_watchdog", disable_acpi_watchdog);
+
void __init acpi_watchdog_init(void)
{
const struct acpi_wdat_entry *entries;
@@ -126,12 +136,11 @@ void __init acpi_watchdog_init(void)
gas = &entries[i].register_region;
res.start = gas->address;
+ res.end = res.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
res.flags = IORESOURCE_MEM;
- res.end = res.start + ALIGN(gas->access_width, 4) - 1;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
res.flags = IORESOURCE_IO;
- res.end = res.start + gas->access_width - 1;
} else {
pr_warn("Unsupported address space: %u\n",
gas->space_id);
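
The underlying fix: gas->access_width in an ACPI Generic Address Structure is an enumeration (1 = byte, 2 = word, 3 = dword, 4 = qword), not a byte count, so the old arithmetic computed wrong resource lengths for both address spaces. ACPI_ACCESS_BYTE_WIDTH() performs the conversion; roughly (a sketch of the mapping, not the ACPICA macro verbatim):

    /* access_width encoding: 1 = byte, 2 = word, 3 = dword, 4 = qword */
    static inline unsigned int gas_access_bytes(unsigned int access_width)
    {
            return 1u << (access_width - 1);        /* 1, 2, 4 or 8 bytes */
    }
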
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 67f282e9e0af..6ad0517553d5 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -101,6 +101,8 @@ acpi_status acpi_hw_enable_all_runtime_gpes(void);
acpi_status acpi_hw_enable_all_wakeup_gpes(void);
+u8 acpi_hw_check_all_gpes(void);
+
acpi_status
acpi_hw_enable_runtime_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
struct acpi_gpe_block_info *gpe_block,
diff --git a/drivers/acpi/acpica/evevent.c b/drivers/acpi/acpica/evevent.c
index 8c83d8c620dc..789d5e920aaf 100644
--- a/drivers/acpi/acpica/evevent.c
+++ b/drivers/acpi/acpica/evevent.c
@@ -265,4 +265,49 @@ static u32 acpi_ev_fixed_event_dispatch(u32 event)
handler) (acpi_gbl_fixed_event_handlers[event].context));
}
+/*******************************************************************************
+ *
+ * FUNCTION: acpi_any_fixed_event_status_set
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: TRUE or FALSE
+ *
+ * DESCRIPTION: Checks the PM status register for active fixed events
+ *
+ ******************************************************************************/
+
+u32 acpi_any_fixed_event_status_set(void)
+{
+ acpi_status status;
+ u32 in_status;
+ u32 in_enable;
+ u32 i;
+
+ status = acpi_hw_register_read(ACPI_REGISTER_PM1_ENABLE, &in_enable);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ status = acpi_hw_register_read(ACPI_REGISTER_PM1_STATUS, &in_status);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ /*
+ * Check for all possible Fixed Events and dispatch those that are active
+ */
+ for (i = 0; i < ACPI_NUM_FIXED_EVENTS; i++) {
+
+ /* Both the status and enable bits must be on for this event */
+
+ if ((in_status & acpi_gbl_fixed_event_info[i].status_bit_mask) &&
+ (in_enable & acpi_gbl_fixed_event_info[i].enable_bit_mask)) {
+ return (TRUE);
+ }
+ }
+
+ return (FALSE);
+}
+
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 2c39ff2a7406..f2de66bfd8a7 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -795,6 +795,38 @@ acpi_status acpi_enable_all_wakeup_gpes(void)
ACPI_EXPORT_SYMBOL(acpi_enable_all_wakeup_gpes)
+/******************************************************************************
+ *
+ * FUNCTION: acpi_any_gpe_status_set
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Whether or not the status bit is set for any GPE
+ *
+ * DESCRIPTION: Check the status bits of all enabled GPEs and return TRUE if any
+ * of them is set or FALSE otherwise.
+ *
+ ******************************************************************************/
+u32 acpi_any_gpe_status_set(void)
+{
+ acpi_status status;
+ u8 ret;
+
+ ACPI_FUNCTION_TRACE(acpi_any_gpe_status_set);
+
+ status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
+ if (ACPI_FAILURE(status)) {
+ return (FALSE);
+ }
+
+ ret = acpi_hw_check_all_gpes();
+ (void)acpi_ut_release_mutex(ACPI_MTX_EVENTS);
+
+ return (ret);
+}
+
+ACPI_EXPORT_SYMBOL(acpi_any_gpe_status_set)
+
/*******************************************************************************
*
* FUNCTION: acpi_install_gpe_block
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index 1b4252bdcd0b..f4c285c2f595 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -446,6 +446,53 @@ acpi_hw_enable_wakeup_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
/******************************************************************************
*
+ * FUNCTION: acpi_hw_get_gpe_block_status
+ *
+ * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
+ * gpe_block - Gpe Block info
+ *
+ * RETURN: Success
+ *
+ * DESCRIPTION: Produce a combined GPE status bits mask for the given block.
+ *
+ ******************************************************************************/
+
+static acpi_status
+acpi_hw_get_gpe_block_status(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
+ struct acpi_gpe_block_info *gpe_block,
+ void *ret_ptr)
+{
+ struct acpi_gpe_register_info *gpe_register_info;
+ u64 in_enable, in_status;
+ acpi_status status;
+ u8 *ret = ret_ptr;
+ u32 i;
+
+ /* Examine each GPE Register within the block */
+
+ for (i = 0; i < gpe_block->register_count; i++) {
+ gpe_register_info = &gpe_block->register_info[i];
+
+ status = acpi_hw_read(&in_enable,
+ &gpe_register_info->enable_address);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+
+ status = acpi_hw_read(&in_status,
+ &gpe_register_info->status_address);
+ if (ACPI_FAILURE(status)) {
+ continue;
+ }
+
+ *ret |= in_enable & in_status;
+ }
+
+ return (AE_OK);
+}
+
+/******************************************************************************
+ *
* FUNCTION: acpi_hw_disable_all_gpes
*
* PARAMETERS: None
@@ -510,4 +557,28 @@ acpi_status acpi_hw_enable_all_wakeup_gpes(void)
return_ACPI_STATUS(status);
}
+/******************************************************************************
+ *
+ * FUNCTION: acpi_hw_check_all_gpes
+ *
+ * PARAMETERS: None
+ *
+ * RETURN: Combined status of all GPEs
+ *
+ * DESCRIPTION: Check all enabled GPEs in all GPE blocks and return TRUE if the
+ * status bit is set for at least one of them or FALSE otherwise.
+ *
+ ******************************************************************************/
+
+u8 acpi_hw_check_all_gpes(void)
+{
+ u8 ret = 0;
+
+ ACPI_FUNCTION_TRACE(acpi_hw_check_all_gpes);
+
+ (void)acpi_ev_walk_gpe_list(acpi_hw_get_gpe_block_status, &ret);
+
+ return (ret != 0);
+}
+
#endif /* !ACPI_REDUCED_HARDWARE */
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 08bc9751fe66..d1f1cf5d4bf0 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -179,6 +179,7 @@ EXPORT_SYMBOL(first_ec);
static struct acpi_ec *boot_ec;
static bool boot_ec_is_ecdt = false;
+static struct workqueue_struct *ec_wq;
static struct workqueue_struct *ec_query_wq;
static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
@@ -469,7 +470,7 @@ static void acpi_ec_submit_query(struct acpi_ec *ec)
ec_dbg_evt("Command(%s) submitted/blocked",
acpi_ec_cmd_string(ACPI_EC_COMMAND_QUERY));
ec->nr_pending_queries++;
- schedule_work(&ec->work);
+ queue_work(ec_wq, &ec->work);
}
}
@@ -535,7 +536,7 @@ static void acpi_ec_enable_event(struct acpi_ec *ec)
#ifdef CONFIG_PM_SLEEP
static void __acpi_ec_flush_work(void)
{
- flush_scheduled_work(); /* flush ec->work */
+ drain_workqueue(ec_wq); /* flush ec->work */
flush_workqueue(ec_query_wq); /* flush queries */
}
@@ -556,8 +557,8 @@ static void acpi_ec_disable_event(struct acpi_ec *ec)
void acpi_ec_flush_work(void)
{
- /* Without ec_query_wq there is nothing to flush. */
- if (!ec_query_wq)
+ /* Without ec_wq there is nothing to flush. */
+ if (!ec_wq)
return;
__acpi_ec_flush_work();
@@ -2107,25 +2108,33 @@ static struct acpi_driver acpi_ec_driver = {
.drv.pm = &acpi_ec_pm,
};
-static inline int acpi_ec_query_init(void)
+static void acpi_ec_destroy_workqueues(void)
{
- if (!ec_query_wq) {
- ec_query_wq = alloc_workqueue("kec_query", 0,
- ec_max_queries);
- if (!ec_query_wq)
- return -ENODEV;
+ if (ec_wq) {
+ destroy_workqueue(ec_wq);
+ ec_wq = NULL;
}
- return 0;
-}
-
-static inline void acpi_ec_query_exit(void)
-{
if (ec_query_wq) {
destroy_workqueue(ec_query_wq);
ec_query_wq = NULL;
}
}
+static int acpi_ec_init_workqueues(void)
+{
+ if (!ec_wq)
+ ec_wq = alloc_ordered_workqueue("kec", 0);
+
+ if (!ec_query_wq)
+ ec_query_wq = alloc_workqueue("kec_query", 0, ec_max_queries);
+
+ if (!ec_wq || !ec_query_wq) {
+ acpi_ec_destroy_workqueues();
+ return -ENODEV;
+ }
+ return 0;
+}
+
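
Moving ec->work from the system workqueue to a dedicated ordered queue makes the suspend-time flush precise: drain_workqueue(ec_wq) waits only for EC work, including work items that re-queue themselves, whereas the old flush_scheduled_work() had to wait for everything on the shared system queue. The allocate/tear-down pairing follows the usual pattern (hypothetical helper names):

    static struct workqueue_struct *wq;

    static int init_wq(void)
    {
            wq = alloc_ordered_workqueue("kec", 0); /* strictly one item at a time */
            return wq ? 0 : -ENOMEM;
    }

    static void exit_wq(void)
    {
            if (wq) {
                    destroy_workqueue(wq);          /* drains remaining work first */
                    wq = NULL;
            }
    }
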
static const struct dmi_system_id acpi_ec_no_wakeup[] = {
{
.ident = "Thinkpad X1 Carbon 6th",
@@ -2156,8 +2165,7 @@ int __init acpi_ec_init(void)
int result;
int ecdt_fail, dsdt_fail;
- /* register workqueue for _Qxx evaluations */
- result = acpi_ec_query_init();
+ result = acpi_ec_init_workqueues();
if (result)
return result;
@@ -2188,6 +2196,6 @@ static void __exit acpi_ec_exit(void)
{
acpi_bus_unregister_driver(&acpi_ec_driver);
- acpi_ec_query_exit();
+ acpi_ec_destroy_workqueues();
}
#endif /* 0 */
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 439880629839..e5f95922bc21 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -990,21 +990,41 @@ static void acpi_s2idle_sync(void)
acpi_os_wait_events_complete(); /* synchronize Notify handling */
}
-static void acpi_s2idle_wake(void)
+static bool acpi_s2idle_wake(void)
{
- /*
- * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the SCI has
- * not triggered while suspended, so bail out.
- */
- if (!acpi_sci_irq_valid() ||
- irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
- return;
+ if (!acpi_sci_irq_valid())
+ return pm_wakeup_pending();
+
+ while (pm_wakeup_pending()) {
+ /*
+ * If IRQD_WAKEUP_ARMED is set for the SCI at this point, the
+ * SCI has not triggered while suspended, so bail out (the
+ * wakeup is pending anyway and the SCI is not the source of
+ * it).
+ */
+ if (irqd_is_wakeup_armed(irq_get_irq_data(acpi_sci_irq)))
+ return true;
+
+ /*
+ * If the status bit of any enabled fixed event is set, the
+ * wakeup is regarded as valid.
+ */
+ if (acpi_any_fixed_event_status_set())
+ return true;
+
+ /*
+ * If there are no EC events to process and at least one of the
+ * other enabled GPEs is active, the wakeup is regarded as a
+ * genuine one.
+ *
+ * Note that the checks below must be carried out in this order
+ * to avoid returning prematurely due to a change of the EC GPE
+ * status bit from unset to set between the checks with the
+ * status bits of all the other GPEs unset.
+ */
+ if (acpi_any_gpe_status_set() && !acpi_ec_dispatch_gpe())
+ return true;
- /*
- * If there are EC events to process, the wakeup may be a spurious one
- * coming from the EC.
- */
- if (acpi_ec_dispatch_gpe()) {
/*
* Cancel the wakeup and process all pending events in case
* there are any wakeup ones in there.
@@ -1017,8 +1037,19 @@ static void acpi_s2idle_wake(void)
acpi_s2idle_sync();
+ /*
+ * The SCI is in the "suspended" state now and it cannot produce
+ * new wakeup events till the rearming below, so if any of them
+ * are pending here, they must be resulting from the processing
+ * of EC events above or coming from somewhere else.
+ */
+ if (pm_wakeup_pending())
+ return true;
+
rearm_wake_irq(acpi_sci_irq);
}
+
+ return false;
}
static void acpi_s2idle_restore_early(void)
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a6b2082c24f8..e47c8a4c83db 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -5228,6 +5228,7 @@ static int binder_open(struct inode *nodp, struct file *filp)
binder_dev = container_of(filp->private_data,
struct binder_device, miscdev);
}
+ refcount_inc(&binder_dev->ref);
proc->context = &binder_dev->context;
binder_alloc_init(&proc->alloc);
@@ -5405,6 +5406,7 @@ static int binder_node_release(struct binder_node *node, int refs)
static void binder_deferred_release(struct binder_proc *proc)
{
struct binder_context *context = proc->context;
+ struct binder_device *device;
struct rb_node *n;
int threads, nodes, incoming_refs, outgoing_refs, active_transactions;
@@ -5421,6 +5423,12 @@ static void binder_deferred_release(struct binder_proc *proc)
context->binder_context_mgr_node = NULL;
}
mutex_unlock(&context->context_mgr_node_lock);
+ device = container_of(proc->context, struct binder_device, context);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(context->name);
+ kfree(device);
+ }
+ proc->context = NULL;
binder_inner_proc_lock(proc);
/*
* Make sure proc stays alive after we
@@ -6077,6 +6085,7 @@ static int __init init_binder_device(const char *name)
binder_device->miscdev.minor = MISC_DYNAMIC_MINOR;
binder_device->miscdev.name = name;
+ refcount_set(&binder_device->ref, 1);
binder_device->context.binder_context_mgr_uid = INVALID_UID;
binder_device->context.name = name;
mutex_init(&binder_device->context.context_mgr_node_lock);
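
Taken together, the binder hunks hang a refcount_t off each binder_device so the device (and its context name) outlives both the binderfs inode and every process that opened it: creation sets the count to 1, each open() takes a reference, and both release paths drop one, with whoever reaches zero doing the freeing. The release side, reduced to a sketch (hypothetical helper name):

    static void binder_device_put(struct binder_device *dev)
    {
            if (refcount_dec_and_test(&dev->ref)) {  /* last reference frees */
                    kfree(dev->context.name);
                    kfree(dev);
            }
    }
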
diff --git a/drivers/android/binder_internal.h b/drivers/android/binder_internal.h
index ae991097d14d..283d3cb9c16e 100644
--- a/drivers/android/binder_internal.h
+++ b/drivers/android/binder_internal.h
@@ -8,6 +8,7 @@
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mutex.h>
+#include <linux/refcount.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/uidgid.h>
@@ -33,6 +34,7 @@ struct binder_device {
struct miscdevice miscdev;
struct binder_context context;
struct inode *binderfs_inode;
+ refcount_t ref;
};
/**
diff --git a/drivers/android/binderfs.c b/drivers/android/binderfs.c
index e2580e5316a2..110e41f920c2 100644
--- a/drivers/android/binderfs.c
+++ b/drivers/android/binderfs.c
@@ -154,6 +154,7 @@ static int binderfs_binder_device_create(struct inode *ref_inode,
if (!name)
goto err;
+ refcount_set(&device->ref, 1);
device->binderfs_inode = inode;
device->context.binder_context_mgr_uid = INVALID_UID;
device->context.name = name;
@@ -257,8 +258,10 @@ static void binderfs_evict_inode(struct inode *inode)
ida_free(&binderfs_minors, device->miscdev.minor);
mutex_unlock(&binderfs_minors_mutex);
- kfree(device->context.name);
- kfree(device);
+ if (refcount_dec_and_test(&device->ref)) {
+ kfree(device->context.name);
+ kfree(device);
+ }
}
/**
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 42a672456432..dbb0f9130f42 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -718,6 +718,8 @@ static void __device_links_queue_sync_state(struct device *dev,
{
struct device_link *link;
+ if (!dev_has_sync_state(dev))
+ return;
if (dev->state_synced)
return;
@@ -745,25 +747,31 @@ static void __device_links_queue_sync_state(struct device *dev,
/**
* device_links_flush_sync_list - Call sync_state() on a list of devices
* @list: List of devices to call sync_state() on
+ * @dont_lock_dev: Device for which lock is already held by the caller
*
* Calls sync_state() on all the devices that have been queued for it. This
- * function is used in conjunction with __device_links_queue_sync_state().
+ * function is used in conjunction with __device_links_queue_sync_state(). The
+ * @dont_lock_dev parameter is useful when this function is called from a
+ * context where a device lock is already held.
*/
-static void device_links_flush_sync_list(struct list_head *list)
+static void device_links_flush_sync_list(struct list_head *list,
+ struct device *dont_lock_dev)
{
struct device *dev, *tmp;
list_for_each_entry_safe(dev, tmp, list, links.defer_sync) {
list_del_init(&dev->links.defer_sync);
- device_lock(dev);
+ if (dev != dont_lock_dev)
+ device_lock(dev);
if (dev->bus->sync_state)
dev->bus->sync_state(dev);
else if (dev->driver && dev->driver->sync_state)
dev->driver->sync_state(dev);
- device_unlock(dev);
+ if (dev != dont_lock_dev)
+ device_unlock(dev);
put_device(dev);
}
@@ -801,7 +809,7 @@ void device_links_supplier_sync_state_resume(void)
out:
device_links_write_unlock();
- device_links_flush_sync_list(&sync_list);
+ device_links_flush_sync_list(&sync_list, NULL);
}
static int sync_state_resume_initcall(void)
@@ -813,7 +821,7 @@ late_initcall(sync_state_resume_initcall);
static void __device_links_supplier_defer_sync(struct device *sup)
{
- if (list_empty(&sup->links.defer_sync))
+ if (list_empty(&sup->links.defer_sync) && dev_has_sync_state(sup))
list_add_tail(&sup->links.defer_sync, &deferred_sync);
}
@@ -865,6 +873,11 @@ void device_links_driver_bound(struct device *dev)
driver_deferred_probe_add(link->consumer);
}
+ if (defer_sync_state_count)
+ __device_links_supplier_defer_sync(dev);
+ else
+ __device_links_queue_sync_state(dev, &sync_list);
+
list_for_each_entry(link, &dev->links.suppliers, c_node) {
if (!(link->flags & DL_FLAG_MANAGED))
continue;
@@ -883,7 +896,7 @@ void device_links_driver_bound(struct device *dev)
device_links_write_unlock();
- device_links_flush_sync_list(&sync_list);
+ device_links_flush_sync_list(&sync_list, dev);
}
static void device_link_drop_managed(struct device_link *link)
diff --git a/drivers/base/swnode.c b/drivers/base/swnode.c
index 0b081dee1e95..de8d3543e8fe 100644
--- a/drivers/base/swnode.c
+++ b/drivers/base/swnode.c
@@ -608,6 +608,13 @@ static void software_node_release(struct kobject *kobj)
{
struct swnode *swnode = kobj_to_swnode(kobj);
+ if (swnode->parent) {
+ ida_simple_remove(&swnode->parent->child_ids, swnode->id);
+ list_del(&swnode->entry);
+ } else {
+ ida_simple_remove(&swnode_root_ids, swnode->id);
+ }
+
if (swnode->allocated) {
property_entries_free(swnode->node->properties);
kfree(swnode->node);
@@ -773,13 +780,6 @@ void fwnode_remove_software_node(struct fwnode_handle *fwnode)
if (!swnode)
return;
- if (swnode->parent) {
- ida_simple_remove(&swnode->parent->child_ids, swnode->id);
- list_del(&swnode->entry);
- } else {
- ida_simple_remove(&swnode_root_ids, swnode->id);
- }
-
kobject_put(&swnode->kobj);
}
EXPORT_SYMBOL_GPL(fwnode_remove_software_node);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index cd3612e4e2e1..8ef65c085640 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -853,14 +853,17 @@ static void reset_fdc_info(int mode)
/* selects the fdc and drive, and enables the fdc's input/dma. */
static void set_fdc(int drive)
{
+ unsigned int new_fdc = fdc;
+
if (drive >= 0 && drive < N_DRIVE) {
- fdc = FDC(drive);
+ new_fdc = FDC(drive);
current_drive = drive;
}
- if (fdc != 1 && fdc != 0) {
+ if (new_fdc >= N_FDC) {
pr_info("bad fdc value\n");
return;
}
+ fdc = new_fdc;
set_dor(fdc, ~0, 8);
#if N_FDC > 1
set_dor(1 - fdc, ~8, 0);
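
The floppy fix is the validate-then-commit pattern: compute the prospective controller index in a local, bounds-check it, and only then publish it to the global fdc, so a bad drive number can no longer leave the global pointing past the controller arrays. Generic shape (hypothetical helper):

    static int set_index(unsigned int *shared, unsigned int candidate,
                         unsigned int limit)
    {
            if (candidate >= limit)
                    return -EINVAL;         /* reject before the write */
            *shared = candidate;            /* publish only validated values */
            return 0;
    }
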
diff --git a/drivers/block/null_blk.h b/drivers/block/null_blk.h
index bc837862b767..62b660821dbc 100644
--- a/drivers/block/null_blk.h
+++ b/drivers/block/null_blk.h
@@ -14,9 +14,6 @@
#include <linux/fault-inject.h>
struct nullb_cmd {
- struct list_head list;
- struct llist_node ll_list;
- struct __call_single_data csd;
struct request *rq;
struct bio *bio;
unsigned int tag;
diff --git a/drivers/block/null_blk_main.c b/drivers/block/null_blk_main.c
index 16510795e377..133060431dbd 100644
--- a/drivers/block/null_blk_main.c
+++ b/drivers/block/null_blk_main.c
@@ -1518,8 +1518,6 @@ static int setup_commands(struct nullb_queue *nq)
for (i = 0; i < nq->queue_depth; i++) {
cmd = &nq->cmds[i];
- INIT_LIST_HEAD(&cmd->list);
- cmd->ll_list.next = NULL;
cmd->tag = -1U;
}
diff --git a/drivers/block/paride/pcd.c b/drivers/block/paride/pcd.c
index 117cfc8cd05a..cda5cf917e9a 100644
--- a/drivers/block/paride/pcd.c
+++ b/drivers/block/paride/pcd.c
@@ -276,7 +276,7 @@ static const struct block_device_operations pcd_bdops = {
.release = pcd_block_release,
.ioctl = pcd_block_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = blkdev_compat_ptr_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
#endif
.check_events = pcd_block_check_events,
};
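
This hunk (and the identical gdrom one further down) fixes a duplicate designated initializer: the struct named .ioctl twice, and C quietly lets the later initializer win, so the compat entry had been replacing the native ioctl handler instead of filling in .compat_ioctl. A two-field demonstration (hypothetical struct):

    static void first(void)  { }
    static void second(void) { }

    struct ops { void (*a)(void); void (*b)(void); };

    static const struct ops o = {
            .a = first,
            .a = second,    /* legal C: the later duplicate wins, o.a == second */
    };                      /* o.b stays NULL -- exactly the pcd/gdrom bug */
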
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index e2ad6bba2281..9df516a56bb2 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -213,6 +213,7 @@ struct blkfront_info
struct blk_mq_tag_set tag_set;
struct blkfront_ring_info *rinfo;
unsigned int nr_rings;
+ unsigned int rinfo_size;
/* Save uncomplete reqs and bios for migration. */
struct list_head requests;
struct bio_list bio_list;
@@ -259,6 +260,18 @@ static int blkfront_setup_indirect(struct blkfront_ring_info *rinfo);
static void blkfront_gather_backend_features(struct blkfront_info *info);
static int negotiate_mq(struct blkfront_info *info);
+#define for_each_rinfo(info, ptr, idx) \
+ for ((ptr) = (info)->rinfo, (idx) = 0; \
+ (idx) < (info)->nr_rings; \
+ (idx)++, (ptr) = (void *)(ptr) + (info)->rinfo_size)
+
+static inline struct blkfront_ring_info *
+get_rinfo(const struct blkfront_info *info, unsigned int i)
+{
+ BUG_ON(i >= info->nr_rings);
+ return (void *)info->rinfo + i * info->rinfo_size;
+}
+
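
The xen-blkfront rework exists because blkfront_ring_info ends in a flexible shadow[] array: once each element is allocated struct_size() bytes, plain info->rinfo[i] indexing strides by the wrong amount (sizeof the bare struct). get_rinfo()/for_each_rinfo() therefore walk the buffer by the recorded byte stride. The general pattern (hypothetical struct and helper):

    struct item {
            int hdr;
            long tail[];                    /* flexible array member */
    };

    /* base was allocated as kvcalloc(n, stride, GFP_KERNEL), with
     * stride = struct_size((struct item *)NULL, tail, ntail). */
    static void init_items(void *base, size_t n, size_t stride)
    {
            size_t i;

            for (i = 0; i < n; i++) {
                    struct item *it = base + i * stride;  /* byte stride, not [i] */
                    it->hdr = i;
            }
    }
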
static int get_id_from_freelist(struct blkfront_ring_info *rinfo)
{
unsigned long free = rinfo->shadow_free;
@@ -883,8 +896,7 @@ static blk_status_t blkif_queue_rq(struct blk_mq_hw_ctx *hctx,
struct blkfront_info *info = hctx->queue->queuedata;
struct blkfront_ring_info *rinfo = NULL;
- BUG_ON(info->nr_rings <= qid);
- rinfo = &info->rinfo[qid];
+ rinfo = get_rinfo(info, qid);
blk_mq_start_request(qd->rq);
spin_lock_irqsave(&rinfo->ring_lock, flags);
if (RING_FULL(&rinfo->ring))
@@ -1181,6 +1193,7 @@ static int xlvbd_alloc_gendisk(blkif_sector_t capacity,
static void xlvbd_release_gendisk(struct blkfront_info *info)
{
unsigned int minor, nr_minors, i;
+ struct blkfront_ring_info *rinfo;
if (info->rq == NULL)
return;
@@ -1188,9 +1201,7 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
/* No more blkif_request(). */
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+ for_each_rinfo(info, rinfo, i) {
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&rinfo->callback);
@@ -1339,6 +1350,7 @@ free_shadow:
static void blkif_free(struct blkfront_info *info, int suspend)
{
unsigned int i;
+ struct blkfront_ring_info *rinfo;
/* Prevent new requests being issued until we fix things up. */
info->connected = suspend ?
@@ -1347,8 +1359,8 @@ static void blkif_free(struct blkfront_info *info, int suspend)
if (info->rq)
blk_mq_stop_hw_queues(info->rq);
- for (i = 0; i < info->nr_rings; i++)
- blkif_free_ring(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ blkif_free_ring(rinfo);
kvfree(info->rinfo);
info->rinfo = NULL;
@@ -1775,6 +1787,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
int err;
unsigned int i, max_page_order;
unsigned int ring_page_order;
+ struct blkfront_ring_info *rinfo;
if (!info)
return -ENODEV;
@@ -1788,9 +1801,7 @@ static int talk_to_blkback(struct xenbus_device *dev,
if (err)
goto destroy_blkring;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
-
+ for_each_rinfo(info, rinfo, i) {
/* Create shared ring, alloc event channel. */
err = setup_blkring(dev, rinfo);
if (err)
@@ -1815,7 +1826,7 @@ again:
/* We already got the number of queues/rings in _probe */
if (info->nr_rings == 1) {
- err = write_per_ring_nodes(xbt, &info->rinfo[0], dev->nodename);
+ err = write_per_ring_nodes(xbt, info->rinfo, dev->nodename);
if (err)
goto destroy_blkring;
} else {
@@ -1837,10 +1848,10 @@ again:
goto abort_transaction;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
memset(path, 0, pathsize);
snprintf(path, pathsize, "%s/queue-%u", dev->nodename, i);
- err = write_per_ring_nodes(xbt, &info->rinfo[i], path);
+ err = write_per_ring_nodes(xbt, rinfo, path);
if (err) {
kfree(path);
goto destroy_blkring;
@@ -1868,9 +1879,8 @@ again:
goto destroy_blkring;
}
- for (i = 0; i < info->nr_rings; i++) {
+ for_each_rinfo(info, rinfo, i) {
unsigned int j;
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
for (j = 0; j < BLK_RING_SIZE(info); j++)
rinfo->shadow[j].req.u.rw.id = j + 1;
@@ -1900,6 +1910,7 @@ static int negotiate_mq(struct blkfront_info *info)
{
unsigned int backend_max_queues;
unsigned int i;
+ struct blkfront_ring_info *rinfo;
BUG_ON(info->nr_rings);
@@ -1911,20 +1922,16 @@ static int negotiate_mq(struct blkfront_info *info)
if (!info->nr_rings)
info->nr_rings = 1;
- info->rinfo = kvcalloc(info->nr_rings,
- struct_size(info->rinfo, shadow,
- BLK_RING_SIZE(info)),
- GFP_KERNEL);
+ info->rinfo_size = struct_size(info->rinfo, shadow,
+ BLK_RING_SIZE(info));
+ info->rinfo = kvcalloc(info->nr_rings, info->rinfo_size, GFP_KERNEL);
if (!info->rinfo) {
xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
info->nr_rings = 0;
return -ENOMEM;
}
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
INIT_LIST_HEAD(&rinfo->indirect_pages);
INIT_LIST_HEAD(&rinfo->grants);
rinfo->dev_info = info;
@@ -2017,6 +2024,7 @@ static int blkif_recover(struct blkfront_info *info)
int rc;
struct bio *bio;
unsigned int segs;
+ struct blkfront_ring_info *rinfo;
blkfront_gather_backend_features(info);
/* Reset limits changed by blk_mq_update_nr_hw_queues(). */
@@ -2024,9 +2032,7 @@ static int blkif_recover(struct blkfront_info *info)
segs = info->max_indirect_segments ? : BLKIF_MAX_SEGMENTS_PER_REQUEST;
blk_queue_max_segments(info->rq, segs / GRANTS_PER_PSEG);
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[r_index];
-
+ for_each_rinfo(info, rinfo, r_index) {
rc = blkfront_setup_indirect(rinfo);
if (rc)
return rc;
@@ -2036,10 +2042,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
+ for_each_rinfo(info, rinfo, r_index) {
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(rinfo);
}
@@ -2072,13 +2075,13 @@ static int blkfront_resume(struct xenbus_device *dev)
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
int err = 0;
unsigned int i, j;
+ struct blkfront_ring_info *rinfo;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
bio_list_init(&info->bio_list);
INIT_LIST_HEAD(&info->requests);
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct bio_list merge_bio;
struct blk_shadow *shadow = rinfo->shadow;
@@ -2337,6 +2340,7 @@ static void blkfront_connect(struct blkfront_info *info)
unsigned int binfo;
char *envp[] = { "RESIZE=1", NULL };
int err, i;
+ struct blkfront_ring_info *rinfo;
switch (info->connected) {
case BLKIF_STATE_CONNECTED:
@@ -2394,8 +2398,8 @@ static void blkfront_connect(struct blkfront_info *info)
"physical-sector-size",
sector_size);
blkfront_gather_backend_features(info);
- for (i = 0; i < info->nr_rings; i++) {
- err = blkfront_setup_indirect(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i) {
+ err = blkfront_setup_indirect(rinfo);
if (err) {
xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
info->xbdev->otherend);
@@ -2416,8 +2420,8 @@ static void blkfront_connect(struct blkfront_info *info)
/* Kick pending requests. */
info->connected = BLKIF_STATE_CONNECTED;
- for (i = 0; i < info->nr_rings; i++)
- kick_pending_request_queues(&info->rinfo[i]);
+ for_each_rinfo(info, rinfo, i)
+ kick_pending_request_queues(rinfo);
device_add_disk(&info->xbdev->dev, info->gd, NULL);
@@ -2652,9 +2656,9 @@ static void purge_persistent_grants(struct blkfront_info *info)
{
unsigned int i;
unsigned long flags;
+ struct blkfront_ring_info *rinfo;
- for (i = 0; i < info->nr_rings; i++) {
- struct blkfront_ring_info *rinfo = &info->rinfo[i];
+ for_each_rinfo(info, rinfo, i) {
struct grant *gnt_list_entry, *tmp;
spin_lock_irqsave(&rinfo->ring_lock, flags);
diff --git a/drivers/bus/moxtet.c b/drivers/bus/moxtet.c
index 15fa293819a0..b20fdcbd035b 100644
--- a/drivers/bus/moxtet.c
+++ b/drivers/bus/moxtet.c
@@ -465,7 +465,7 @@ static ssize_t input_read(struct file *file, char __user *buf, size_t len,
{
struct moxtet *moxtet = file->private_data;
u8 bin[TURRIS_MOX_MAX_MODULES];
- u8 hex[sizeof(buf) * 2 + 1];
+ u8 hex[sizeof(bin) * 2 + 1];
int ret, n;
ret = moxtet_spi_read(moxtet, bin);
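
The moxtet fix is the classic sizeof-on-a-parameter bug: buf here is the read()'s user-pointer argument, so sizeof(buf) is the size of a pointer (8 on 64-bit), not the amount of data; the hex buffer must be sized from the local bin[] array it actually encodes. Compare:

    static void sizes(char *p)
    {
            char arr[16];
            size_t sp = sizeof(p);      /* size of a pointer: 8 on 64-bit */
            size_t sa = sizeof(arr);    /* 16: a local array keeps its size */

            (void)sp; (void)sa;
    }
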
diff --git a/drivers/bus/ti-sysc.c b/drivers/bus/ti-sysc.c
index f702c85c81b6..6113fc0a52ae 100644
--- a/drivers/bus/ti-sysc.c
+++ b/drivers/bus/ti-sysc.c
@@ -1400,7 +1400,7 @@ static void sysc_init_revision_quirks(struct sysc *ddata)
}
/* 1-wire needs module's internal clocks enabled for reset */
-static void sysc_clk_enable_quirk_hdq1w(struct sysc *ddata)
+static void sysc_pre_reset_quirk_hdq1w(struct sysc *ddata)
{
int offset = 0x0c; /* HDQ_CTRL_STATUS */
u16 val;
@@ -1488,7 +1488,7 @@ static void sysc_init_module_quirks(struct sysc *ddata)
return;
if (ddata->cfg.quirks & SYSC_MODULE_QUIRK_HDQ1W) {
- ddata->clk_enable_quirk = sysc_clk_enable_quirk_hdq1w;
+ ddata->clk_disable_quirk = sysc_pre_reset_quirk_hdq1w;
return;
}
diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
index 886b2638c730..c51292c2a131 100644
--- a/drivers/cdrom/gdrom.c
+++ b/drivers/cdrom/gdrom.c
@@ -519,7 +519,7 @@ static const struct block_device_operations gdrom_bdops = {
.check_events = gdrom_bdops_check_events,
.ioctl = gdrom_bdops_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = blkdev_compat_ptr_ioctl,
+ .compat_ioctl = blkdev_compat_ptr_ioctl,
#endif
};
diff --git a/drivers/char/ipmi/ipmb_dev_int.c b/drivers/char/ipmi/ipmb_dev_int.c
index 1ff4fb1def7c..382b28f1cf2f 100644
--- a/drivers/char/ipmi/ipmb_dev_int.c
+++ b/drivers/char/ipmi/ipmb_dev_int.c
@@ -19,7 +19,7 @@
#include <linux/spinlock.h>
#include <linux/wait.h>
-#define MAX_MSG_LEN 128
+#define MAX_MSG_LEN 240
#define IPMB_REQUEST_LEN_MIN 7
#define NETFN_RSP_BIT_MASK 0x4
#define REQUEST_QUEUE_MAX_LEN 256
@@ -63,6 +63,7 @@ struct ipmb_dev {
spinlock_t lock;
wait_queue_head_t wait_queue;
struct mutex file_mutex;
+ bool is_i2c_protocol;
};
static inline struct ipmb_dev *to_ipmb_dev(struct file *file)
@@ -112,6 +113,25 @@ static ssize_t ipmb_read(struct file *file, char __user *buf, size_t count,
return ret < 0 ? ret : count;
}
+static int ipmb_i2c_write(struct i2c_client *client, u8 *msg, u8 addr)
+{
+ struct i2c_msg i2c_msg;
+
+ /*
+ * subtract 1 byte (rq_sa) from the length of the msg passed to
+ * raw i2c_transfer
+ */
+ i2c_msg.len = msg[IPMB_MSG_LEN_IDX] - 1;
+
+ /* Assign message to buffer except first 2 bytes (length and address) */
+ i2c_msg.buf = msg + 2;
+
+ i2c_msg.addr = addr;
+ i2c_msg.flags = client->flags & I2C_CLIENT_PEC;
+
+ return i2c_transfer(client->adapter, &i2c_msg, 1);
+}
+
static ssize_t ipmb_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
@@ -133,6 +153,12 @@ static ssize_t ipmb_write(struct file *file, const char __user *buf,
rq_sa = GET_7BIT_ADDR(msg[RQ_SA_8BIT_IDX]);
netf_rq_lun = msg[NETFN_LUN_IDX];
+ /* Check i2c block transfer vs smbus */
+ if (ipmb_dev->is_i2c_protocol) {
+ ret = ipmb_i2c_write(ipmb_dev->client, msg, rq_sa);
+ return (ret == 1) ? count : ret;
+ }
+
/*
* subtract rq_sa and netf_rq_lun from the length of the msg passed to
* i2c_smbus_xfer
@@ -253,7 +279,7 @@ static int ipmb_slave_cb(struct i2c_client *client,
break;
case I2C_SLAVE_WRITE_RECEIVED:
- if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg))
+ if (ipmb_dev->msg_idx >= sizeof(struct ipmb_msg) - 1)
break;
buf[++ipmb_dev->msg_idx] = *val;
@@ -302,6 +328,9 @@ static int ipmb_probe(struct i2c_client *client,
if (ret)
return ret;
+ ipmb_dev->is_i2c_protocol
+ = device_property_read_bool(&client->dev, "i2c-protocol");
+
ipmb_dev->client = client;
i2c_set_clientdata(client, ipmb_dev);
ret = i2c_slave_register(client, ipmb_slave_cb);
diff --git a/drivers/char/ipmi/ipmi_ssif.c b/drivers/char/ipmi/ipmi_ssif.c
index 22c6a2e61236..8ac390c2b514 100644
--- a/drivers/char/ipmi/ipmi_ssif.c
+++ b/drivers/char/ipmi/ipmi_ssif.c
@@ -775,10 +775,14 @@ static void msg_done_handler(struct ssif_info *ssif_info, int result,
flags = ipmi_ssif_lock_cond(ssif_info, &oflags);
msg = ssif_info->curr_msg;
if (msg) {
+ if (data) {
+ if (len > IPMI_MAX_MSG_LENGTH)
+ len = IPMI_MAX_MSG_LENGTH;
+ memcpy(msg->rsp, data, len);
+ } else {
+ len = 0;
+ }
msg->rsp_size = len;
- if (msg->rsp_size > IPMI_MAX_MSG_LENGTH)
- msg->rsp_size = IPMI_MAX_MSG_LENGTH;
- memcpy(msg->rsp, data, msg->rsp_size);
ssif_info->curr_msg = NULL;
}
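
The ssif hunk reorders the receive path so the NULL case and the length clamp both happen before the copy; previously memcpy() ran with the unclamped length and only the stored size was capped afterwards, too late to prevent the overrun. A hedged userspace model of the corrected ordering (buffer sizes are invented):

    #include <stdio.h>
    #include <string.h>

    #define RSP_MAX 16

    /* Clamp before copying; clamping the recorded size after the
     * memcpy() would still overrun the destination buffer. */
    static size_t copy_rsp(unsigned char *rsp, const unsigned char *data,
                           size_t len)
    {
            if (!data)
                    return 0;
            if (len > RSP_MAX)
                    len = RSP_MAX;
            memcpy(rsp, data, len);
            return len;
    }

    int main(void)
    {
            unsigned char rsp[RSP_MAX];
            unsigned char data[64] = { 0 };

            printf("copied %zu bytes\n", copy_rsp(rsp, data, sizeof(data)));
            return 0;
    }
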
diff --git a/drivers/char/tpm/Makefile b/drivers/char/tpm/Makefile
index 5a0d99d4fec0..9567e5197f74 100644
--- a/drivers/char/tpm/Makefile
+++ b/drivers/char/tpm/Makefile
@@ -21,9 +21,11 @@ tpm-$(CONFIG_EFI) += eventlog/efi.o
tpm-$(CONFIG_OF) += eventlog/of.o
obj-$(CONFIG_TCG_TIS_CORE) += tpm_tis_core.o
obj-$(CONFIG_TCG_TIS) += tpm_tis.o
-obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi_mod.o
-tpm_tis_spi_mod-y := tpm_tis_spi.o
-tpm_tis_spi_mod-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
+
+obj-$(CONFIG_TCG_TIS_SPI) += tpm_tis_spi.o
+tpm_tis_spi-y := tpm_tis_spi_main.o
+tpm_tis_spi-$(CONFIG_TCG_TIS_SPI_CR50) += tpm_tis_spi_cr50.o
+
obj-$(CONFIG_TCG_TIS_I2C_ATMEL) += tpm_i2c_atmel.o
obj-$(CONFIG_TCG_TIS_I2C_INFINEON) += tpm_i2c_infineon.o
obj-$(CONFIG_TCG_TIS_I2C_NUVOTON) += tpm_i2c_nuvoton.o
diff --git a/drivers/char/tpm/tpm2-cmd.c b/drivers/char/tpm/tpm2-cmd.c
index 13696deceae8..760329598b99 100644
--- a/drivers/char/tpm/tpm2-cmd.c
+++ b/drivers/char/tpm/tpm2-cmd.c
@@ -525,6 +525,8 @@ static int tpm2_init_bank_info(struct tpm_chip *chip, u32 bank_index)
return 0;
}
+ bank->crypto_id = HASH_ALGO__LAST;
+
return tpm2_pcr_read(chip, 0, &digest, &bank->digest_size);
}
diff --git a/drivers/char/tpm/tpm_tis_spi.c b/drivers/char/tpm/tpm_tis_spi_main.c
index d1754fd6c573..d1754fd6c573 100644
--- a/drivers/char/tpm/tpm_tis_spi.c
+++ b/drivers/char/tpm/tpm_tis_spi_main.c
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 4adac3a8c265..808874bccf4a 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -105,6 +105,8 @@ bool have_governor_per_policy(void)
}
EXPORT_SYMBOL_GPL(have_governor_per_policy);
+static struct kobject *cpufreq_global_kobject;
+
struct kobject *get_governor_parent_kobj(struct cpufreq_policy *policy)
{
if (have_governor_per_policy())
@@ -1074,9 +1076,17 @@ static int cpufreq_init_policy(struct cpufreq_policy *policy)
pol = policy->last_policy;
} else if (def_gov) {
pol = cpufreq_parse_policy(def_gov->name);
- } else {
- return -ENODATA;
+ /*
+ * In case the default governor is neither "performance"
+ * nor "powersave", fall back to the initial policy
+ * value set by the driver.
+ */
+ if (pol == CPUFREQ_POLICY_UNKNOWN)
+ pol = policy->policy;
}
+ if (pol != CPUFREQ_POLICY_PERFORMANCE &&
+ pol != CPUFREQ_POLICY_POWERSAVE)
+ return -ENODATA;
}
return cpufreq_set_policy(policy, gov, pol);
@@ -2745,9 +2755,6 @@ int cpufreq_unregister_driver(struct cpufreq_driver *driver)
}
EXPORT_SYMBOL_GPL(cpufreq_unregister_driver);
-struct kobject *cpufreq_global_kobject;
-EXPORT_SYMBOL(cpufreq_global_kobject);
-
static int __init cpufreq_core_init(void)
{
if (cpufreq_disabled())
diff --git a/drivers/dax/super.c b/drivers/dax/super.c
index 26a654dbc69a..0aa4b6bc5101 100644
--- a/drivers/dax/super.c
+++ b/drivers/dax/super.c
@@ -61,7 +61,7 @@ struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev)
{
if (!blk_queue_dax(bdev->bd_queue))
return NULL;
- return fs_dax_get_by_host(bdev->bd_disk->disk_name);
+ return dax_get_by_host(bdev->bd_disk->disk_name);
}
EXPORT_SYMBOL_GPL(fs_dax_get_by_bdev);
#endif
diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
index cceee8bc3c2f..7dcf2093e531 100644
--- a/drivers/devfreq/devfreq.c
+++ b/drivers/devfreq/devfreq.c
@@ -738,7 +738,6 @@ struct devfreq *devfreq_add_device(struct device *dev,
{
struct devfreq *devfreq;
struct devfreq_governor *governor;
- static atomic_t devfreq_no = ATOMIC_INIT(-1);
int err = 0;
if (!dev || !profile || !governor_name) {
@@ -800,8 +799,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
devfreq->suspend_freq = dev_pm_opp_get_suspend_opp_freq(dev);
atomic_set(&devfreq->suspend_count, 0);
- dev_set_name(&devfreq->dev, "devfreq%d",
- atomic_inc_return(&devfreq_no));
+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
err = device_register(&devfreq->dev);
if (err) {
mutex_unlock(&devfreq->lock);
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
index 0613bb7770f5..ef73b678419c 100644
--- a/drivers/dma-buf/Kconfig
+++ b/drivers/dma-buf/Kconfig
@@ -6,7 +6,7 @@ config SYNC_FILE
default n
select DMA_SHARED_BUFFER
---help---
- The Sync File Framework adds explicit syncronization via
+ The Sync File Framework adds explicit synchronization via
userspace. It enables send/receive 'struct dma_fence' objects to/from
userspace via Sync File fds for synchronization between drivers via
userspace components. It has been ported from Android.
@@ -39,6 +39,16 @@ config UDMABUF
A driver to let userspace turn memfd regions into dma-bufs.
Qemu can use this to create host dmabufs for guest framebuffers.
+config DMABUF_MOVE_NOTIFY
+ bool "Move notify between drivers (EXPERIMENTAL)"
+ default n
+ help
+ Don't pin buffers if the dynamic DMA-buf interface is available on both the
+ exporter as well as the importer. This fixes a security problem where
+ userspace is able to pin unrestricted amounts of memory through DMA-buf.
+ But marked experimental because we don't yet have a consistent execution
+ context and memory management between drivers.
+
config DMABUF_SELFTESTS
tristate "Selftests for the dma-buf interfaces"
default n
diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c
index d4097856c86b..ccc9eda1bc28 100644
--- a/drivers/dma-buf/dma-buf.c
+++ b/drivers/dma-buf/dma-buf.c
@@ -108,6 +108,7 @@ static int dma_buf_release(struct inode *inode, struct file *file)
dma_resv_fini(dmabuf->resv);
module_put(dmabuf->owner);
+ kfree(dmabuf->name);
kfree(dmabuf);
return 0;
}
@@ -524,7 +525,10 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
}
if (WARN_ON(exp_info->ops->cache_sgt_mapping &&
- exp_info->ops->dynamic_mapping))
+ (exp_info->ops->pin || exp_info->ops->unpin)))
+ return ERR_PTR(-EINVAL);
+
+ if (WARN_ON(!exp_info->ops->pin != !exp_info->ops->unpin))
return ERR_PTR(-EINVAL);
if (!try_module_get(exp_info->owner))
@@ -651,7 +655,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
* calls attach() of dma_buf_ops to allow device-specific attach functionality
* @dmabuf: [in] buffer to attach device to.
* @dev: [in] device to be attached.
- * @dynamic_mapping: [in] calling convention for map/unmap
+ * @importer_ops: [in] importer operations for the attachment
+ * @importer_priv: [in] importer private pointer for the attachment
*
* Returns struct dma_buf_attachment pointer for this attachment. Attachments
* must be cleaned up by calling dma_buf_detach().
@@ -667,7 +672,8 @@ EXPORT_SYMBOL_GPL(dma_buf_put);
*/
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping)
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv)
{
struct dma_buf_attachment *attach;
int ret;
@@ -675,13 +681,17 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
if (WARN_ON(!dmabuf || !dev))
return ERR_PTR(-EINVAL);
+ if (WARN_ON(importer_ops && !importer_ops->move_notify))
+ return ERR_PTR(-EINVAL);
+
attach = kzalloc(sizeof(*attach), GFP_KERNEL);
if (!attach)
return ERR_PTR(-ENOMEM);
attach->dev = dev;
attach->dmabuf = dmabuf;
- attach->dynamic_mapping = dynamic_mapping;
+ attach->importer_ops = importer_ops;
+ attach->importer_priv = importer_priv;
if (dmabuf->ops->attach) {
ret = dmabuf->ops->attach(dmabuf, attach);
@@ -700,15 +710,19 @@ dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
dma_buf_is_dynamic(dmabuf)) {
struct sg_table *sgt;
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_lock(attach->dmabuf->resv, NULL);
+ ret = dma_buf_pin(attach);
+ if (ret)
+ goto err_unlock;
+ }
sgt = dmabuf->ops->map_dma_buf(attach, DMA_BIDIRECTIONAL);
if (!sgt)
sgt = ERR_PTR(-ENOMEM);
if (IS_ERR(sgt)) {
ret = PTR_ERR(sgt);
- goto err_unlock;
+ goto err_unpin;
}
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -722,6 +736,10 @@ err_attach:
kfree(attach);
return ERR_PTR(ret);
+err_unpin:
+ if (dma_buf_is_dynamic(attach->dmabuf))
+ dma_buf_unpin(attach);
+
err_unlock:
if (dma_buf_is_dynamic(attach->dmabuf))
dma_resv_unlock(attach->dmabuf->resv);
@@ -742,7 +760,7 @@ EXPORT_SYMBOL_GPL(dma_buf_dynamic_attach);
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev)
{
- return dma_buf_dynamic_attach(dmabuf, dev, false);
+ return dma_buf_dynamic_attach(dmabuf, dev, NULL, NULL);
}
EXPORT_SYMBOL_GPL(dma_buf_attach);
@@ -765,8 +783,10 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
dmabuf->ops->unmap_dma_buf(attach, attach->sgt, attach->dir);
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
+ dma_buf_unpin(attach);
dma_resv_unlock(attach->dmabuf->resv);
+ }
}
dma_resv_lock(dmabuf->resv, NULL);
@@ -780,6 +800,44 @@ void dma_buf_detach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
EXPORT_SYMBOL_GPL(dma_buf_detach);
/**
+ * dma_buf_pin - Lock down the DMA-buf
+ *
+ * @attach: [in] attachment which should be pinned
+ *
+ * Returns:
+ * 0 on success, negative error code on failure.
+ */
+int dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+ int ret = 0;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->pin)
+ ret = dmabuf->ops->pin(attach);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(dma_buf_pin);
+
+/**
+ * dma_buf_unpin - Remove lock from DMA-buf
+ *
+ * @attach: [in] attachment which should be unpinned
+ */
+void dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct dma_buf *dmabuf = attach->dmabuf;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ if (dmabuf->ops->unpin)
+ dmabuf->ops->unpin(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_unpin);
+
+/**
* dma_buf_map_attachment - Returns the scatterlist table of the attachment;
* mapped into _device_ address space. Is a wrapper for map_dma_buf() of the
* dma_buf_ops.
@@ -798,6 +856,7 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
enum dma_data_direction direction)
{
struct sg_table *sg_table;
+ int r;
might_sleep();
@@ -819,13 +878,23 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *attach,
return attach->sgt;
}
- if (dma_buf_is_dynamic(attach->dmabuf))
+ if (dma_buf_is_dynamic(attach->dmabuf)) {
dma_resv_assert_held(attach->dmabuf->resv);
+ if (!IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY)) {
+ r = dma_buf_pin(attach);
+ if (r)
+ return ERR_PTR(r);
+ }
+ }
sg_table = attach->dmabuf->ops->map_dma_buf(attach, direction);
if (!sg_table)
sg_table = ERR_PTR(-ENOMEM);
+ if (IS_ERR(sg_table) && dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
+
if (!IS_ERR(sg_table) && attach->dmabuf->ops->cache_sgt_mapping) {
attach->sgt = sg_table;
attach->dir = direction;
@@ -864,10 +933,34 @@ void dma_buf_unmap_attachment(struct dma_buf_attachment *attach,
dma_resv_assert_held(attach->dmabuf->resv);
attach->dmabuf->ops->unmap_dma_buf(attach, sg_table, direction);
+
+ if (dma_buf_is_dynamic(attach->dmabuf) &&
+ !IS_ENABLED(CONFIG_DMABUF_MOVE_NOTIFY))
+ dma_buf_unpin(attach);
}
EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
/**
+ * dma_buf_move_notify - notify attachments that DMA-buf is moving
+ *
+ * @dmabuf: [in] buffer which is moving
+ *
+ * Informs all attachments that they need to destroy and recreate all their
+ * mappings.
+ */
+void dma_buf_move_notify(struct dma_buf *dmabuf)
+{
+ struct dma_buf_attachment *attach;
+
+ dma_resv_assert_held(dmabuf->resv);
+
+ list_for_each_entry(attach, &dmabuf->attachments, node)
+ if (attach->importer_ops)
+ attach->importer_ops->move_notify(attach);
+}
+EXPORT_SYMBOL_GPL(dma_buf_move_notify);
+
+/**
* DOC: cpu access
*
* There are multiple reasons for supporting CPU access to a dma buffer object:
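
Taken together, the dma-buf changes give dynamic importers a pin/unpin pair plus a mandatory move_notify callback invoked under the reservation lock. A minimal importer-side sketch, assuming only the interfaces added in this patch; the importer itself and its wrapper are hypothetical:

    #include <linux/dma-buf.h>

    /* Called with the buffer's dma_resv lock held: cached mappings
     * are now stale and must be destroyed and recreated on next use. */
    static void my_move_notify(struct dma_buf_attachment *attach)
    {
            /* invalidate cached sg_tables, schedule remapping */
    }

    static const struct dma_buf_attach_ops my_importer_ops = {
            .move_notify = my_move_notify,
    };

    /* Dynamic attach: instead of pinning, the exporter may relocate
     * the buffer and tell us about it through move_notify. */
    static struct dma_buf_attachment *
    my_attach(struct dma_buf *dmabuf, struct device *dev, void *priv)
    {
            return dma_buf_dynamic_attach(dmabuf, dev, &my_importer_ops,
                                          priv);
    }
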
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index e51d836afcc7..1092d4ce723e 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1947,8 +1947,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
return;
}
- spin_lock(&cohc->lock);
-
/*
* When we reach this point, at least one queue item
* should have been moved over from cohc->queue to
@@ -1969,8 +1967,6 @@ static void dma_tc_handle(struct coh901318_chan *cohc)
if (coh901318_queue_start(cohc) == NULL)
cohc->busy = 0;
- spin_unlock(&cohc->lock);
-
/*
* This tasklet will remove items from cohc->active
* and thus terminates them.
diff --git a/drivers/dma/idxd/cdev.c b/drivers/dma/idxd/cdev.c
index 1d7347825b95..df47be612ebb 100644
--- a/drivers/dma/idxd/cdev.c
+++ b/drivers/dma/idxd/cdev.c
@@ -204,6 +204,7 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
minor = ida_simple_get(&cdev_ctx->minor_ida, 0, MINORMASK, GFP_KERNEL);
if (minor < 0) {
rc = minor;
+ kfree(dev);
goto ida_err;
}
@@ -212,7 +213,6 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
rc = device_register(dev);
if (rc < 0) {
dev_err(&idxd->pdev->dev, "device register failed\n");
- put_device(dev);
goto dev_reg_err;
}
idxd_cdev->minor = minor;
@@ -221,8 +221,8 @@ static int idxd_wq_cdev_dev_setup(struct idxd_wq *wq)
dev_reg_err:
ida_simple_remove(&cdev_ctx->minor_ida, MINOR(dev->devt));
+ put_device(dev);
ida_err:
- kfree(dev);
idxd_cdev->dev = NULL;
return rc;
}
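
The idxd error-path fix applies the driver-core rule that once device_register() has been called, even a failed device may only be released with put_device(), which drops the reference and runs the release callback; a bare kfree() is legal only before registration. A condensed sketch of the pattern (the helper, the device name, and the release wiring are assumptions, not idxd code):

    #include <linux/device.h>
    #include <linux/slab.h>

    static void example_release(struct device *dev)
    {
            kfree(dev);     /* the refcounted release path frees it */
    }

    static int example_register(struct device *parent)
    {
            struct device *dev;
            int rc;

            dev = kzalloc(sizeof(*dev), GFP_KERNEL);
            if (!dev)
                    return -ENOMEM;

            dev->parent = parent;
            dev->release = example_release;
            dev_set_name(dev, "example0");

            rc = device_register(dev);
            if (rc < 0) {
                    /* not kfree(): device_register() took a reference */
                    put_device(dev);
                    return rc;
            }
            return 0;
    }
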
diff --git a/drivers/dma/idxd/sysfs.c b/drivers/dma/idxd/sysfs.c
index 6d907fe150aa..6ca6e520a2fa 100644
--- a/drivers/dma/idxd/sysfs.c
+++ b/drivers/dma/idxd/sysfs.c
@@ -124,6 +124,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_device_config(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
dev_warn(dev, "Device config failed: %d\n", rc);
return rc;
}
@@ -132,6 +133,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_device_enable(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
dev_warn(dev, "Device enable failed: %d\n", rc);
return rc;
}
@@ -142,6 +144,7 @@ static int idxd_config_bus_probe(struct device *dev)
rc = idxd_register_dma_device(idxd);
if (rc < 0) {
spin_unlock_irqrestore(&idxd->dev_lock, flags);
+ module_put(THIS_MODULE);
dev_dbg(dev, "Failed to register dmaengine device\n");
return rc;
}
@@ -516,7 +519,7 @@ static ssize_t group_tokens_reserved_store(struct device *dev,
if (val > idxd->max_tokens)
return -EINVAL;
- if (val > idxd->nr_tokens)
+ if (val > idxd->nr_tokens + group->tokens_reserved)
return -EINVAL;
group->tokens_reserved = val;
@@ -901,6 +904,20 @@ static ssize_t wq_size_show(struct device *dev, struct device_attribute *attr,
return sprintf(buf, "%u\n", wq->size);
}
+static int total_claimed_wq_size(struct idxd_device *idxd)
+{
+ int i;
+ int wq_size = 0;
+
+ for (i = 0; i < idxd->max_wqs; i++) {
+ struct idxd_wq *wq = &idxd->wqs[i];
+
+ wq_size += wq->size;
+ }
+
+ return wq_size;
+}
+
static ssize_t wq_size_store(struct device *dev,
struct device_attribute *attr, const char *buf,
size_t count)
@@ -920,7 +937,7 @@ static ssize_t wq_size_store(struct device *dev,
if (wq->state != IDXD_WQ_DISABLED)
return -EPERM;
- if (size > idxd->max_wq_size)
+ if (size + total_claimed_wq_size(idxd) - wq->size > idxd->max_wq_size)
return -EINVAL;
wq->size = size;
@@ -999,12 +1016,14 @@ static ssize_t wq_type_store(struct device *dev,
return -EPERM;
old_type = wq->type;
- if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
+ if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_NONE]))
+ wq->type = IDXD_WQT_NONE;
+ else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_KERNEL]))
wq->type = IDXD_WQT_KERNEL;
else if (sysfs_streq(buf, idxd_wq_type_names[IDXD_WQT_USER]))
wq->type = IDXD_WQT_USER;
else
- wq->type = IDXD_WQT_NONE;
+ return -EINVAL;
/* If we are changing queue type, clear the name */
if (wq->type != old_type)
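
The wq_size_store() check above admits a resize only if it fits the device budget once the queue's current claim is subtracted, so shrinking is always allowed and growth is bounded by what the other queues leave free. A runnable model of that arithmetic (all numbers are illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* Sum every queue's claim, subtract the queue being resized so
     * its old size doesn't count against its new one. */
    static bool wq_resize_ok(const unsigned *sizes, int n, int idx,
                             unsigned new_size, unsigned budget)
    {
            unsigned total = 0;

            for (int i = 0; i < n; i++)
                    total += sizes[i];
            return new_size + total - sizes[idx] <= budget;
    }

    int main(void)
    {
            unsigned sizes[4] = { 16, 16, 16, 16 };

            printf("%d\n", wq_resize_ok(sizes, 4, 0, 48, 96)); /* 1: fits */
            printf("%d\n", wq_resize_ok(sizes, 4, 0, 64, 96)); /* 0: over */
            return 0;
    }
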
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 066b21a32232..4d4477df4ede 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1331,13 +1331,14 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
sdma_channel_synchronize(chan);
- if (sdmac->event_id0)
+ if (sdmac->event_id0 >= 0)
sdma_event_disable(sdmac, sdmac->event_id0);
if (sdmac->event_id1)
sdma_event_disable(sdmac, sdmac->event_id1);
sdmac->event_id0 = 0;
sdmac->event_id1 = 0;
+ sdmac->context_loaded = false;
sdma_set_channel_priority(sdmac, 0);
@@ -1631,7 +1632,7 @@ static int sdma_config(struct dma_chan *chan,
memcpy(&sdmac->slave_config, dmaengine_cfg, sizeof(*dmaengine_cfg));
/* Set ENBLn earlier to make sure dma request triggered after that */
- if (sdmac->event_id0) {
+ if (sdmac->event_id0 >= 0) {
if (sdmac->event_id0 >= sdmac->sdma->drvdata->num_events)
return -EINVAL;
sdma_event_enable(sdmac, sdmac->event_id0);
diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
index 3a45079d11ec..4a750e29bfb5 100644
--- a/drivers/dma/tegra20-apb-dma.c
+++ b/drivers/dma/tegra20-apb-dma.c
@@ -281,7 +281,7 @@ static struct tegra_dma_desc *tegra_dma_desc_get(
/* Do not allocate if desc are waiting for ack */
list_for_each_entry(dma_desc, &tdc->free_dma_desc, node) {
- if (async_tx_test_ack(&dma_desc->txd)) {
+ if (async_tx_test_ack(&dma_desc->txd) && !dma_desc->cb_count) {
list_del(&dma_desc->node);
spin_unlock_irqrestore(&tdc->lock, flags);
dma_desc->txd.flags = 0;
@@ -756,10 +756,6 @@ static int tegra_dma_terminate_all(struct dma_chan *dc)
bool was_busy;
spin_lock_irqsave(&tdc->lock, flags);
- if (list_empty(&tdc->pending_sg_req)) {
- spin_unlock_irqrestore(&tdc->lock, flags);
- return 0;
- }
if (!tdc->busy)
goto skip_dma_stop;
diff --git a/drivers/dma/ti/k3-udma.c b/drivers/dma/ti/k3-udma.c
index ea79c2df28e0..0536866a58ce 100644
--- a/drivers/dma/ti/k3-udma.c
+++ b/drivers/dma/ti/k3-udma.c
@@ -5,6 +5,7 @@
*/
#include <linux/kernel.h>
+#include <linux/delay.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
@@ -96,6 +97,24 @@ struct udma_match_data {
u32 level_start_idx[];
};
+struct udma_hwdesc {
+ size_t cppi5_desc_size;
+ void *cppi5_desc_vaddr;
+ dma_addr_t cppi5_desc_paddr;
+
+ /* TR descriptor internal pointers */
+ void *tr_req_base;
+ struct cppi5_tr_resp_t *tr_resp_base;
+};
+
+struct udma_rx_flush {
+ struct udma_hwdesc hwdescs[2];
+
+ size_t buffer_size;
+ void *buffer_vaddr;
+ dma_addr_t buffer_paddr;
+};
+
struct udma_dev {
struct dma_device ddev;
struct device *dev;
@@ -112,6 +131,8 @@ struct udma_dev {
struct list_head desc_to_purge;
spinlock_t lock;
+ struct udma_rx_flush rx_flush;
+
int tchan_cnt;
int echan_cnt;
int rchan_cnt;
@@ -130,16 +151,6 @@ struct udma_dev {
u32 psil_base;
};
-struct udma_hwdesc {
- size_t cppi5_desc_size;
- void *cppi5_desc_vaddr;
- dma_addr_t cppi5_desc_paddr;
-
- /* TR descriptor internal pointers */
- void *tr_req_base;
- struct cppi5_tr_resp_t *tr_resp_base;
-};
-
struct udma_desc {
struct virt_dma_desc vd;
@@ -169,7 +180,7 @@ enum udma_chan_state {
struct udma_tx_drain {
struct delayed_work work;
- unsigned long jiffie;
+ ktime_t tstamp;
u32 residue;
};
@@ -502,7 +513,7 @@ static bool udma_is_chan_paused(struct udma_chan *uc)
{
u32 val, pause_mask;
- switch (uc->desc->dir) {
+ switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
val = udma_rchanrt_read(uc->rchan,
UDMA_RCHAN_RT_PEER_RT_EN_REG);
@@ -551,12 +562,17 @@ static void udma_sync_for_device(struct udma_chan *uc, int idx)
}
}
+static inline dma_addr_t udma_get_rx_flush_hwdesc_paddr(struct udma_chan *uc)
+{
+ return uc->ud->rx_flush.hwdescs[uc->config.pkt_mode].cppi5_desc_paddr;
+}
+
static int udma_push_to_ring(struct udma_chan *uc, int idx)
{
struct udma_desc *d = uc->desc;
-
struct k3_ring *ring = NULL;
- int ret = -EINVAL;
+ dma_addr_t paddr;
+ int ret;
switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
@@ -567,21 +583,37 @@ static int udma_push_to_ring(struct udma_chan *uc, int idx)
ring = uc->tchan->t_ring;
break;
default:
- break;
+ return -EINVAL;
}
- if (ring) {
- dma_addr_t desc_addr = udma_curr_cppi5_desc_paddr(d, idx);
+ /* RX flush packet: idx == -1 is only passed in case of DEV_TO_MEM */
+ if (idx == -1) {
+ paddr = udma_get_rx_flush_hwdesc_paddr(uc);
+ } else {
+ paddr = udma_curr_cppi5_desc_paddr(d, idx);
wmb(); /* Ensure that writes are not moved over this point */
udma_sync_for_device(uc, idx);
- ret = k3_ringacc_ring_push(ring, &desc_addr);
- uc->in_ring_cnt++;
}
+ ret = k3_ringacc_ring_push(ring, &paddr);
+ if (!ret)
+ uc->in_ring_cnt++;
+
return ret;
}
+static bool udma_desc_is_rx_flush(struct udma_chan *uc, dma_addr_t addr)
+{
+ if (uc->config.dir != DMA_DEV_TO_MEM)
+ return false;
+
+ if (addr == udma_get_rx_flush_hwdesc_paddr(uc))
+ return true;
+
+ return false;
+}
+
static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
{
struct k3_ring *ring = NULL;
@@ -610,6 +642,10 @@ static int udma_pop_from_ring(struct udma_chan *uc, dma_addr_t *addr)
if (cppi5_desc_is_tdcm(*addr))
return ret;
+ /* Check for flush descriptor */
+ if (udma_desc_is_rx_flush(uc, *addr))
+ return -ENOENT;
+
d = udma_udma_desc_from_paddr(uc, *addr);
if (d)
@@ -890,6 +926,9 @@ static int udma_stop(struct udma_chan *uc)
switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
+ if (!uc->cyclic && !uc->desc)
+ udma_push_to_ring(uc, -1);
+
udma_rchanrt_write(uc->rchan, UDMA_RCHAN_RT_PEER_RT_EN_REG,
UDMA_PEER_RT_EN_ENABLE |
UDMA_PEER_RT_EN_TEARDOWN);
@@ -946,9 +985,10 @@ static bool udma_is_desc_really_done(struct udma_chan *uc, struct udma_desc *d)
peer_bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_PEER_BCNT_REG);
bcnt = udma_tchanrt_read(uc->tchan, UDMA_TCHAN_RT_BCNT_REG);
+ /* Transfer is incomplete, store current residue and time stamp */
if (peer_bcnt < bcnt) {
uc->tx_drain.residue = bcnt - peer_bcnt;
- uc->tx_drain.jiffie = jiffies;
+ uc->tx_drain.tstamp = ktime_get();
return false;
}
@@ -961,35 +1001,59 @@ static void udma_check_tx_completion(struct work_struct *work)
tx_drain.work.work);
bool desc_done = true;
u32 residue_diff;
- unsigned long jiffie_diff, delay;
+ ktime_t time_diff;
+ unsigned long delay;
+
+ while (1) {
+ if (uc->desc) {
+ /* Get previous residue and time stamp */
+ residue_diff = uc->tx_drain.residue;
+ time_diff = uc->tx_drain.tstamp;
+ /*
+ * Get current residue and time stamp or see if
+ * transfer is complete
+ */
+ desc_done = udma_is_desc_really_done(uc, uc->desc);
+ }
- if (uc->desc) {
- residue_diff = uc->tx_drain.residue;
- jiffie_diff = uc->tx_drain.jiffie;
- desc_done = udma_is_desc_really_done(uc, uc->desc);
- }
-
- if (!desc_done) {
- jiffie_diff = uc->tx_drain.jiffie - jiffie_diff;
- residue_diff -= uc->tx_drain.residue;
- if (residue_diff) {
- /* Try to guess when we should check next time */
- residue_diff /= jiffie_diff;
- delay = uc->tx_drain.residue / residue_diff / 3;
- if (jiffies_to_msecs(delay) < 5)
- delay = 0;
- } else {
- /* No progress, check again in 1 second */
- delay = HZ;
+ if (!desc_done) {
+ /*
+ * Find the time delta and residue delta w.r.t
+ * previous poll
+ */
+ time_diff = ktime_sub(uc->tx_drain.tstamp,
+ time_diff) + 1;
+ residue_diff -= uc->tx_drain.residue;
+ if (residue_diff) {
+ /*
+ * Try to guess when we should check
+ * next time by calculating rate at
+ * which data is being drained at the
+ * peer device
+ */
+ delay = (time_diff / residue_diff) *
+ uc->tx_drain.residue;
+ } else {
+ /* No progress, check again in 1 second */
+ schedule_delayed_work(&uc->tx_drain.work, HZ);
+ break;
+ }
+
+ usleep_range(ktime_to_us(delay),
+ ktime_to_us(delay) + 10);
+ continue;
}
- schedule_delayed_work(&uc->tx_drain.work, delay);
- } else if (uc->desc) {
- struct udma_desc *d = uc->desc;
+ if (uc->desc) {
+ struct udma_desc *d = uc->desc;
- uc->bcnt += d->residue;
- udma_start(uc);
- vchan_cookie_complete(&d->vd);
+ uc->bcnt += d->residue;
+ udma_start(uc);
+ vchan_cookie_complete(&d->vd);
+ break;
+ }
+
+ break;
}
}
@@ -1033,29 +1097,27 @@ static irqreturn_t udma_ring_irq_handler(int irq, void *data)
goto out;
}
- if (uc->cyclic) {
- /* push the descriptor back to the ring */
- if (d == uc->desc) {
+ if (d == uc->desc) {
+ /* active descriptor */
+ if (uc->cyclic) {
udma_cyclic_packet_elapsed(uc);
vchan_cyclic_callback(&d->vd);
- }
- } else {
- bool desc_done = false;
-
- if (d == uc->desc) {
- desc_done = udma_is_desc_really_done(uc, d);
-
- if (desc_done) {
+ } else {
+ if (udma_is_desc_really_done(uc, d)) {
uc->bcnt += d->residue;
udma_start(uc);
+ vchan_cookie_complete(&d->vd);
} else {
schedule_delayed_work(&uc->tx_drain.work,
0);
}
}
-
- if (desc_done)
- vchan_cookie_complete(&d->vd);
+ } else {
+ /*
+ * terminated descriptor, mark the descriptor as
+ * completed to update the channel's cookie marker
+ */
+ dma_cookie_complete(&d->vd.tx);
}
}
out:
@@ -1965,36 +2027,81 @@ static struct udma_desc *udma_alloc_tr_desc(struct udma_chan *uc,
return d;
}
+/**
+ * udma_get_tr_counters - calculate TR counters for a given length
+ * @len: Length of the transfer
+ * @align_to: Preferred alignment
+ * @tr0_cnt0: First TR icnt0
+ * @tr0_cnt1: First TR icnt1
+ * @tr1_cnt0: Second (if used) TR icnt0
+ *
+ * For len < SZ_64K only one TR is enough, tr1_cnt0 is not updated
+ * For len >= SZ_64K two TRs are used in a simple way:
+ * First TR: SZ_64K-alignment blocks (tr0_cnt0, tr0_cnt1)
+ * Second TR: the remaining length (tr1_cnt0)
+ *
+ * Returns the number of TRs the length needs (1 or 2)
+ * -EINVAL if the length cannot be supported
+ */
+static int udma_get_tr_counters(size_t len, unsigned long align_to,
+ u16 *tr0_cnt0, u16 *tr0_cnt1, u16 *tr1_cnt0)
+{
+ if (len < SZ_64K) {
+ *tr0_cnt0 = len;
+ *tr0_cnt1 = 1;
+
+ return 1;
+ }
+
+ if (align_to > 3)
+ align_to = 3;
+
+realign:
+ *tr0_cnt0 = SZ_64K - BIT(align_to);
+ if (len / *tr0_cnt0 >= SZ_64K) {
+ if (align_to) {
+ align_to--;
+ goto realign;
+ }
+ return -EINVAL;
+ }
+
+ *tr0_cnt1 = len / *tr0_cnt0;
+ *tr1_cnt0 = len % *tr0_cnt0;
+
+ return 2;
+}
+
static struct udma_desc *
udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
unsigned int sglen, enum dma_transfer_direction dir,
unsigned long tx_flags, void *context)
{
- enum dma_slave_buswidth dev_width;
struct scatterlist *sgent;
struct udma_desc *d;
- size_t tr_size;
struct cppi5_tr_type1_t *tr_req = NULL;
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
unsigned int i;
- u32 burst;
+ size_t tr_size;
+ int num_tr = 0;
+ int tr_idx = 0;
- if (dir == DMA_DEV_TO_MEM) {
- dev_width = uc->cfg.src_addr_width;
- burst = uc->cfg.src_maxburst;
- } else if (dir == DMA_MEM_TO_DEV) {
- dev_width = uc->cfg.dst_addr_width;
- burst = uc->cfg.dst_maxburst;
- } else {
- dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ if (!is_slave_direction(dir)) {
+ dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
return NULL;
}
- if (!burst)
- burst = 1;
+ /* estimate the number of TRs we will need */
+ for_each_sg(sgl, sgent, sglen, i) {
+ if (sg_dma_len(sgent) < SZ_64K)
+ num_tr++;
+ else
+ num_tr += 2;
+ }
/* Now allocate and setup the descriptor. */
tr_size = sizeof(struct cppi5_tr_type1_t);
- d = udma_alloc_tr_desc(uc, tr_size, sglen, dir);
+ d = udma_alloc_tr_desc(uc, tr_size, num_tr, dir);
if (!d)
return NULL;
@@ -2002,19 +2109,46 @@ udma_prep_slave_sg_tr(struct udma_chan *uc, struct scatterlist *sgl,
tr_req = d->hwdesc[0].tr_req_base;
for_each_sg(sgl, sgent, sglen, i) {
- d->residue += sg_dma_len(sgent);
+ dma_addr_t sg_addr = sg_dma_address(sgent);
+
+ num_tr = udma_get_tr_counters(sg_dma_len(sgent), __ffs(sg_addr),
+ &tr0_cnt0, &tr0_cnt1, &tr1_cnt0);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %u is not supported\n",
+ sg_dma_len(sgent));
+ udma_free_hwdesc(uc, d);
+ kfree(d);
+ return NULL;
+ }
cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
cppi5_tr_csf_set(&tr_req[i].flags, CPPI5_TR_CSF_SUPR_EVT);
- tr_req[i].addr = sg_dma_address(sgent);
- tr_req[i].icnt0 = burst * dev_width;
- tr_req[i].dim1 = burst * dev_width;
- tr_req[i].icnt1 = sg_dma_len(sgent) / tr_req[i].icnt0;
+ tr_req[tr_idx].addr = sg_addr;
+ tr_req[tr_idx].icnt0 = tr0_cnt0;
+ tr_req[tr_idx].icnt1 = tr0_cnt1;
+ tr_req[tr_idx].dim1 = tr0_cnt0;
+ tr_idx++;
+
+ if (num_tr == 2) {
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
+ false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req[tr_idx].addr = sg_addr + tr0_cnt1 * tr0_cnt0;
+ tr_req[tr_idx].icnt0 = tr1_cnt0;
+ tr_req[tr_idx].icnt1 = 1;
+ tr_req[tr_idx].dim1 = tr1_cnt0;
+ tr_idx++;
+ }
+
+ d->residue += sg_dma_len(sgent);
}
- cppi5_tr_csf_set(&tr_req[i - 1].flags, CPPI5_TR_CSF_EOP);
+ cppi5_tr_csf_set(&tr_req[tr_idx - 1].flags, CPPI5_TR_CSF_EOP);
return d;
}
@@ -2319,47 +2453,66 @@ udma_prep_dma_cyclic_tr(struct udma_chan *uc, dma_addr_t buf_addr,
size_t buf_len, size_t period_len,
enum dma_transfer_direction dir, unsigned long flags)
{
- enum dma_slave_buswidth dev_width;
struct udma_desc *d;
- size_t tr_size;
+ size_t tr_size, period_addr;
struct cppi5_tr_type1_t *tr_req;
- unsigned int i;
unsigned int periods = buf_len / period_len;
- u32 burst;
+ u16 tr0_cnt0, tr0_cnt1, tr1_cnt0;
+ unsigned int i;
+ int num_tr;
- if (dir == DMA_DEV_TO_MEM) {
- dev_width = uc->cfg.src_addr_width;
- burst = uc->cfg.src_maxburst;
- } else if (dir == DMA_MEM_TO_DEV) {
- dev_width = uc->cfg.dst_addr_width;
- burst = uc->cfg.dst_maxburst;
- } else {
- dev_err(uc->ud->dev, "%s: bad direction?\n", __func__);
+ if (!is_slave_direction(dir)) {
+ dev_err(uc->ud->dev, "Only slave cyclic is supported\n");
return NULL;
}
- if (!burst)
- burst = 1;
+ num_tr = udma_get_tr_counters(period_len, __ffs(buf_addr), &tr0_cnt0,
+ &tr0_cnt1, &tr1_cnt0);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ period_len);
+ return NULL;
+ }
/* Now allocate and setup the descriptor. */
tr_size = sizeof(struct cppi5_tr_type1_t);
- d = udma_alloc_tr_desc(uc, tr_size, periods, dir);
+ d = udma_alloc_tr_desc(uc, tr_size, periods * num_tr, dir);
if (!d)
return NULL;
tr_req = d->hwdesc[0].tr_req_base;
+ period_addr = buf_addr;
for (i = 0; i < periods; i++) {
- cppi5_tr_init(&tr_req[i].flags, CPPI5_TR_TYPE1, false, false,
- CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ int tr_idx = i * num_tr;
- tr_req[i].addr = buf_addr + period_len * i;
- tr_req[i].icnt0 = dev_width;
- tr_req[i].icnt1 = period_len / dev_width;
- tr_req[i].dim1 = dev_width;
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1, false,
+ false, CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[tr_idx].addr = period_addr;
+ tr_req[tr_idx].icnt0 = tr0_cnt0;
+ tr_req[tr_idx].icnt1 = tr0_cnt1;
+ tr_req[tr_idx].dim1 = tr0_cnt0;
+
+ if (num_tr == 2) {
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
+ CPPI5_TR_CSF_SUPR_EVT);
+ tr_idx++;
+
+ cppi5_tr_init(&tr_req[tr_idx].flags, CPPI5_TR_TYPE1,
+ false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+
+ tr_req[tr_idx].addr = period_addr + tr0_cnt1 * tr0_cnt0;
+ tr_req[tr_idx].icnt0 = tr1_cnt0;
+ tr_req[tr_idx].icnt1 = 1;
+ tr_req[tr_idx].dim1 = tr1_cnt0;
+ }
if (!(flags & DMA_PREP_INTERRUPT))
- cppi5_tr_csf_set(&tr_req[i].flags,
+ cppi5_tr_csf_set(&tr_req[tr_idx].flags,
CPPI5_TR_CSF_SUPR_EVT);
+
+ period_addr += period_len;
}
return d;
@@ -2517,29 +2670,12 @@ udma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
return NULL;
}
- if (len < SZ_64K) {
- num_tr = 1;
- tr0_cnt0 = len;
- tr0_cnt1 = 1;
- } else {
- unsigned long align_to = __ffs(src | dest);
-
- if (align_to > 3)
- align_to = 3;
- /*
- * Keep simple: tr0: SZ_64K-alignment blocks,
- * tr1: the remaining
- */
- num_tr = 2;
- tr0_cnt0 = (SZ_64K - BIT(align_to));
- if (len / tr0_cnt0 >= SZ_64K) {
- dev_err(uc->ud->dev, "size %zu is not supported\n",
- len);
- return NULL;
- }
-
- tr0_cnt1 = len / tr0_cnt0;
- tr1_cnt0 = len % tr0_cnt0;
+ num_tr = udma_get_tr_counters(len, __ffs(src | dest), &tr0_cnt0,
+ &tr0_cnt1, &tr1_cnt0);
+ if (num_tr < 0) {
+ dev_err(uc->ud->dev, "size %zu is not supported\n",
+ len);
+ return NULL;
}
d = udma_alloc_tr_desc(uc, tr_size, num_tr, DMA_MEM_TO_MEM);
@@ -2631,6 +2767,9 @@ static enum dma_status udma_tx_status(struct dma_chan *chan,
ret = dma_cookie_status(chan, cookie, txstate);
+ if (!udma_is_chan_running(uc))
+ ret = DMA_COMPLETE;
+
if (ret == DMA_IN_PROGRESS && udma_is_chan_paused(uc))
ret = DMA_PAUSED;
@@ -2697,11 +2836,8 @@ static int udma_pause(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
- if (!uc->desc)
- return -EINVAL;
-
/* pause the channel */
- switch (uc->desc->dir) {
+ switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
udma_rchanrt_update_bits(uc->rchan,
UDMA_RCHAN_RT_PEER_RT_EN_REG,
@@ -2730,11 +2866,8 @@ static int udma_resume(struct dma_chan *chan)
{
struct udma_chan *uc = to_udma_chan(chan);
- if (!uc->desc)
- return -EINVAL;
-
/* resume the channel */
- switch (uc->desc->dir) {
+ switch (uc->config.dir) {
case DMA_DEV_TO_MEM:
udma_rchanrt_update_bits(uc->rchan,
UDMA_RCHAN_RT_PEER_RT_EN_REG,
@@ -3248,6 +3381,98 @@ static int udma_setup_resources(struct udma_dev *ud)
return ch_count;
}
+static int udma_setup_rx_flush(struct udma_dev *ud)
+{
+ struct udma_rx_flush *rx_flush = &ud->rx_flush;
+ struct cppi5_desc_hdr_t *tr_desc;
+ struct cppi5_tr_type1_t *tr_req;
+ struct cppi5_host_desc_t *desc;
+ struct device *dev = ud->dev;
+ struct udma_hwdesc *hwdesc;
+ size_t tr_size;
+
+ /* Allocate 1K buffer for discarded data on RX channel teardown */
+ rx_flush->buffer_size = SZ_1K;
+ rx_flush->buffer_vaddr = devm_kzalloc(dev, rx_flush->buffer_size,
+ GFP_KERNEL);
+ if (!rx_flush->buffer_vaddr)
+ return -ENOMEM;
+
+ rx_flush->buffer_paddr = dma_map_single(dev, rx_flush->buffer_vaddr,
+ rx_flush->buffer_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, rx_flush->buffer_paddr))
+ return -ENOMEM;
+
+ /* Set up descriptor to be used for TR mode */
+ hwdesc = &rx_flush->hwdescs[0];
+ tr_size = sizeof(struct cppi5_tr_type1_t);
+ hwdesc->cppi5_desc_size = cppi5_trdesc_calc_size(tr_size, 1);
+ hwdesc->cppi5_desc_size = ALIGN(hwdesc->cppi5_desc_size,
+ ud->desc_align);
+
+ hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
+ GFP_KERNEL);
+ if (!hwdesc->cppi5_desc_vaddr)
+ return -ENOMEM;
+
+ hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
+ hwdesc->cppi5_desc_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
+ return -ENOMEM;
+
+ /* Start of the TR req records */
+ hwdesc->tr_req_base = hwdesc->cppi5_desc_vaddr + tr_size;
+ /* Start address of the TR response array */
+ hwdesc->tr_resp_base = hwdesc->tr_req_base + tr_size;
+
+ tr_desc = hwdesc->cppi5_desc_vaddr;
+ cppi5_trdesc_init(tr_desc, 1, tr_size, 0, 0);
+ cppi5_desc_set_pktids(tr_desc, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(tr_desc, 0, 0);
+
+ tr_req = hwdesc->tr_req_base;
+ cppi5_tr_init(&tr_req->flags, CPPI5_TR_TYPE1, false, false,
+ CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
+ cppi5_tr_csf_set(&tr_req->flags, CPPI5_TR_CSF_SUPR_EVT);
+
+ tr_req->addr = rx_flush->buffer_paddr;
+ tr_req->icnt0 = rx_flush->buffer_size;
+ tr_req->icnt1 = 1;
+
+ /* Set up descriptor to be used for packet mode */
+ hwdesc = &rx_flush->hwdescs[1];
+ hwdesc->cppi5_desc_size = ALIGN(sizeof(struct cppi5_host_desc_t) +
+ CPPI5_INFO0_HDESC_EPIB_SIZE +
+ CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE,
+ ud->desc_align);
+
+ hwdesc->cppi5_desc_vaddr = devm_kzalloc(dev, hwdesc->cppi5_desc_size,
+ GFP_KERNEL);
+ if (!hwdesc->cppi5_desc_vaddr)
+ return -ENOMEM;
+
+ hwdesc->cppi5_desc_paddr = dma_map_single(dev, hwdesc->cppi5_desc_vaddr,
+ hwdesc->cppi5_desc_size,
+ DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, hwdesc->cppi5_desc_paddr))
+ return -ENOMEM;
+
+ desc = hwdesc->cppi5_desc_vaddr;
+ cppi5_hdesc_init(desc, 0, 0);
+ cppi5_desc_set_pktids(&desc->hdr, 0, CPPI5_INFO1_DESC_FLOWID_DEFAULT);
+ cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
+
+ cppi5_hdesc_attach_buf(desc,
+ rx_flush->buffer_paddr, rx_flush->buffer_size,
+ rx_flush->buffer_paddr, rx_flush->buffer_size);
+
+ dma_sync_single_for_device(dev, hwdesc->cppi5_desc_paddr,
+ hwdesc->cppi5_desc_size, DMA_TO_DEVICE);
+ return 0;
+}
+
#define TI_UDMAC_BUSWIDTHS (BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
BIT(DMA_SLAVE_BUSWIDTH_3_BYTES) | \
@@ -3361,6 +3586,10 @@ static int udma_probe(struct platform_device *pdev)
if (ud->desc_align < dma_get_cache_alignment())
ud->desc_align = dma_get_cache_alignment();
+ ret = udma_setup_rx_flush(ud);
+ if (ret)
+ return ret;
+
for (i = 0; i < ud->tchan_cnt; i++) {
struct udma_tchan *tchan = &ud->tchans[i];
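
For reference, the splitting rule that udma_get_tr_counters() introduces above can be exercised in isolation. The following is a userspace re-implementation of the same arithmetic for experimentation, not the driver code itself:

    #include <stdio.h>
    #include <stddef.h>

    #define SZ_64K 0x10000UL

    /* Below 64K one TR suffices; otherwise split into tr0_cnt1 blocks
     * of tr0_cnt0 bytes (just under 64K, sized to preserve the address
     * alignment) plus a tr1_cnt0 remainder, lowering the alignment
     * until the block count fits in 16 bits. */
    static int get_tr_counters(size_t len, unsigned long align_to,
                               unsigned *tr0_cnt0, unsigned *tr0_cnt1,
                               unsigned *tr1_cnt0)
    {
            if (len < SZ_64K) {
                    *tr0_cnt0 = len;
                    *tr0_cnt1 = 1;
                    return 1;
            }

            if (align_to > 3)
                    align_to = 3;

    realign:
            *tr0_cnt0 = SZ_64K - (1UL << align_to);
            if (len / *tr0_cnt0 >= SZ_64K) {
                    if (align_to) {
                            align_to--;
                            goto realign;
                    }
                    return -1;      /* -EINVAL in the driver */
            }

            *tr0_cnt1 = len / *tr0_cnt0;
            *tr1_cnt0 = len % *tr0_cnt0;
            return 2;
    }

    int main(void)
    {
            unsigned c0, c1, r0 = 0;
            int n = get_tr_counters(1000000, 2, &c0, &c1, &r0);

            printf("num_tr=%d tr0_cnt0=%u tr0_cnt1=%u tr1_cnt0=%u\n",
                   n, c0, c1, r0);
            return 0;
    }
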
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 7243b88f81d8..69e0d90460e6 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -505,16 +505,10 @@ void edac_mc_free(struct mem_ctl_info *mci)
{
edac_dbg(1, "\n");
- /* If we're not yet registered with sysfs free only what was allocated
- * in edac_mc_alloc().
- */
- if (!device_is_registered(&mci->dev)) {
- _edac_mc_free(mci);
- return;
- }
+ if (device_is_registered(&mci->dev))
+ edac_unregister_sysfs(mci);
- /* the mci instance is freed here, when the sysfs object is dropped */
- edac_unregister_sysfs(mci);
+ _edac_mc_free(mci);
}
EXPORT_SYMBOL_GPL(edac_mc_free);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index 0367554e7437..c70ec0a306d8 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -276,10 +276,7 @@ static const struct attribute_group *csrow_attr_groups[] = {
static void csrow_attr_release(struct device *dev)
{
- struct csrow_info *csrow = container_of(dev, struct csrow_info, dev);
-
- edac_dbg(1, "device %s released\n", dev_name(dev));
- kfree(csrow);
+ /* release device with _edac_mc_free() */
}
static const struct device_type csrow_attr_type = {
@@ -447,8 +444,7 @@ error:
csrow = mci->csrows[i];
if (!nr_pages_per_csrow(csrow))
continue;
-
- device_del(&mci->csrows[i]->dev);
+ device_unregister(&mci->csrows[i]->dev);
}
return err;
@@ -608,10 +604,7 @@ static const struct attribute_group *dimm_attr_groups[] = {
static void dimm_attr_release(struct device *dev)
{
- struct dimm_info *dimm = container_of(dev, struct dimm_info, dev);
-
- edac_dbg(1, "device %s released\n", dev_name(dev));
- kfree(dimm);
+ /* release device with _edac_mc_free() */
}
static const struct device_type dimm_attr_type = {
@@ -893,10 +886,7 @@ static const struct attribute_group *mci_attr_groups[] = {
static void mci_attr_release(struct device *dev)
{
- struct mem_ctl_info *mci = container_of(dev, struct mem_ctl_info, dev);
-
- edac_dbg(1, "device %s released\n", dev_name(dev));
- kfree(mci);
+ /* release device with _edac_mc_free() */
}
static const struct device_type mci_attr_type = {
diff --git a/drivers/edac/synopsys_edac.c b/drivers/edac/synopsys_edac.c
index 2d263382d797..880ffd833718 100644
--- a/drivers/edac/synopsys_edac.c
+++ b/drivers/edac/synopsys_edac.c
@@ -479,20 +479,14 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
pinf = &p->ceinfo;
if (!priv->p_data->quirks) {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "Bit Position: %d Data: 0x%08x\n",
+ "DDR ECC error type:%s Row %d Bank %d Col %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank, pinf->col,
pinf->bitpos, pinf->data);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type:%s Row %d Bank %d Col %d ",
- "CE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "BankGroup Number %d Block Number %d ",
- pinf->bankgrpnr, pinf->blknr);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "Bit Position: %d Data: 0x%08x\n",
+ "DDR ECC error type:%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d Bit Position: %d Data: 0x%08x",
+ "CE", pinf->row, pinf->bank, pinf->col,
+ pinf->bankgrpnr, pinf->blknr,
pinf->bitpos, pinf->data);
}
@@ -509,10 +503,8 @@ static void handle_error(struct mem_ctl_info *mci, struct synps_ecc_status *p)
"UE", pinf->row, pinf->bank, pinf->col);
} else {
snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "DDR ECC error type :%s Row %d Bank %d Col %d ",
- "UE", pinf->row, pinf->bank, pinf->col);
- snprintf(priv->message, SYNPS_EDAC_MSG_SIZE,
- "BankGroup Number %d Block Number %d",
+ "DDR ECC error type :%s Row %d Bank %d Col %d BankGroup Number %d Block Number %d",
+ "UE", pinf->row, pinf->bank, pinf->col,
pinf->bankgrpnr, pinf->blknr);
}
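
The synopsys_edac hunks collapse chained snprintf() calls into single ones because snprintf() always writes from the start of the buffer: successive calls overwrite, they do not append. A runnable demonstration:

    #include <stdio.h>

    int main(void)
    {
            char msg[128];

            /* broken: the second call discards the first */
            snprintf(msg, sizeof(msg), "Row %d Bank %d ", 1, 2);
            snprintf(msg, sizeof(msg), "Bit Position: %d", 7);
            printf("chained: \"%s\"\n", msg);

            /* fixed: one call with the full format string */
            snprintf(msg, sizeof(msg),
                     "Row %d Bank %d Bit Position: %d", 1, 2, 7);
            printf("single:  \"%s\"\n", msg);
            return 0;
    }
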
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 621220ab3d0e..21ea99f65113 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -552,7 +552,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
seed = early_memremap(efi.rng_seed, sizeof(*seed));
if (seed != NULL) {
- size = seed->size;
+ size = READ_ONCE(seed->size);
early_memunmap(seed, sizeof(*seed));
} else {
pr_err("Could not map UEFI random seed!\n");
@@ -562,7 +562,7 @@ int __init efi_config_parse_tables(void *config_tables, int count, int sz,
sizeof(*seed) + size);
if (seed != NULL) {
pr_notice("seeding entropy pool\n");
- add_bootloader_randomness(seed->bits, seed->size);
+ add_bootloader_randomness(seed->bits, size);
early_memunmap(seed, sizeof(*seed) + size);
} else {
pr_err("Could not map UEFI random seed!\n");
diff --git a/drivers/firmware/imx/imx-scu.c b/drivers/firmware/imx/imx-scu.c
index 03b43b7a6d1d..f71eaa5bf52d 100644
--- a/drivers/firmware/imx/imx-scu.c
+++ b/drivers/firmware/imx/imx-scu.c
@@ -29,6 +29,7 @@ struct imx_sc_chan {
struct mbox_client cl;
struct mbox_chan *ch;
int idx;
+ struct completion tx_done;
};
struct imx_sc_ipc {
@@ -100,6 +101,14 @@ int imx_scu_get_handle(struct imx_sc_ipc **ipc)
}
EXPORT_SYMBOL(imx_scu_get_handle);
+/* Callback invoked when a word of a message is acked, e.g. read by the SCU */
+static void imx_scu_tx_done(struct mbox_client *cl, void *mssg, int r)
+{
+ struct imx_sc_chan *sc_chan = container_of(cl, struct imx_sc_chan, cl);
+
+ complete(&sc_chan->tx_done);
+}
+
static void imx_scu_rx_callback(struct mbox_client *c, void *msg)
{
struct imx_sc_chan *sc_chan = container_of(c, struct imx_sc_chan, cl);
@@ -149,6 +158,19 @@ static int imx_scu_ipc_write(struct imx_sc_ipc *sc_ipc, void *msg)
for (i = 0; i < hdr->size; i++) {
sc_chan = &sc_ipc->chans[i % 4];
+
+ /*
+ * SCU requires that all message words are written
+ * sequentially, but the Linux MU driver implements multiple
+ * independent channels for each register, so ordering between
+ * different channels must be ensured by the SCU API interface.
+ *
+ * Wait for tx_done before every send to ensure that no
+ * queueing happens at the mailbox channel level.
+ */
+ wait_for_completion(&sc_chan->tx_done);
+ reinit_completion(&sc_chan->tx_done);
+
ret = mbox_send_message(sc_chan->ch, &data[i]);
if (ret < 0)
return ret;
@@ -247,6 +269,11 @@ static int imx_scu_probe(struct platform_device *pdev)
cl->knows_txdone = true;
cl->rx_callback = imx_scu_rx_callback;
+ /* Initialize the tx_done completion as "done" */
+ cl->tx_done = imx_scu_tx_done;
+ init_completion(&sc_chan->tx_done);
+ complete(&sc_chan->tx_done);
+
sc_chan->sc_ipc = sc_ipc;
sc_chan->idx = i % 4;
sc_chan->ch = mbox_request_channel_byname(cl, chan_name);
diff --git a/drivers/firmware/imx/misc.c b/drivers/firmware/imx/misc.c
index 4b56a587dacd..d073cb3ce699 100644
--- a/drivers/firmware/imx/misc.c
+++ b/drivers/firmware/imx/misc.c
@@ -16,7 +16,7 @@ struct imx_sc_msg_req_misc_set_ctrl {
u32 ctrl;
u32 val;
u16 resource;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_req_cpu_start {
struct imx_sc_rpc_msg hdr;
@@ -24,18 +24,18 @@ struct imx_sc_msg_req_cpu_start {
u32 address_lo;
u16 resource;
u8 enable;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_req_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 ctrl;
u16 resource;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_resp_misc_get_ctrl {
struct imx_sc_rpc_msg hdr;
u32 val;
-} __packed;
+} __packed __aligned(4);
/*
* This function sets a miscellaneous control value.
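
__packed alone drops these message structures to byte alignment, which is why __aligned(4) is added: it restores the 32-bit alignment the word-based SCU transport relies on while keeping the padding-free layout. A runnable comparison using the equivalent GCC attributes (field names are illustrative):

    #include <stdio.h>
    #include <stdalign.h>

    struct hdr {
            unsigned char ver, size, svc, func;
    };

    struct msg_packed {
            struct hdr hdr;
            unsigned int ctrl;
            unsigned short resource;
    } __attribute__((packed));

    struct msg_packed_aligned {
            struct hdr hdr;
            unsigned int ctrl;
            unsigned short resource;
    } __attribute__((packed, aligned(4)));

    int main(void)
    {
            printf("packed:         size=%zu align=%zu\n",
                   sizeof(struct msg_packed),
                   alignof(struct msg_packed));            /* 10, 1 */
            printf("packed+aligned: size=%zu align=%zu\n",
                   sizeof(struct msg_packed_aligned),
                   alignof(struct msg_packed_aligned));    /* 12, 4 */
            return 0;
    }
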
diff --git a/drivers/firmware/imx/scu-pd.c b/drivers/firmware/imx/scu-pd.c
index b556612207e5..af3ae0087de4 100644
--- a/drivers/firmware/imx/scu-pd.c
+++ b/drivers/firmware/imx/scu-pd.c
@@ -61,7 +61,7 @@ struct imx_sc_msg_req_set_resource_power_mode {
struct imx_sc_rpc_msg hdr;
u16 resource;
u8 mode;
-} __packed;
+} __packed __aligned(4);
#define IMX_SCU_PD_NAME_SIZE 20
struct imx_sc_pm_domain {
diff --git a/drivers/fsi/Kconfig b/drivers/fsi/Kconfig
index 92ce6d85802c..4cc0e630ab79 100644
--- a/drivers/fsi/Kconfig
+++ b/drivers/fsi/Kconfig
@@ -55,6 +55,7 @@ config FSI_MASTER_AST_CF
config FSI_MASTER_ASPEED
tristate "FSI ASPEED master"
+ depends on HAS_IOMEM
help
This option enables an FSI master that is present behind an OPB bridge
in the AST2600.
diff --git a/drivers/gpio/gpio-bd71828.c b/drivers/gpio/gpio-bd71828.c
index 04aade9e0a4d..3dbbc638e9a9 100644
--- a/drivers/gpio/gpio-bd71828.c
+++ b/drivers/gpio/gpio-bd71828.c
@@ -10,16 +10,6 @@
#define GPIO_OUT_REG(off) (BD71828_REG_GPIO_CTRL1 + (off))
#define HALL_GPIO_OFFSET 3
-/*
- * These defines can be removed when
- * "gpio: Add definition for GPIO direction"
- * (9208b1e77d6e8e9776f34f46ef4079ecac9c3c25 in GPIO tree) gets merged,
- */
-#ifndef GPIO_LINE_DIRECTION_IN
- #define GPIO_LINE_DIRECTION_IN 1
- #define GPIO_LINE_DIRECTION_OUT 0
-#endif
-
struct bd71828_gpio {
struct rohm_regmap_dev chip;
struct gpio_chip gpio;
diff --git a/drivers/gpio/gpio-sifive.c b/drivers/gpio/gpio-sifive.c
index 147a1bd04515..c54dd08f2cbf 100644
--- a/drivers/gpio/gpio-sifive.c
+++ b/drivers/gpio/gpio-sifive.c
@@ -35,7 +35,7 @@ struct sifive_gpio {
void __iomem *base;
struct gpio_chip gc;
struct regmap *regs;
- u32 irq_state;
+ unsigned long irq_state;
unsigned int trigger[SIFIVE_GPIO_MAX];
unsigned int irq_parent[SIFIVE_GPIO_MAX];
};
@@ -94,7 +94,7 @@ static void sifive_gpio_irq_enable(struct irq_data *d)
spin_unlock_irqrestore(&gc->bgpio_lock, flags);
/* Enable interrupts */
- assign_bit(offset, (unsigned long *)&chip->irq_state, 1);
+ assign_bit(offset, &chip->irq_state, 1);
sifive_gpio_set_ie(chip, offset);
}
@@ -104,7 +104,7 @@ static void sifive_gpio_irq_disable(struct irq_data *d)
struct sifive_gpio *chip = gpiochip_get_data(gc);
int offset = irqd_to_hwirq(d) % SIFIVE_GPIO_MAX;
- assign_bit(offset, (unsigned long *)&chip->irq_state, 0);
+ assign_bit(offset, &chip->irq_state, 0);
sifive_gpio_set_ie(chip, offset);
irq_chip_disable_parent(d);
}
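
The sifive hunk gives irq_state the unsigned long type the kernel bitops operate on; casting a u32's address to (unsigned long *) made the helpers read and write a full long, eight bytes on 64-bit kernels, past the end of the field. A userspace sketch of the word-sized update (the helper mirrors, but is not, the kernel's assign_bit()):

    #include <stdio.h>
    #include <limits.h>

    static void assign_bit_ul(unsigned long *word, unsigned int bit,
                              int value)
    {
            if (value)
                    *word |= 1UL << bit;
            else
                    *word &= ~(1UL << bit);
    }

    int main(void)
    {
            unsigned long irq_state = 0;    /* full word, as in the fix */

            assign_bit_ul(&irq_state, 3, 1);
            printf("irq_state=%#lx, word width=%zu bits\n",
                   irq_state, sizeof(long) * CHAR_BIT);
            return 0;
    }
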
diff --git a/drivers/gpio/gpio-xilinx.c b/drivers/gpio/gpio-xilinx.c
index a9748b5198e6..67f9f82e0db0 100644
--- a/drivers/gpio/gpio-xilinx.c
+++ b/drivers/gpio/gpio-xilinx.c
@@ -147,9 +147,10 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
for (i = 0; i < gc->ngpio; i++) {
if (*mask == 0)
break;
+ /* Once finished with an index, write it out to the register */
if (index != xgpio_index(chip, i)) {
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, i),
+ index * XGPIO_CHANNEL_OFFSET,
chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
index = xgpio_index(chip, i);
@@ -165,7 +166,7 @@ static void xgpio_set_multiple(struct gpio_chip *gc, unsigned long *mask,
}
xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
- xgpio_regoffset(chip, i), chip->gpio_state[index]);
+ index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index 753283486037..4d0106ceeba7 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -3035,13 +3035,33 @@ EXPORT_SYMBOL_GPL(gpiochip_free_own_desc);
* rely on gpio_request() having been called beforehand.
*/
-static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
- enum pin_config_param mode)
+static int gpio_do_set_config(struct gpio_chip *gc, unsigned int offset,
+ unsigned long config)
{
if (!gc->set_config)
return -ENOTSUPP;
- return gc->set_config(gc, offset, mode);
+ return gc->set_config(gc, offset, config);
+}
+
+static int gpio_set_config(struct gpio_chip *gc, unsigned int offset,
+ enum pin_config_param mode)
+{
+ unsigned long config;
+ unsigned arg;
+
+ switch (mode) {
+ case PIN_CONFIG_BIAS_PULL_DOWN:
+ case PIN_CONFIG_BIAS_PULL_UP:
+ arg = 1;
+ break;
+
+ default:
+ arg = 0;
+ }
+
+ config = PIN_CONF_PACKED(mode, arg);
+ return gpio_do_set_config(gc, offset, config);
}
static int gpio_set_bias(struct gpio_chip *chip, struct gpio_desc *desc)
@@ -3277,7 +3297,7 @@ int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce)
chip = desc->gdev->chip;
config = pinconf_to_config_packed(PIN_CONFIG_INPUT_DEBOUNCE, debounce);
- return gpio_set_config(chip, gpio_chip_hwgpio(desc), config);
+ return gpio_do_set_config(chip, gpio_chip_hwgpio(desc), config);
}
EXPORT_SYMBOL_GPL(gpiod_set_debounce);
@@ -3311,7 +3331,7 @@ int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
packed = pinconf_to_config_packed(PIN_CONFIG_PERSIST_STATE,
!transitory);
gpio = gpio_chip_hwgpio(desc);
- rc = gpio_set_config(chip, gpio, packed);
+ rc = gpio_do_set_config(chip, gpio, packed);
if (rc == -ENOTSUPP) {
dev_dbg(&desc->gdev->dev, "Persistence not supported for GPIO %d\n",
gpio);
diff --git a/drivers/gpu/drm/Kconfig b/drivers/gpu/drm/Kconfig
index d0aa6cff2e02..43594978958e 100644
--- a/drivers/gpu/drm/Kconfig
+++ b/drivers/gpu/drm/Kconfig
@@ -54,9 +54,6 @@ config DRM_DEBUG_MM
If in doubt, say "N".
-config DRM_EXPORT_FOR_TESTS
- bool
-
config DRM_DEBUG_SELFTEST
tristate "kselftests for DRM"
depends on DRM
@@ -389,6 +386,8 @@ source "drivers/gpu/drm/aspeed/Kconfig"
source "drivers/gpu/drm/mcde/Kconfig"
+source "drivers/gpu/drm/tidss/Kconfig"
+
# Keep legacy drivers last
menuconfig DRM_LEGACY
@@ -468,6 +467,9 @@ config DRM_SAVAGE
endif # DRM_LEGACY
+config DRM_EXPORT_FOR_TESTS
+ bool
+
# Separate option because drm_panel_orientation_quirks.c is shared with fbdev
config DRM_PANEL_ORIENTATION_QUIRKS
tristate
diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile
index 6493088a0fdd..7f72ef5e7811 100644
--- a/drivers/gpu/drm/Makefile
+++ b/drivers/gpu/drm/Makefile
@@ -39,7 +39,8 @@ obj-$(CONFIG_DRM_VRAM_HELPER) += drm_vram_helper.o
drm_ttm_helper-y := drm_gem_ttm_helper.o
obj-$(CONFIG_DRM_TTM_HELPER) += drm_ttm_helper.o
-drm_kms_helper-y := drm_crtc_helper.o drm_dp_helper.o drm_dsc.o drm_probe_helper.o \
+drm_kms_helper-y := drm_bridge_connector.o drm_crtc_helper.o drm_dp_helper.o \
+ drm_dsc.o drm_probe_helper.o \
drm_plane_helper.o drm_dp_mst_topology.o drm_atomic_helper.o \
drm_kms_helper_common.o drm_dp_dual_mode_helper.o \
drm_simple_kms_helper.o drm_modeset_helper.o \
@@ -122,3 +123,4 @@ obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
obj-$(CONFIG_DRM_MCDE) += mcde/
+obj-$(CONFIG_DRM_TIDSS) += tidss/
diff --git a/drivers/gpu/drm/amd/acp/Kconfig b/drivers/gpu/drm/amd/acp/Kconfig
index 13340f353ea8..216d932a7831 100644
--- a/drivers/gpu/drm/amd/acp/Kconfig
+++ b/drivers/gpu/drm/amd/acp/Kconfig
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: MIT
menu "ACP (Audio CoProcessor) Configuration"
+ depends on DRM_AMDGPU
config DRM_AMD_ACP
bool "Enable AMD Audio CoProcessor IP support"
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index da3bcff61b97..2992a49ad4a5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -579,6 +579,7 @@ struct amdgpu_asic_funcs {
/* invalidate hdp read cache */
void (*invalidate_hdp)(struct amdgpu_device *adev,
struct amdgpu_ring *ring);
+ void (*reset_hdp_ras_error_count)(struct amdgpu_device *adev);
/* check if the asic needs a full reset of if soft reset will work */
bool (*need_full_reset)(struct amdgpu_device *adev);
/* initialize doorbell layout for specific asic*/
@@ -969,6 +970,7 @@ struct amdgpu_device {
int pstate;
/* enable runtime pm on the device */
bool runpm;
+ bool in_runpm;
bool pm_sysfs_en;
bool ucode_sysfs_en;
@@ -992,6 +994,8 @@ uint32_t amdgpu_mm_rreg(struct amdgpu_device *adev, uint32_t reg,
uint32_t acc_flags);
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags);
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags);
void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value);
uint8_t amdgpu_mm_rreg8(struct amdgpu_device *adev, uint32_t offset);
@@ -1174,9 +1178,9 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
int amdgpu_device_ip_suspend(struct amdgpu_device *adev);
int amdgpu_device_suspend(struct drm_device *dev, bool fbcon);
int amdgpu_device_resume(struct drm_device *dev, bool fbcon);
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc);
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc);
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc);
long amdgpu_kms_compat_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index 8609287620ea..abfbe89e805e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -29,6 +29,7 @@
#include <linux/module.h>
#include <linux/dma-buf.h>
#include "amdgpu_xgmi.h"
+#include <uapi/linux/kfd_ioctl.h>
static const unsigned int compute_vmid_bitmap = 0xFF00;
@@ -126,7 +127,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
/* this is going to have a few of the MSBs set that we need to
* clear
*/
- bitmap_complement(gpu_resources.queue_bitmap,
+ bitmap_complement(gpu_resources.cp_queue_bitmap,
adev->gfx.mec.queue_bitmap,
KGD_MAX_QUEUES);
@@ -137,7 +138,7 @@ void amdgpu_amdkfd_device_init(struct amdgpu_device *adev)
* adev->gfx.mec.num_pipe_per_mec
* adev->gfx.mec.num_queue_per_pipe;
for (i = last_valid_bit; i < KGD_MAX_QUEUES; ++i)
- clear_bit(i, gpu_resources.queue_bitmap);
+ clear_bit(i, gpu_resources.cp_queue_bitmap);
amdgpu_doorbell_get_kfd_info(adev,
&gpu_resources.doorbell_physical_address,
@@ -178,18 +179,18 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
kgd2kfd_interrupt(adev->kfd.dev, ih_ring_entry);
}
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev)
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm)
{
if (adev->kfd.dev)
- kgd2kfd_suspend(adev->kfd.dev);
+ kgd2kfd_suspend(adev->kfd.dev, run_pm);
}
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm)
{
int r = 0;
if (adev->kfd.dev)
- r = kgd2kfd_resume(adev->kfd.dev);
+ r = kgd2kfd_resume(adev->kfd.dev, run_pm);
return r;
}
@@ -224,7 +225,7 @@ void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
- void **cpu_ptr, bool mqd_gfx9)
+ void **cpu_ptr, bool cp_mqd_gfx9)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
struct amdgpu_bo *bo = NULL;
@@ -240,8 +241,8 @@ int amdgpu_amdkfd_alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
bp.type = ttm_bo_type_kernel;
bp.resv = NULL;
- if (mqd_gfx9)
- bp.flags |= AMDGPU_GEM_CREATE_MQD_GFX9;
+ if (cp_mqd_gfx9)
+ bp.flags |= AMDGPU_GEM_CREATE_CP_MQD_GFX9;
r = amdgpu_bo_create(adev, &bp, &bo);
if (r) {
@@ -402,7 +403,7 @@ void amdgpu_amdkfd_get_local_mem_info(struct kgd_dev *kgd,
if (amdgpu_sriov_vf(adev))
mem_info->mem_clk_max = adev->clock.default_mclk / 100;
- else if (adev->powerplay.pp_funcs) {
+ else if (adev->pm.dpm_enabled) {
if (amdgpu_emu_mode == 1)
mem_info->mem_clk_max = 0;
else
@@ -427,7 +428,7 @@ uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct kgd_dev *kgd)
/* the sclk is in quantas of 10kHz */
if (amdgpu_sriov_vf(adev))
return adev->clock.default_sclk / 100;
- else if (adev->powerplay.pp_funcs)
+ else if (adev->pm.dpm_enabled)
return amdgpu_dpm_get_sclk(adev, false) / 100;
else
return 100;
@@ -501,10 +502,11 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
metadata_size, &metadata_flags);
if (flags) {
*flags = (bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM
+ : KFD_IOC_ALLOC_MEM_FLAGS_GTT;
if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)
- *flags |= ALLOC_MEM_FLAGS_PUBLIC;
+ *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC;
}
out_put:
@@ -525,6 +527,14 @@ uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd)
return adev->gmc.xgmi.hive_id;
}
+
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ return adev->unique_id;
+}
+
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src)
{
struct amdgpu_device *peer_adev = (struct amdgpu_device *)src;
@@ -647,13 +657,9 @@ int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct kgd_dev *kgd, uint16_t vmid)
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct kgd_dev *kgd, uint16_t pasid)
{
struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
- uint32_t flush_type = 0;
+ const uint32_t flush_type = 0;
bool all_hub = false;
- if (adev->gmc.xgmi.num_physical_nodes &&
- adev->asic_type == CHIP_VEGA20)
- flush_type = 2;
-
if (adev->family == AMDGPU_FAMILY_AI)
all_hub = true;
@@ -677,6 +683,11 @@ void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo)
{
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ return 0;
+}
+
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
@@ -713,11 +724,11 @@ void kgd2kfd_exit(void)
{
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index 47b0f2957d1f..13feb313e9b3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -96,6 +96,7 @@ struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
struct mm_struct *mm);
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
struct amdkfd_process_info {
/* List head of all VMs that belong to a KFD process */
@@ -122,8 +123,8 @@ struct amdkfd_process_info {
int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);
-void amdgpu_amdkfd_suspend(struct amdgpu_device *adev);
-int amdgpu_amdkfd_resume(struct amdgpu_device *adev);
+void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
+int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
@@ -171,6 +172,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct kgd_dev *kgd, int dma_buf_fd,
uint32_t *flags);
uint64_t amdgpu_amdkfd_get_vram_usage(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_hive_id(struct kgd_dev *kgd);
+uint64_t amdgpu_amdkfd_get_unique_id(struct kgd_dev *kgd);
uint64_t amdgpu_amdkfd_get_mmio_remap_phys_addr(struct kgd_dev *kgd);
uint32_t amdgpu_amdkfd_get_num_gws(struct kgd_dev *kgd);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct kgd_dev *dst, struct kgd_dev *src);
@@ -240,6 +242,9 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_memory_limit(struct amdgpu_bo *bo);
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config);
+
/* KGD2KFD callbacks */
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
@@ -249,8 +254,8 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
struct drm_device *ddev,
const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
-void kgd2kfd_suspend(struct kfd_dev *kfd);
-int kgd2kfd_resume(struct kfd_dev *kfd);
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
index 4bcc175a149d..6529caca88fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c
@@ -79,7 +79,7 @@ static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev,
dev_warn(adev->dev,
"Invalid sdma engine id (%d), using engine id 0\n",
engine_id);
- /* fall through */
+ fallthrough;
case 0:
sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, 0,
mmSDMA0_RLC0_RB_CNTL) - mmSDMA0_RLC0_RB_CNTL;
@@ -319,7 +319,6 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
index a7b17c8deb00..4ec6d0c03201 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c
@@ -42,38 +42,6 @@ enum hqd_dequeue_request_type {
SAVE_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-#if 0
-/* TODO - confirm REG_GET_FIELD x2, should be OK as is... but
- * MC_ARB_RAMCFG register doesn't exist on Vega10 - initial amdgpu
- * changes commented out related code, doing the same here for now but
- * need to sync with Ken et al
- */
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-#endif
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -805,7 +773,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
- .get_tile_config = amdgpu_amdkfd_get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 8f052e98a3c6..0b7e78748540 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -84,31 +84,6 @@ union TCP_WATCH_CNTL_BITS {
float f32All;
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -730,7 +705,6 @@ const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
.address_watch_get_offset = kgd_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
.read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19a10db93d68..ccd635b812b5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -41,31 +41,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-static int get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
- config->num_banks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFBANK);
- config->num_ranks = REG_GET_FIELD(adev->gfx.config.mc_arb_ramcfg,
- MC_ARB_RAMCFG, NOOFRANKS);
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -676,6 +651,5 @@ const struct kfd2kgd_calls gfx_v8_kfd2kgd = {
.get_atc_vmid_pasid_mapping_info =
get_atc_vmid_pasid_mapping_info,
.set_scratch_backing_va = set_scratch_backing_va,
- .get_tile_config = get_tile_config,
.set_vm_context_page_table_base = set_vm_context_page_table_base,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 8562afe5b761..df841c2ac5e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -48,28 +48,6 @@ enum hqd_dequeue_request_type {
RESET_WAVES
};
-
-/* Because of REG_GET_FIELD() being used, we put this function in the
- * asic specific file.
- */
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
-
- config->gb_addr_config = adev->gfx.config.gb_addr_config;
-
- config->tile_config_ptr = adev->gfx.config.tile_mode_array;
- config->num_tile_configs =
- ARRAY_SIZE(adev->gfx.config.tile_mode_array);
- config->macro_tile_config_ptr =
- adev->gfx.config.macrotile_mode_array;
- config->num_macro_tile_configs =
- ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
-
- return 0;
-}
-
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
return (struct amdgpu_device *)kgd;
@@ -736,7 +714,7 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = {
.address_watch_get_offset = kgd_gfx_v9_address_watch_get_offset,
.get_atc_vmid_pasid_mapping_info =
kgd_gfx_v9_get_atc_vmid_pasid_mapping_info,
- .get_tile_config = kgd_gfx_v9_get_tile_config,
.set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base,
.get_hive_id = amdgpu_amdkfd_get_hive_id,
+ .get_unique_id = amdgpu_amdkfd_get_unique_id,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
index 63d3e6683dfe..aedf67d57449 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h
@@ -60,5 +60,3 @@ uint32_t kgd_gfx_v9_address_watch_get_offset(struct kgd_dev *kgd,
bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct kgd_dev *kgd,
uint8_t vmid, uint16_t *p_pasid);
-int kgd_gfx_v9_get_tile_config(struct kgd_dev *kgd,
- struct tile_config *config);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index fa8ac9d19a7a..9dff792c9290 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -29,6 +29,7 @@
#include "amdgpu_vm.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_dma_buf.h"
+#include <uapi/linux/kfd_ioctl.h>
/* BO flag to indicate a KFD userptr BO */
#define AMDGPU_AMDKFD_USERPTR_BO (1ULL << 63)
@@ -276,6 +277,42 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
return 0;
}
+int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
+{
+ struct amdgpu_bo *root = bo;
+ struct amdgpu_vm_bo_base *vm_bo;
+ struct amdgpu_vm *vm;
+ struct amdkfd_process_info *info;
+ struct amdgpu_amdkfd_fence *ef;
+ int ret;
+
+	/* We can always get vm_bo from the root PD BO. */
+ while (root->parent)
+ root = root->parent;
+
+ vm_bo = root->vm_bo;
+ if (!vm_bo)
+ return 0;
+
+ vm = vm_bo->vm;
+ if (!vm)
+ return 0;
+
+ info = vm->process_info;
+ if (!info || !info->eviction_fence)
+ return 0;
+
+ ef = container_of(dma_fence_get(&info->eviction_fence->base),
+ struct amdgpu_amdkfd_fence, base);
+
+ BUG_ON(!dma_resv_trylock(bo->tbo.base.resv));
+ ret = amdgpu_amdkfd_remove_eviction_fence(bo, ef);
+ dma_resv_unlock(bo->tbo.base.resv);
+
+ dma_fence_put(&ef->base);
+ return ret;
+}
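The helper above walks from any page-table BO to its root PD, and from there to the per-process eviction fence, which it removes from the BO's reservation object. A hypothetical caller sketch; the trylock/BUG_ON pairing assumes the BO is already on its release path, where the reservation cannot be contended:

/* Hypothetical caller sketch (assumption: invoked from the BO release
 * path, where nobody else can hold the reservation). */
static void example_release_pt_bo(struct amdgpu_bo *bo)
{
	/* drop the KFD eviction fence before the page-table BO goes away */
	amdgpu_amdkfd_remove_fence_on_pt_pd_bos(bo);
	amdgpu_bo_unref(&bo);
}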
+
static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
bool wait)
{
@@ -364,18 +401,18 @@ static int vm_update_pds(struct amdgpu_vm *vm, struct amdgpu_sync *sync)
static uint64_t get_pte_flags(struct amdgpu_device *adev, struct kgd_mem *mem)
{
struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
- bool coherent = mem->alloc_flags & ALLOC_MEM_FLAGS_COHERENT;
+ bool coherent = mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_COHERENT;
uint32_t mapping_flags;
mapping_flags = AMDGPU_VM_PAGE_READABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_WRITABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE)
mapping_flags |= AMDGPU_VM_PAGE_WRITEABLE;
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_EXECUTABLE)
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE)
mapping_flags |= AMDGPU_VM_PAGE_EXECUTABLE;
switch (adev->asic_type) {
case CHIP_ARCTURUS:
- if (mem->alloc_flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (mem->alloc_flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
if (bo_adev == adev)
mapping_flags |= coherent ?
AMDGPU_VM_MTYPE_CC : AMDGPU_VM_MTYPE_RW;
@@ -847,9 +884,9 @@ static int process_sync_pds_resv(struct amdkfd_process_info *process_info,
vm_list_node) {
struct amdgpu_bo *pd = peer_vm->root.base.bo;
- ret = amdgpu_sync_resv(NULL,
- sync, pd->tbo.base.resv,
- AMDGPU_FENCE_OWNER_KFD, false);
+ ret = amdgpu_sync_resv(NULL, sync, pd->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_FENCE_OWNER_KFD);
if (ret)
return ret;
}
@@ -1044,6 +1081,8 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
list_del(&vm->vm_list_node);
mutex_unlock(&process_info->lock);
+ vm->process_info = NULL;
+
/* Release per-process resources when last compute VM is destroyed */
if (!process_info->n_vms) {
WARN_ON(!list_empty(&process_info->kfd_bo_list));
@@ -1122,24 +1161,24 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
/*
* Check on which domain to allocate BO
*/
- if (flags & ALLOC_MEM_FLAGS_VRAM) {
+ if (flags & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_VRAM;
alloc_flags = AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
- alloc_flags |= (flags & ALLOC_MEM_FLAGS_PUBLIC) ?
+ alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ?
AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED :
AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
- } else if (flags & ALLOC_MEM_FLAGS_GTT) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) {
domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_flags = 0;
- } else if (flags & ALLOC_MEM_FLAGS_USERPTR) {
+ } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
alloc_flags = 0;
if (!offset || !*offset)
return -EINVAL;
user_addr = untagged_addr(*offset);
- } else if (flags & (ALLOC_MEM_FLAGS_DOORBELL |
- ALLOC_MEM_FLAGS_MMIO_REMAP)) {
+ } else if (flags & (KFD_IOC_ALLOC_MEM_FLAGS_DOORBELL |
+ KFD_IOC_ALLOC_MEM_FLAGS_MMIO_REMAP)) {
domain = AMDGPU_GEM_DOMAIN_GTT;
alloc_domain = AMDGPU_GEM_DOMAIN_CPU;
bo_type = ttm_bo_type_sg;
@@ -1160,7 +1199,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
}
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
- (*mem)->aql_queue = !!(flags & ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
+ (*mem)->aql_queue = !!(flags & KFD_IOC_ALLOC_MEM_FLAGS_AQL_QUEUE_MEM);
/* Workaround for AQL queue wraparound bug. Map the same
* memory twice. That means we only actually allocate half
@@ -1642,10 +1681,12 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct kgd_dev *kgd,
INIT_LIST_HEAD(&(*mem)->bo_va_list);
mutex_init(&(*mem)->lock);
+
(*mem)->alloc_flags =
((bo->preferred_domains & AMDGPU_GEM_DOMAIN_VRAM) ?
- ALLOC_MEM_FLAGS_VRAM : ALLOC_MEM_FLAGS_GTT) |
- ALLOC_MEM_FLAGS_WRITABLE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ KFD_IOC_ALLOC_MEM_FLAGS_VRAM : KFD_IOC_ALLOC_MEM_FLAGS_GTT)
+ | KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
(*mem)->bo = amdgpu_bo_ref(bo);
(*mem)->va = va;
@@ -2204,3 +2245,25 @@ int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem)
kfree(mem);
return 0;
}
+
+/* Returns GPU-specific tiling mode information */
+int amdgpu_amdkfd_get_tile_config(struct kgd_dev *kgd,
+ struct tile_config *config)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ config->gb_addr_config = adev->gfx.config.gb_addr_config;
+ config->tile_config_ptr = adev->gfx.config.tile_mode_array;
+ config->num_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.tile_mode_array);
+ config->macro_tile_config_ptr =
+ adev->gfx.config.macrotile_mode_array;
+ config->num_macro_tile_configs =
+ ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
+
+ /* Those values are not set from GFX9 onwards */
+ config->num_banks = adev->gfx.config.num_banks;
+ config->num_ranks = adev->gfx.config.num_ranks;
+
+ return 0;
+}
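This single helper replaces the per-ASIC get_tile_config copies removed above; from GFX9 onwards num_banks and num_ranks simply read back as zero from the cached gfx config. A sketch of a hypothetical consumer:

/* Hypothetical consumer sketch: fill tiling info from the common
 * helper instead of a per-ASIC kfd2kgd hook. */
static int example_fill_tile_info(struct kgd_dev *kgd)
{
	struct tile_config config = {};

	if (amdgpu_amdkfd_get_tile_config(kgd, &config))
		return -EINVAL;

	/* config.num_banks / config.num_ranks are 0 from GFX9 onwards */
	return config.num_tile_configs ? 0 : -ENOENT;
}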
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
index a62cbc8199de..f355d9a752d2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_connectors.c
@@ -1461,6 +1461,20 @@ static enum drm_mode_status amdgpu_connector_dp_mode_valid(struct drm_connector
return MODE_OK;
}
+static int
+amdgpu_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_connector *amdgpu_connector = to_amdgpu_connector(connector);
+ int r = 0;
+
+ if (amdgpu_connector->ddc_bus->has_aux) {
+ amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
+ r = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
+ }
+
+ return r;
+}
+
static const struct drm_connector_helper_funcs amdgpu_connector_dp_helper_funcs = {
.get_modes = amdgpu_connector_dp_get_modes,
.mode_valid = amdgpu_connector_dp_mode_valid,
@@ -1475,6 +1489,7 @@ static const struct drm_connector_funcs amdgpu_connector_dp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
@@ -1485,6 +1500,7 @@ static const struct drm_connector_funcs amdgpu_connector_edp_funcs = {
.early_unregister = amdgpu_connector_unregister,
.destroy = amdgpu_connector_destroy,
.force = amdgpu_connector_dvi_force,
+ .late_register = amdgpu_connector_late_register,
};
void
@@ -1931,7 +1947,6 @@ amdgpu_connector_add(struct amdgpu_device *adev,
connector->polled = DRM_CONNECTOR_POLL_HPD;
connector->display_info.subpixel_order = subpixel_order;
- drm_connector_register(connector);
if (has_aux)
amdgpu_atombios_dp_aux_init(amdgpu_connector);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index a52a084158b1..af91627b19b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -28,6 +28,7 @@
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
@@ -415,7 +416,9 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
/* Don't move this buffer if we have depleted our allowance
* to move it. Don't move anything if the threshold is zero.
*/
- if (p->bytes_moved < p->bytes_moved_threshold) {
+ if (p->bytes_moved < p->bytes_moved_threshold &&
+ (!bo->tbo.base.dma_buf ||
+ list_empty(&bo->tbo.base.dma_buf->attachments))) {
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
(bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
/* And don't move a CPU_ACCESS_REQUIRED BO to limited
@@ -651,16 +654,19 @@ out:
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_bo_list_entry *e;
int r;
list_for_each_entry(e, &p->validated, tv.head) {
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
struct dma_resv *resv = bo->tbo.base.resv;
+ enum amdgpu_sync_mode sync_mode;
- r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, p->filp,
- amdgpu_bo_explicit_sync(bo));
-
+ sync_mode = amdgpu_bo_explicit_sync(bo) ?
+ AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
+ r = amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode,
+ &fpriv->vm);
if (r)
return r;
}
@@ -1202,7 +1208,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct drm_sched_entity *entity = p->entity;
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
uint64_t seq;
@@ -1211,7 +1216,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, entity, p->filp);
+ r = drm_sched_job_init(&job->base, entity, &fpriv->vm);
if (r)
goto error_unlock;
@@ -1255,9 +1260,6 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 94a6c42f29ea..6ed36a2c5f73 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -61,12 +61,24 @@ static int amdgpu_ctx_priority_permit(struct drm_file *filp,
return -EACCES;
}
+static enum gfx_pipe_priority amdgpu_ctx_sched_prio_to_compute_prio(enum drm_sched_priority prio)
+{
+ switch (prio) {
+ case DRM_SCHED_PRIORITY_HIGH_HW:
+ case DRM_SCHED_PRIORITY_KERNEL:
+ return AMDGPU_GFX_PIPE_PRIO_HIGH;
+ default:
+ return AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ }
+}
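Only two hardware levels exist for the compute pipes, so the mapping above collapses the scheduler priorities. An illustration with values from this hunk:

/* Illustration: kernel and high-HW priorities select the high pipe,
 * everything else stays on the normal one. */
enum gfx_pipe_priority hi = amdgpu_ctx_sched_prio_to_compute_prio(DRM_SCHED_PRIORITY_KERNEL);
enum gfx_pipe_priority lo = amdgpu_ctx_sched_prio_to_compute_prio(DRM_SCHED_PRIORITY_NORMAL);
/* hi == AMDGPU_GFX_PIPE_PRIO_HIGH, lo == AMDGPU_GFX_PIPE_PRIO_NORMAL */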
+
static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const u32 ring)
{
struct amdgpu_device *adev = ctx->adev;
struct amdgpu_ctx_entity *entity;
struct drm_gpu_scheduler **scheds = NULL, *sched = NULL;
unsigned num_scheds = 0;
+ enum gfx_pipe_priority hw_prio;
enum drm_sched_priority priority;
int r;
@@ -79,46 +91,51 @@ static int amdgpu_ctx_init_entity(struct amdgpu_ctx *ctx, const u32 hw_ip, const
priority = (ctx->override_priority == DRM_SCHED_PRIORITY_UNSET) ?
ctx->init_priority : ctx->override_priority;
switch (hw_ip) {
- case AMDGPU_HW_IP_GFX:
- sched = &adev->gfx.gfx_ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_COMPUTE:
- scheds = adev->gfx.compute_sched;
- num_scheds = adev->gfx.num_compute_sched;
- break;
- case AMDGPU_HW_IP_DMA:
- scheds = adev->sdma.sdma_sched;
- num_scheds = adev->sdma.num_sdma_sched;
- break;
- case AMDGPU_HW_IP_UVD:
- sched = &adev->uvd.inst[0].ring.sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCE:
- sched = &adev->vce.ring[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- sched = &adev->uvd.inst[0].ring_enc[0].sched;
- scheds = &sched;
- num_scheds = 1;
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- scheds = adev->vcn.vcn_dec_sched;
- num_scheds = adev->vcn.num_vcn_dec_sched;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- scheds = adev->vcn.vcn_enc_sched;
- num_scheds = adev->vcn.num_vcn_enc_sched;
- break;
- case AMDGPU_HW_IP_VCN_JPEG:
- scheds = adev->jpeg.jpeg_sched;
- num_scheds = adev->jpeg.num_jpeg_sched;
- break;
+ case AMDGPU_HW_IP_GFX:
+ sched = &adev->gfx.gfx_ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_COMPUTE:
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ break;
+ case AMDGPU_HW_IP_DMA:
+ scheds = adev->sdma.sdma_sched;
+ num_scheds = adev->sdma.num_sdma_sched;
+ break;
+ case AMDGPU_HW_IP_UVD:
+ sched = &adev->uvd.inst[0].ring.sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCE:
+ sched = &adev->vce.ring[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_UVD_ENC:
+ sched = &adev->uvd.inst[0].ring_enc[0].sched;
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_DEC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_dec_sched,
+ adev->vcn.num_vcn_dec_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_ENC:
+ sched = drm_sched_pick_best(adev->vcn.vcn_enc_sched,
+ adev->vcn.num_vcn_enc_sched);
+ scheds = &sched;
+ num_scheds = 1;
+ break;
+ case AMDGPU_HW_IP_VCN_JPEG:
+ scheds = adev->jpeg.jpeg_sched;
+ num_scheds = adev->jpeg.num_jpeg_sched;
+ break;
}
r = drm_sched_entity_init(&entity->entity, priority, scheds, num_scheds,
@@ -502,6 +519,29 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
return fence;
}
+static void amdgpu_ctx_set_entity_priority(struct amdgpu_ctx *ctx,
+ struct amdgpu_ctx_entity *aentity,
+ int hw_ip,
+ enum drm_sched_priority priority)
+{
+ struct amdgpu_device *adev = ctx->adev;
+ enum gfx_pipe_priority hw_prio;
+ struct drm_gpu_scheduler **scheds = NULL;
+ unsigned num_scheds;
+
+ /* set sw priority */
+ drm_sched_entity_set_priority(&aentity->entity, priority);
+
+ /* set hw priority */
+ if (hw_ip == AMDGPU_HW_IP_COMPUTE) {
+ hw_prio = amdgpu_ctx_sched_prio_to_compute_prio(priority);
+ scheds = adev->gfx.compute_prio_sched[hw_prio];
+ num_scheds = adev->gfx.num_compute_sched[hw_prio];
+ drm_sched_entity_modify_sched(&aentity->entity, scheds,
+ num_scheds);
+ }
+}
+
void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
enum drm_sched_priority priority)
{
@@ -514,13 +554,11 @@ void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
ctx->init_priority : ctx->override_priority;
for (i = 0; i < AMDGPU_HW_IP_NUM; ++i) {
for (j = 0; j < amdgpu_ctx_num_entities[i]; ++j) {
- struct drm_sched_entity *entity;
-
if (!ctx->entities[i][j])
continue;
- entity = &ctx->entities[i][j]->entity;
- drm_sched_entity_set_priority(entity, ctx_prio);
+ amdgpu_ctx_set_entity_priority(ctx, ctx->entities[i][j],
+ i, ctx_prio);
}
}
}
@@ -628,20 +666,53 @@ void amdgpu_ctx_mgr_fini(struct amdgpu_ctx_mgr *mgr)
mutex_destroy(&mgr->lock);
}
+
+static void amdgpu_ctx_init_compute_sched(struct amdgpu_device *adev)
+{
+ int num_compute_sched_normal = 0;
+ int num_compute_sched_high = AMDGPU_MAX_COMPUTE_RINGS - 1;
+ int i;
+
+	/* use one drm sched array, gfx.compute_sched, to store both
+	 * high- and normal-priority drm compute schedulers */
+ for (i = 0; i < adev->gfx.num_compute_rings; i++) {
+ if (!adev->gfx.compute_ring[i].has_high_prio)
+ adev->gfx.compute_sched[num_compute_sched_normal++] =
+ &adev->gfx.compute_ring[i].sched;
+ else
+ adev->gfx.compute_sched[num_compute_sched_high--] =
+ &adev->gfx.compute_ring[i].sched;
+ }
+
+ /* compute ring only has two priority for now */
+ i = AMDGPU_GFX_PIPE_PRIO_NORMAL;
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+
+ i = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ if (num_compute_sched_high == (AMDGPU_MAX_COMPUTE_RINGS - 1)) {
+		/* When compute has no high-priority rings, fall back to
+		 * the normal-priority sched array for both levels.
+		 */
+ adev->gfx.compute_prio_sched[i] = &adev->gfx.compute_sched[0];
+ adev->gfx.num_compute_sched[i] = num_compute_sched_normal;
+ } else {
+ adev->gfx.compute_prio_sched[i] =
+ &adev->gfx.compute_sched[num_compute_sched_high - 1];
+ adev->gfx.num_compute_sched[i] =
+ adev->gfx.num_compute_rings - num_compute_sched_normal;
+ }
+}
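The loop above packs one array from both ends: normal-priority schedulers grow from index 0, high-priority ones from the last slot downwards. A worked illustration (ring layout assumed for the example, AMDGPU_MAX_COMPUTE_RINGS == 8):

/* Suppose rings r0..r7 exist and r0, r4 have has_high_prio set.
 * After the loop:
 *
 *   index:  0   1   2   3   4   5   6   7
 *   entry: r1  r2  r3  r5  r6  r7  r4  r0
 *
 * num_compute_sched_normal == 6 (the normal slice starts at index 0)
 * and the high-priority entries occupy the tail of the array.
 */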
+
void amdgpu_ctx_init_sched(struct amdgpu_device *adev)
{
int i, j;
+ amdgpu_ctx_init_compute_sched(adev);
for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
adev->gfx.gfx_sched[i] = &adev->gfx.gfx_ring[i].sched;
adev->gfx.num_gfx_sched++;
}
- for (i = 0; i < adev->gfx.num_compute_rings; i++) {
- adev->gfx.compute_sched[i] = &adev->gfx.compute_ring[i].sched;
- adev->gfx.num_compute_sched++;
- }
-
for (i = 0; i < adev->sdma.num_instances; i++) {
adev->sdma.sdma_sched[i] = &adev->sdma.instance[i].ring.sched;
adev->sdma.num_sdma_sched++;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
index f24ed9a1a3e5..c0f9a651dc06 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c
@@ -31,6 +31,9 @@
#include <drm/drm_debugfs.h>
#include "amdgpu.h"
+#include "amdgpu_pm.h"
+#include "amdgpu_dm_debugfs.h"
+#include "amdgpu_ras.h"
/**
* amdgpu_debugfs_add_files - Add simple debugfs entries
@@ -176,7 +179,7 @@ static int amdgpu_debugfs_process_reg_op(bool read, struct file *f,
} else {
r = get_user(value, (uint32_t *)buf);
if (!r)
- WREG32(*pos >> 2, value);
+ amdgpu_mm_wreg_mmio_rlc(adev, *pos >> 2, value, 0);
}
if (r) {
result = r;
@@ -781,11 +784,11 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
ssize_t result = 0;
uint32_t offset, se, sh, cu, wave, simd, thread, bank, *data;
- if (size & 3 || *pos & 3)
+ if (size > 4096 || size & 3 || *pos & 3)
return -EINVAL;
/* decode offset */
- offset = *pos & GENMASK_ULL(11, 0);
+ offset = (*pos & GENMASK_ULL(11, 0)) >> 2;
se = (*pos & GENMASK_ULL(19, 12)) >> 12;
sh = (*pos & GENMASK_ULL(27, 20)) >> 20;
cu = (*pos & GENMASK_ULL(35, 28)) >> 28;
@@ -823,7 +826,7 @@ static ssize_t amdgpu_debugfs_gpr_read(struct file *f, char __user *buf,
while (size) {
uint32_t value;
- value = data[offset++];
+ value = data[result >> 2];
r = put_user(value, (uint32_t *)buf);
if (r) {
result = r;
@@ -840,6 +843,55 @@ err:
return result;
}
+/**
+ * amdgpu_debugfs_gfxoff_write - Enable/disable GFXOFF
+ *
+ * @f: open file handle
+ * @buf: User buffer to write data from
+ * @size: Number of bytes to write
+ * @pos: Offset to seek to
+ *
+ * Write a 32-bit zero to disable or a 32-bit non-zero to enable
+ */
+static ssize_t amdgpu_debugfs_gfxoff_write(struct file *f, const char __user *buf,
+ size_t size, loff_t *pos)
+{
+ struct amdgpu_device *adev = file_inode(f)->i_private;
+ ssize_t result = 0;
+ int r;
+
+ if (size & 0x3 || *pos & 0x3)
+ return -EINVAL;
+
+ r = pm_runtime_get_sync(adev->ddev->dev);
+ if (r < 0)
+ return r;
+
+ while (size) {
+ uint32_t value;
+
+ r = get_user(value, (uint32_t *)buf);
+ if (r) {
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ return r;
+ }
+
+ amdgpu_gfx_off_ctrl(adev, value ? true : false);
+
+ result += 4;
+ buf += 4;
+ *pos += 4;
+ size -= 4;
+ }
+
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+
+ return result;
+}
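Writes to the new file must be a multiple of four bytes; each dword toggles GFXOFF. A hypothetical userspace example (the debugfs path assumes card 0 and requires root):

/* Hypothetical userspace usage of the amdgpu_gfxoff debugfs file. */
#include <fcntl.h>
#include <stdint.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/kernel/debug/dri/0/amdgpu_gfxoff", O_WRONLY);
	uint32_t enable = 0;	/* 0 disables GFXOFF, non-zero enables it */

	if (fd < 0)
		return 1;
	if (write(fd, &enable, sizeof(enable)) != sizeof(enable)) {
		close(fd);
		return 1;
	}
	close(fd);
	return 0;
}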
+
static const struct file_operations amdgpu_debugfs_regs_fops = {
.owner = THIS_MODULE,
.read = amdgpu_debugfs_regs_read,
@@ -888,6 +940,11 @@ static const struct file_operations amdgpu_debugfs_gpr_fops = {
.llseek = default_llseek
};
+static const struct file_operations amdgpu_debugfs_gfxoff_fops = {
+ .owner = THIS_MODULE,
+ .write = amdgpu_debugfs_gfxoff_write,
+};
+
static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_regs_fops,
&amdgpu_debugfs_regs_didt_fops,
@@ -897,6 +954,7 @@ static const struct file_operations *debugfs_regs[] = {
&amdgpu_debugfs_sensors_fops,
&amdgpu_debugfs_wave_fops,
&amdgpu_debugfs_gpr_fops,
+ &amdgpu_debugfs_gfxoff_fops,
};
static const char *debugfs_regs_names[] = {
@@ -908,6 +966,7 @@ static const char *debugfs_regs_names[] = {
"amdgpu_sensors",
"amdgpu_wave",
"amdgpu_gpr",
+ "amdgpu_gfxoff",
};
/**
@@ -934,18 +993,6 @@ int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev)
-{
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(debugfs_regs); i++) {
- if (adev->debugfs_regs[i]) {
- debugfs_remove(adev->debugfs_regs[i]);
- adev->debugfs_regs[i] = NULL;
- }
- }
-}
-
static int amdgpu_debugfs_test_ib(struct seq_file *m, void *data)
{
struct drm_info_node *node = (struct drm_info_node *) m->private;
@@ -1211,11 +1258,47 @@ failure:
return 0;
}
+static int amdgpu_debugfs_sclk_set(void *data, u64 val)
+{
+ int ret = 0;
+ uint32_t max_freq, min_freq;
+ struct amdgpu_device *adev = (struct amdgpu_device *)data;
+
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
+ return -EINVAL;
+
+ ret = pm_runtime_get_sync(adev->ddev->dev);
+ if (ret < 0)
+ return ret;
+
+	if (is_support_sw_smu(adev)) {
+		ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq, true);
+		if (!ret && (val > max_freq || val < min_freq))
+			ret = -EINVAL;
+		if (!ret)
+			ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val, true);
+	}
+
+	/* drop the runtime PM reference taken above on every return path */
+	pm_runtime_mark_last_busy(adev->ddev->dev);
+	pm_runtime_put_autosuspend(adev->ddev->dev);
+
+	if (ret)
+		return -EINVAL;
+
+	return 0;
+}
+
DEFINE_SIMPLE_ATTRIBUTE(fops_ib_preempt, NULL,
amdgpu_debugfs_ib_preempt, "%llu\n");
+DEFINE_SIMPLE_ATTRIBUTE(fops_sclk_set, NULL,
+ amdgpu_debugfs_sclk_set, "%llu\n");
+
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
+ int r, i;
+
adev->debugfs_preempt =
debugfs_create_file("amdgpu_preempt_ib", 0600,
adev->ddev->primary->debugfs_root, adev,
@@ -1225,24 +1308,78 @@ int amdgpu_debugfs_init(struct amdgpu_device *adev)
return -EIO;
}
+ adev->smu.debugfs_sclk =
+ debugfs_create_file("amdgpu_force_sclk", 0200,
+ adev->ddev->primary->debugfs_root, adev,
+ &fops_sclk_set);
+ if (!(adev->smu.debugfs_sclk)) {
+		DRM_ERROR("unable to create amdgpu_force_sclk debugfs file\n");
+ return -EIO;
+ }
+
+ /* Register debugfs entries for amdgpu_ttm */
+ r = amdgpu_ttm_debugfs_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to init debugfs\n");
+ return r;
+ }
+
+ r = amdgpu_debugfs_pm_init(adev);
+ if (r) {
+ DRM_ERROR("Failed to register debugfs file for dpm!\n");
+ return r;
+ }
+
+	if (amdgpu_debugfs_sa_init(adev))
+		dev_err(adev->dev, "failed to register debugfs file for SA\n");
+
+ if (amdgpu_debugfs_fence_init(adev))
+ dev_err(adev->dev, "fence debugfs file creation failed\n");
+
+ r = amdgpu_debugfs_gem_init(adev);
+ if (r)
+ DRM_ERROR("registering gem debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_regs_init(adev);
+ if (r)
+ DRM_ERROR("registering register debugfs failed (%d).\n", r);
+
+ r = amdgpu_debugfs_firmware_init(adev);
+ if (r)
+ DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
+
+#if defined(CONFIG_DRM_AMD_DC)
+ if (amdgpu_device_has_dc_support(adev)) {
+ if (dtn_debugfs_init(adev))
+			DRM_ERROR("amdgpu: failed to initialize dtn debugfs support.\n");
+ }
+#endif
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+ struct amdgpu_ring *ring = adev->rings[i];
+
+ if (!ring)
+ continue;
+
+		if (amdgpu_debugfs_ring_init(adev, ring))
+			DRM_ERROR("Failed to register debugfs file for rings!\n");
+ }
+
+ amdgpu_ras_debugfs_create_all(adev);
+
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_list,
ARRAY_SIZE(amdgpu_debugfs_list));
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev)
-{
- debugfs_remove(adev->debugfs_preempt);
-}
-
#else
int amdgpu_debugfs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev) { }
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev)
{
return 0;
}
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev) { }
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
index f289d28ad6b2..de12d1101526 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.h
@@ -32,9 +32,8 @@ struct amdgpu_debugfs {
};
int amdgpu_debugfs_regs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_regs_cleanup(struct amdgpu_device *adev);
int amdgpu_debugfs_init(struct amdgpu_device *adev);
-void amdgpu_debugfs_preempt_cleanup(struct amdgpu_device *adev);
+void amdgpu_debugfs_fini(struct amdgpu_device *adev);
int amdgpu_debugfs_add_files(struct amdgpu_device *adev,
const struct drm_info_list *files,
unsigned nfiles);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 39cd545976b7..6f469facabfb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -183,20 +183,51 @@ bool amdgpu_device_supports_baco(struct drm_device *dev)
void amdgpu_device_vram_access(struct amdgpu_device *adev, loff_t pos,
uint32_t *buf, size_t size, bool write)
{
- uint64_t last;
unsigned long flags;
+ uint32_t hi = ~0;
+ uint64_t last;
+
+#ifdef CONFIG_64BIT
+ last = min(pos + size, adev->gmc.visible_vram_size);
+ if (last > pos) {
+ void __iomem *addr = adev->mman.aper_base_kaddr + pos;
+ size_t count = last - pos;
+
+ if (write) {
+ memcpy_toio(addr, buf, count);
+ mb();
+ amdgpu_asic_flush_hdp(adev, NULL);
+ } else {
+ amdgpu_asic_invalidate_hdp(adev, NULL);
+ mb();
+ memcpy_fromio(buf, addr, count);
+ }
+
+ if (count == size)
+ return;
+
+ pos += count;
+ buf += count / 4;
+ size -= count;
+ }
+#endif
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ for (last = pos + size; pos < last; pos += 4) {
+ uint32_t tmp = pos >> 31;
- last = size - 4;
- for (last += pos; pos <= last; pos += 4) {
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, pos >> 31);
+ if (tmp != hi) {
+ WREG32_NO_KIQ(mmMM_INDEX_HI, tmp);
+ hi = tmp;
+ }
if (write)
WREG32_NO_KIQ(mmMM_DATA, *buf++);
else
*buf++ = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
}
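The rework adds a memcpy_{to,from}io() fast path over the visible-VRAM aperture on 64-bit kernels and, on the MM_INDEX fallback, takes the index-register lock once while caching the high index. An illustration of the caching, not part of the patch:

/* For a 12-byte read starting at pos = 0x7FFFFFF8, only the dword that
 * crosses the 2 GB boundary re-programs MM_INDEX_HI (hi starts as ~0,
 * so the first iteration always writes it):
 *
 *   pos = 0x7FFFFFF8 -> pos >> 31 == 0, HI written once
 *   pos = 0x7FFFFFFC -> pos >> 31 == 0, HI unchanged (cached)
 *   pos = 0x80000000 -> pos >> 31 == 1, HI re-written
 */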
/*
@@ -275,6 +306,26 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
BUG();
}
+static inline void amdgpu_mm_wreg_mmio(struct amdgpu_device *adev, uint32_t reg, uint32_t v, uint32_t acc_flags)
+{
+ trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
+
+ if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
+ writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
+ else {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
+ writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ }
+
+ if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
+ udelay(500);
+ }
+}
+
/**
* amdgpu_mm_wreg - write to a memory mapped IO register
*
@@ -288,8 +339,6 @@ void amdgpu_mm_wreg8(struct amdgpu_device *adev, uint32_t offset, uint8_t value)
void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
uint32_t acc_flags)
{
- trace_amdgpu_mm_wreg(adev->pdev->device, reg, v);
-
if (adev->asic_type >= CHIP_VEGA10 && reg == 0) {
adev->last_mm_index = v;
}
@@ -297,20 +346,26 @@ void amdgpu_mm_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
if ((acc_flags & AMDGPU_REGS_KIQ) || (!(acc_flags & AMDGPU_REGS_NO_KIQ) && amdgpu_sriov_runtime(adev)))
return amdgpu_kiq_wreg(adev, reg, v);
- if ((reg * 4) < adev->rmmio_size && !(acc_flags & AMDGPU_REGS_IDX))
- writel(v, ((void __iomem *)adev->rmmio) + (reg * 4));
- else {
- unsigned long flags;
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
+}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- writel((reg * 4), ((void __iomem *)adev->rmmio) + (mmMM_INDEX * 4));
- writel(v, ((void __iomem *)adev->rmmio) + (mmMM_DATA * 4));
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- }
+/*
+ * amdgpu_mm_wreg_mmio_rlc - write a register via MMIO or via the RLC path if in range
+ *
+ * This function is invoked only for debugfs register access.
+ */
+void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, uint32_t reg, uint32_t v,
+ uint32_t acc_flags)
+{
+ if (amdgpu_sriov_fullaccess(adev) &&
+ adev->gfx.rlc.funcs &&
+ adev->gfx.rlc.funcs->is_rlcg_access_range) {
- if (adev->asic_type >= CHIP_VEGA10 && reg == 1 && adev->last_mm_index == 0x5702C) {
- udelay(500);
+ if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg))
+ return adev->gfx.rlc.funcs->rlcg_wreg(adev, reg, v);
}
+
+ amdgpu_mm_wreg_mmio(adev, reg, v, acc_flags);
}
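Under SR-IOV full access, registers inside the RLC-G protected range must go through rlcg_wreg instead of plain MMIO. A hypothetical ASIC hook as consumed above; the range values are assumptions for illustration, not from this patch:

/* Hypothetical is_rlcg_access_range() implementation. */
#define EXAMPLE_RLCG_RANGE_START 0x2000
#define EXAMPLE_RLCG_RANGE_END   0x2400

static bool example_is_rlcg_access_range(struct amdgpu_device *adev,
					 uint32_t reg)
{
	return reg >= EXAMPLE_RLCG_RANGE_START &&
	       reg <= EXAMPLE_RLCG_RANGE_END;
}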
/**
@@ -1136,7 +1191,7 @@ static bool amdgpu_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
@@ -2344,15 +2399,16 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
}
adev->ip_blocks[i].status.hw = false;
/* handle putting the SMC in the appropriate state */
- if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
- r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
- if (r) {
- DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
- adev->mp1_state, r);
- return r;
+		if (!amdgpu_sriov_vf(adev)) {
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+ r = amdgpu_dpm_set_mp1_state(adev, adev->mp1_state);
+ if (r) {
+ DRM_ERROR("SMC failed to set mp1 state %d, %d\n",
+ adev->mp1_state, r);
+ return r;
+ }
}
}
-
adev->ip_blocks[i].status.hw = false;
}
@@ -2800,7 +2856,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
adev->usec_timeout = AMDGPU_MAX_USEC_TIMEOUT;
if (amdgpu_emu_mode == 1)
- adev->usec_timeout *= 2;
+ adev->usec_timeout *= 10;
adev->gmc.gart_size = 512 * 1024 * 1024;
adev->accel_working = false;
adev->num_rings = 0;
@@ -3088,22 +3144,6 @@ fence_driver_init:
} else
adev->ucode_sysfs_en = true;
- r = amdgpu_debugfs_gem_init(adev);
- if (r)
- DRM_ERROR("registering gem debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_regs_init(adev);
- if (r)
- DRM_ERROR("registering register debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_firmware_init(adev);
- if (r)
- DRM_ERROR("registering firmware debugfs failed (%d).\n", r);
-
- r = amdgpu_debugfs_init(adev);
- if (r)
- DRM_ERROR("Creating debugfs files failed (%d).\n", r);
-
if ((amdgpu_testing & 1)) {
if (adev->accel_working)
amdgpu_test_moves(adev);
@@ -3177,6 +3217,12 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
flush_delayed_work(&adev->delayed_init_work);
adev->shutdown = true;
+	/* make sure IB tests have finished before entering exclusive
+	 * mode to avoid being preempted during an IB test
+	 */
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
/* disable all interrupts */
amdgpu_irq_disable_all(adev);
if (adev->mode_info.mode_config_initialized){
@@ -3219,13 +3265,11 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
adev->rmmio = NULL;
amdgpu_device_doorbell_fini(adev);
- amdgpu_debugfs_regs_cleanup(adev);
device_remove_file(adev->dev, &dev_attr_pcie_replay_count);
if (adev->ucode_sysfs_en)
amdgpu_ucode_sysfs_fini(adev);
if (IS_ENABLED(CONFIG_PERF_EVENTS))
amdgpu_pmu_fini(adev);
- amdgpu_debugfs_preempt_cleanup(adev);
if (amdgpu_discovery && adev->asic_type >= CHIP_NAVI10)
amdgpu_discovery_fini(adev);
}
@@ -3309,7 +3353,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
}
}
- amdgpu_amdkfd_suspend(adev);
+ amdgpu_amdkfd_suspend(adev, !fbcon);
amdgpu_ras_suspend(adev);
@@ -3393,7 +3437,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
}
}
}
- r = amdgpu_amdkfd_resume(adev);
+ r = amdgpu_amdkfd_resume(adev, !fbcon);
if (r)
return r;
@@ -3913,6 +3957,8 @@ static int amdgpu_do_asic_reset(struct amdgpu_hive_info *hive,
if (r)
goto out;
+ amdgpu_fbdev_set_suspend(tmp_adev, 0);
+
/* must succeed. */
amdgpu_ras_resume(tmp_adev);
@@ -4086,6 +4132,8 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
*/
amdgpu_unregister_gpu_instance(tmp_adev);
+ amdgpu_fbdev_set_suspend(adev, 1);
+
/* disable ras on ALL IPs */
if (!(in_ras_intr && !use_baco) &&
amdgpu_device_ip_need_full_reset(tmp_adev))
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
index f95092741c38..27d8ae19a7a4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c
@@ -307,7 +307,7 @@ int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
- DRM_INFO("set register base offset for %s\n",
+ DRM_DEBUG("set register base offset for %s\n",
hw_id_names[le16_to_cpu(ip->hw_id)]);
adev->reg_offset[hw_ip][ip->number_instance] =
ip->base_address;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index 6d520a3eec40..84cee27cd7ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -99,7 +99,7 @@ static void amdgpu_display_flip_work_func(struct work_struct *__work)
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(work->target_vblank -
- amdgpu_get_vblank_counter_kms(adev->ddev, amdgpu_crtc->crtc_id)) > 0) {
+ amdgpu_get_vblank_counter_kms(crtc)) > 0) {
schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
return;
}
@@ -219,7 +219,7 @@ int amdgpu_display_crtc_page_flip_target(struct drm_crtc *crtc,
if (!adev->enable_virtual_display)
work->base = amdgpu_bo_gpu_offset(new_abo);
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
+ amdgpu_get_vblank_counter_kms(crtc);
 	/* we borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -924,3 +924,15 @@ int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc)
return AMDGPU_CRTC_IRQ_NONE;
}
}
+
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
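This wrapper exists because the vblank callbacks move from struct drm_driver to the per-CRTC function tables (see the amdgpu_drv.c hunk below). A sketch of the expected wiring, assuming the 5.7-era per-CRTC vblank API; the concrete hookup for each display IP lives outside this hunk:

/* Sketch of per-CRTC vblank wiring (assumed, not from this hunk). */
static const struct drm_crtc_funcs example_crtc_funcs = {
	.get_vblank_counter = amdgpu_get_vblank_counter_kms,
	.enable_vblank = amdgpu_enable_vblank_kms,
	.disable_vblank = amdgpu_disable_vblank_kms,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.get_scanout_position = amdgpu_crtc_get_scanout_position,
};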
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
index a59cd47aa6c1..ffeb20f11c07 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c
@@ -223,6 +223,37 @@ static void amdgpu_dma_buf_detach(struct dma_buf *dmabuf,
}
/**
+ * amdgpu_dma_buf_pin - &dma_buf_ops.pin implementation
+ *
+ * @attach: attachment to pin down
+ *
+ * Pin the BO which is backing the DMA-buf so that it can't move any more.
+ */
+static int amdgpu_dma_buf_pin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ /* pin buffer into GTT */
+ return amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
+}
+
+/**
+ * amdgpu_dma_buf_unpin - &dma_buf_ops.unpin implementation
+ *
+ * @attach: attachment to unpin
+ *
+ * Unpin a previously pinned BO to make it movable again.
+ */
+static void amdgpu_dma_buf_unpin(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->dmabuf->priv;
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+
+ amdgpu_bo_unpin(bo);
+}
+
+/**
* amdgpu_dma_buf_map - &dma_buf_ops.map_dma_buf implementation
* @attach: DMA-buf attachment
* @dir: DMA direction
@@ -244,9 +275,19 @@ static struct sg_table *amdgpu_dma_buf_map(struct dma_buf_attachment *attach,
struct sg_table *sgt;
long r;
- r = amdgpu_bo_pin(bo, AMDGPU_GEM_DOMAIN_GTT);
- if (r)
- return ERR_PTR(r);
+ if (!bo->pin_count) {
+ /* move buffer into GTT */
+ struct ttm_operation_ctx ctx = { false, false };
+
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ return ERR_PTR(r);
+
+ } else if (!(amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type) &
+ AMDGPU_GEM_DOMAIN_GTT)) {
+ return ERR_PTR(-EBUSY);
+ }
sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
if (IS_ERR(sgt))
@@ -277,13 +318,9 @@ static void amdgpu_dma_buf_unmap(struct dma_buf_attachment *attach,
struct sg_table *sgt,
enum dma_data_direction dir)
{
- struct drm_gem_object *obj = attach->dmabuf->priv;
- struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
-
dma_unmap_sg(attach->dev, sgt->sgl, sgt->nents, dir);
sg_free_table(sgt);
kfree(sgt);
- amdgpu_bo_unpin(bo);
}
/**
@@ -327,9 +364,10 @@ static int amdgpu_dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
}
const struct dma_buf_ops amdgpu_dmabuf_ops = {
- .dynamic_mapping = true,
.attach = amdgpu_dma_buf_attach,
.detach = amdgpu_dma_buf_detach,
+ .pin = amdgpu_dma_buf_pin,
+ .unpin = amdgpu_dma_buf_unpin,
.map_dma_buf = amdgpu_dma_buf_map,
.unmap_dma_buf = amdgpu_dma_buf_unmap,
.release = drm_gem_dmabuf_release,
@@ -413,6 +451,73 @@ error:
}
/**
+ * amdgpu_dma_buf_move_notify - &attach.move_notify implementation
+ *
+ * @attach: the DMA-buf attachment
+ *
+ * Invalidate the DMA-buf attachment, making sure that we re-create the
+ * mapping before the next use.
+ */
+static void
+amdgpu_dma_buf_move_notify(struct dma_buf_attachment *attach)
+{
+ struct drm_gem_object *obj = attach->importer_priv;
+ struct ww_acquire_ctx *ticket = dma_resv_locking_ctx(obj->resv);
+ struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_operation_ctx ctx = { false, false };
+ struct ttm_placement placement = {};
+ struct amdgpu_vm_bo_base *bo_base;
+ int r;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_SYSTEM)
+ return;
+
+ r = ttm_bo_validate(&bo->tbo, &placement, &ctx);
+ if (r) {
+		DRM_ERROR("Failed to invalidate DMA-buf import (%d)\n", r);
+ return;
+ }
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
+ struct amdgpu_vm *vm = bo_base->vm;
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
+
+ if (ticket) {
+ /* When we get an error here it means that somebody
+ * else is holding the VM lock and updating page tables
+ * So we can just continue here.
+ */
+ r = dma_resv_lock(resv, ticket);
+ if (r)
+ continue;
+
+ } else {
+ /* TODO: This is more problematic and we actually need
+ * to allow page tables updates without holding the
+ * lock.
+ */
+ if (!dma_resv_trylock(resv))
+ continue;
+ }
+
+ r = amdgpu_vm_clear_freed(adev, vm, NULL);
+ if (!r)
+ r = amdgpu_vm_handle_moved(adev, vm);
+
+ if (r && r != -EBUSY)
+			DRM_ERROR("Failed to invalidate VM page tables (%d)\n",
+ r);
+
+ dma_resv_unlock(resv);
+ }
+}
+
+static const struct dma_buf_attach_ops amdgpu_dma_buf_attach_ops = {
+ .move_notify = amdgpu_dma_buf_move_notify
+};
+
+/**
* amdgpu_gem_prime_import - &drm_driver.gem_prime_import implementation
* @dev: DRM device
* @dma_buf: Shared DMA buffer
@@ -444,7 +549,8 @@ struct drm_gem_object *amdgpu_gem_prime_import(struct drm_device *dev,
if (IS_ERR(obj))
return obj;
- attach = dma_buf_dynamic_attach(dma_buf, dev->dev, true);
+ attach = dma_buf_dynamic_attach(dma_buf, dev->dev,
+ &amdgpu_dma_buf_attach_ops, obj);
if (IS_ERR(attach)) {
drm_gem_object_put(obj);
return ERR_CAST(attach);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
index a2e8c3dfb4f1..ba1bb95a3cf9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.c
@@ -1171,3 +1171,20 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
return ret;
}
+
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate)
+{
+ int ret = 0;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ void *pp_handle = adev->powerplay.pp_handle;
+ struct smu_context *smu = &adev->smu;
+
+ if (is_support_sw_smu(adev))
+ ret = smu_set_df_cstate(smu, cstate);
+ else if (pp_funcs &&
+ pp_funcs->set_df_cstate)
+ ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+
+ return ret;
+}
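A hypothetical caller sketch; the DF_CSTATE_* values are assumed from the powerplay interface headers:

/* Fence off DF C-state transitions around a latency-sensitive access
 * (sketch under the assumptions above, not from this patch). */
static int example_df_sensitive_access(struct amdgpu_device *adev)
{
	int r = amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW);

	if (r)
		return r;
	/* ... perform the access ... */
	return amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW);
}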
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
index 902ca6c00cca..936d85aa0fbc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dpm.h
@@ -448,6 +448,8 @@ struct amdgpu_pm {
/* powerplay feature */
uint32_t pp_feature;
+ /* Used for I2C access to various EEPROMs on relevant ASICs */
+ struct i2c_adapter smu_i2c;
};
#define R600_SSTU_DFLT 0
@@ -533,4 +535,7 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
+int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
+ uint32_t cstate);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 94e2fd758e01..8ea86ffdea0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -1021,6 +1021,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct drm_device *dev;
+ struct amdgpu_device *adev;
unsigned long flags = ent->driver_data;
int ret, retry = 0;
bool supports_atomic = false;
@@ -1090,6 +1091,8 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
pci_set_drvdata(pdev, dev);
+ amdgpu_driver_load_kms(dev, ent->driver_data);
+
retry_init:
ret = drm_dev_register(dev, ent->driver_data);
if (ret == -EAGAIN && ++retry <= 3) {
@@ -1100,6 +1103,11 @@ retry_init:
} else if (ret)
goto err_pci;
+ adev = dev->dev_private;
+ ret = amdgpu_debugfs_init(adev);
+ if (ret)
+ DRM_ERROR("Creating debugfs files failed (%d).\n", ret);
+
return 0;
err_pci:
@@ -1119,9 +1127,10 @@ amdgpu_pci_remove(struct pci_dev *pdev)
#endif
DRM_ERROR("Hotplug removal is not supported\n");
drm_dev_unplug(dev);
- drm_dev_put(dev);
+ amdgpu_driver_unload_kms(dev);
pci_disable_device(pdev);
pci_set_drvdata(pdev, NULL);
+ drm_dev_put(dev);
}
static void
@@ -1220,11 +1229,15 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
}
}
+ adev->in_runpm = true;
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_CHANGING;
drm_kms_helper_poll_disable(drm_dev);
ret = amdgpu_device_suspend(drm_dev, false);
+ if (ret)
+ return ret;
+
if (amdgpu_device_supports_boco(drm_dev)) {
/* Only need to handle PCI state in the driver for ATPX
* PCI core handles it for _PR3.
@@ -1278,6 +1291,7 @@ static int amdgpu_pmops_runtime_resume(struct device *dev)
drm_kms_helper_poll_enable(drm_dev);
if (amdgpu_device_supports_boco(drm_dev))
drm_dev->switch_power_state = DRM_SWITCH_POWER_ON;
+ adev->in_runpm = false;
return 0;
}
@@ -1285,24 +1299,55 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_dev->dev_private;
- struct drm_crtc *crtc;
+ /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
+ int ret = 1;
if (!adev->runpm) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm_dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
+ if (amdgpu_device_has_dc_support(adev)) {
+ struct drm_crtc *crtc;
+
+ drm_modeset_lock_all(drm_dev);
+
+ drm_for_each_crtc(crtc, drm_dev) {
+ if (crtc->state->active) {
+ ret = -EBUSY;
+ break;
+ }
+ }
+
+ drm_modeset_unlock_all(drm_dev);
+
+ } else {
+ struct drm_connector *list_connector;
+ struct drm_connector_list_iter iter;
+
+ mutex_lock(&drm_dev->mode_config.mutex);
+ drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+ drm_connector_list_iter_begin(drm_dev, &iter);
+ drm_for_each_connector_iter(list_connector, &iter) {
+ if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+ ret = -EBUSY;
+ break;
+ }
}
+
+ drm_connector_list_iter_end(&iter);
+
+ drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+ mutex_unlock(&drm_dev->mode_config.mutex);
}
+ if (ret == -EBUSY)
+ DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
- /* we don't want the main rpm_idle to call suspend - we want to autosuspend */
- return 1;
+ return ret;
}
long amdgpu_drm_ioctl(struct file *filp,
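For context on the reworked idle path: a positive return from a runtime_idle callback tells the PM core not to suspend from this call, and the autosuspend timer armed just before fires instead. A minimal sketch of that contract (callback name illustrative, not part of the patch):

#include <linux/pm_runtime.h>

static int example_runtime_idle(struct device *dev)
{
        /* ... return -EBUSY here if any crtc/connector is still active ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_autosuspend(dev);
        return 1;       /* positive: PM core must not suspend synchronously */
}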
@@ -1377,32 +1422,15 @@ int amdgpu_file_to_fpriv(struct file *filp, struct amdgpu_fpriv **fpriv)
return 0;
}
-static bool
-amdgpu_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return amdgpu_display_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_ATOMIC |
+ DRIVER_ATOMIC |
DRIVER_GEM |
DRIVER_RENDER | DRIVER_MODESET | DRIVER_SYNCOBJ |
DRIVER_SYNCOBJ_TIMELINE,
- .load = amdgpu_driver_load_kms,
.open = amdgpu_driver_open_kms,
.postclose = amdgpu_driver_postclose_kms,
.lastclose = amdgpu_driver_lastclose_kms,
- .unload = amdgpu_driver_unload_kms,
- .get_vblank_counter = amdgpu_get_vblank_counter_kms,
- .enable_vblank = amdgpu_enable_vblank_kms,
- .disable_vblank = amdgpu_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = amdgpu_get_crtc_scanout_position,
.irq_handler = amdgpu_irq_handler,
.ioctls = amdgpu_ioctls_kms,
.gem_free_object_unlocked = amdgpu_gem_object_free,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index 2672dc64a310..9ae7b61f696a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -336,15 +336,12 @@ int amdgpu_fbdev_init(struct amdgpu_device *adev)
drm_fb_helper_prepare(adev->ddev, &rfbdev->helper,
&amdgpu_fb_helper_funcs);
- ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper,
- AMDGPUFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(adev->ddev, &rfbdev->helper);
if (ret) {
kfree(rfbdev);
return ret;
}
- drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
-
/* disable all the possible outputs/crtcs before entering KMS mode */
if (!amdgpu_device_has_dc_support(adev))
drm_helper_disable_unused_functions(adev->ddev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 3c01252b1e0e..7531527067df 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -503,9 +503,6 @@ int amdgpu_fence_driver_init_ring(struct amdgpu_ring *ring,
*/
int amdgpu_fence_driver_init(struct amdgpu_device *adev)
{
- if (amdgpu_debugfs_fence_init(adev))
- dev_err(adev->dev, "fence debugfs file creation failed\n");
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 0f960b498792..6b9c9193cdfa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -192,6 +192,14 @@ static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
return adev->gfx.mec.num_mec > 1;
}
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue)
+{
+ /* Policy: queue 0 of each pipe is the high-priority compute queue */
+ return (queue == 0);
+}
+
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
int i, queue, pipe, mec;
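A hedged sketch of how the queue-0 policy might be consumed when rings are brought up; the helper below is illustrative and not part of this patch:

static void example_flag_ring(struct amdgpu_device *adev,
                              struct amdgpu_ring *ring, int queue)
{
        /* mirrors the has_high_prio field added to struct amdgpu_ring */
        ring->has_high_prio =
                amdgpu_gfx_is_high_priority_compute_queue(adev, queue);
}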
@@ -477,7 +485,7 @@ int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
RESET_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
@@ -565,7 +573,6 @@ int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "gfx_err_count",
- .debugfs_name = "gfx_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_gfx_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
index ca17ffb01301..5825692d07e4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h
@@ -41,6 +41,15 @@
#define AMDGPU_MAX_GFX_QUEUES KGD_MAX_QUEUES
#define AMDGPU_MAX_COMPUTE_QUEUES KGD_MAX_QUEUES
+enum gfx_pipe_priority {
+ AMDGPU_GFX_PIPE_PRIO_NORMAL = 1,
+ AMDGPU_GFX_PIPE_PRIO_HIGH,
+ AMDGPU_GFX_PIPE_PRIO_MAX
+};
+
+#define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0
+#define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15
+
struct amdgpu_mec {
struct amdgpu_bo *hpd_eop_obj;
u64 hpd_eop_gpu_addr;
@@ -151,6 +160,8 @@ struct amdgpu_gfx_config {
unsigned num_gpus;
unsigned multi_gpu_tile_size;
unsigned mc_arb_ramcfg;
+ unsigned num_banks;
+ unsigned num_ranks;
unsigned gb_addr_config;
unsigned num_rbs;
unsigned gs_vgt_table_depth;
@@ -204,6 +215,7 @@ struct amdgpu_gfx_funcs {
u32 queue, u32 vmid);
int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if);
int (*query_ras_error_count) (struct amdgpu_device *adev, void *ras_error_status);
+ void (*reset_ras_error_count) (struct amdgpu_device *adev);
};
struct sq_work {
@@ -278,8 +290,9 @@ struct amdgpu_gfx {
uint32_t num_gfx_sched;
unsigned num_gfx_rings;
struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
+ struct drm_gpu_scheduler **compute_prio_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
struct drm_gpu_scheduler *compute_sched[AMDGPU_MAX_COMPUTE_RINGS];
- uint32_t num_compute_sched;
+ uint32_t num_compute_sched[AMDGPU_GFX_PIPE_PRIO_MAX];
unsigned num_compute_rings;
struct amdgpu_irq_src eop_irq;
struct amdgpu_irq_src priv_reg_irq;
@@ -361,6 +374,8 @@ void amdgpu_gfx_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
int *mec, int *pipe, int *queue);
bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev, int mec,
int pipe, int queue);
+bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
+ int queue);
int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev, int me,
int pipe, int queue);
void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
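With num_compute_sched and compute_prio_sched now indexed per priority bucket, context setup needs a mapping from DRM scheduler priorities onto the new enum. A sketch of one plausible mapping (the exact policy is an assumption, not taken from this patch):

static enum gfx_pipe_priority
example_sched_prio_to_pipe_prio(enum drm_sched_priority prio)
{
        /* assumption: only hardware-high priorities use the high pipe */
        return prio >= DRM_SCHED_PRIORITY_HIGH_HW ?
                AMDGPU_GFX_PIPE_PRIO_HIGH : AMDGPU_GFX_PIPE_PRIO_NORMAL;
}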
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index d3c27a3c43f6..7546da0cc70c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -195,6 +195,7 @@ struct amdgpu_gmc {
uint32_t srbm_soft_reset;
bool prt_warning;
uint64_t stolen_size;
+ uint32_t sdpif_register;
/* apertures */
u64 shared_aperture_start;
u64 shared_aperture_end;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 60655834d649..ccbd7acfc4cb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -48,7 +48,6 @@
* produce command buffers which are sent to the kernel and
* put in IBs for execution by the requested ring.
*/
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
/**
* amdgpu_ib_get - request an IB (Indirect Buffer)
@@ -295,9 +294,7 @@ int amdgpu_ib_pool_init(struct amdgpu_device *adev)
}
adev->ib_pool_ready = true;
- if (amdgpu_debugfs_sa_init(adev)) {
- dev_err(adev->dev, "failed to register debugfs file for SA\n");
- }
+
return 0;
}
@@ -421,7 +418,7 @@ static const struct drm_info_list amdgpu_debugfs_sa_list[] = {
#endif
-static int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_sa_list, 1);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index d42be880a236..4981e443a884 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -117,12 +117,10 @@ void amdgpu_job_free_resources(struct amdgpu_job *job)
static void amdgpu_job_free_cb(struct drm_sched_job *s_job)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_job->sched);
struct amdgpu_job *job = to_amdgpu_job(s_job);
drm_sched_job_cleanup(s_job);
- amdgpu_ring_priority_put(ring, s_job->s_priority);
dma_fence_put(job->fence);
amdgpu_sync_free(&job->sync);
amdgpu_sync_free(&job->sched_sync);
@@ -143,7 +141,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
void *owner, struct dma_fence **f)
{
enum drm_sched_priority priority;
- struct amdgpu_ring *ring;
int r;
if (!f)
@@ -158,9 +155,6 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->rq->sched);
- amdgpu_ring_priority_get(ring, priority);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 60591dbc2097..fd1dc3236eca 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -88,9 +88,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev)
if (adev->rmmio == NULL)
goto done_free;
- if (amdgpu_sriov_vf(adev))
- amdgpu_virt_request_full_gpu(adev, false);
-
if (adev->runpm) {
pm_runtime_get_sync(dev->dev);
pm_runtime_forbid(dev->dev);
@@ -170,10 +167,17 @@ int amdgpu_driver_load_kms(struct drm_device *dev, unsigned long flags)
}
if (amdgpu_device_supports_boco(dev) &&
- (amdgpu_runtime_pm != 0)) /* enable runpm by default */
+ (amdgpu_runtime_pm != 0)) /* enable runpm by default for boco */
+ adev->runpm = true;
+ else if (amdgpu_device_supports_baco(dev) &&
+ (amdgpu_runtime_pm != 0) &&
+ (adev->asic_type >= CHIP_TOPAZ) &&
+ (adev->asic_type != CHIP_VEGA10) &&
+ (adev->asic_type != CHIP_VEGA20) &&
+ (adev->asic_type != CHIP_ARCTURUS)) /* enable runpm on VI+ */
adev->runpm = true;
else if (amdgpu_device_supports_baco(dev) &&
- (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 */
+ (amdgpu_runtime_pm > 0)) /* enable runpm if runpm=1 on CI */
adev->runpm = true;
/* Call ACPI methods: require modeset init
@@ -1110,14 +1114,15 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
/**
* amdgpu_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 amdgpu_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int vpos, hpos, stat;
u32 count;
@@ -1177,14 +1182,15 @@ u32 amdgpu_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to enable vblank interrupt for
+ * @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+int amdgpu_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
@@ -1194,13 +1200,14 @@ int amdgpu_enable_vblank_kms(struct drm_device *dev, unsigned int pipe)
/**
* amdgpu_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
- * @pipe: crtc to disable vblank interrupt for
+ * @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void amdgpu_disable_vblank_kms(struct drm_device *dev, unsigned int pipe)
+void amdgpu_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct amdgpu_device *adev = dev->dev_private;
int idx = amdgpu_display_crtc_idx_to_irq_type(adev, pipe);
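These three hooks now take the drm_crtc directly and recover dev and pipe from it, which lets them be wired into per-CRTC function tables instead of the removed drm_driver entries. Illustrative wiring, assuming this kernel's drm_crtc_funcs carries these fields:

static const struct drm_crtc_funcs example_crtc_funcs = {
        /* assumption: these vblank fields exist in drm_crtc_funcs here */
        .get_vblank_counter = amdgpu_get_vblank_counter_kms,
        .enable_vblank = amdgpu_enable_vblank_kms,
        .disable_vblank = amdgpu_disable_vblank_kms,
};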
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
index 676c48c02d77..ead3dc572ec5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c
@@ -32,7 +32,6 @@ int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "mmhub_err_count",
- .debugfs_name = "mmhub_err_inject",
};
if (!adev->mmhub.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
index 1cd78940cf82..e89fb35fec71 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h
@@ -26,6 +26,7 @@ struct amdgpu_mmhub_funcs {
int (*ras_late_init)(struct amdgpu_device *adev);
void (*query_ras_error_count)(struct amdgpu_device *adev,
void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_mmhub {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
index eb9975f4decb..37ba07e2feb5 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mode.h
@@ -612,6 +612,11 @@ void amdgpu_panel_mode_fixup(struct drm_encoder *encoder,
struct drm_display_mode *adjusted_mode);
int amdgpu_display_crtc_idx_to_irq_type(struct amdgpu_device *adev, int crtc);
+bool amdgpu_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos,
+ int *hpos, ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
/* fbdev layer */
int amdgpu_fbdev_init(struct amdgpu_device *adev);
void amdgpu_fbdev_fini(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
index 7d5c3a9de9ea..6201a5f4b4fa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c
@@ -30,7 +30,6 @@ int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "pcie_bif_err_count",
- .debugfs_name = "pcie_bif_err_inject",
};
if (!adev->nbio.ras_if) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index e3f16b49e970..c687f5415b3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -31,6 +31,7 @@
*/
#include <linux/list.h>
#include <linux/slab.h>
+#include <linux/dma-buf.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
@@ -925,6 +926,9 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ if (bo->tbo.base.import_attach)
+ dma_buf_pin(bo->tbo.base.import_attach);
+
bo->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
@@ -1008,6 +1012,9 @@ int amdgpu_bo_unpin(struct amdgpu_bo *bo)
amdgpu_bo_subtract_pin_size(bo);
+ if (bo->tbo.base.import_attach)
+ dma_buf_unpin(bo->tbo.base.import_attach);
+
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
@@ -1274,6 +1281,10 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
+ if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
+ bo->mem.mem_type != TTM_PL_SYSTEM)
+ dma_buf_move_notify(abo->tbo.base.dma_buf);
+
/* remember the eviction */
if (evict)
atomic64_inc(&adev->num_evictions);
@@ -1307,6 +1318,12 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (abo->kfd_bo)
amdgpu_amdkfd_unreserve_memory_limit(abo);
+ /* We only remove the fence if the resv has individualized. */
+ WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
+ && bo->base.resv != &bo->base._resv);
+ if (bo->base.resv == &bo->base._resv)
+ amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
+
if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
!(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
return;
@@ -1403,30 +1420,52 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
}
/**
- * amdgpu_sync_wait_resv - Wait for BO reservation fences
+ * amdgpu_bo_sync_wait_resv - Wait for BO reservation fences
*
- * @bo: buffer object
+ * @adev: amdgpu device pointer
+ * @resv: reservation object to sync to
+ * @sync_mode: synchronization mode
* @owner: fence owner
* @intr: Whether the wait is interruptible
*
+ * Extract the fences from the reservation object and waits for them to finish.
+ *
* Returns:
* 0 on success, errno otherwise.
*/
-int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
struct amdgpu_sync sync;
int r;
amdgpu_sync_create(&sync);
- amdgpu_sync_resv(adev, &sync, bo->tbo.base.resv, owner, false);
+ amdgpu_sync_resv(adev, &sync, resv, sync_mode, owner);
r = amdgpu_sync_wait(&sync, intr);
amdgpu_sync_free(&sync);
-
return r;
}
/**
+ * amdgpu_bo_sync_wait - Wrapper for amdgpu_bo_sync_wait_resv
+ * @bo: buffer object to wait for
+ * @owner: fence owner
+ * @intr: Whether the wait is interruptible
+ *
+ * Wrapper to wait for fences in a BO.
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
+int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
+ return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
+ AMDGPU_SYNC_NE_OWNER, owner, intr);
+}
+
+/**
* amdgpu_bo_gpu_offset - return GPU offset of bo
* @bo: amdgpu object for which we query the offset
*
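A hedged usage sketch of the new resv-level wait: block (interruptibly) on every fence in a BO's reservation object that was not created by the given owner. The wrapper name is illustrative:

static int example_wait_for_others(struct amdgpu_device *adev,
                                   struct amdgpu_bo *bo, void *owner)
{
        return amdgpu_bo_sync_wait_resv(adev, bo->tbo.base.resv,
                                        AMDGPU_SYNC_NE_OWNER, owner, true);
}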
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 36dec51d1ef1..5e39ecd8cc28 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -277,6 +277,9 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo);
int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo);
void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared);
+int amdgpu_bo_sync_wait_resv(struct amdgpu_device *adev, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode, void *owner,
+ bool intr);
int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr);
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo);
int amdgpu_bo_validate(struct amdgpu_bo *bo);
@@ -316,6 +319,7 @@ void amdgpu_sa_bo_free(struct amdgpu_device *adev,
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
struct seq_file *m);
#endif
+int amdgpu_debugfs_sa_init(struct amdgpu_device *adev);
bool amdgpu_bo_support_uswc(u64 bo_flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index b03b1eb7ba04..bc3cf04a1a94 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -41,8 +41,6 @@
#include "hwmgr.h"
#define WIDTH_4K 3840
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
-
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"},
{AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"},
@@ -3398,11 +3396,6 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
DRM_ERROR("failed to create device file unique_id\n");
return ret;
}
- ret = amdgpu_debugfs_pm_init(adev);
- if (ret) {
- DRM_ERROR("Failed to register debugfs file for dpm!\n");
- return ret;
- }
if ((adev->asic_type >= CHIP_VEGA10) &&
!(adev->flags & AMD_IS_APU)) {
@@ -3669,7 +3662,7 @@ static const struct drm_info_list amdgpu_pm_info_list[] = {
};
#endif
-static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list));
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
index 3da1da277805..5db0ef86e84c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.h
@@ -43,4 +43,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
+int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
index 07914e34bc25..1311d6aec5d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c
@@ -52,7 +52,7 @@ static int amdgpu_perf_event_init(struct perf_event *event)
return -ENOENT;
/* update the hw_perf_event struct with config data */
- hwc->conf = event->attr.config;
+ hwc->config = event->attr.config;
return 0;
}
@@ -74,9 +74,9 @@ static void amdgpu_perf_start(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
if (!(flags & PERF_EF_RELOAD))
- pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 1);
- pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -101,7 +101,7 @@ static void amdgpu_perf_read(struct perf_event *event)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->conf,
+ pe->adev->df.funcs->pmc_get_count(pe->adev, hwc->config,
&count);
break;
default:
@@ -126,7 +126,7 @@ static void amdgpu_perf_stop(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 0);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 0);
break;
default:
break;
@@ -156,7 +156,8 @@ static int amdgpu_perf_add(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- retval = pe->adev->df.funcs->pmc_start(pe->adev, hwc->conf, 1);
+ retval = pe->adev->df.funcs->pmc_start(pe->adev,
+ hwc->config, 1);
break;
default:
return 0;
@@ -184,7 +185,7 @@ static void amdgpu_perf_del(struct perf_event *event, int flags)
switch (pe->pmu_perf_type) {
case PERF_TYPE_AMDGPU_DF:
- pe->adev->df.funcs->pmc_stop(pe->adev, hwc->conf, 1);
+ pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, 1);
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 3a1570dafe34..dc42086a672b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -24,6 +24,7 @@
*/
#include <linux/firmware.h>
+#include <linux/dma-mapping.h>
#include "amdgpu.h"
#include "amdgpu_psp.h"
@@ -38,6 +39,42 @@
static void psp_set_funcs(struct amdgpu_device *adev);
+static int psp_sysfs_init(struct amdgpu_device *adev);
+static void psp_sysfs_fini(struct amdgpu_device *adev);
+
+/*
+ * With DF Cstate management centralized in PMFW, the firmware
+ * loading sequence is updated as below:
+ * - Load KDB
+ * - Load SYS_DRV
+ * - Load tOS
+ * - Load PMFW
+ * - Setup TMR
+ * - Load other non-psp fw
+ * - Load ASD
+ * - Load XGMI/RAS/HDCP/DTM TA if any
+ *
+ * This new sequence is required for
+ * - Arcturus
+ * - Navi12 and onwards
+ */
+static void psp_check_pmfw_centralized_cstate_management(struct psp_context *psp)
+{
+ struct amdgpu_device *adev = psp->adev;
+
+ psp->pmfw_centralized_cstate_management = false;
+
+ if (amdgpu_sriov_vf(adev))
+ return;
+
+ if (adev->flags & AMD_IS_APU)
+ return;
+
+ if ((adev->asic_type == CHIP_ARCTURUS) ||
+ (adev->asic_type >= CHIP_NAVI12))
+ psp->pmfw_centralized_cstate_management = true;
+}
+
static int psp_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
@@ -75,6 +112,8 @@ static int psp_early_init(void *handle)
psp->adev = adev;
+ psp_check_pmfw_centralized_cstate_management(psp);
+
return 0;
}
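A condensed, illustrative view of the resulting start-up order when the flag is set; the wrapper below is not real code, only the function names come from this patch:

static int example_psp_bringup(struct psp_context *psp)
{
        int ret;

        ret = psp_hw_start(psp);        /* TMR setup skipped when flag is set */
        if (ret)
                return ret;
        /* loads SMC/PMFW first, then the TMR, then the remaining ucode */
        return psp_np_fw_load(psp);
}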
@@ -101,6 +140,13 @@ static int psp_sw_init(void *handle)
return ret;
}
+ if (adev->asic_type == CHIP_NAVI10) {
+ ret = psp_sysfs_init(adev);
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -113,10 +159,18 @@ static int psp_sw_fini(void *handle)
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ if (adev->psp.cap_fw) {
+ release_firmware(adev->psp.cap_fw);
+ adev->psp.cap_fw = NULL;
+ }
if (adev->psp.ta_fw) {
release_firmware(adev->psp.ta_fw);
adev->psp.ta_fw = NULL;
}
+
+ if (adev->asic_type == CHIP_NAVI10)
+ psp_sysfs_fini(adev);
+
return 0;
}
@@ -150,6 +204,7 @@ psp_cmd_submit_buf(struct psp_context *psp,
int ret;
int index;
int timeout = 2000;
+ bool ras_intr = false;
mutex_lock(&psp->mutex);
@@ -174,7 +229,8 @@ psp_cmd_submit_buf(struct psp_context *psp,
* because gpu reset thread triggered and lock resource should
* be released for psp resume sequence.
*/
- if (amdgpu_ras_intr_triggered())
+ ras_intr = amdgpu_ras_intr_triggered();
+ if (ras_intr)
break;
msleep(1);
amdgpu_asic_invalidate_hdp(psp->adev, NULL);
@@ -187,14 +243,14 @@ psp_cmd_submit_buf(struct psp_context *psp,
* during psp initialization to avoid breaking hw_init and it doesn't
* return -EINVAL.
*/
- if (psp->cmd_buf_mem->resp.status || !timeout) {
+ if ((psp->cmd_buf_mem->resp.status || !timeout) && !ras_intr) {
if (ucode)
DRM_WARN("failed to load ucode id (%d) ",
ucode->ucode_id);
DRM_WARN("psp command (0x%X) failed and response status is (0x%X)\n",
psp->cmd_buf_mem->cmd_id,
psp->cmd_buf_mem->resp.status);
- if (!timeout) {
+ if ((ucode && (ucode->ucode_id == AMDGPU_UCODE_ID_CAP)) || !timeout) {
mutex_unlock(&psp->mutex);
return -EINVAL;
}
@@ -558,7 +614,7 @@ int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
return psp_ta_invoke(psp, ta_cmd_id, psp->xgmi_context.session_id);
}
-static int psp_xgmi_terminate(struct psp_context *psp)
+int psp_xgmi_terminate(struct psp_context *psp)
{
int ret;
@@ -579,7 +635,7 @@ static int psp_xgmi_terminate(struct psp_context *psp)
return 0;
}
-static int psp_xgmi_initialize(struct psp_context *psp)
+int psp_xgmi_initialize(struct psp_context *psp)
{
struct ta_xgmi_shared_memory *xgmi_cmd;
int ret;
@@ -1013,6 +1069,30 @@ static int psp_dtm_initialize(struct psp_context *psp)
return 0;
}
+static int psp_dtm_unload(struct psp_context *psp)
+{
+ int ret;
+ struct psp_gfx_cmd_resp *cmd;
+
+ /*
+ * TODO: bypass the unloading in sriov for now
+ */
+ if (amdgpu_sriov_vf(psp->adev))
+ return 0;
+
+ cmd = kzalloc(sizeof(struct psp_gfx_cmd_resp), GFP_KERNEL);
+ if (!cmd)
+ return -ENOMEM;
+
+ psp_prep_ta_unload_cmd_buf(cmd, psp->dtm_context.session_id);
+
+ ret = psp_cmd_submit_buf(psp, NULL, cmd, psp->fence_buf_mc_addr);
+
+ kfree(cmd);
+
+ return ret;
+}
+
int psp_dtm_invoke(struct psp_context *psp, uint32_t ta_cmd_id)
{
/*
@@ -1037,7 +1117,7 @@ static int psp_dtm_terminate(struct psp_context *psp)
if (!psp->dtm_context.dtm_initialized)
return 0;
- ret = psp_hdcp_unload(psp);
+ ret = psp_dtm_unload(psp);
if (ret)
return ret;
@@ -1057,7 +1137,7 @@ static int psp_hw_start(struct psp_context *psp)
struct amdgpu_device *adev = psp->adev;
int ret;
- if (!amdgpu_sriov_vf(adev) || !adev->in_gpu_reset) {
+ if (!amdgpu_sriov_vf(adev)) {
if (psp->kdb_bin_size &&
(psp->funcs->bootloader_load_kdb != NULL)) {
ret = psp_bootloader_load_kdb(psp);
@@ -1092,10 +1172,17 @@ static int psp_hw_start(struct psp_context *psp)
return ret;
}
- ret = psp_tmr_load(psp);
- if (ret) {
- DRM_ERROR("PSP load tmr failed!\n");
- return ret;
+ /*
+ * For ASICs with DF Cstate management centralized in PMFW,
+ * TMR setup must be performed after the PMFW is loaded and
+ * before any other non-PSP firmware is loaded.
+ */
+ if (!psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
}
return 0;
@@ -1105,6 +1192,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode,
enum psp_gfx_fw_type *type)
{
switch (ucode->ucode_id) {
+ case AMDGPU_UCODE_ID_CAP:
+ *type = GFX_FW_TYPE_CAP;
+ break;
case AMDGPU_UCODE_ID_SDMA0:
*type = GFX_FW_TYPE_SDMA0;
break;
@@ -1292,9 +1382,10 @@ static int psp_np_fw_load(struct psp_context *psp)
struct amdgpu_firmware_info *ucode;
struct amdgpu_device* adev = psp->adev;
- if (psp->autoload_supported) {
+ if (psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management) {
ucode = &adev->firmware.ucode[AMDGPU_UCODE_ID_SMC];
- if (!ucode->fw)
+ if (!ucode->fw || amdgpu_sriov_vf(adev))
goto out;
ret = psp_execute_np_fw_load(psp, ucode);
@@ -1302,6 +1393,14 @@ static int psp_np_fw_load(struct psp_context *psp)
return ret;
}
+ if (psp->pmfw_centralized_cstate_management) {
+ ret = psp_tmr_load(psp);
+ if (ret) {
+ DRM_ERROR("PSP load tmr failed!\n");
+ return ret;
+ }
+ }
+
out:
for (i = 0; i < adev->firmware.max_ucodes; i++) {
ucode = &adev->firmware.ucode[i];
@@ -1309,7 +1408,9 @@ out:
continue;
if (ucode->ucode_id == AMDGPU_UCODE_ID_SMC &&
- (psp_smu_reload_quirk(psp) || psp->autoload_supported))
+ (psp_smu_reload_quirk(psp) ||
+ psp->autoload_supported ||
+ psp->pmfw_centralized_cstate_management))
continue;
if (amdgpu_sriov_vf(adev) &&
@@ -1420,16 +1521,6 @@ skip_memalloc:
return ret;
}
- if (adev->gmc.xgmi.num_physical_nodes > 1) {
- ret = psp_xgmi_initialize(psp);
- /* Warning the XGMI seesion initialize failure
- * Instead of stop driver initialization
- */
- if (ret)
- dev_err(psp->adev->dev,
- "XGMI: Failed to initialize XGMI session\n");
- }
-
if (psp->adev->psp.ta_fw) {
ret = psp_ras_initialize(psp);
if (ret)
@@ -1494,10 +1585,6 @@ static int psp_hw_fini(void *handle)
void *tmr_buf;
void **pptr;
- if (adev->gmc.xgmi.num_physical_nodes > 1 &&
- psp->xgmi_context.initialized == 1)
- psp_xgmi_terminate(psp);
-
if (psp->adev->psp.ta_fw) {
psp_ras_terminate(psp);
psp_dtm_terminate(psp);
@@ -1753,6 +1840,97 @@ static int psp_set_powergating_state(void *handle,
return 0;
}
+static ssize_t psp_usbc_pd_fw_sysfs_read(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ uint32_t fw_ver;
+ int ret;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_read_usbc_pd_fw(&adev->psp, &fw_ver);
+ mutex_unlock(&adev->psp.mutex);
+
+ if (ret) {
+ DRM_ERROR("Failed to read USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return snprintf(buf, PAGE_SIZE, "%x\n", fw_ver);
+}
+
+static ssize_t psp_usbc_pd_fw_sysfs_write(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf,
+ size_t count)
+{
+ struct drm_device *ddev = dev_get_drvdata(dev);
+ struct amdgpu_device *adev = ddev->dev_private;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+ int ret;
+ char fw_name[100];
+ const struct firmware *usbc_pd_fw;
+
+ if (!adev->ip_blocks[AMD_IP_BLOCK_TYPE_PSP].status.late_initialized) {
+ DRM_INFO("PSP block is not ready yet.");
+ return -EBUSY;
+ }
+
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s", buf);
+ ret = request_firmware(&usbc_pd_fw, fw_name, adev->dev);
+ if (ret)
+ goto fail;
+
+ /* We need contiguous physical mem to place the FW for psp to access */
+ cpu_addr = dma_alloc_coherent(adev->dev, usbc_pd_fw->size, &dma_addr, GFP_KERNEL);
+
+ ret = dma_mapping_error(adev->dev, dma_addr);
+ if (ret)
+ goto rel_buf;
+
+ memcpy_toio(cpu_addr, usbc_pd_fw->data, usbc_pd_fw->size);
+
+ /*
+ * x86 specific workaround.
+ * Without it the buffer is invisible in PSP.
+ *
+ * TODO Remove once PSP starts snooping CPU cache
+ */
+#ifdef CONFIG_X86
+ clflush_cache_range(cpu_addr, (usbc_pd_fw->size & ~(L1_CACHE_BYTES - 1)));
+#endif
+
+ mutex_lock(&adev->psp.mutex);
+ ret = psp_load_usbc_pd_fw(&adev->psp, dma_addr);
+ mutex_unlock(&adev->psp.mutex);
+
+rel_buf:
+ dma_free_coherent(adev->dev, usbc_pd_fw->size, cpu_addr, dma_addr);
+ release_firmware(usbc_pd_fw);
+
+fail:
+ if (ret) {
+ DRM_ERROR("Failed to load USBC PD FW, err = %d", ret);
+ return ret;
+ }
+
+ return count;
+}
+
+static DEVICE_ATTR(usbc_pd_fw, S_IRUGO | S_IWUSR,
+ psp_usbc_pd_fw_sysfs_read,
+ psp_usbc_pd_fw_sysfs_write);
+
+
+
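A minimal userspace sketch of driving the new attribute; the sysfs path and firmware file name are assumptions, and the driver prepends "amdgpu/" to whatever is written:

#include <stdio.h>

int main(void)
{
        FILE *f = fopen("/sys/class/drm/card0/device/usbc_pd_fw", "w");

        if (!f)
                return 1;
        fputs("usbc_pd_fw.bin", f);     /* loaded as amdgpu/usbc_pd_fw.bin */
        return fclose(f) ? 1 : 0;
}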
const struct amd_ip_funcs psp_ip_funcs = {
.name = "psp",
.early_init = psp_early_init,
@@ -1771,6 +1949,21 @@ const struct amd_ip_funcs psp_ip_funcs = {
.set_powergating_state = psp_set_powergating_state,
};
+static int psp_sysfs_init(struct amdgpu_device *adev)
+{
+ int ret = device_create_file(adev->dev, &dev_attr_usbc_pd_fw);
+
+ if (ret)
+ DRM_ERROR("Failed to create USBC PD FW control file!");
+
+ return ret;
+}
+
+static void psp_sysfs_fini(struct amdgpu_device *adev)
+{
+ device_remove_file(adev->dev, &dev_attr_usbc_pd_fw);
+}
+
static const struct amdgpu_psp_funcs psp_funcs = {
.check_fw_loading_status = psp_check_fw_loading_status,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
index 611021514c52..4a4d8f2ccca2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h
@@ -114,6 +114,8 @@ struct psp_funcs
int (*mem_training)(struct psp_context *psp, uint32_t ops);
uint32_t (*ring_get_wptr)(struct psp_context *psp);
void (*ring_set_wptr)(struct psp_context *psp, uint32_t value);
+ int (*load_usbc_pd_fw)(struct psp_context *psp, dma_addr_t dma_addr);
+ int (*read_usbc_pd_fw)(struct psp_context *psp, uint32_t *fw_ver);
};
#define AMDGPU_XGMI_MAX_CONNECTED_NODES 64
@@ -250,6 +252,9 @@ struct psp_context
uint32_t asd_ucode_size;
uint8_t *asd_start_addr;
+ /* cap firmware */
+ const struct firmware *cap_fw;
+
/* fence buffer */
struct amdgpu_bo *fence_buf_bo;
uint64_t fence_buf_mc_addr;
@@ -264,6 +269,8 @@ struct psp_context
atomic_t fence_value;
/* flag to mark whether gfx fw autoload is supported or not */
bool autoload_supported;
+ /* flag to mark whether df cstate management centralized to PMFW */
+ bool pmfw_centralized_cstate_management;
/* xgmi ta firmware and buffer */
const struct firmware *ta_fw;
@@ -349,6 +356,14 @@ struct amdgpu_psp_funcs {
#define psp_ring_get_wptr(psp) (psp)->funcs->ring_get_wptr((psp))
#define psp_ring_set_wptr(psp, value) (psp)->funcs->ring_set_wptr((psp), (value))
+#define psp_load_usbc_pd_fw(psp, dma_addr) \
+ ((psp)->funcs->load_usbc_pd_fw ? \
+ (psp)->funcs->load_usbc_pd_fw((psp), (dma_addr)) : -EINVAL)
+
+#define psp_read_usbc_pd_fw(psp, fw_ver) \
+ ((psp)->funcs->read_usbc_pd_fw ? \
+ (psp)->funcs->read_usbc_pd_fw((psp), fw_ver) : -EINVAL)
+
extern const struct amd_ip_funcs psp_ip_funcs;
extern const struct amdgpu_ip_block_version psp_v3_1_ip_block;
@@ -362,6 +377,8 @@ int psp_gpu_reset(struct amdgpu_device *adev);
int psp_update_vcn_sram(struct amdgpu_device *adev, int inst_idx,
uint64_t cmd_gpu_addr, int cmd_size);
+int psp_xgmi_initialize(struct psp_context *psp);
+int psp_xgmi_terminate(struct psp_context *psp);
int psp_xgmi_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
int psp_ras_invoke(struct psp_context *psp, uint32_t ta_cmd_id);
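The two macros above follow the existing psp_* pattern: optional backend hooks that degrade to -EINVAL when a PSP implementation does not provide them. A hedged usage sketch:

static int example_query_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
{
        /* returns -EINVAL on backends without USB-C PD support */
        return psp_read_usbc_pd_fw(psp, fw_ver);
}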
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
index cef94e2169fe..43055a01f35e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c
@@ -31,6 +31,7 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_xgmi.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
const char *ras_error_string[] = {
@@ -720,6 +721,9 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
if (adev->nbio.funcs->query_ras_error_count)
adev->nbio.funcs->query_ras_error_count(adev, &err_data);
break;
+ case AMDGPU_RAS_BLOCK__XGMI_WAFL:
+ amdgpu_xgmi_query_ras_error_count(adev, &err_data);
+ break;
default:
break;
}
@@ -742,20 +746,6 @@ int amdgpu_ras_error_query(struct amdgpu_device *adev,
return 0;
}
-uint64_t get_xgmi_relative_phy_addr(struct amdgpu_device *adev, uint64_t addr)
-{
- uint32_t df_inst_id;
-
- if ((!adev->df.funcs) ||
- (!adev->df.funcs->get_df_inst_id) ||
- (!adev->df.funcs->get_dram_base_addr))
- return addr;
-
- df_inst_id = adev->df.funcs->get_df_inst_id(adev);
-
- return addr + adev->df.funcs->get_dram_base_addr(adev, df_inst_id);
-}
-
/* wrapper of psp_ras_trigger_error */
int amdgpu_ras_error_inject(struct amdgpu_device *adev,
struct ras_inject_if *info)
@@ -775,8 +765,9 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev,
/* Calculate XGMI relative offset */
if (adev->gmc.xgmi.num_physical_nodes > 1) {
- block_info.address = get_xgmi_relative_phy_addr(adev,
- block_info.address);
+ block_info.address =
+ amdgpu_xgmi_get_relative_phy_addr(adev,
+ block_info.address);
}
switch (info->head.block) {
@@ -1122,6 +1113,32 @@ void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
&amdgpu_ras_debugfs_ops);
}
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+ struct ras_fs_if fs_info;
+
+ /*
+ * this is not called in the resume path, so there is no need
+ * to check the suspend and gpu reset status
+ */
+ if (!con)
+ return;
+
+ amdgpu_ras_debugfs_create_ctrl_node(adev);
+
+ list_for_each_entry(obj, &con->head, node) {
+ if (amdgpu_ras_is_supported(adev, obj->head.block) &&
+ (obj->attr_inuse == 1)) {
+ sprintf(fs_info.debugfs_name, "%s_err_inject",
+ ras_block_str(obj->head.block));
+ fs_info.head = obj->head;
+ amdgpu_ras_debugfs_create(adev, &fs_info);
+ }
+ }
+}
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head)
{
@@ -1154,7 +1171,6 @@ static void amdgpu_ras_debugfs_remove_all(struct amdgpu_device *adev)
static int amdgpu_ras_fs_init(struct amdgpu_device *adev)
{
amdgpu_ras_sysfs_create_feature_node(adev);
- amdgpu_ras_debugfs_create_ctrl_node(adev);
return 0;
}
@@ -1319,6 +1335,33 @@ static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev)
}
/* ih end */
+/* traverse all IPs except NBIO to query error counters */
+static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev)
+{
+ struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
+ struct ras_manager *obj;
+
+ if (!con)
+ return;
+
+ list_for_each_entry(obj, &con->head, node) {
+ struct ras_query_if info = {
+ .head = obj->head,
+ };
+
+ /*
+ * PCIE_BIF has its own isr, driven by the ras controller
+ * interrupt; its error counter query is done there, so
+ * skip that block in this common sync-flood isr path.
+ */
+ if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
+ continue;
+
+ amdgpu_ras_error_query(adev, &info);
+ }
+}
+
/* recovery begin */
/* return 0 on success.
@@ -1373,6 +1416,12 @@ static void amdgpu_ras_do_recovery(struct work_struct *work)
struct amdgpu_ras *ras =
container_of(work, struct amdgpu_ras, recovery_work);
+ /*
+ * Query and print the non-zero error counters per IP block
+ * for awareness before recovering the GPU.
+ */
+ amdgpu_ras_log_on_err_counter(ras->adev);
+
if (amdgpu_device_should_recover_gpu(ras->adev))
amdgpu_device_gpu_recover(ras->adev, 0);
atomic_set(&ras->in_recovery, 0);
@@ -1713,18 +1762,30 @@ static void amdgpu_ras_check_supported(struct amdgpu_device *adev,
*hw_supported = 0;
*supported = 0;
- if (amdgpu_sriov_vf(adev) ||
+ if (amdgpu_sriov_vf(adev) || !adev->is_atom_fw ||
(adev->asic_type != CHIP_VEGA20 &&
adev->asic_type != CHIP_ARCTURUS))
return;
- if (adev->is_atom_fw &&
- (amdgpu_atomfirmware_mem_ecc_supported(adev) ||
- amdgpu_atomfirmware_sram_ecc_supported(adev)))
- *hw_supported = AMDGPU_RAS_BLOCK_MASK;
+ if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
+ DRM_INFO("HBM ECC is active.\n");
+ *hw_supported |= (1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("HBM ECC is not presented.\n");
+
+ if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
+ DRM_INFO("SRAM ECC is active.\n");
+ *hw_supported |= ~(1 << AMDGPU_RAS_BLOCK__UMC |
+ 1 << AMDGPU_RAS_BLOCK__DF);
+ } else
+ DRM_INFO("SRAM ECC is not presented.\n");
+
+ /* hw_supported needs to be aligned with RAS block mask. */
+ *hw_supported &= AMDGPU_RAS_BLOCK_MASK;
*supported = amdgpu_ras_enable == 0 ?
- 0 : *hw_supported & amdgpu_ras_mask;
+ 0 : *hw_supported & amdgpu_ras_mask;
}
int amdgpu_ras_init(struct amdgpu_device *adev)
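A worked reading of the new mask arithmetic, assuming the usual RAS block numbering (AMDGPU_RAS_BLOCK__UMC = 0, __DF = 8; the exact values are an assumption here):

/*
 * HBM ECC only:  hw_supported = (1 << 0) | (1 << 8)        -> UMC and DF
 * SRAM ECC only: hw_supported = ~((1 << 0) | (1 << 8))     -> all others
 * both:          union of the two, i.e. every block
 * In each case the final "&= AMDGPU_RAS_BLOCK_MASK" trims the
 * complemented value down to the defined RAS blocks.
 */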
@@ -1825,8 +1886,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
goto interrupt;
}
- amdgpu_ras_debugfs_create(adev, fs_info);
-
r = amdgpu_ras_sysfs_create(adev, fs_info);
if (r)
goto sysfs;
@@ -1835,7 +1894,6 @@ int amdgpu_ras_late_init(struct amdgpu_device *adev,
cleanup:
amdgpu_ras_sysfs_remove(adev, ras_block);
sysfs:
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
interrupt:
@@ -1852,7 +1910,6 @@ void amdgpu_ras_late_fini(struct amdgpu_device *adev,
return;
amdgpu_ras_sysfs_remove(adev, ras_block);
- amdgpu_ras_debugfs_remove(adev, ras_block);
if (ih_info->cb)
amdgpu_ras_interrupt_remove_handler(adev, ih_info);
amdgpu_ras_feature_enable(adev, ras_block, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
index a5fe29a9373e..55c3eceb390d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h
@@ -592,6 +592,8 @@ int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev,
void amdgpu_ras_debugfs_create(struct amdgpu_device *adev,
struct ras_fs_if *head);
+void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev);
+
void amdgpu_ras_debugfs_remove(struct amdgpu_device *adev,
struct ras_common_if *head);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
index 2a8e04895595..c0096097bbcf 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c
@@ -25,10 +25,11 @@
#include "amdgpu.h"
#include "amdgpu_ras.h"
#include <linux/bits.h>
-#include "smu_v11_0_i2c.h"
+#include "atom.h"
-#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
-#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_VEGA20 0xA0
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS 0xA8
+#define EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342 0xA0
/*
* The 2 macros below represent the actual size in bytes that
@@ -55,6 +56,45 @@
#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control))->adev
+static bool __get_eeprom_i2c_addr_arct(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ struct atom_context *atom_ctx = adev->mode_info.atom_context;
+
+ if (!i2c_addr || !atom_ctx)
+ return false;
+
+ if (strnstr(atom_ctx->vbios_version,
+ "D342",
+ sizeof(atom_ctx->vbios_version)))
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS_D342;
+ else
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
+
+ return true;
+}
+
+static bool __get_eeprom_i2c_addr(struct amdgpu_device *adev,
+ uint16_t *i2c_addr)
+{
+ if (!i2c_addr)
+ return false;
+
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ *i2c_addr = EEPROM_I2C_TARGET_ADDR_VEGA20;
+ break;
+
+ case CHIP_ARCTURUS:
+ return __get_eeprom_i2c_addr_arct(adev, i2c_addr);
+
+ default:
+ return false;
+ }
+
+ return true;
+}
+
static void __encode_table_header_to_buff(struct amdgpu_ras_eeprom_table_header *hdr,
unsigned char *buff)
{
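The EEPROM code now rides the shared SMU i2c adapter instead of owning one. A hedged sketch of the combined precondition check and address resolution (helper name illustrative):

static int example_eeprom_xfer(struct amdgpu_device *adev,
                               struct amdgpu_ras_eeprom_control *control,
                               struct i2c_msg *msg)
{
        if (!adev->pm.smu_i2c.algo)     /* shared adapter not initialized */
                return -ENOENT;
        if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
                return -EINVAL;

        msg->addr = control->i2c_address;
        return i2c_transfer(&adev->pm.smu_i2c, msg, 1) == 1 ? 0 : -EIO;
}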
@@ -83,6 +123,7 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
unsigned char *buff)
{
int ret = 0;
+ struct amdgpu_device *adev = to_amdgpu_device(control);
struct i2c_msg msg = {
.addr = 0,
.flags = 0,
@@ -96,15 +137,13 @@ static int __update_table_header(struct amdgpu_ras_eeprom_control *control,
msg.addr = control->i2c_address;
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1)
DRM_ERROR("Failed to write EEPROM table header, ret:%d", ret);
return ret;
}
-
-
static uint32_t __calc_hdr_byte_sum(struct amdgpu_ras_eeprom_control *control)
{
int i;
@@ -212,32 +251,18 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
.buf = buff,
};
- mutex_init(&control->tbl_mutex);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_VEGA20;
- ret = smu_v11_0_i2c_eeprom_control_init(&control->eeprom_accessor);
- break;
-
- case CHIP_ARCTURUS:
- control->i2c_address = EEPROM_I2C_TARGET_ADDR_ARCTURUS;
- ret = smu_i2c_eeprom_init(&adev->smu, &control->eeprom_accessor);
- break;
+ /* Verify i2c adapter is initialized */
+ if (!adev->pm.smu_i2c.algo)
+ return -ENOENT;
- default:
- return 0;
- }
+ if (!__get_eeprom_i2c_addr(adev, &control->i2c_address))
+ return -EINVAL;
- if (ret) {
- DRM_ERROR("Failed to init I2C controller, ret:%d", ret);
- return ret;
- }
+ mutex_init(&control->tbl_mutex);
msg.addr = control->i2c_address;
-
/* Read/Create table header from EEPROM address 0 */
- ret = i2c_transfer(&control->eeprom_accessor, &msg, 1);
+ ret = i2c_transfer(&adev->pm.smu_i2c, &msg, 1);
if (ret < 1) {
DRM_ERROR("Failed to read EEPROM table header, ret:%d", ret);
return ret;
@@ -263,23 +288,6 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control)
return ret == 1 ? 0 : -EIO;
}
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control)
-{
- struct amdgpu_device *adev = to_amdgpu_device(control);
-
- switch (adev->asic_type) {
- case CHIP_VEGA20:
- smu_v11_0_i2c_eeprom_control_fini(&control->eeprom_accessor);
- break;
- case CHIP_ARCTURUS:
- smu_i2c_eeprom_fini(&adev->smu, &control->eeprom_accessor);
- break;
-
- default:
- return;
- }
-}
-
static void __encode_table_record_to_buff(struct amdgpu_ras_eeprom_control *control,
struct eeprom_table_record *record,
unsigned char *buff)
@@ -436,7 +444,7 @@ int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
control->next_addr += EEPROM_TABLE_RECORD_SIZE;
}
- ret = i2c_transfer(&control->eeprom_accessor, msgs, num);
+ ret = i2c_transfer(&adev->pm.smu_i2c, msgs, num);
if (ret < 1) {
DRM_ERROR("Failed to process EEPROM table records, ret:%d", ret);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
index ca78f812d436..7e8647a05df7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.h
@@ -44,7 +44,6 @@ struct amdgpu_ras_eeprom_table_header {
struct amdgpu_ras_eeprom_control {
struct amdgpu_ras_eeprom_table_header tbl_hdr;
- struct i2c_adapter eeprom_accessor;
uint32_t next_addr;
unsigned int num_recs;
struct mutex tbl_mutex;
@@ -79,7 +78,6 @@ struct eeprom_table_record {
}__attribute__((__packed__));
int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control);
-void amdgpu_ras_eeprom_fini(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_reset_table(struct amdgpu_ras_eeprom_control *control);
int amdgpu_ras_eeprom_process_recods(struct amdgpu_ras_eeprom_control *control,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
index e5c83e164d82..a7e1d0425ed0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c
@@ -48,9 +48,6 @@
* wptr. The GPU then starts fetching commands and executes
* them until the pointers are equal again.
*/
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring);
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
/**
* amdgpu_ring_alloc - allocate space on the ring buffer
@@ -154,76 +151,6 @@ void amdgpu_ring_undo(struct amdgpu_ring *ring)
}
/**
- * amdgpu_ring_priority_put - restore a ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Release a request for executing at @priority
- */
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- int i;
-
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_dec_return(&ring->num_jobs[priority]) > 0)
- return;
-
- /* no need to restore if the job is already at the lowest priority */
- if (priority == DRM_SCHED_PRIORITY_NORMAL)
- return;
-
- mutex_lock(&ring->priority_mutex);
- /* something higher prio is executing, no need to decay */
- if (ring->priority > priority)
- goto out_unlock;
-
- /* decay priority to the next level with a job available */
- for (i = priority; i >= DRM_SCHED_PRIORITY_MIN; i--) {
- if (i == DRM_SCHED_PRIORITY_NORMAL
- || atomic_read(&ring->num_jobs[i])) {
- ring->priority = i;
- ring->funcs->set_priority(ring, i);
- break;
- }
- }
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
- * amdgpu_ring_priority_get - change the ring's priority
- *
- * @ring: amdgpu_ring structure holding the information
- * @priority: target priority
- *
- * Request a ring's priority to be raised to @priority (refcounted).
- */
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- if (!ring->funcs->set_priority)
- return;
-
- if (atomic_inc_return(&ring->num_jobs[priority]) <= 0)
- return;
-
- mutex_lock(&ring->priority_mutex);
- if (priority <= ring->priority)
- goto out_unlock;
-
- ring->priority = priority;
- ring->funcs->set_priority(ring, priority);
-
-out_unlock:
- mutex_unlock(&ring->priority_mutex);
-}
-
-/**
* amdgpu_ring_init - init driver ring struct.
*
* @adev: amdgpu_device pointer
@@ -334,10 +261,6 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
for (i = 0; i < DRM_SCHED_PRIORITY_MAX; ++i)
atomic_set(&ring->num_jobs[i], 0);
- if (amdgpu_debugfs_ring_init(adev, ring)) {
- DRM_ERROR("Failed to register debugfs file for rings !\n");
- }
-
return 0;
}
@@ -351,12 +274,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
*/
void amdgpu_ring_fini(struct amdgpu_ring *ring)
{
- ring->sched.ready = false;
/* Not to finish a ring which is not initialized */
if (!(ring->adev) || !(ring->adev->rings[ring->idx]))
return;
+ ring->sched.ready = false;
+
amdgpu_device_wb_free(ring->adev, ring->rptr_offs);
amdgpu_device_wb_free(ring->adev, ring->wptr_offs);
@@ -367,8 +291,6 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)
&ring->gpu_addr,
(void **)&ring->ring);
- amdgpu_debugfs_ring_fini(ring);
-
dma_fence_put(ring->vmid_wait);
ring->vmid_wait = NULL;
ring->me = 0;
@@ -485,8 +407,8 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {
#endif
-static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
- struct amdgpu_ring *ring)
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring)
{
#if defined(CONFIG_DEBUG_FS)
struct drm_minor *minor = adev->ddev->primary;
@@ -507,13 +429,6 @@ static int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
return 0;
}
-static void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring)
-{
-#if defined(CONFIG_DEBUG_FS)
- debugfs_remove(ring->ent);
-#endif
-}
-
/**
* amdgpu_ring_test_helper - tests ring and set sched readiness status
*
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 930316e60155..9a443013d70d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -167,9 +167,6 @@ struct amdgpu_ring_funcs {
uint32_t reg0, uint32_t reg1,
uint32_t ref, uint32_t mask);
void (*emit_tmz)(struct amdgpu_ring *ring, bool start);
- /* priority functions */
- void (*set_priority) (struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
/* Try to soft recover the ring to make the fence signal */
void (*soft_recovery)(struct amdgpu_ring *ring, unsigned vmid);
int (*preempt_ib)(struct amdgpu_ring *ring);
@@ -222,6 +219,7 @@ struct amdgpu_ring {
struct mutex priority_mutex;
/* protected by priority_mutex */
int priority;
+ bool has_high_prio;
#if defined(CONFIG_DEBUG_FS)
struct dentry *ent;
@@ -258,10 +256,6 @@ void amdgpu_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count);
void amdgpu_ring_generic_pad_ib(struct amdgpu_ring *ring, struct amdgpu_ib *ib);
void amdgpu_ring_commit(struct amdgpu_ring *ring);
void amdgpu_ring_undo(struct amdgpu_ring *ring);
-void amdgpu_ring_priority_get(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
-void amdgpu_ring_priority_put(struct amdgpu_ring *ring,
- enum drm_sched_priority priority);
int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,
unsigned ring_size, struct amdgpu_irq_src *irq_src,
unsigned irq_type);
@@ -328,4 +322,8 @@ static inline void amdgpu_ring_write_multiple(struct amdgpu_ring *ring,
int amdgpu_ring_test_helper(struct amdgpu_ring *ring);
+int amdgpu_debugfs_ring_init(struct amdgpu_device *adev,
+ struct amdgpu_ring *ring);
+void amdgpu_debugfs_ring_fini(struct amdgpu_ring *ring);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
index d3d4707f2168..60bb3e8b3118 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h
@@ -126,6 +126,9 @@ struct amdgpu_rlc_funcs {
void (*stop)(struct amdgpu_device *adev);
void (*reset)(struct amdgpu_device *adev);
void (*start)(struct amdgpu_device *adev);
+ void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid);
+ void (*rlcg_wreg)(struct amdgpu_device *adev, u32 offset, u32 v);
+ bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg);
};
struct amdgpu_rlc {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
index a2ee30b16212..250a309e4dee 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.c
@@ -70,7 +70,8 @@ uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring,
uint32_t index = 0;
int r;
- if (vmid == 0 || !amdgpu_mcbp)
+ /* don't enable OS preemption on SDMA under SRIOV */
+ if (amdgpu_sriov_vf(adev) || vmid == 0 || !amdgpu_mcbp)
return 0;
r = amdgpu_sdma_get_index_from_ring(ring, &index);
@@ -92,7 +93,6 @@ int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
struct ras_ih_if *ih_info = (struct ras_ih_if *)ras_ih_info;
struct ras_fs_if fs_info = {
.sysfs_name = "sdma_err_count",
- .debugfs_name = "sdma_err_inject",
};
if (!ih_info)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
index 485335267d78..4b352206354b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h
@@ -56,6 +56,7 @@ struct amdgpu_sdma_ras_funcs {
void (*ras_fini)(struct amdgpu_device *adev);
int (*query_ras_error_count)(struct amdgpu_device *adev,
uint32_t instance, void *ras_error_status);
+ void (*reset_ras_error_count)(struct amdgpu_device *adev);
};
struct amdgpu_sdma {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a09b6b9c27d1..b86392253696 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -202,18 +202,17 @@ int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence)
*
* @sync: sync object to add fences from reservation object to
* @resv: reservation object with embedded fence
- * @explicit_sync: true if we should only sync to the exclusive fence
+ * @mode: how owner affects which fences we sync to
+ * @owner: owner of the planned job submission
*
* Sync to the fence
*/
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner, bool explicit_sync)
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner)
{
struct dma_resv_list *flist;
struct dma_fence *f;
- void *fence_owner;
unsigned i;
int r = 0;
@@ -229,30 +228,46 @@ int amdgpu_sync_resv(struct amdgpu_device *adev,
return r;
for (i = 0; i < flist->shared_count; ++i) {
+ void *fence_owner;
+
f = rcu_dereference_protected(flist->shared[i],
dma_resv_held(resv));
+
+ fence_owner = amdgpu_sync_get_owner(f);
+
+ /* Always sync to moves, no matter what */
+ if (fence_owner == AMDGPU_FENCE_OWNER_UNDEFINED) {
+ r = amdgpu_sync_fence(sync, f, false);
+ if (r)
+ break;
+ }
+
/* We only want to trigger KFD eviction fences on
* evict or move jobs. Skip KFD fences otherwise.
*/
- fence_owner = amdgpu_sync_get_owner(f);
if (fence_owner == AMDGPU_FENCE_OWNER_KFD &&
owner != AMDGPU_FENCE_OWNER_UNDEFINED)
continue;
- if (amdgpu_sync_same_dev(adev, f)) {
- /* VM updates only sync with moves but not with user
- * command submissions or KFD evictions fences
- */
- if (owner == AMDGPU_FENCE_OWNER_VM &&
- fence_owner != AMDGPU_FENCE_OWNER_UNDEFINED)
+ /* Ignore fences depending on the sync mode */
+ switch (mode) {
+ case AMDGPU_SYNC_ALWAYS:
+ break;
+
+ case AMDGPU_SYNC_NE_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner == owner)
continue;
+ break;
- /* Ignore fence from the same owner and explicit one as
- * long as it isn't undefined.
- */
- if (owner != AMDGPU_FENCE_OWNER_UNDEFINED &&
- (fence_owner == owner || explicit_sync))
+ case AMDGPU_SYNC_EQ_OWNER:
+ if (amdgpu_sync_same_dev(adev, f) &&
+ fence_owner != owner)
continue;
+ break;
+
+ case AMDGPU_SYNC_EXPLICIT:
+ continue;
}
r = amdgpu_sync_fence(sync, f, false);
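
The switch above is the heart of the reworked interface: a single enum now encodes how the planned submission's owner relates to each fence's owner. A minimal userspace sketch of that decision rule, compilable on its own — the enum names match the driver, while must_sync() and its boolean inputs are illustrative scaffolding, not driver API; the always-sync-to-moves and KFD special cases handled before the switch are omitted here:

#include <stdbool.h>
#include <stdio.h>

enum amdgpu_sync_mode {
	AMDGPU_SYNC_ALWAYS,
	AMDGPU_SYNC_NE_OWNER,
	AMDGPU_SYNC_EQ_OWNER,
	AMDGPU_SYNC_EXPLICIT
};

/* Returns true when the fence must be waited on before submission. */
static bool must_sync(enum amdgpu_sync_mode mode, bool same_dev,
		      const void *fence_owner, const void *owner)
{
	switch (mode) {
	case AMDGPU_SYNC_ALWAYS:
		return true;			/* sync to everything */
	case AMDGPU_SYNC_NE_OWNER:
		return !(same_dev && fence_owner == owner);
	case AMDGPU_SYNC_EQ_OWNER:
		return !(same_dev && fence_owner != owner);
	case AMDGPU_SYNC_EXPLICIT:
		return false;	/* only explicitly added fences count */
	}
	return true;
}

int main(void)
{
	int a, b;

	printf("NE_OWNER, same owner: %d\n",
	       must_sync(AMDGPU_SYNC_NE_OWNER, true, &a, &a)); /* 0 */
	printf("EQ_OWNER, other owner: %d\n",
	       must_sync(AMDGPU_SYNC_EQ_OWNER, true, &a, &b)); /* 0 */
	return 0;
}
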
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
index d62c2b81d92b..cfbe5788b8b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.h
@@ -31,6 +31,13 @@ struct dma_resv;
struct amdgpu_device;
struct amdgpu_ring;
+enum amdgpu_sync_mode {
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_SYNC_NE_OWNER,
+ AMDGPU_SYNC_EQ_OWNER,
+ AMDGPU_SYNC_EXPLICIT
+};
+
/*
* Container for fences used to sync command submissions.
*/
@@ -43,11 +50,9 @@ void amdgpu_sync_create(struct amdgpu_sync *sync);
int amdgpu_sync_fence(struct amdgpu_sync *sync, struct dma_fence *f,
bool explicit);
int amdgpu_sync_vm_fence(struct amdgpu_sync *sync, struct dma_fence *fence);
-int amdgpu_sync_resv(struct amdgpu_device *adev,
- struct amdgpu_sync *sync,
- struct dma_resv *resv,
- void *owner,
- bool explicit_sync);
+int amdgpu_sync_resv(struct amdgpu_device *adev, struct amdgpu_sync *sync,
+ struct dma_resv *resv, enum amdgpu_sync_mode mode,
+ void *owner);
struct dma_fence *amdgpu_sync_peek_fence(struct amdgpu_sync *sync,
struct amdgpu_ring *ring);
struct dma_fence *amdgpu_sync_get_fence(struct amdgpu_sync *sync,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index dee446278417..c10ae1cdc1b9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -60,20 +60,14 @@
#include "amdgpu_ras.h"
#include "bif/bif_4_1_d.h"
+#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
+
static int amdgpu_map_buffer(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem, unsigned num_pages,
uint64_t offset, unsigned window,
struct amdgpu_ring *ring,
uint64_t *addr);
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev);
-
-static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
/**
* amdgpu_init_mem_type - Initialize a memory manager for a specific type of
* memory request.
@@ -1034,7 +1028,7 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (abo->flags & AMDGPU_GEM_CREATE_MQD_GFX9) {
+ if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) {
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
@@ -1042,7 +1036,10 @@ int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
if (r)
goto gart_bind_fail;
- /* Patch mtype of the second part BO */
+ /* The memory type of the first page defaults to UC. Now
+ * modify the memory type to NC from the second page of
+ * the BO onward.
+ */
flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK;
flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC);
@@ -1596,7 +1593,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
while (len && pos < adev->gmc.mc_vram_size) {
uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint32_t bytes = 4 - (pos & 3);
+ uint64_t bytes = 4 - (pos & 3);
uint32_t shift = (pos & 3) * 8;
uint32_t mask = 0xffffffff << shift;
@@ -1605,20 +1602,28 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
bytes = len;
}
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
- if (write) {
- value &= ~mask;
- value |= (*(uint32_t *)buf << shift) & mask;
- WREG32_NO_KIQ(mmMM_DATA, value);
- }
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
- if (!write) {
- value = (value & mask) >> shift;
- memcpy(buf, &value, bytes);
+ if (mask != 0xffffffff) {
+ spin_lock_irqsave(&adev->mmio_idx_lock, flags);
+ WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
+ WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
+ if (!write || mask != 0xffffffff)
+ value = RREG32_NO_KIQ(mmMM_DATA);
+ if (write) {
+ value &= ~mask;
+ value |= (*(uint32_t *)buf << shift) & mask;
+ WREG32_NO_KIQ(mmMM_DATA, value);
+ }
+ spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
+ if (!write) {
+ value = (value & mask) >> shift;
+ memcpy(buf, &value, bytes);
+ }
+ } else {
+ bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
+ bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
+
+ amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
+ bytes, write);
}
ret += bytes;
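
For the unaligned head and tail the function keeps the old MM_INDEX/MM_DATA read-modify-write; only aligned, dword-multiple spans take the new bulk amdgpu_device_vram_access() path. A self-contained model of that masked RMW step, assuming a plain array stands in for the VRAM aperture and ignoring the mmio_idx_lock serialization (the driver derives the mask from the shift alone; this sketch bounds it by the byte count explicitly):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void rmw_write(uint32_t *vram, uint64_t pos, const uint8_t *buf,
		      uint32_t bytes)
{
	uint64_t aligned = pos & ~3ull;
	uint32_t shift = (pos & 3) * 8;
	uint32_t mask = (bytes == 4) ? 0xffffffff
				     : (((1u << (bytes * 8)) - 1) << shift);
	uint32_t value = vram[aligned / 4];	/* RREG32(mmMM_DATA) */
	uint32_t src = 0;

	memcpy(&src, buf, bytes);
	value &= ~mask;				/* clear the target lane */
	value |= (src << shift) & mask;		/* merge the new bytes */
	vram[aligned / 4] = value;		/* WREG32(mmMM_DATA) */
}

int main(void)
{
	uint32_t vram[2] = { 0x11223344, 0x55667788 };
	uint8_t byte = 0xAB;

	rmw_write(vram, 1, &byte, 1);	/* patch one byte at offset 1 */
	printf("%08x\n", vram[0]);	/* 1122ab44 */
	return 0;
}
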
@@ -1638,7 +1643,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
- .invalidate_caches = &amdgpu_invalidate_caches,
.init_mem_type = &amdgpu_init_mem_type,
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
@@ -1911,12 +1915,6 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
return r;
}
- /* Register debugfs entries for amdgpu_ttm */
- r = amdgpu_ttm_debugfs_init(adev);
- if (r) {
- DRM_ERROR("Failed to init debugfs\n");
- return r;
- }
return 0;
}
@@ -1938,7 +1936,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (!adev->mman.initialized)
return;
- amdgpu_ttm_debugfs_fini(adev);
amdgpu_ttm_training_reserve_vram_fini(adev);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->discovery_memory, NULL, NULL);
@@ -2113,8 +2110,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
}
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED,
- false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2198,7 +2195,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
if (resv) {
r = amdgpu_sync_resv(adev, &job->sync, resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r) {
DRM_ERROR("sync failed (%d).\n", r);
goto error_free;
@@ -2279,7 +2277,6 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
{
struct amdgpu_device *adev = file_inode(f)->i_private;
ssize_t result = 0;
- int r;
if (size & 0x3 || *pos & 0x3)
return -EINVAL;
@@ -2287,27 +2284,19 @@ static ssize_t amdgpu_ttm_vram_read(struct file *f, char __user *buf,
if (*pos >= adev->gmc.mc_vram_size)
return -ENXIO;
+ size = min(size, (size_t)(adev->gmc.mc_vram_size - *pos));
while (size) {
- unsigned long flags;
- uint32_t value;
+ size_t bytes = min(size, AMDGPU_TTM_VRAM_MAX_DW_READ * 4);
+ uint32_t value[AMDGPU_TTM_VRAM_MAX_DW_READ];
- if (*pos >= adev->gmc.mc_vram_size)
- return result;
-
- spin_lock_irqsave(&adev->mmio_idx_lock, flags);
- WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000);
- WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31);
- value = RREG32_NO_KIQ(mmMM_DATA);
- spin_unlock_irqrestore(&adev->mmio_idx_lock, flags);
-
- r = put_user(value, (uint32_t *)buf);
- if (r)
- return r;
+ amdgpu_device_vram_access(adev, *pos, value, bytes, false);
+ if (copy_to_user(buf, value, bytes))
+ return -EFAULT;
- result += 4;
- buf += 4;
- *pos += 4;
- size -= 4;
+ result += bytes;
+ buf += bytes;
+ *pos += bytes;
+ size -= bytes;
}
return result;
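
The rewritten reader trades one register round trip per dword for a 512-byte stack buffer filled in bulk. A hedged plain-C model of that loop — vram_read(), the flat vram array, and the memcpy calls stand in for the file op, the PCI aperture, and amdgpu_device_vram_access()/copy_to_user() respectively:

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>

#define MAX_DW_READ ((size_t)128)	/* AMDGPU_TTM_VRAM_MAX_DW_READ */

static size_t vram_read(const uint8_t *vram, size_t vram_size,
			uint8_t *ubuf, size_t size, uint64_t *pos)
{
	size_t result = 0;

	if (*pos >= vram_size)
		return 0;
	size = size < vram_size - *pos ? size : vram_size - *pos;

	while (size) {
		uint32_t value[MAX_DW_READ];
		size_t bytes = size < sizeof(value) ? size : sizeof(value);

		memcpy(value, vram + *pos, bytes);  /* vram_access(...) */
		memcpy(ubuf, value, bytes);         /* copy_to_user(...) */

		result += bytes;
		ubuf += bytes;
		*pos += bytes;
		size -= bytes;
	}
	return result;
}

int main(void)
{
	uint8_t vram[1024], out[300];
	uint64_t pos = 900;

	for (unsigned i = 0; i < sizeof(vram); i++)
		vram[i] = (uint8_t)i;

	/* Read past the end: clamped to the 124 bytes that remain. */
	printf("read %zu bytes\n",
	       vram_read(vram, sizeof(vram), out, sizeof(out), &pos));
	return 0;
}
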
@@ -2544,7 +2533,7 @@ static const struct {
#endif
-static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
unsigned count;
@@ -2579,13 +2568,3 @@ static int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
return 0;
#endif
}
-
-static void amdgpu_ttm_debugfs_fini(struct amdgpu_device *adev)
-{
-#if defined(CONFIG_DEBUG_FS)
- unsigned i;
-
- for (i = 0; i < ARRAY_SIZE(ttm_debugfs_entries); i++)
- debugfs_remove(adev->mman.debugfs_entries[i]);
-#endif
-}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index 0dddedc06ae3..bd05bbb4878d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -133,4 +133,6 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_mem_reg *mem);
uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
+int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev);
+
#endif
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index b0e656409c03..88f226070229 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -283,7 +283,8 @@ union amdgpu_firmware_header {
* fw loading support
*/
enum AMDGPU_UCODE_ID {
- AMDGPU_UCODE_ID_SDMA0 = 0,
+ AMDGPU_UCODE_ID_CAP = 0, /* CAP must be the 1st fw to be loaded */
+ AMDGPU_UCODE_ID_SDMA0,
AMDGPU_UCODE_ID_SDMA1,
AMDGPU_UCODE_ID_SDMA2,
AMDGPU_UCODE_ID_SDMA3,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
index f4d40855147b..9dd51f0d2c11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c
@@ -28,7 +28,6 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev)
int r;
struct ras_fs_if fs_info = {
.sysfs_name = "umc_err_count",
- .debugfs_name = "umc_err_inject",
};
struct ras_ih_if ih_info = {
.cb = amdgpu_umc_process_ras_data_cb,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index a92f3b18e657..5fd32ad1c575 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -1099,7 +1099,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
goto err_free;
} else {
r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.base.resv,
- AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ AMDGPU_SYNC_ALWAYS,
+ AMDGPU_FENCE_OWNER_UNDEFINED);
if (r)
goto err_free;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index f96464e2c157..a41272fbcba2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -493,14 +493,9 @@ static int amdgpu_vcn_dec_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han
int amdgpu_vcn_dec_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_vcn_dec_get_create_msg(ring, 1, NULL);
if (r)
goto error;
@@ -527,6 +522,9 @@ int amdgpu_vcn_enc_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
r = amdgpu_ring_alloc(ring, 16);
if (r)
return r;
@@ -656,15 +654,10 @@ err:
int amdgpu_vcn_enc_ring_test_ib(struct amdgpu_ring *ring, long timeout)
{
- struct amdgpu_device *adev = ring->adev;
struct dma_fence *fence = NULL;
struct amdgpu_bo *bo = NULL;
long r;
- /* temporarily disable ib test for sriov */
- if (amdgpu_sriov_vf(adev))
- return 0;
-
r = amdgpu_bo_create_reserved(ring->adev, 128 * 1024, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM,
&bo, NULL, NULL);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
index d6deb0eb1e15..6fe057329de2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.h
@@ -179,6 +179,7 @@ struct amdgpu_vcn_inst {
struct amdgpu_irq_src irq;
struct amdgpu_vcn_reg external;
struct amdgpu_bo *dpg_sram_bo;
+ struct dpg_pause_state pause_state;
void *dpg_sram_cpu_addr;
uint64_t dpg_sram_gpu_addr;
uint32_t *dpg_sram_curr_addr;
@@ -190,8 +191,6 @@ struct amdgpu_vcn {
const struct firmware *fw; /* VCN firmware */
unsigned num_enc_rings;
enum amd_powergating_state cur_state;
- struct dpg_pause_state pause_state;
-
bool indirect_sram;
uint8_t num_vcn_inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
index daaf909d009a..f0128f745bd2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h
@@ -270,6 +270,9 @@ struct amdgpu_virt {
#define amdgpu_sriov_runtime(adev) \
((adev)->virt.caps & AMDGPU_SRIOV_CAPS_RUNTIME)
+#define amdgpu_sriov_fullaccess(adev) \
+(amdgpu_sriov_vf((adev)) && !amdgpu_sriov_runtime((adev)))
+
#define amdgpu_passthrough(adev) \
((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE)
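
The new predicate in one line: a function has "full access" when running as an SRIOV VF that is not in runtime mode, i.e. while the host has granted the guest direct register access. A trivial standalone rendering — the CAPS_* bit values here are made up for illustration, only the boolean shape matches the macro:

#include <stdbool.h>
#include <stdio.h>

#define CAPS_IS_VF	0x1	/* illustrative, not the real cap bits */
#define CAPS_RUNTIME	0x2

static bool sriov_fullaccess(unsigned caps)
{
	return (caps & CAPS_IS_VF) && !(caps & CAPS_RUNTIME);
}

int main(void)
{
	printf("%d %d\n", sriov_fullaccess(CAPS_IS_VF),
	       sriov_fullaccess(CAPS_IS_VF | CAPS_RUNTIME)); /* 1 0 */
	return 0;
}
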
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index d16231d6a790..6d9252a27916 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -120,23 +120,17 @@ static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm)
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
{
- unsigned shift = 0xff;
-
switch (level) {
case AMDGPU_VM_PDB2:
case AMDGPU_VM_PDB1:
case AMDGPU_VM_PDB0:
- shift = 9 * (AMDGPU_VM_PDB0 - level) +
+ return 9 * (AMDGPU_VM_PDB0 - level) +
adev->vm_manager.block_size;
- break;
case AMDGPU_VM_PTB:
- shift = 0;
- break;
+ return 0;
default:
- dev_err(adev->dev, "the level%d isn't supported.\n", level);
+ return ~0;
}
-
- return shift;
}
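
Returning the shift directly also makes the address math easy to check by hand: each page-directory level covers 9 more address bits than the one below it, with the configured block size at the bottom of the directory stack. A compilable worked example, assuming the common 9-bit block size; the enum ordering mirrors the driver (PDB2 = 0 … PTB = 3):

#include <stdio.h>

enum { AMDGPU_VM_PDB2, AMDGPU_VM_PDB1, AMDGPU_VM_PDB0, AMDGPU_VM_PTB };

static unsigned level_shift(unsigned level, unsigned block_size)
{
	switch (level) {
	case AMDGPU_VM_PDB2:
	case AMDGPU_VM_PDB1:
	case AMDGPU_VM_PDB0:
		return 9 * (AMDGPU_VM_PDB0 - level) + block_size;
	case AMDGPU_VM_PTB:
		return 0;
	default:
		return ~0u;	/* invalid level, caller must not use it */
	}
}

int main(void)
{
	/* With block_size = 9: PDB2 -> 27, PDB1 -> 18, PDB0 -> 9, PTB -> 0 */
	for (unsigned l = AMDGPU_VM_PDB2; l <= AMDGPU_VM_PTB; l++)
		printf("level %u shift %u\n", l, level_shift(l, 9));
	return 0;
}
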
/**
@@ -235,19 +229,6 @@ static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
else
list_move_tail(&vm_bo->vm_status, &vm->evicted);
}
-
-/**
- * amdgpu_vm_bo_relocated - vm_bo is reloacted
- *
- * @vm_bo: vm_bo which is relocated
- *
- * State for PDs/PTs which needs to update their parent PD.
- */
-static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
-{
- list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
-}
-
/**
* amdgpu_vm_bo_moved - vm_bo is moved
*
@@ -291,6 +272,22 @@ static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
}
/**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which needs to update their parent PD.
+ * For the root PD, just move to idle state.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ if (vm_bo->bo->parent)
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+ else
+ amdgpu_vm_bo_idle(vm_bo);
+}
+
+/**
* amdgpu_vm_bo_done - vm_bo is done
*
* @vm_bo: vm_bo which is now done
@@ -588,8 +585,8 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
{
entry->priority = 0;
entry->tv.bo = &vm->root.base.bo->tbo;
- /* One for TTM and one for the CS job */
- entry->tv.num_shared = 2;
+ /* Two for VM updates, one for TTM and one for the CS job */
+ entry->tv.num_shared = 4;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
@@ -697,10 +694,7 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
amdgpu_vm_bo_moved(bo_base);
} else {
vm->update_funcs->map_table(bo);
- if (bo->parent)
- amdgpu_vm_bo_relocated(bo_base);
- else
- amdgpu_vm_bo_idle(bo_base);
+ amdgpu_vm_bo_relocated(bo_base);
}
}
@@ -803,7 +797,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_KFD, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1086,8 +1080,12 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job,
struct dma_fence *fence = NULL;
bool pasid_mapping_needed = false;
unsigned patch_offset = 0;
+ bool update_spm_vmid_needed = (job->vm && (job->vm->reserved_vmid[vmhub] != NULL));
int r;
+ if (update_spm_vmid_needed && adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, job->vmid);
+
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
@@ -1299,7 +1297,7 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev,
params.vm = vm;
params.direct = direct;
- r = vm->update_funcs->prepare(&params, AMDGPU_FENCE_OWNER_VM, NULL);
+ r = vm->update_funcs->prepare(&params, NULL, AMDGPU_SYNC_EXPLICIT);
if (r)
return r;
@@ -1448,21 +1446,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t incr, entry_end, pe_start;
struct amdgpu_bo *pt;
- /* make sure that the page tables covering the address range are
- * actually allocated
- */
- r = amdgpu_vm_alloc_pts(params->adev, params->vm, &cursor,
- params->direct);
- if (r)
- return r;
-
- pt = cursor.entry->base.bo;
-
- /* The root level can't be a huge page */
- if (cursor.level == adev->vm_manager.root_level) {
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ /* make sure that the page tables covering the
+ * address range are actually allocated
+ */
+ r = amdgpu_vm_alloc_pts(params->adev, params->vm,
+ &cursor, params->direct);
+ if (r)
+ return r;
}
shift = amdgpu_vm_level_shift(adev, cursor.level);
@@ -1480,25 +1471,38 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* smaller than the address shift. Go to the next
* child entry and try again.
*/
- if (!amdgpu_vm_pt_descendant(adev, &cursor))
- return -ENOENT;
- continue;
- } else if (frag >= parent_shift &&
- cursor.level - 1 != adev->vm_manager.root_level) {
+ if (amdgpu_vm_pt_descendant(adev, &cursor))
+ continue;
+ } else if (frag >= parent_shift) {
/* If the fragment size is even larger than the parent
- * shift we should go up one level and check it again
- * unless one level up is the root level.
+ * shift we should go up one level and check it again.
*/
if (!amdgpu_vm_pt_ancestor(&cursor))
- return -ENOENT;
+ return -EINVAL;
continue;
}
+ pt = cursor.entry->base.bo;
+ if (!pt) {
+ /* We need all PDs and PTs for mapping something, */
+ if (flags & AMDGPU_PTE_VALID)
+ return -ENOENT;
+
+ /* but unmapping something can happen at a higher
+ * level.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -EINVAL;
+
+ pt = cursor.entry->base.bo;
+ shift = parent_shift;
+ }
+
/* Looks good so far, calculate parameters for the update */
incr = (uint64_t)AMDGPU_GPU_PAGE_SIZE << shift;
mask = amdgpu_vm_entries_mask(adev, cursor.level);
pe_start = ((cursor.pfn >> shift) & mask) * 8;
- entry_end = (uint64_t)(mask + 1) << shift;
+ entry_end = ((uint64_t)mask + 1) << shift;
entry_end += cursor.pfn & ~(entry_end - 1);
entry_end = min(entry_end, end);
@@ -1506,6 +1510,10 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
uint64_t upd_end = min(entry_end, frag_end);
unsigned nptes = (upd_end - frag_start) >> shift;
+ /* This can happen when we set higher level PDs to
+ * silent to stop fault floods.
+ */
+ nptes = max(nptes, 1u);
amdgpu_vm_update_flags(params, pt, cursor.level,
pe_start, dst, nptes, incr,
flags | AMDGPU_PTE_FRAG(frag));
@@ -1550,7 +1558,7 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
* @adev: amdgpu_device pointer
* @vm: requested vm
* @direct: direct submission in a page fault
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @start: start of mapped range
* @last: last mapped entry
* @flags: flags for the entries
@@ -1565,14 +1573,14 @@ static int amdgpu_vm_update_ptes(struct amdgpu_vm_update_params *params,
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct amdgpu_vm *vm, bool direct,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
uint64_t start, uint64_t last,
uint64_t flags, uint64_t addr,
dma_addr_t *pages_addr,
struct dma_fence **fence)
{
struct amdgpu_vm_update_params params;
- void *owner = AMDGPU_FENCE_OWNER_VM;
+ enum amdgpu_sync_mode sync_mode;
int r;
memset(&params, 0, sizeof(params));
@@ -1581,9 +1589,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.direct = direct;
params.pages_addr = pages_addr;
- /* sync to everything except eviction fences on unmapping */
+ /* Implicitly sync to command submissions in the same VM before
+ * unmapping. Sync to moving fences before mapping.
+ */
if (!(flags & AMDGPU_PTE_VALID))
- owner = AMDGPU_FENCE_OWNER_KFD;
+ sync_mode = AMDGPU_SYNC_EQ_OWNER;
+ else
+ sync_mode = AMDGPU_SYNC_EXPLICIT;
amdgpu_vm_eviction_lock(vm);
if (vm->evicting) {
@@ -1591,7 +1603,14 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
goto error_unlock;
}
- r = vm->update_funcs->prepare(&params, owner, exclusive);
+ if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
+ struct amdgpu_bo *root = vm->root.base.bo;
+
+ if (!dma_fence_is_signaled(vm->last_direct))
+ amdgpu_bo_fence(root, vm->last_direct, true);
+ }
+
+ r = vm->update_funcs->prepare(&params, resv, sync_mode);
if (r)
goto error_unlock;
@@ -1610,7 +1629,7 @@ error_unlock:
* amdgpu_vm_bo_split_mapping - split a mapping into smaller chunks
*
* @adev: amdgpu_device pointer
- * @exclusive: fence we need to sync to
+ * @resv: fences we need to sync to
* @pages_addr: DMA addresses to use for mapping
* @vm: requested vm
* @mapping: mapped range and flags to use for the update
@@ -1626,7 +1645,7 @@ error_unlock:
* 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
- struct dma_fence *exclusive,
+ struct dma_resv *resv,
dma_addr_t *pages_addr,
struct amdgpu_vm *vm,
struct amdgpu_bo_va_mapping *mapping,
@@ -1696,13 +1715,13 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
- } else if (flags & AMDGPU_PTE_VALID) {
+ } else if (flags & (AMDGPU_PTE_VALID | AMDGPU_PTE_PRT)) {
addr += bo_adev->vm_manager.vram_base_offset;
addr += pfn << PAGE_SHIFT;
}
last = min((uint64_t)mapping->last, start + max_entries - 1);
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, exclusive,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
start, last, flags, addr,
dma_addr, fence);
if (r)
@@ -1741,7 +1760,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
dma_addr_t *pages_addr = NULL;
struct ttm_mem_reg *mem;
struct drm_mm_node *nodes;
- struct dma_fence *exclusive, **last_update;
+ struct dma_fence **last_update;
+ struct dma_resv *resv;
uint64_t flags;
struct amdgpu_device *bo_adev = adev;
int r;
@@ -1749,7 +1769,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
if (clear || !bo) {
mem = NULL;
nodes = NULL;
- exclusive = NULL;
+ resv = vm->root.base.bo->tbo.base.resv;
} else {
struct ttm_dma_tt *ttm;
@@ -1759,7 +1779,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
- exclusive = bo->tbo.moving;
+ resv = bo->tbo.base.resv;
}
if (bo) {
@@ -1769,7 +1789,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
flags = 0x0;
}
- if (clear || (bo && bo->tbo.base.resv == vm->root.base.bo->tbo.base.resv))
+ if (clear || (bo && bo->tbo.base.resv ==
+ vm->root.base.bo->tbo.base.resv))
last_update = &vm->last_update;
else
last_update = &bo_va->last_pt_update;
@@ -1783,7 +1804,7 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va,
}
list_for_each_entry(mapping, &bo_va->invalids, list) {
- r = amdgpu_vm_bo_split_mapping(adev, exclusive, pages_addr, vm,
+ r = amdgpu_vm_bo_split_mapping(adev, resv, pages_addr, vm,
mapping, flags, bo_adev, nodes,
last_update);
if (r)
@@ -1978,6 +1999,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
+ struct dma_resv *resv = vm->root.base.bo->tbo.base.resv;
struct amdgpu_bo_va_mapping *mapping;
uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
@@ -1992,7 +2014,7 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
- r = amdgpu_vm_bo_update_mapping(adev, vm, false, NULL,
+ r = amdgpu_vm_bo_update_mapping(adev, vm, false, resv,
mapping->start, mapping->last,
init_pte_value, 0, NULL, &f);
amdgpu_vm_free_mapping(adev, vm, mapping, f);
@@ -2563,8 +2585,7 @@ bool amdgpu_vm_evictable(struct amdgpu_bo *bo)
return false;
/* Don't evict VM page tables while they are updated */
- if (!dma_fence_is_signaled(bo_base->vm->last_direct) ||
- !dma_fence_is_signaled(bo_base->vm->last_delayed)) {
+ if (!dma_fence_is_signaled(bo_base->vm->last_direct)) {
amdgpu_vm_eviction_unlock(bo_base->vm);
return false;
}
@@ -2741,11 +2762,7 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)
if (timeout <= 0)
return timeout;
- timeout = dma_fence_wait_timeout(vm->last_direct, true, timeout);
- if (timeout <= 0)
- return timeout;
-
- return dma_fence_wait_timeout(vm->last_delayed, true, timeout);
+ return dma_fence_wait_timeout(vm->last_direct, true, timeout);
}
/**
@@ -2818,7 +2835,6 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->update_funcs = &amdgpu_vm_sdma_funcs;
vm->last_update = NULL;
vm->last_direct = dma_fence_get_stub();
- vm->last_delayed = dma_fence_get_stub();
mutex_init(&vm->eviction_lock);
vm->evicting = false;
@@ -2873,7 +2889,6 @@ error_free_root:
error_free_delayed:
dma_fence_put(vm->last_direct);
- dma_fence_put(vm->last_delayed);
drm_sched_entity_destroy(&vm->delayed);
error_free_direct:
@@ -3076,8 +3091,6 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
dma_fence_wait(vm->last_direct, false);
dma_fence_put(vm->last_direct);
- dma_fence_wait(vm->last_delayed, false);
- dma_fence_put(vm->last_delayed);
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
if (mapping->flags & AMDGPU_PTE_PRT && prt_fini_needed) {
@@ -3188,6 +3201,7 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
union drm_amdgpu_vm *args = data;
struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_fpriv *fpriv = filp->driver_priv;
+ long timeout = msecs_to_jiffies(2000);
int r;
switch (args->in.op) {
@@ -3199,6 +3213,21 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return r;
break;
case AMDGPU_VM_OP_UNRESERVE_VMID:
+ if (amdgpu_sriov_runtime(adev))
+ timeout = 8 * timeout;
+
+ /* Wait for the VM to become idle, to make sure the vmid set in
+ * SPM_VMID is not referenced anymore.
+ */
+ r = amdgpu_bo_reserve(fpriv->vm.root.base.bo, true);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_wait_idle(&fpriv->vm, timeout);
+ if (r < 0)
+ return r;
+
+ amdgpu_bo_unreserve(fpriv->vm.root.base.bo);
amdgpu_vmid_free_reserved(adev, &fpriv->vm, AMDGPU_GFXHUB_0);
break;
default:
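
The unreserve path now serializes against in-flight work before recycling the VMID, with a longer grace period under SRIOV. A hedged sketch of that sequence — reserve(), wait_idle(), unreserve() and free_vmid() are stubs, not the driver API; the one behavioral detail kept is that a failed wait returns before unreserving, exactly as in the hunk above:

static int reserve(void)		{ return 0; }
static long wait_idle(long timeout)	{ return timeout; }
static void unreserve(void)		{ }
static void free_vmid(void)		{ }

static int op_unreserve_vmid(int sriov_runtime)
{
	long timeout = 2000;	/* msecs_to_jiffies(2000) in the driver */
	long r;

	/* The engine is shared under SRIOV, so allow 8x as long. */
	if (sriov_runtime)
		timeout *= 8;

	if (reserve())
		return -1;

	r = wait_idle(timeout);	/* SPM_VMID must no longer be referenced */
	if (r < 0)
		return (int)r;	/* note: returns with the BO reserved */

	unreserve();
	free_vmid();
	return 0;
}

int main(void)
{
	return op_unreserve_vmid(1);
}
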
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index b4640ab38c95..06fe30e1492d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -227,8 +227,8 @@ struct amdgpu_vm_update_params {
struct amdgpu_vm_update_funcs {
int (*map_table)(struct amdgpu_bo *bo);
- int (*prepare)(struct amdgpu_vm_update_params *p, void * owner,
- struct dma_fence *exclusive);
+ int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode);
int (*update)(struct amdgpu_vm_update_params *p,
struct amdgpu_bo *bo, uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr, uint64_t flags);
@@ -276,7 +276,6 @@ struct amdgpu_vm {
/* Last submission to the scheduler entities */
struct dma_fence *last_direct;
- struct dma_fence *last_delayed;
unsigned int pasid;
/* dedicated to vm */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
index 73fec7a0ced5..e38516304070 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c
@@ -44,26 +44,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo *table)
* Returns:
* Negative errno, 0 for success.
*/
-static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, void *owner,
- struct dma_fence *exclusive)
+static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p,
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- int r;
-
- /* Wait for any BO move to be completed */
- if (exclusive) {
- r = dma_fence_wait(exclusive, true);
- if (unlikely(r))
- return r;
- }
-
- /* Don't wait for submissions during page fault */
- if (p->direct)
+ if (!resv)
return 0;
- /* Wait for PT BOs to be idle. PTs share the same resv. object
- * as the root PD BO
- */
- return amdgpu_bo_sync_wait(p->vm->root.base.bo, owner, true);
+ return amdgpu_bo_sync_wait_resv(p->adev, resv, sync_mode, p->vm, true);
}
/**
@@ -86,6 +74,13 @@ static int amdgpu_vm_cpu_update(struct amdgpu_vm_update_params *p,
{
unsigned int i;
uint64_t value;
+ int r;
+
+ if (bo->tbo.moving) {
+ r = dma_fence_wait(bo->tbo.moving, true);
+ if (r)
+ return r;
+ }
pe += (unsigned long)amdgpu_bo_kptr(bo);
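
The CPU path no longer takes an exclusive move fence as a parameter; instead each update checks the BO's own moving fence at use time. A minimal model of that ordering, where struct fence and fence_wait() are stand-ins for dma_fence and dma_fence_wait():

#include <stdbool.h>
#include <stdio.h>

struct fence { bool signaled; };

static int fence_wait(struct fence *f)
{
	/* dma_fence_wait(bo->tbo.moving, true) in the driver; assume
	 * the move completes promptly here. */
	f->signaled = true;
	return 0;
}

static int cpu_update(struct fence *moving)
{
	int r;

	if (moving && !moving->signaled) {
		r = fence_wait(moving);	/* don't scribble on a moving PT */
		if (r)
			return r;
	}
	/* ...safe to write PTEs through the CPU mapping now... */
	return 0;
}

int main(void)
{
	struct fence mv = { false };

	printf("%d\n", cpu_update(&mv));
	return 0;
}
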
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
index 19b7f80758f1..cf96c335b258 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c
@@ -58,9 +58,9 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo *table)
* Negative errno, 0 for success.
*/
static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
- void *owner, struct dma_fence *exclusive)
+ struct dma_resv *resv,
+ enum amdgpu_sync_mode sync_mode)
{
- struct amdgpu_bo *root = p->vm->root.base.bo;
unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW;
int r;
@@ -70,17 +70,10 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p,
p->num_dw_left = ndw;
- /* Wait for moves to be completed */
- r = amdgpu_sync_fence(&p->job->sync, exclusive, false);
- if (r)
- return r;
-
- /* Don't wait for any submissions during page fault handling */
- if (p->direct)
+ if (!resv)
return 0;
- return amdgpu_sync_resv(p->adev, &p->job->sync, root->tbo.base.resv,
- owner, false);
+ return amdgpu_sync_resv(p->adev, &p->job->sync, resv, sync_mode, p->vm);
}
/**
@@ -111,12 +104,13 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p,
if (r)
goto error;
- tmp = dma_fence_get(f);
- if (p->direct)
+ if (p->direct) {
+ tmp = dma_fence_get(f);
swap(p->vm->last_direct, tmp);
- else
- swap(p->vm->last_delayed, tmp);
- dma_fence_put(tmp);
+ dma_fence_put(tmp);
+ } else {
+ dma_resv_add_shared_fence(p->vm->root.base.bo->tbo.base.resv, f);
+ }
if (fence && !p->direct)
swap(*fence, f);
@@ -147,7 +141,7 @@ static void amdgpu_vm_sdma_copy_ptes(struct amdgpu_vm_update_params *p,
src += p->num_dw_left * 4;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_copy_ptes(pe, src, count, p->direct);
amdgpu_vm_copy_pte(p->adev, ib, pe, src, count);
@@ -174,7 +168,7 @@ static void amdgpu_vm_sdma_set_ptes(struct amdgpu_vm_update_params *p,
{
struct amdgpu_ib *ib = p->job->ibs;
- pe += amdgpu_bo_gpu_offset(bo);
+ pe += amdgpu_gmc_sign_extend(bo->tbo.offset);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags, p->direct);
if (count < 3) {
amdgpu_vm_write_pte(p->adev, ib, pe, addr | flags,
@@ -208,6 +202,11 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p,
uint64_t *pte;
int r;
+ /* Wait for PD/PT moves to be completed */
+ r = amdgpu_sync_fence(&p->job->sync, bo->tbo.moving, false);
+ if (r)
+ return r;
+
do {
ndw = p->num_dw_left;
ndw -= p->job->ibs->length_dw;
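
On the commit side the split is now: direct (page-fault) submissions still rotate vm->last_direct, while delayed updates publish their fence as a shared fence on the root PD's reservation object, which is what lets last_delayed disappear. A toy model of the two paths — all types here are placeholders sized for the example:

#include <stdio.h>

struct fence { int seq; };
struct resv  { struct fence *shared[8]; int count; };
struct vm    { struct fence *last_direct; struct resv root_resv; };

static void commit(struct vm *vm, struct fence *f, int direct)
{
	if (direct) {
		vm->last_direct = f;	/* swap + put in the driver */
	} else {
		/* dma_resv_add_shared_fence(root->tbo.base.resv, f) */
		vm->root_resv.shared[vm->root_resv.count++] = f;
	}
}

int main(void)
{
	struct vm vm = { 0 };
	struct fence a = { 1 }, b = { 2 };

	commit(&vm, &a, 1);
	commit(&vm, &b, 0);
	printf("direct=%d shared=%d\n",
	       vm.last_direct->seq, vm.root_resv.count);
	return 0;
}
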
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
index a97af422575a..95b3327168ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
@@ -26,7 +26,12 @@
#include "amdgpu_xgmi.h"
#include "amdgpu_smu.h"
#include "amdgpu_ras.h"
+#include "soc15.h"
#include "df/df_3_6_offset.h"
+#include "xgmi/xgmi_4_0_0_smn.h"
+#include "xgmi/xgmi_4_0_0_sh_mask.h"
+#include "wafl/wafl2_4_0_0_smn.h"
+#include "wafl/wafl2_4_0_0_sh_mask.h"
static DEFINE_MUTEX(xgmi_mutex);
@@ -36,6 +41,109 @@ static DEFINE_MUTEX(xgmi_mutex);
static struct amdgpu_hive_info xgmi_hives[AMDGPU_MAX_XGMI_HIVE];
static unsigned hive_count = 0;
+static const int xgmi_pcs_err_status_reg_vg20[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int wafl_pcs_err_status_reg_vg20[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const int xgmi_pcs_err_status_reg_arct[] = {
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
+ smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
+};
+
+/* same as vg20 */
+static const int wafl_pcs_err_status_reg_arct[] = {
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
+ smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
+};
+
+static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
+ {"XGMI PCS DataLossErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
+ {"XGMI PCS TrainingErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
+ {"XGMI PCS CRCErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
+ {"XGMI PCS BERExceededErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
+ {"XGMI PCS TxMetaDataErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"XGMI PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"XGMI PCS DataParityErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
+ {"XGMI PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"XGMI PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"XGMI PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"XGMI PCS DeskewErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
+ {"XGMI PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"XGMI PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"XGMI PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"XGMI PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"XGMI PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"XGMI PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"XGMI PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
+static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
+ {"WAFL PCS DataLossErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
+ {"WAFL PCS TrainingErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
+ {"WAFL PCS CRCErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
+ {"WAFL PCS BERExceededErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
+ {"WAFL PCS TxMetaDataErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
+ {"WAFL PCS ReplayBufParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
+ {"WAFL PCS DataParityErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
+ {"WAFL PCS ReplayFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
+ {"WAFL PCS ReplayFifoUnderflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
+ {"WAFL PCS ElasticFifoOverflowErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
+ {"WAFL PCS DeskewErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
+ {"WAFL PCS DataStartupLimitErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
+ {"WAFL PCS FCInitTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
+ {"WAFL PCS RecoveryTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
+ {"WAFL PCS ReadySerialTimeoutErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
+ {"WAFL PCS ReadySerialAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
+ {"WAFL PCS RecoveryAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
+ {"WAFL PCS RecoveryRelockAttemptErr",
+ SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
+};
+
void *amdgpu_xgmi_hive_try_lock(struct amdgpu_hive_info *hive)
{
return &hive->device_list;
@@ -365,6 +473,13 @@ int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
return 0;
if (amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
+ ret = psp_xgmi_initialize(&adev->psp);
+ if (ret) {
+ dev_err(adev->dev,
+ "XGMI: Failed to initialize xgmi session\n");
+ return ret;
+ }
+
ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
if (ret) {
dev_err(adev->dev,
@@ -451,16 +566,16 @@ exit:
return ret;
}
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
struct amdgpu_hive_info *hive;
if (!adev->gmc.xgmi.supported)
- return;
+ return -EINVAL;
hive = amdgpu_get_xgmi_hive(adev, 1);
if (!hive)
- return;
+ return -EINVAL;
if (!(hive->number_devices--)) {
amdgpu_xgmi_sysfs_destroy(adev, hive);
@@ -471,6 +586,8 @@ void amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
mutex_unlock(&hive->hive_lock);
}
+
+ return psp_xgmi_terminate(&adev->psp);
}
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
@@ -481,7 +598,6 @@ int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
};
struct ras_fs_if fs_info = {
.sysfs_name = "xgmi_wafl_err_count",
- .debugfs_name = "xgmi_wafl_err_inject",
};
if (!adev->gmc.xgmi.supported ||
@@ -521,3 +637,129 @@ void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
kfree(ras_if);
}
}
+
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr)
+{
+ uint32_t df_inst_id;
+ uint64_t dram_base_addr = 0;
+ const struct amdgpu_df_funcs *df_funcs = adev->df.funcs;
+
+ if ((!df_funcs) ||
+ (!df_funcs->get_df_inst_id) ||
+ (!df_funcs->get_dram_base_addr)) {
+ dev_warn(adev->dev,
+ "XGMI: relative phy_addr algorithm is not supported\n");
+ return addr;
+ }
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) {
+ dev_warn(adev->dev,
+ "failed to disable DF-Cstate, DF register may not be accessible\n");
+ return addr;
+ }
+
+ df_inst_id = df_funcs->get_df_inst_id(adev);
+ dram_base_addr = df_funcs->get_dram_base_addr(adev, df_inst_id);
+
+ if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ dev_warn(adev->dev, "failed to enable DF-Cstate\n");
+
+ return addr + dram_base_addr;
+}
+
+static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
+ uint32_t value,
+ uint32_t *ue_count,
+ uint32_t *ce_count,
+ bool is_xgmi_pcs)
+{
+ int i;
+ int ue_cnt;
+
+ if (is_xgmi_pcs) {
+ /* query xgmi pcs error status,
+ * only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ xgmi_pcs_ras_fields[i].pcs_err_mask) >>
+ xgmi_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ xgmi_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ } else {
+ /* query wafl pcs error status,
+ * only ue is supported */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
+ ue_cnt = (value &
+ wafl_pcs_ras_fields[i].pcs_err_mask) >>
+ wafl_pcs_ras_fields[i].pcs_err_shift;
+ if (ue_cnt) {
+ dev_info(adev->dev, "%s detected\n",
+ wafl_pcs_ras_fields[i].err_name);
+ *ue_count += ue_cnt;
+ }
+ }
+ }
+
+ return 0;
+}
+
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status)
+{
+ struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
+ int i;
+ uint32_t data;
+ uint32_t ue_cnt = 0, ce_cnt = 0;
+
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
+ return -EINVAL;
+
+ err_data->ue_count = 0;
+ err_data->ce_count = 0;
+
+ switch (adev->asic_type) {
+ case CHIP_ARCTURUS:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ case CHIP_VEGA20:
+ default:
+ /* check xgmi pcs error */
+ for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, true);
+ }
+ /* check wafl pcs error */
+ for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
+ data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
+ if (data)
+ amdgpu_xgmi_query_pcs_error_status(adev,
+ data, &ue_cnt, &ce_cnt, false);
+ }
+ break;
+ }
+
+ err_data->ue_count += ue_cnt;
+ err_data->ce_count += ce_cnt;
+
+ return 0;
+}
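
Each table entry above pairs a printable name with a mask/shift into the PCS_ERROR_STATUS register, so the query loop is a generic field decode. A standalone version with a made-up three-field layout (the real masks come from the xgmi/wafl sh_mask headers); only uncorrectable counts are accumulated, as in the driver:

#include <stdint.h>
#include <stdio.h>

struct pcs_ras_field {
	const char *err_name;
	uint32_t mask;
	uint32_t shift;
};

static const struct pcs_ras_field fields[] = {
	{ "DataLossErr", 0x00000001, 0 },
	{ "TrainingErr", 0x00000002, 1 },
	{ "CRCErr",      0x00000004, 2 },
};

static uint32_t count_ue(uint32_t status)
{
	uint32_t ue = 0;

	for (unsigned i = 0; i < sizeof(fields) / sizeof(fields[0]); i++) {
		uint32_t cnt = (status & fields[i].mask) >> fields[i].shift;

		if (cnt) {
			printf("%s detected\n", fields[i].err_name);
			ue += cnt;
		}
	}
	return ue;
}

int main(void)
{
	printf("ue_count=%u\n", count_ue(0x5)); /* DataLoss + CRC -> 2 */
	return 0;
}
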
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
index 74011fbc2251..4a92067fe595 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h
@@ -37,15 +37,25 @@ struct amdgpu_hive_info {
struct task_barrier tb;
};
+struct amdgpu_pcs_ras_field {
+ const char *err_name;
+ uint32_t pcs_err_mask;
+ uint32_t pcs_err_shift;
+};
+
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev, int lock);
int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev);
int amdgpu_xgmi_add_device(struct amdgpu_device *adev);
-void amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
+int amdgpu_xgmi_remove_device(struct amdgpu_device *adev);
int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate);
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
struct amdgpu_device *peer_adev);
int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev);
void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev);
+uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
+ uint64_t addr);
+int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
+ void *ras_error_status);
static inline bool amdgpu_xgmi_same_hive(struct amdgpu_device *adev,
struct amdgpu_device *bo_adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
index dd30f4e61a8c..cae426c7c086 100644
--- a/drivers/gpu/drm/amd/amdgpu/atom.c
+++ b/drivers/gpu/drm/amd/amdgpu/atom.c
@@ -744,8 +744,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
cjiffies = jiffies;
if (time_after(cjiffies, ctx->last_jump_jiffies)) {
cjiffies -= ctx->last_jump_jiffies;
- if ((jiffies_to_msecs(cjiffies) > 5000)) {
- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
+ if ((jiffies_to_msecs(cjiffies) > 10000)) {
+ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
ctx->abort = true;
}
} else {
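
The interpreter watchdog pattern, doubled here from 5 s to 10 s, in miniature: remember when the last backward jump happened and abort once the elapsed wall time exceeds the budget. clock() stands in for jiffies, a 10 ms budget keeps the demo instant, and the reset-on-wraparound branch of the original is omitted:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool check_abort(clock_t *last_jump, long budget_ms)
{
	clock_t now = clock();
	long elapsed_ms = (long)((now - *last_jump) * 1000 / CLOCKS_PER_SEC);

	if (elapsed_ms > budget_ms) {
		fprintf(stderr, "stuck in loop for more than %ld ms\n",
			budget_ms);
		return true;	/* ctx->abort = true in the driver */
	}
	return false;
}

int main(void)
{
	clock_t start = clock();

	while (!check_abort(&start, 10))
		;	/* spin until the tiny test budget expires */
	return 0;
}
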
diff --git a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
index ea702a64f807..9b74cfdba7b8 100644
--- a/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
+++ b/drivers/gpu/drm/amd/amdgpu/atombios_dp.c
@@ -186,16 +186,10 @@ amdgpu_atombios_dp_aux_transfer(struct drm_dp_aux *aux, struct drm_dp_aux_msg *m
void amdgpu_atombios_dp_aux_init(struct amdgpu_connector *amdgpu_connector)
{
- int ret;
-
amdgpu_connector->ddc_bus->rec.hpd = amdgpu_connector->hpd.hpd;
- amdgpu_connector->ddc_bus->aux.dev = amdgpu_connector->base.kdev;
amdgpu_connector->ddc_bus->aux.transfer = amdgpu_atombios_dp_aux_transfer;
- ret = drm_dp_aux_register(&amdgpu_connector->ddc_bus->aux);
- if (!ret)
- amdgpu_connector->ddc_bus->has_aux = true;
-
- WARN(ret, "drm_dp_aux_register_i2c_bus() failed with error %d\n", ret);
+ drm_dp_aux_init(&amdgpu_connector->ddc_bus->aux);
+ amdgpu_connector->ddc_bus->has_aux = true;
}
/***** general DP utility functions *****/
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 40d2ac723dd6..2512e7ebfedf 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2494,6 +2494,10 @@ static const struct drm_crtc_funcs dce_v10_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v10_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2685,6 +2689,7 @@ static const struct drm_crtc_helper_funcs dce_v10_0_crtc_helper_funcs = {
.prepare = dce_v10_0_crtc_prepare,
.commit = dce_v10_0_crtc_commit,
.disable = dce_v10_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v10_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 898ef72d423c..0dde22db9848 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2573,6 +2573,10 @@ static const struct drm_crtc_funcs dce_v11_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v11_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2793,6 +2797,7 @@ static const struct drm_crtc_helper_funcs dce_v11_0_crtc_helper_funcs = {
.prepare = dce_v11_0_crtc_prepare,
.commit = dce_v11_0_crtc_commit,
.disable = dce_v11_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v11_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
index db15a112becc..84219534bd38 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c
@@ -2388,6 +2388,10 @@ static const struct drm_crtc_funcs dce_v6_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v6_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2575,6 +2579,7 @@ static const struct drm_crtc_helper_funcs dce_v6_0_crtc_helper_funcs = {
.prepare = dce_v6_0_crtc_prepare,
.commit = dce_v6_0_crtc_commit,
.disable = dce_v6_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v6_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
index f06c9022c1fd..3a640702d7d1 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c
@@ -2395,6 +2395,10 @@ static const struct drm_crtc_funcs dce_v8_0_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_v8_0_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -2593,6 +2597,7 @@ static const struct drm_crtc_helper_funcs dce_v8_0_crtc_helper_funcs = {
.prepare = dce_v8_0_crtc_prepare,
.commit = dce_v8_0_crtc_commit,
.disable = dce_v8_0_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_v8_0_crtc_init(struct amdgpu_device *adev, int index)
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
index e4f94863332c..13e12be667fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_virtual.c
@@ -123,6 +123,10 @@ static const struct drm_crtc_funcs dce_virtual_crtc_funcs = {
.set_config = amdgpu_display_crtc_set_config,
.destroy = dce_virtual_crtc_destroy,
.page_flip_target = amdgpu_display_crtc_page_flip_target,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
+ .enable_vblank = amdgpu_enable_vblank_kms,
+ .disable_vblank = amdgpu_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void dce_virtual_crtc_dpms(struct drm_crtc *crtc, int mode)
@@ -218,6 +222,7 @@ static const struct drm_crtc_helper_funcs dce_virtual_crtc_helper_funcs = {
.prepare = dce_virtual_crtc_prepare,
.commit = dce_virtual_crtc_commit,
.disable = dce_virtual_crtc_disable,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static int dce_virtual_crtc_init(struct amdgpu_device *adev, int index)
@@ -609,7 +614,6 @@ static int dce_virtual_connector_encoder_init(struct amdgpu_device *adev,
connector->display_info.subpixel_order = SubPixelHorizontalRGB;
connector->interlace_allowed = false;
connector->doublescan_allowed = false;
- drm_connector_register(connector);
/* link them */
drm_connector_attach_encoder(connector, encoder);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
index 1785fdad6ecb..42bbc0070831 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c
@@ -35,6 +35,8 @@
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
+#include "smuio/smuio_11_0_0_offset.h"
+#include "smuio/smuio_11_0_0_sh_mask.h"
#include "navi10_enum.h"
#include "hdp/hdp_5_0_0_offset.h"
#include "ivsrcid/gfx/irqsrcs_gfx_10_1.h"
@@ -52,7 +54,7 @@
* 1. Primary ring
* 2. Async ring
*/
-#define GFX10_NUM_GFX_RINGS 2
+#define GFX10_NUM_GFX_RINGS_NV1X 1
#define GFX10_MEC_HPD_SIZE 2048
#define F32_CE_PROGRAM_RAM_SIZE 65536
@@ -222,6 +224,49 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00800000)
};
+static void gfx_v10_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset);
+}
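
The write protocol above is a doorbell handshake with the RLC firmware: the value and the target offset (with bit 31 set as a busy flag) go into two scratch registers, SPARE_INT rings the doorbell, and the CPU polls until the firmware clears bit 31. A userspace model with a thread playing the RLC — the scratch words are C11 atomics rather than MMIO, and the 50000 x 10 us retry budget matches the driver:

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static _Atomic uint32_t scratch0, scratch1, spare_int;

static void *rlc_firmware(void *arg)
{
	(void)arg;
	while (!atomic_load(&spare_int))
		;				/* wait for the doorbell */
	/* ...program scratch0 into the register named in scratch1... */
	atomic_fetch_and(&scratch1, ~0x80000000u);	/* ack */
	return NULL;
}

static int rlcg_wreg(uint32_t offset, uint32_t v)
{
	int retries = 50000;

	atomic_store(&scratch0, v);
	atomic_store(&scratch1, offset | 0x80000000u);
	atomic_store(&spare_int, 1);

	while (retries--) {
		if (!(atomic_load(&scratch1) & 0x80000000u))
			return 0;
		usleep(10);		/* udelay(10) in the driver */
	}
	fprintf(stderr, "timeout: rlcg program reg:0x%05x failed\n", offset);
	return -1;
}

int main(void)				/* build with -pthread */
{
	pthread_t t;

	pthread_create(&t, NULL, rlc_firmware, NULL);
	printf("rc=%d\n", rlcg_wreg(0x1234, 0xdeadbeef));
	pthread_join(t, NULL);
	return 0;
}
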
+
static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] =
{
/* Pending on emulation bring up */
@@ -500,29 +545,28 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
struct amdgpu_device *adev = ring->adev;
struct amdgpu_ib ib;
struct dma_fence *f = NULL;
- uint32_t scratch;
- uint32_t tmp = 0;
+ unsigned index;
+ uint64_t gpu_addr;
+ uint32_t tmp;
long r;
- r = amdgpu_gfx_scratch_get(adev, &scratch);
- if (r) {
- DRM_ERROR("amdgpu: failed to get scratch reg (%ld).\n", r);
+ r = amdgpu_device_wb_get(adev, &index);
+ if (r)
return r;
- }
-
- WREG32(scratch, 0xCAFEDEAD);
+ gpu_addr = adev->wb.gpu_addr + (index * 4);
+ adev->wb.wb[index] = cpu_to_le32(0xCAFEDEAD);
memset(&ib, 0, sizeof(ib));
- r = amdgpu_ib_get(adev, NULL, 256, &ib);
- if (r) {
- DRM_ERROR("amdgpu: failed to get ib (%ld).\n", r);
+ r = amdgpu_ib_get(adev, NULL, 16, &ib);
+ if (r)
goto err1;
- }
- ib.ptr[0] = PACKET3(PACKET3_SET_UCONFIG_REG, 1);
- ib.ptr[1] = ((scratch - PACKET3_SET_UCONFIG_REG_START));
- ib.ptr[2] = 0xDEADBEEF;
- ib.length_dw = 3;
+ ib.ptr[0] = PACKET3(PACKET3_WRITE_DATA, 3);
+ ib.ptr[1] = WRITE_DATA_DST_SEL(5) | WR_CONFIRM;
+ ib.ptr[2] = lower_32_bits(gpu_addr);
+ ib.ptr[3] = upper_32_bits(gpu_addr);
+ ib.ptr[4] = 0xDEADBEEF;
+ ib.length_dw = 5;
r = amdgpu_ib_schedule(ring, 1, &ib, NULL, &f);
if (r)
@@ -530,15 +574,13 @@ static int gfx_v10_0_ring_test_ib(struct amdgpu_ring *ring, long timeout)
r = dma_fence_wait_timeout(f, false, timeout);
if (r == 0) {
- DRM_ERROR("amdgpu: IB test timed out.\n");
r = -ETIMEDOUT;
goto err2;
} else if (r < 0) {
- DRM_ERROR("amdgpu: fence wait failed (%ld).\n", r);
goto err2;
}
- tmp = RREG32(scratch);
+ tmp = adev->wb.wb[index];
if (tmp == 0xDEADBEEF)
r = 0;
else
@@ -547,8 +589,7 @@ err2:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
err1:
- amdgpu_gfx_scratch_free(adev, scratch);
-
+ amdgpu_device_wb_free(adev, index);
return r;
}
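
The rewritten test swaps a privileged scratch register for ordinary write-back memory: seed a WB slot with 0xCAFEDEAD, have the ring execute a WRITE_DATA packet targeting the slot's GPU address with 0xDEADBEEF, then read the CPU view back. A toy rendering where gpu_execute() plays the ring — the PACKET3 framing ((3 << 30) | (opcode << 8) | count) and the 0x37 opcode follow the driver headers; everything else is simplified:

#include <stdint.h>
#include <stdio.h>

#define PACKET3_WRITE_DATA 0x37

struct ib { uint32_t ptr[8]; unsigned length_dw; };

static void gpu_execute(struct ib *ib, uint32_t *wb)
{
	/* A real ring would parse dst_sel/addr; take the payload directly. */
	if (((ib->ptr[0] >> 8) & 0xff) == PACKET3_WRITE_DATA)
		*wb = ib->ptr[4];
}

int main(void)
{
	uint32_t wb = 0xCAFEDEAD;	/* adev->wb.wb[index] seed */
	struct ib ib = { .length_dw = 5 };

	ib.ptr[0] = (3u << 30) | (PACKET3_WRITE_DATA << 8) | 3;
	ib.ptr[1] = 0;			/* WRITE_DATA_DST_SEL(5)|WR_CONFIRM */
	ib.ptr[2] = 0;			/* lower_32_bits(gpu_addr) */
	ib.ptr[3] = 0;			/* upper_32_bits(gpu_addr) */
	ib.ptr[4] = 0xDEADBEEF;

	gpu_execute(&ib, &wb);
	printf(wb == 0xDEADBEEF ? "ib test pass\n" : "ib test fail\n");
	return 0;
}
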
@@ -1016,6 +1057,10 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1304,7 +1349,7 @@ static int gfx_v10_0_sw_init(void *handle)
case CHIP_NAVI14:
case CHIP_NAVI12:
adev->gfx.me.num_me = 1;
- adev->gfx.me.num_pipe_per_me = 2;
+ adev->gfx.me.num_pipe_per_me = 1;
adev->gfx.me.num_queue_per_pipe = 1;
adev->gfx.mec.num_mec = 2;
adev->gfx.mec.num_pipe_per_mec = 4;
@@ -1783,11 +1828,11 @@ static int gfx_v10_0_init_csb(struct amdgpu_device *adev)
adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
/* csib */
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_HI,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_HI,
adev->gfx.rlc.clear_state_gpu_addr >> 32);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_ADDR_LO,
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_ADDR_LO,
adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
- WREG32_SOC15(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
+ WREG32_SOC15_RLC(GC, 0, mmRLC_CSIB_LENGTH, adev->gfx.rlc.clear_state_size);
return 0;
}
@@ -2395,7 +2440,7 @@ static int gfx_v10_0_cp_gfx_enable(struct amdgpu_device *adev, bool enable)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
adev->gfx.gfx_ring[i].sched.ready = false;
}
- WREG32_SOC15(GC, 0, mmCP_ME_CNTL, tmp);
+ WREG32_SOC15_RLC(GC, 0, mmCP_ME_CNTL, tmp);
for (i = 0; i < adev->usec_timeout; i++) {
if (RREG32_SOC15(GC, 0, mmCP_STAT) == 0)
@@ -2710,18 +2755,20 @@ static int gfx_v10_0_cp_gfx_start(struct amdgpu_device *adev)
amdgpu_ring_commit(ring);
/* submit cs packet to copy state 0 to next available state */
- ring = &adev->gfx.gfx_ring[1];
- r = amdgpu_ring_alloc(ring, 2);
- if (r) {
- DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
- return r;
- }
-
- amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
- amdgpu_ring_write(ring, 0);
+ if (adev->gfx.num_gfx_rings > 1) {
+ /* maximum supported gfx ring is 2 */
+ ring = &adev->gfx.gfx_ring[1];
+ r = amdgpu_ring_alloc(ring, 2);
+ if (r) {
+ DRM_ERROR("amdgpu: cp failed to lock ring (%d).\n", r);
+ return r;
+ }
- amdgpu_ring_commit(ring);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_CLEAR_STATE, 0));
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_commit(ring);
+ }
return 0;
}
@@ -2818,39 +2865,41 @@ static int gfx_v10_0_cp_gfx_resume(struct amdgpu_device *adev)
mutex_unlock(&adev->srbm_mutex);
/* Init gfx ring 1 for pipe 1 */
- mutex_lock(&adev->srbm_mutex);
- gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
- ring = &adev->gfx.gfx_ring[1];
- rb_bufsz = order_base_2(ring->ring_size / 8);
- tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
- tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
- /* Initialize the ring buffer's write pointers */
- ring->wptr = 0;
- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
- WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
- /* Set the wb address wether it's enabled or not */
- rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
- WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
- CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
- wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
- lower_32_bits(wptr_gpu_addr));
- WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
- upper_32_bits(wptr_gpu_addr));
-
- mdelay(1);
- WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
-
- rb_addr = ring->gpu_addr >> 8;
- WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
- WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
- WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
-
- gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
- mutex_unlock(&adev->srbm_mutex);
-
+ if (adev->gfx.num_gfx_rings > 1) {
+ mutex_lock(&adev->srbm_mutex);
+ gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID1);
+ /* the maximum number of supported gfx rings is 2 */
+ ring = &adev->gfx.gfx_ring[1];
+ rb_bufsz = order_base_2(ring->ring_size / 8);
+ tmp = REG_SET_FIELD(0, CP_RB1_CNTL, RB_BUFSZ, rb_bufsz);
+ tmp = REG_SET_FIELD(tmp, CP_RB1_CNTL, RB_BLKSZ, rb_bufsz - 2);
+ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+ /* Initialize the ring buffer's write pointers */
+ ring->wptr = 0;
+ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR, lower_32_bits(ring->wptr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_WPTR_HI, upper_32_bits(ring->wptr));
+ /* Set the wb address whether it's enabled or not */
+ rptr_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4);
+ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR, lower_32_bits(rptr_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_RPTR_ADDR_HI, upper_32_bits(rptr_addr) &
+ CP_RB1_RPTR_ADDR_HI__RB_RPTR_ADDR_HI_MASK);
+ wptr_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4);
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_LO,
+ lower_32_bits(wptr_gpu_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB_WPTR_POLL_ADDR_HI,
+ upper_32_bits(wptr_gpu_addr));
+
+ mdelay(1);
+ WREG32_SOC15(GC, 0, mmCP_RB1_CNTL, tmp);
+
+ rb_addr = ring->gpu_addr >> 8;
+ WREG32_SOC15(GC, 0, mmCP_RB1_BASE, rb_addr);
+ WREG32_SOC15(GC, 0, mmCP_RB1_BASE_HI, upper_32_bits(rb_addr));
+ WREG32_SOC15(GC, 0, mmCP_RB1_ACTIVE, 1);
+
+ gfx_v10_0_cp_gfx_set_doorbell(adev, ring);
+ mutex_unlock(&adev->srbm_mutex);
+ }
/* Switch to pipe 0 */
mutex_lock(&adev->srbm_mutex);
gfx_v10_0_cp_gfx_switch_pipe(adev, PIPE_ID0);
@@ -3164,12 +3213,7 @@ static int gfx_v10_0_kiq_enable_kgq(struct amdgpu_device *adev)
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.gfx_ring[i]);
- r = amdgpu_ring_test_ring(kiq_ring);
- if (r) {
- DRM_ERROR("kfq enable failed\n");
- kiq_ring->sched.ready = false;
- }
- return r;
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3212,6 +3256,22 @@ done:
return r;
}
+static void gfx_v10_0_compute_mqd_set_priority(struct amdgpu_ring *ring, struct v10_compute_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
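The helper above (duplicated for gfx8 and gfx9 further down) moves compute-queue priority out of the runtime .set_priority callback, which this patch deletes, and bakes it into the MQD once at queue init. The decision reduces to a static mapping, roughly:

	/* sketch; non-high queues keep the MQD's default priority */
	bool high = amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue);

	ring->has_high_prio = high;
	if (high) {
		mqd->cp_hqd_pipe_priority  = AMDGPU_GFX_PIPE_PRIO_HIGH;
		mqd->cp_hqd_queue_priority = AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
	}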
+
static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3337,6 +3397,9 @@ static int gfx_v10_0_compute_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a compute queue/ring */
+ gfx_v10_0_compute_mqd_set_priority(ring, mqd);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -3513,6 +3576,7 @@ static int gfx_v10_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@@ -3785,7 +3849,7 @@ static int gfx_v10_0_kiq_disable_kgq(struct amdgpu_device *adev)
kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.gfx_ring[i],
PREEMPT_QUEUES, 0, 0);
- return amdgpu_ring_test_ring(kiq_ring);
+ return amdgpu_ring_test_helper(kiq_ring);
}
#endif
@@ -3923,11 +3987,12 @@ static uint64_t gfx_v10_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
- WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
- clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
- ((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
+ clock = (uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_LOWER) |
+ ((uint64_t)RREG32_SOC15(SMUIO, 0, mmGOLDEN_TSC_COUNT_UPPER) << 32ULL);
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
@@ -3964,7 +4029,8 @@ static int gfx_v10_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS;
+ adev->gfx.num_gfx_rings = GFX10_NUM_GFX_RINGS_NV1X;
+
adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
gfx_v10_0_set_kiq_pm4_funcs(adev);
@@ -4212,6 +4278,45 @@ static int gfx_v10_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v10_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
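The open-coded mask/shift above is the usual read-modify-write of a single register field; with the REG_SET_FIELD helper already used elsewhere in this file it would read, equivalently (a sketch, assuming the generated RLC_SPM_MC_CNTL field macros line up):

	u32 data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);

	data = REG_SET_FIELD(data, RLC_SPM_MC_CNTL, RLC_SPM_VMID, vmid);
	WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);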
+
+static bool gfx_v10_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v10_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v10_0_check_rlcg_range(adev, offset, NULL, 0);
+}
+
static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.is_rlc_enabled = gfx_v10_0_is_rlc_enabled,
.set_safe_mode = gfx_v10_0_set_safe_mode,
@@ -4222,7 +4327,10 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs = {
.resume = gfx_v10_0_rlc_resume,
.stop = gfx_v10_0_rlc_stop,
.reset = gfx_v10_0_rlc_reset,
- .start = gfx_v10_0_rlc_start
+ .start = gfx_v10_0_rlc_start,
+ .update_spm_vmid = gfx_v10_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v10_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range,
};
static int gfx_v10_0_set_powergating_state(void *handle,
@@ -4411,15 +4519,15 @@ static void gfx_v10_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
control |= ib->length_dw | (vmid << 24);
- if (amdgpu_mcbp && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
+ if ((amdgpu_sriov_vf(ring->adev) || amdgpu_mcbp) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
if (flags & AMDGPU_IB_PREEMPTED)
control |= INDIRECT_BUFFER_PRE_RESUME(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v10_0_ring_emit_de_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
}
amdgpu_ring_write(ring, header);
@@ -4566,9 +4674,9 @@ static void gfx_v10_0_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flag
{
uint32_t dw2 = 0;
- if (amdgpu_mcbp)
+ if (amdgpu_mcbp || amdgpu_sriov_vf(ring->adev))
gfx_v10_0_ring_emit_ce_meta(ring,
- flags & AMDGPU_IB_PREEMPTED ? true : false);
+ (!amdgpu_sriov_vf(ring->adev) && flags & AMDGPU_IB_PREEMPTED) ? true : false);
gfx_v10_0_ring_emit_tmz(ring, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 8f20a5dd44fe..733d398c61cc 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -3346,6 +3346,10 @@ static int gfx_v7_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -3570,6 +3574,18 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
return 0;
}
+static void gfx_v7_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static void gfx_v7_0_enable_cgcg(struct amdgpu_device *adev, bool enable)
{
u32 data, orig, tmp, tmp2;
@@ -4221,7 +4237,8 @@ static const struct amdgpu_rlc_funcs gfx_v7_0_rlc_funcs = {
.resume = gfx_v7_0_rlc_resume,
.stop = gfx_v7_0_rlc_stop,
.reset = gfx_v7_0_rlc_reset,
- .start = gfx_v7_0_rlc_start
+ .start = gfx_v7_0_rlc_start,
+ .update_spm_vmid = gfx_v7_0_update_spm_vmid
};
static int gfx_v7_0_early_init(void *handle)
@@ -4338,6 +4355,11 @@ static void gfx_v7_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index fa245973de12..fc32586ef80b 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -1318,6 +1318,10 @@ static int gfx_v8_0_rlc_init(struct amdgpu_device *adev)
return r;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1820,6 +1824,11 @@ static int gfx_v8_0_gpu_early_init(struct amdgpu_device *adev)
adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
+ adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFBANK);
+ adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
+ MC_ARB_RAMCFG, NOOFRANKS);
+
adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
adev->gfx.config.mem_max_burst_length_bytes = 256;
if (adev->flags & AMD_IS_APU) {
@@ -4421,6 +4430,22 @@ static int gfx_v8_0_deactivate_hqd(struct amdgpu_device *adev, u32 req)
return r;
}
+static void gfx_v8_0_mqd_set_priority(struct amdgpu_ring *ring, struct vi_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -4544,9 +4569,6 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
/* defaults */
mqd->cp_hqd_eop_rptr = RREG32(mmCP_HQD_EOP_RPTR);
mqd->cp_hqd_eop_wptr = RREG32(mmCP_HQD_EOP_WPTR);
- mqd->cp_hqd_pipe_priority = RREG32(mmCP_HQD_PIPE_PRIORITY);
- mqd->cp_hqd_queue_priority = RREG32(mmCP_HQD_QUEUE_PRIORITY);
- mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
mqd->cp_hqd_ctx_save_base_addr_lo = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_LO);
mqd->cp_hqd_ctx_save_base_addr_hi = RREG32(mmCP_HQD_CTX_SAVE_BASE_ADDR_HI);
mqd->cp_hqd_cntl_stack_offset = RREG32(mmCP_HQD_CNTL_STACK_OFFSET);
@@ -4558,6 +4580,10 @@ static int gfx_v8_0_mqd_init(struct amdgpu_ring *ring)
mqd->cp_hqd_eop_wptr_mem = RREG32(mmCP_HQD_EOP_WPTR_MEM);
mqd->cp_hqd_eop_dones = RREG32(mmCP_HQD_EOP_DONES);
+ /* set static priority for a queue/ring */
+ gfx_v8_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -5589,6 +5615,18 @@ static void gfx_v8_0_unset_safe_mode(struct amdgpu_device *adev)
}
}
+static void gfx_v8_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32(mmRLC_SPM_VMID);
+
+ data &= ~RLC_SPM_VMID__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_VMID__RLC_SPM_VMID_MASK) << RLC_SPM_VMID__RLC_SPM_VMID__SHIFT;
+
+ WREG32(mmRLC_SPM_VMID, data);
+}
+
static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.is_rlc_enabled = gfx_v8_0_is_rlc_enabled,
.set_safe_mode = gfx_v8_0_set_safe_mode,
@@ -5600,7 +5638,8 @@ static const struct amdgpu_rlc_funcs iceland_rlc_funcs = {
.resume = gfx_v8_0_rlc_resume,
.stop = gfx_v8_0_rlc_stop,
.reset = gfx_v8_0_rlc_reset,
- .start = gfx_v8_0_rlc_start
+ .start = gfx_v8_0_rlc_start,
+ .update_spm_vmid = gfx_v8_0_update_spm_vmid
};
static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
@@ -6094,7 +6133,7 @@ static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v8_0_ring_emit_de_meta(ring);
}
@@ -6236,104 +6275,6 @@ static void gfx_v8_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
WDOORBELL32(ring->doorbell_index, lower_32_bits(ring->wptr));
}
-static void gfx_v8_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = mmSPI_WCL_PIPE_PERCENT_GFX + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v8_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v8_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v8_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v8_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32(mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32(mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- vi_srbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-static void gfx_v8_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v8_0_hqd_set_priority(adev, ring, acquire);
- gfx_v8_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring,
u64 addr, u64 seq,
unsigned flags)
@@ -6966,7 +6907,6 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
.test_ib = gfx_v8_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v8_0_ring_set_priority_compute,
.emit_wreg = gfx_v8_0_ring_emit_wreg,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 90f64b8bc358..ba90a14089cf 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -697,6 +697,11 @@ static const struct soc15_reg_golden golden_settings_gc_9_4_1_arct[] =
SOC15_REG_GOLDEN_VALUE(GC, 0, mmSQ_FIFO_SIZES, 0xffffffff, 0x00000f00),
};
+static const struct soc15_reg_rlcg rlcg_access_gc_9_0[] = {
+ {SOC15_REG_ENTRY(GC, 0, mmGRBM_GFX_INDEX)},
+ {SOC15_REG_ENTRY(GC, 0, mmSQ_IND_INDEX)},
+};
+
static const u32 GFX_RLC_SRM_INDEX_CNTL_ADDR_OFFSETS[] =
{
mmRLC_SRM_INDEX_CNTL_ADDR_0 - mmRLC_SRM_INDEX_CNTL_ADDR_0,
@@ -721,6 +726,59 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] =
mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0,
};
+void gfx_v9_0_rlcg_wreg(struct amdgpu_device *adev, u32 offset, u32 v)
+{
+ static void *scratch_reg0;
+ static void *scratch_reg1;
+ static void *scratch_reg2;
+ static void *scratch_reg3;
+ static void *spare_int;
+ static uint32_t grbm_cntl;
+ static uint32_t grbm_idx;
+
+ scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4;
+ scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4;
+ scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2)*4;
+ scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3)*4;
+ spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4;
+
+ grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL;
+ grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX;
+
+ if (amdgpu_sriov_runtime(adev)) {
+ pr_err("shouldn't call rlcg write register during runtime\n");
+ return;
+ }
+
+ if (offset == grbm_cntl || offset == grbm_idx) {
+ if (offset == grbm_cntl)
+ writel(v, scratch_reg2);
+ else if (offset == grbm_idx)
+ writel(v, scratch_reg3);
+
+ writel(v, ((void __iomem *)adev->rmmio) + (offset * 4));
+ } else {
+ uint32_t i = 0;
+ uint32_t retries = 50000;
+
+ writel(v, scratch_reg0);
+ writel(offset | 0x80000000, scratch_reg1);
+ writel(1, spare_int);
+ for (i = 0; i < retries; i++) {
+ u32 tmp;
+
+ tmp = readl(scratch_reg1);
+ if (!(tmp & 0x80000000))
+ break;
+
+ udelay(10);
+ }
+ if (i >= retries)
+ pr_err("timeout: rlcg program reg:0x%05x failed !\n", offset);
+ }
+
+}
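The function above is a mailbox handshake with the RLC firmware: the value and target offset are posted in scratch registers, bit 31 of the offset word marks the request pending, the spare interrupt acts as a doorbell, and the driver polls for the firmware to clear the pending bit. Stripped of the amdgpu specifics, the protocol is roughly:

	writel(value, scratch_data);                    /* payload */
	writel(offset | 0x80000000, scratch_cmd);       /* bit 31 = request pending */
	writel(1, doorbell);                            /* kick the firmware */
	for (i = 0; i < retries; i++) {
		if (!(readl(scratch_cmd) & 0x80000000)) /* firmware acked */
			break;
		udelay(10);
	}

(scratch_data, scratch_cmd and doorbell are placeholder names for the mapped SCRATCH_REG0/SCRATCH_REG1/RLC_SPARE_INT addresses computed in the hunk.)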
+
#define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042
#define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041
#define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042
@@ -738,9 +796,9 @@ static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring);
static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring);
static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev,
void *ras_error_status);
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev);
static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev);
static void gfx_v9_0_kiq_set_resources(struct amdgpu_ring *kiq_ring,
uint64_t queue_mask)
@@ -1106,10 +1164,11 @@ static void gfx_v9_0_check_fw_write_wait(struct amdgpu_device *adev)
adev->gfx.me_fw_write_wait = false;
adev->gfx.mec_fw_write_wait = false;
- if ((adev->gfx.mec_fw_version < 0x000001a5) ||
+ if ((adev->asic_type != CHIP_ARCTURUS) &&
+ ((adev->gfx.mec_fw_version < 0x000001a5) ||
(adev->gfx.mec_feature_version < 46) ||
(adev->gfx.pfp_fw_version < 0x000000b7) ||
- (adev->gfx.pfp_feature_version < 46))
+ (adev->gfx.pfp_feature_version < 46)))
DRM_WARN_ONCE("CP firmware version too old, please update!");
switch (adev->asic_type) {
@@ -1193,6 +1252,14 @@ static bool gfx_v9_0_should_disable_gfxoff(struct pci_dev *pdev)
return false;
}
+static bool is_raven_kicker(struct amdgpu_device *adev)
+{
+ if (adev->pm.fw_version >= 0x41e2b)
+ return true;
+ else
+ return false;
+}
+
static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
{
if (gfx_v9_0_should_disable_gfxoff(adev->pdev))
@@ -1205,9 +1272,8 @@ static void gfx_v9_0_check_if_need_gfxoff(struct amdgpu_device *adev)
break;
case CHIP_RAVEN:
if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
- ((adev->gfx.rlc_fw_version != 106 &&
+ ((!is_raven_kicker(adev) &&
adev->gfx.rlc_fw_version < 531) ||
- (adev->gfx.rlc_fw_version == 53815) ||
(adev->gfx.rlc_feature_version < 1) ||
!adev->gfx.rlc.is_rlc_v2_1))
adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
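The Raven hunk above replaces an RLC firmware whitelist with the is_raven_kicker() check keyed off the SMC firmware version. The net effect, as a sketch of the resulting condition, is that GFXOFF stays enabled on kicker SKUs and on new-enough RLC firmware, and is masked off otherwise:

	/* sketch of the disable condition after this hunk */
	bool old_rlc = !is_raven_kicker(adev) && adev->gfx.rlc_fw_version < 531;

	if (!(adev->rev_id >= 0x8 || adev->pdev->device == 0x15d8) &&
	    (old_rlc || adev->gfx.rlc_feature_version < 1 ||
	     !adev->gfx.rlc.is_rlc_v2_1))
		adev->pm.pp_feature &= ~PP_GFXOFF_MASK;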
@@ -1839,6 +1905,10 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev)
break;
}
+ /* init spm vmid with 0xf */
+ if (adev->gfx.rlc.funcs->update_spm_vmid)
+ adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
+
return 0;
}
@@ -1909,7 +1979,7 @@ static int gfx_v9_0_mec_init(struct amdgpu_device *adev)
static uint32_t wave_read_ind(struct amdgpu_device *adev, uint32_t simd, uint32_t wave, uint32_t address)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(address << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1921,7 +1991,7 @@ static void wave_read_regs(struct amdgpu_device *adev, uint32_t simd,
uint32_t wave, uint32_t thread,
uint32_t regno, uint32_t num, uint32_t *out)
{
- WREG32_SOC15(GC, 0, mmSQ_IND_INDEX,
+ WREG32_SOC15_RLC(GC, 0, mmSQ_IND_INDEX,
(wave << SQ_IND_INDEX__WAVE_ID__SHIFT) |
(simd << SQ_IND_INDEX__SIMD_ID__SHIFT) |
(regno << SQ_IND_INDEX__INDEX__SHIFT) |
@@ -1985,7 +2055,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_0_ras_error_inject,
- .query_ras_error_count = &gfx_v9_0_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_0_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count,
};
static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
@@ -1996,7 +2067,8 @@ static const struct amdgpu_gfx_funcs gfx_v9_4_gfx_funcs = {
.read_wave_vgprs = &gfx_v9_0_read_wave_vgprs,
.select_me_pipe_q = &gfx_v9_0_select_me_pipe_q,
.ras_error_inject = &gfx_v9_4_ras_error_inject,
- .query_ras_error_count = &gfx_v9_4_query_ras_error_count
+ .query_ras_error_count = &gfx_v9_4_query_ras_error_count,
+ .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count,
};
static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev)
@@ -3302,6 +3374,22 @@ static void gfx_v9_0_kiq_setting(struct amdgpu_ring *ring)
WREG32_SOC15_RLC(GC, 0, mmRLC_CP_SCHEDULERS, tmp);
}
+static void gfx_v9_0_mqd_set_priority(struct amdgpu_ring *ring, struct v9_mqd *mqd)
+{
+ struct amdgpu_device *adev = ring->adev;
+
+ if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) {
+ if (amdgpu_gfx_is_high_priority_compute_queue(adev, ring->queue)) {
+ mqd->cp_hqd_pipe_priority = AMDGPU_GFX_PIPE_PRIO_HIGH;
+ ring->has_high_prio = true;
+ mqd->cp_hqd_queue_priority =
+ AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM;
+ } else {
+ ring->has_high_prio = false;
+ }
+ }
+}
+
static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -3438,6 +3526,10 @@ static int gfx_v9_0_mqd_init(struct amdgpu_ring *ring)
tmp = REG_SET_FIELD(tmp, CP_HQD_IB_CONTROL, MIN_IB_AVAIL_SIZE, 3);
mqd->cp_hqd_ib_control = tmp;
+ /* set static priority for a queue/ring */
+ gfx_v9_0_mqd_set_priority(ring, mqd);
+ mqd->cp_hqd_quantum = RREG32(mmCP_HQD_QUANTUM);
+
/* map_queues packet doesn't need activate the queue,
* so only kiq need set this field.
*/
@@ -3656,6 +3748,7 @@ static int gfx_v9_0_kcq_init_queue(struct amdgpu_ring *ring)
/* reset ring buffer */
ring->wptr = 0;
+ atomic64_set((atomic64_t *)&adev->wb.wb[ring->wptr_offs], 0);
amdgpu_ring_clear_ring(ring);
} else {
amdgpu_ring_clear_ring(ring);
@@ -3955,28 +4048,78 @@ static int gfx_v9_0_soft_reset(void *handle)
return 0;
}
+static uint64_t gfx_v9_0_kiq_read_clock(struct amdgpu_device *adev)
+{
+ signed long r, cnt = 0;
+ unsigned long flags;
+ uint32_t seq;
+ struct amdgpu_kiq *kiq = &adev->gfx.kiq;
+ struct amdgpu_ring *ring = &kiq->ring;
+
+ BUG_ON(!ring->funcs->emit_rreg);
+
+ spin_lock_irqsave(&kiq->ring_lock, flags);
+ amdgpu_ring_alloc(ring, 32);
+ amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4));
+ amdgpu_ring_write(ring, 9 | /* src: register*/
+ (5 << 8) | /* dst: memory */
+ (1 << 16) | /* count sel */
+ (1 << 20)); /* write confirm */
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, 0);
+ amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr +
+ kiq->reg_val_offs * 4));
+ amdgpu_fence_emit_polling(ring, &seq);
+ amdgpu_ring_commit(ring);
+ spin_unlock_irqrestore(&kiq->ring_lock, flags);
+
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+
+ /* Don't wait anymore in the GPU reset case, because this path may
+ * block the gpu_recover() routine forever; e.g. this virt_kiq_rreg
+ * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
+ * never return if we keep waiting in virt_kiq_rreg, which causes
+ * gpu_recover() to hang there.
+ *
+ * Also don't wait anymore in IRQ context.
+ */
+ if (r < 1 && (adev->in_gpu_reset || in_interrupt()))
+ goto failed_kiq_read;
+
+ might_sleep();
+ while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
+ msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
+ r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
+ }
+
+ if (cnt > MAX_KIQ_REG_TRY)
+ goto failed_kiq_read;
+
+ return (uint64_t)adev->wb.wb[kiq->reg_val_offs] |
+ (uint64_t)adev->wb.wb[kiq->reg_val_offs + 1 ] << 32ULL;
+
+failed_kiq_read:
+ pr_err("failed to read gpu clock\n");
+ return ~0;
+}
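For reference, the magic control dword in the COPY_DATA packet above packs the fields the inline comments name; spelled out as a sketch (field positions taken from those comments, symbolic names hypothetical):

	#define COPY_DATA_SRC_SEL(x)  ((x) << 0)   /* 9 above: the source selector */
	#define COPY_DATA_DST_SEL(x)  ((x) << 8)   /* 5: destination is memory     */
	#define COPY_DATA_COUNT_SEL   (1 << 16)    /* copy 64 bits rather than 32  */
	#define COPY_DATA_WR_CONFIRM  (1 << 20)    /* wait for the write to land   */

so the write becomes COPY_DATA_SRC_SEL(9) | COPY_DATA_DST_SEL(5) | COPY_DATA_COUNT_SEL | COPY_DATA_WR_CONFIRM, followed by two dwords of (unused) source address and two dwords of WB destination address.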
+
static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev)
{
uint64_t clock;
+ amdgpu_gfx_off_ctrl(adev, false);
mutex_lock(&adev->gfx.gpu_clock_mutex);
if (adev->asic_type == CHIP_VEGA10 && amdgpu_sriov_runtime(adev)) {
- uint32_t tmp, lsb, msb, i = 0;
- do {
- if (i != 0)
- udelay(1);
- tmp = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- lsb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_LSB);
- msb = RREG32_SOC15(GC, 0, mmRLC_REFCLOCK_TIMESTAMP_MSB);
- i++;
- } while (unlikely(tmp != msb) && (i < adev->usec_timeout));
- clock = (uint64_t)lsb | ((uint64_t)msb << 32ULL);
+ clock = gfx_v9_0_kiq_read_clock(adev);
} else {
WREG32_SOC15(GC, 0, mmRLC_CAPTURE_GPU_CLOCK_COUNT, 1);
clock = (uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_LSB) |
((uint64_t)RREG32_SOC15(GC, 0, mmRLC_GPU_CLOCK_COUNT_MSB) << 32ULL);
}
mutex_unlock(&adev->gfx.gpu_clock_mutex);
+ amdgpu_gfx_off_ctrl(adev, true);
return clock;
}
@@ -4043,6 +4186,101 @@ static const u32 sgpr_init_compute_shader[] =
0xbe800080, 0xbf810000,
};
+static const u32 vgpr_init_compute_shader_arcturus[] = {
+ 0xd3d94000, 0x18000080, 0xd3d94001, 0x18000080, 0xd3d94002, 0x18000080,
+ 0xd3d94003, 0x18000080, 0xd3d94004, 0x18000080, 0xd3d94005, 0x18000080,
+ 0xd3d94006, 0x18000080, 0xd3d94007, 0x18000080, 0xd3d94008, 0x18000080,
+ 0xd3d94009, 0x18000080, 0xd3d9400a, 0x18000080, 0xd3d9400b, 0x18000080,
+ 0xd3d9400c, 0x18000080, 0xd3d9400d, 0x18000080, 0xd3d9400e, 0x18000080,
+ 0xd3d9400f, 0x18000080, 0xd3d94010, 0x18000080, 0xd3d94011, 0x18000080,
+ 0xd3d94012, 0x18000080, 0xd3d94013, 0x18000080, 0xd3d94014, 0x18000080,
+ 0xd3d94015, 0x18000080, 0xd3d94016, 0x18000080, 0xd3d94017, 0x18000080,
+ 0xd3d94018, 0x18000080, 0xd3d94019, 0x18000080, 0xd3d9401a, 0x18000080,
+ 0xd3d9401b, 0x18000080, 0xd3d9401c, 0x18000080, 0xd3d9401d, 0x18000080,
+ 0xd3d9401e, 0x18000080, 0xd3d9401f, 0x18000080, 0xd3d94020, 0x18000080,
+ 0xd3d94021, 0x18000080, 0xd3d94022, 0x18000080, 0xd3d94023, 0x18000080,
+ 0xd3d94024, 0x18000080, 0xd3d94025, 0x18000080, 0xd3d94026, 0x18000080,
+ 0xd3d94027, 0x18000080, 0xd3d94028, 0x18000080, 0xd3d94029, 0x18000080,
+ 0xd3d9402a, 0x18000080, 0xd3d9402b, 0x18000080, 0xd3d9402c, 0x18000080,
+ 0xd3d9402d, 0x18000080, 0xd3d9402e, 0x18000080, 0xd3d9402f, 0x18000080,
+ 0xd3d94030, 0x18000080, 0xd3d94031, 0x18000080, 0xd3d94032, 0x18000080,
+ 0xd3d94033, 0x18000080, 0xd3d94034, 0x18000080, 0xd3d94035, 0x18000080,
+ 0xd3d94036, 0x18000080, 0xd3d94037, 0x18000080, 0xd3d94038, 0x18000080,
+ 0xd3d94039, 0x18000080, 0xd3d9403a, 0x18000080, 0xd3d9403b, 0x18000080,
+ 0xd3d9403c, 0x18000080, 0xd3d9403d, 0x18000080, 0xd3d9403e, 0x18000080,
+ 0xd3d9403f, 0x18000080, 0xd3d94040, 0x18000080, 0xd3d94041, 0x18000080,
+ 0xd3d94042, 0x18000080, 0xd3d94043, 0x18000080, 0xd3d94044, 0x18000080,
+ 0xd3d94045, 0x18000080, 0xd3d94046, 0x18000080, 0xd3d94047, 0x18000080,
+ 0xd3d94048, 0x18000080, 0xd3d94049, 0x18000080, 0xd3d9404a, 0x18000080,
+ 0xd3d9404b, 0x18000080, 0xd3d9404c, 0x18000080, 0xd3d9404d, 0x18000080,
+ 0xd3d9404e, 0x18000080, 0xd3d9404f, 0x18000080, 0xd3d94050, 0x18000080,
+ 0xd3d94051, 0x18000080, 0xd3d94052, 0x18000080, 0xd3d94053, 0x18000080,
+ 0xd3d94054, 0x18000080, 0xd3d94055, 0x18000080, 0xd3d94056, 0x18000080,
+ 0xd3d94057, 0x18000080, 0xd3d94058, 0x18000080, 0xd3d94059, 0x18000080,
+ 0xd3d9405a, 0x18000080, 0xd3d9405b, 0x18000080, 0xd3d9405c, 0x18000080,
+ 0xd3d9405d, 0x18000080, 0xd3d9405e, 0x18000080, 0xd3d9405f, 0x18000080,
+ 0xd3d94060, 0x18000080, 0xd3d94061, 0x18000080, 0xd3d94062, 0x18000080,
+ 0xd3d94063, 0x18000080, 0xd3d94064, 0x18000080, 0xd3d94065, 0x18000080,
+ 0xd3d94066, 0x18000080, 0xd3d94067, 0x18000080, 0xd3d94068, 0x18000080,
+ 0xd3d94069, 0x18000080, 0xd3d9406a, 0x18000080, 0xd3d9406b, 0x18000080,
+ 0xd3d9406c, 0x18000080, 0xd3d9406d, 0x18000080, 0xd3d9406e, 0x18000080,
+ 0xd3d9406f, 0x18000080, 0xd3d94070, 0x18000080, 0xd3d94071, 0x18000080,
+ 0xd3d94072, 0x18000080, 0xd3d94073, 0x18000080, 0xd3d94074, 0x18000080,
+ 0xd3d94075, 0x18000080, 0xd3d94076, 0x18000080, 0xd3d94077, 0x18000080,
+ 0xd3d94078, 0x18000080, 0xd3d94079, 0x18000080, 0xd3d9407a, 0x18000080,
+ 0xd3d9407b, 0x18000080, 0xd3d9407c, 0x18000080, 0xd3d9407d, 0x18000080,
+ 0xd3d9407e, 0x18000080, 0xd3d9407f, 0x18000080, 0xd3d94080, 0x18000080,
+ 0xd3d94081, 0x18000080, 0xd3d94082, 0x18000080, 0xd3d94083, 0x18000080,
+ 0xd3d94084, 0x18000080, 0xd3d94085, 0x18000080, 0xd3d94086, 0x18000080,
+ 0xd3d94087, 0x18000080, 0xd3d94088, 0x18000080, 0xd3d94089, 0x18000080,
+ 0xd3d9408a, 0x18000080, 0xd3d9408b, 0x18000080, 0xd3d9408c, 0x18000080,
+ 0xd3d9408d, 0x18000080, 0xd3d9408e, 0x18000080, 0xd3d9408f, 0x18000080,
+ 0xd3d94090, 0x18000080, 0xd3d94091, 0x18000080, 0xd3d94092, 0x18000080,
+ 0xd3d94093, 0x18000080, 0xd3d94094, 0x18000080, 0xd3d94095, 0x18000080,
+ 0xd3d94096, 0x18000080, 0xd3d94097, 0x18000080, 0xd3d94098, 0x18000080,
+ 0xd3d94099, 0x18000080, 0xd3d9409a, 0x18000080, 0xd3d9409b, 0x18000080,
+ 0xd3d9409c, 0x18000080, 0xd3d9409d, 0x18000080, 0xd3d9409e, 0x18000080,
+ 0xd3d9409f, 0x18000080, 0xd3d940a0, 0x18000080, 0xd3d940a1, 0x18000080,
+ 0xd3d940a2, 0x18000080, 0xd3d940a3, 0x18000080, 0xd3d940a4, 0x18000080,
+ 0xd3d940a5, 0x18000080, 0xd3d940a6, 0x18000080, 0xd3d940a7, 0x18000080,
+ 0xd3d940a8, 0x18000080, 0xd3d940a9, 0x18000080, 0xd3d940aa, 0x18000080,
+ 0xd3d940ab, 0x18000080, 0xd3d940ac, 0x18000080, 0xd3d940ad, 0x18000080,
+ 0xd3d940ae, 0x18000080, 0xd3d940af, 0x18000080, 0xd3d940b0, 0x18000080,
+ 0xd3d940b1, 0x18000080, 0xd3d940b2, 0x18000080, 0xd3d940b3, 0x18000080,
+ 0xd3d940b4, 0x18000080, 0xd3d940b5, 0x18000080, 0xd3d940b6, 0x18000080,
+ 0xd3d940b7, 0x18000080, 0xd3d940b8, 0x18000080, 0xd3d940b9, 0x18000080,
+ 0xd3d940ba, 0x18000080, 0xd3d940bb, 0x18000080, 0xd3d940bc, 0x18000080,
+ 0xd3d940bd, 0x18000080, 0xd3d940be, 0x18000080, 0xd3d940bf, 0x18000080,
+ 0xd3d940c0, 0x18000080, 0xd3d940c1, 0x18000080, 0xd3d940c2, 0x18000080,
+ 0xd3d940c3, 0x18000080, 0xd3d940c4, 0x18000080, 0xd3d940c5, 0x18000080,
+ 0xd3d940c6, 0x18000080, 0xd3d940c7, 0x18000080, 0xd3d940c8, 0x18000080,
+ 0xd3d940c9, 0x18000080, 0xd3d940ca, 0x18000080, 0xd3d940cb, 0x18000080,
+ 0xd3d940cc, 0x18000080, 0xd3d940cd, 0x18000080, 0xd3d940ce, 0x18000080,
+ 0xd3d940cf, 0x18000080, 0xd3d940d0, 0x18000080, 0xd3d940d1, 0x18000080,
+ 0xd3d940d2, 0x18000080, 0xd3d940d3, 0x18000080, 0xd3d940d4, 0x18000080,
+ 0xd3d940d5, 0x18000080, 0xd3d940d6, 0x18000080, 0xd3d940d7, 0x18000080,
+ 0xd3d940d8, 0x18000080, 0xd3d940d9, 0x18000080, 0xd3d940da, 0x18000080,
+ 0xd3d940db, 0x18000080, 0xd3d940dc, 0x18000080, 0xd3d940dd, 0x18000080,
+ 0xd3d940de, 0x18000080, 0xd3d940df, 0x18000080, 0xd3d940e0, 0x18000080,
+ 0xd3d940e1, 0x18000080, 0xd3d940e2, 0x18000080, 0xd3d940e3, 0x18000080,
+ 0xd3d940e4, 0x18000080, 0xd3d940e5, 0x18000080, 0xd3d940e6, 0x18000080,
+ 0xd3d940e7, 0x18000080, 0xd3d940e8, 0x18000080, 0xd3d940e9, 0x18000080,
+ 0xd3d940ea, 0x18000080, 0xd3d940eb, 0x18000080, 0xd3d940ec, 0x18000080,
+ 0xd3d940ed, 0x18000080, 0xd3d940ee, 0x18000080, 0xd3d940ef, 0x18000080,
+ 0xd3d940f0, 0x18000080, 0xd3d940f1, 0x18000080, 0xd3d940f2, 0x18000080,
+ 0xd3d940f3, 0x18000080, 0xd3d940f4, 0x18000080, 0xd3d940f5, 0x18000080,
+ 0xd3d940f6, 0x18000080, 0xd3d940f7, 0x18000080, 0xd3d940f8, 0x18000080,
+ 0xd3d940f9, 0x18000080, 0xd3d940fa, 0x18000080, 0xd3d940fb, 0x18000080,
+ 0xd3d940fc, 0x18000080, 0xd3d940fd, 0x18000080, 0xd3d940fe, 0x18000080,
+ 0xd3d940ff, 0x18000080, 0xb07c0000, 0xbe8a00ff, 0x000000f8, 0xbf11080a,
+ 0x7e000280, 0x7e020280, 0x7e040280, 0x7e060280, 0x7e080280, 0x7e0a0280,
+ 0x7e0c0280, 0x7e0e0280, 0x808a880a, 0xbe80320a, 0xbf84fff5, 0xbf9c0000,
+ 0xd28c0001, 0x0001007f, 0xd28d0001, 0x0002027e, 0x10020288, 0xb88b0904,
+ 0xb78b4000, 0xd1196a01, 0x00001701, 0xbe8a0087, 0xbefc00c1, 0xd89c4000,
+ 0x00020201, 0xd89cc080, 0x00040401, 0x320202ff, 0x00000800, 0x808a810a,
+ 0xbf84fff8, 0xbf810000,
+};
+
/* When below register arrays changed, please update gpr_reg_size,
and sec_ded_counter_reg_size in function gfx_v9_0_do_edc_gpr_workarounds,
to cover all gfx9 ASICs */
@@ -4063,6 +4301,23 @@ static const struct soc15_reg_entry vgpr_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
};
+static const struct soc15_reg_entry vgpr_init_regs_arcturus[] = {
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Y), 4 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_Z), 1 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC1), 0x81 },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_PGM_RSRC2), 0x400000 }, /* 64KB LDS */
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE0), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE1), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE2), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE3), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE4), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE5), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE6), 0xffffffff },
+ { SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_STATIC_THREAD_MGMT_SE7), 0xffffffff },
+};
+
static const struct soc15_reg_entry sgpr1_init_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_RESOURCE_LIMITS), 0x0000000 },
{ SOC15_REG_ENTRY(GC, 0, mmCOMPUTE_NUM_THREAD_X), 0x40 },
@@ -4131,7 +4386,6 @@ static const struct soc15_reg_entry gfx_v9_0_edc_counter_regs[] = {
{ SOC15_REG_ENTRY(GC, 0, mmTCC_EDC_CNT2), 0, 1, 16},
{ SOC15_REG_ENTRY(GC, 0, mmTCA_EDC_CNT), 0, 1, 2},
{ SOC15_REG_ENTRY(GC, 0, mmSQC_EDC_CNT3), 0, 4, 6},
- { SOC15_REG_ENTRY(HDP, 0, mmHDP_EDC_CNT), 0, 1, 1},
};
static int gfx_v9_0_do_edc_gds_workarounds(struct amdgpu_device *adev)
@@ -4194,7 +4448,10 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
adev->gfx.config.max_cu_per_sh *
adev->gfx.config.max_sh_per_se;
int sgpr_work_group_size = 5;
- int gpr_reg_size = compute_dim_x / 16 + 6;
+ int gpr_reg_size = adev->gfx.config.max_shader_engines + 6;
+ int vgpr_init_shader_size;
+ const u32 *vgpr_init_shader_ptr;
+ const struct soc15_reg_entry *vgpr_init_regs_ptr;
/* only support when RAS is enabled */
if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
@@ -4204,6 +4461,16 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
if (!ring->sched.ready)
return 0;
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader_arcturus;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader_arcturus);
+ vgpr_init_regs_ptr = vgpr_init_regs_arcturus;
+ } else {
+ vgpr_init_shader_ptr = vgpr_init_compute_shader;
+ vgpr_init_shader_size = sizeof(vgpr_init_compute_shader);
+ vgpr_init_regs_ptr = vgpr_init_regs;
+ }
+
total_size =
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* VGPRS */
total_size +=
@@ -4212,7 +4479,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
(gpr_reg_size * 3 + 4 + 5 + 2) * 4; /* SGPRS2 */
total_size = ALIGN(total_size, 256);
vgpr_offset = total_size;
- total_size += ALIGN(sizeof(vgpr_init_compute_shader), 256);
+ total_size += ALIGN(vgpr_init_shader_size, 256);
sgpr_offset = total_size;
total_size += sizeof(sgpr_init_compute_shader);
@@ -4225,8 +4492,8 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
}
/* load the compute shaders */
- for (i = 0; i < ARRAY_SIZE(vgpr_init_compute_shader); i++)
- ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_compute_shader[i];
+ for (i = 0; i < vgpr_init_shader_size/sizeof(u32); i++)
+ ib.ptr[i + (vgpr_offset / 4)] = vgpr_init_shader_ptr[i];
for (i = 0; i < ARRAY_SIZE(sgpr_init_compute_shader); i++)
ib.ptr[i + (sgpr_offset / 4)] = sgpr_init_compute_shader[i];
@@ -4238,9 +4505,9 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write the register state for the compute dispatch */
for (i = 0; i < gpr_reg_size; i++) {
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_SET_SH_REG, 1);
- ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs[i])
+ ib.ptr[ib.length_dw++] = SOC15_REG_ENTRY_OFFSET(vgpr_init_regs_ptr[i])
- PACKET3_SET_SH_REG_START;
- ib.ptr[ib.length_dw++] = vgpr_init_regs[i].reg_value;
+ ib.ptr[ib.length_dw++] = vgpr_init_regs_ptr[i].reg_value;
}
/* write the shader start address: mmCOMPUTE_PGM_LO, mmCOMPUTE_PGM_HI */
gpu_addr = (ib.gpu_addr + (u64)vgpr_offset) >> 8;
@@ -4252,7 +4519,7 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
/* write dispatch packet */
ib.ptr[ib.length_dw++] = PACKET3(PACKET3_DISPATCH_DIRECT, 3);
- ib.ptr[ib.length_dw++] = compute_dim_x; /* x */
+ ib.ptr[ib.length_dw++] = compute_dim_x * 2; /* x */
ib.ptr[ib.length_dw++] = 1; /* y */
ib.ptr[ib.length_dw++] = 1; /* z */
ib.ptr[ib.length_dw++] =
@@ -4332,18 +4599,6 @@ static int gfx_v9_0_do_edc_gpr_workarounds(struct amdgpu_device *adev)
goto fail;
}
- switch (adev->asic_type)
- {
- case CHIP_VEGA20:
- gfx_v9_0_clear_ras_edc_counter(adev);
- break;
- case CHIP_ARCTURUS:
- gfx_v9_4_clear_ras_edc_counter(adev);
- break;
- default:
- break;
- }
-
fail:
amdgpu_ib_free(adev, &ib, NULL);
dma_fence_put(f);
@@ -4374,15 +4629,27 @@ static int gfx_v9_0_ecc_late_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
int r;
- r = gfx_v9_0_do_edc_gds_workarounds(adev);
- if (r)
- return r;
+ /*
+ * Temporary workaround: on several cards the CP firmware fails to
+ * update the read pointer when CPDMA writes the clearing operation
+ * to GDS during the suspend/resume sequence, so limit this
+ * operation to the cold-boot sequence.
+ */
+ if (!adev->in_suspend) {
+ r = gfx_v9_0_do_edc_gds_workarounds(adev);
+ if (r)
+ return r;
+ }
/* requires IBs so do in late init after IB pool is initialized */
r = gfx_v9_0_do_edc_gpr_workarounds(adev);
if (r)
return r;
+ if (adev->gfx.funcs &&
+ adev->gfx.funcs->reset_ras_error_count)
+ adev->gfx.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gfx_ras_late_init(adev);
if (r)
return r;
@@ -4687,6 +4954,47 @@ static int gfx_v9_0_update_gfx_clock_gating(struct amdgpu_device *adev,
return 0;
}
+static void gfx_v9_0_update_spm_vmid(struct amdgpu_device *adev, unsigned vmid)
+{
+ u32 data;
+
+ data = RREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL);
+
+ data &= ~RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK;
+ data |= (vmid & RLC_SPM_MC_CNTL__RLC_SPM_VMID_MASK) << RLC_SPM_MC_CNTL__RLC_SPM_VMID__SHIFT;
+
+ WREG32_SOC15(GC, 0, mmRLC_SPM_MC_CNTL, data);
+}
+
+static bool gfx_v9_0_check_rlcg_range(struct amdgpu_device *adev,
+ uint32_t offset,
+ struct soc15_reg_rlcg *entries, int arr_size)
+{
+ int i;
+ uint32_t reg;
+
+ if (!entries)
+ return false;
+
+ for (i = 0; i < arr_size; i++) {
+ const struct soc15_reg_rlcg *entry;
+
+ entry = &entries[i];
+ reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg;
+ if (offset == reg)
+ return true;
+ }
+
+ return false;
+}
+
+static bool gfx_v9_0_is_rlcg_access_range(struct amdgpu_device *adev, u32 offset)
+{
+ return gfx_v9_0_check_rlcg_range(adev, offset,
+ (void *)rlcg_access_gc_9_0,
+ ARRAY_SIZE(rlcg_access_gc_9_0));
+}
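The whitelist above covers the two registers in rlcg_access_gc_9_0; SQ_IND_INDEX's writers were switched to WREG32_SOC15_RLC earlier in this file. The intended dispatch is roughly the following (a sketch; the real routing lives in the WREG32_*_RLC macros):

	if (amdgpu_sriov_vf(adev) &&
	    adev->gfx.rlc.funcs->is_rlcg_access_range(adev, offset))
		adev->gfx.rlc.funcs->rlcg_wreg(adev, offset, value);
	else
		WREG32(offset, value);

The gfx10 variant added earlier passes a NULL table, so on Navi the hook is plumbed but matches nothing yet.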
+
static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.is_rlc_enabled = gfx_v9_0_is_rlc_enabled,
.set_safe_mode = gfx_v9_0_set_safe_mode,
@@ -4698,7 +5006,10 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = {
.resume = gfx_v9_0_rlc_resume,
.stop = gfx_v9_0_rlc_stop,
.reset = gfx_v9_0_rlc_reset,
- .start = gfx_v9_0_rlc_start
+ .start = gfx_v9_0_rlc_start,
+ .update_spm_vmid = gfx_v9_0_update_spm_vmid,
+ .rlcg_wreg = gfx_v9_0_rlcg_wreg,
+ .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range,
};
static int gfx_v9_0_set_powergating_state(void *handle,
@@ -4901,7 +5212,7 @@ static void gfx_v9_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
if (amdgpu_sriov_vf(ring->adev) && (ib->flags & AMDGPU_IB_FLAG_PREEMPT)) {
control |= INDIRECT_BUFFER_PRE_ENB(1);
- if (!(ib->flags & AMDGPU_IB_FLAG_CE))
+ if (!(ib->flags & AMDGPU_IB_FLAG_CE) && vmid)
gfx_v9_0_ring_emit_de_meta(ring);
}
@@ -5026,105 +5337,6 @@ static u64 gfx_v9_0_ring_get_wptr_compute(struct amdgpu_ring *ring)
return wptr;
}
-static void gfx_v9_0_ring_set_pipe_percent(struct amdgpu_ring *ring,
- bool acquire)
-{
- struct amdgpu_device *adev = ring->adev;
- int pipe_num, tmp, reg;
- int pipe_percent = acquire ? SPI_WCL_PIPE_PERCENT_GFX__VALUE_MASK : 0x1;
-
- pipe_num = ring->me * adev->gfx.mec.num_pipe_per_mec + ring->pipe;
-
- /* first me only has 2 entries, GFX and HP3D */
- if (ring->me > 0)
- pipe_num -= 2;
-
- reg = SOC15_REG_OFFSET(GC, 0, mmSPI_WCL_PIPE_PERCENT_GFX) + pipe_num;
- tmp = RREG32(reg);
- tmp = REG_SET_FIELD(tmp, SPI_WCL_PIPE_PERCENT_GFX, VALUE, pipe_percent);
- WREG32(reg, tmp);
-}
-
-static void gfx_v9_0_pipe_reserve_resources(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- int i, pipe;
- bool reserve;
- struct amdgpu_ring *iring;
-
- mutex_lock(&adev->gfx.pipe_reserve_mutex);
- pipe = amdgpu_gfx_mec_queue_to_bit(adev, ring->me, ring->pipe, 0);
- if (acquire)
- set_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- else
- clear_bit(pipe, adev->gfx.pipe_reserve_bitmap);
-
- if (!bitmap_weight(adev->gfx.pipe_reserve_bitmap, AMDGPU_MAX_COMPUTE_QUEUES)) {
- /* Clear all reservations - everyone reacquires all resources */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.gfx_ring[i],
- true);
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i)
- gfx_v9_0_ring_set_pipe_percent(&adev->gfx.compute_ring[i],
- true);
- } else {
- /* Lower all pipes without a current reservation */
- for (i = 0; i < adev->gfx.num_gfx_rings; ++i) {
- iring = &adev->gfx.gfx_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
-
- for (i = 0; i < adev->gfx.num_compute_rings; ++i) {
- iring = &adev->gfx.compute_ring[i];
- pipe = amdgpu_gfx_mec_queue_to_bit(adev,
- iring->me,
- iring->pipe,
- 0);
- reserve = test_bit(pipe, adev->gfx.pipe_reserve_bitmap);
- gfx_v9_0_ring_set_pipe_percent(iring, reserve);
- }
- }
-
- mutex_unlock(&adev->gfx.pipe_reserve_mutex);
-}
-
-static void gfx_v9_0_hqd_set_priority(struct amdgpu_device *adev,
- struct amdgpu_ring *ring,
- bool acquire)
-{
- uint32_t pipe_priority = acquire ? 0x2 : 0x0;
- uint32_t queue_priority = acquire ? 0xf : 0x0;
-
- mutex_lock(&adev->srbm_mutex);
- soc15_grbm_select(adev, ring->me, ring->pipe, ring->queue, 0);
-
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_PIPE_PRIORITY, pipe_priority);
- WREG32_SOC15_RLC(GC, 0, mmCP_HQD_QUEUE_PRIORITY, queue_priority);
-
- soc15_grbm_select(adev, 0, 0, 0, 0);
- mutex_unlock(&adev->srbm_mutex);
-}
-
-static void gfx_v9_0_ring_set_priority_compute(struct amdgpu_ring *ring,
- enum drm_sched_priority priority)
-{
- struct amdgpu_device *adev = ring->adev;
- bool acquire = priority == DRM_SCHED_PRIORITY_HIGH_HW;
-
- if (ring->funcs->type != AMDGPU_RING_TYPE_COMPUTE)
- return;
-
- gfx_v9_0_hqd_set_priority(adev, ring, acquire);
- gfx_v9_0_pipe_reserve_resources(adev, ring, acquire);
-}
-
static void gfx_v9_0_ring_set_wptr_compute(struct amdgpu_ring *ring)
{
struct amdgpu_device *adev = ring->adev;
@@ -6304,10 +6516,13 @@ static int gfx_v9_0_ras_error_count(const struct soc15_reg_entry *reg,
return 0;
}
-static void gfx_v9_0_clear_ras_edc_counter(struct amdgpu_device *adev)
+static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
/* read back registers to clear the counters */
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_0_edc_counter_regs); i++) {
@@ -6495,7 +6710,6 @@ static const struct amdgpu_ring_funcs gfx_v9_0_ring_funcs_compute = {
.test_ib = gfx_v9_0_ring_test_ib,
.insert_nop = amdgpu_ring_insert_nop,
.pad_ib = amdgpu_ring_generic_pad_ib,
- .set_priority = gfx_v9_0_ring_set_priority_compute,
.emit_wreg = gfx_v9_0_ring_emit_wreg,
.emit_reg_wait = gfx_v9_0_ring_emit_reg_wait,
.emit_reg_write_reg_wait = gfx_v9_0_ring_emit_reg_write_reg_wait,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
index f099f13d7f1e..cceb46faf212 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c
@@ -893,10 +893,13 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
return 0;
}
-void gfx_v9_4_clear_ras_edc_counter(struct amdgpu_device *adev)
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
{
int i, j, k;
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX))
+ return;
+
mutex_lock(&adev->grbm_idx_mutex);
for (i = 0; i < ARRAY_SIZE(gfx_v9_4_edc_counter_regs); i++) {
for (j = 0; j < gfx_v9_4_edc_counter_regs[i].se_num; j++) {
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
index 2e3f6f755ad4..1ffecc5c0f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h
@@ -32,4 +32,6 @@ int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev,
int gfx_v9_4_ras_error_inject(struct amdgpu_device *adev,
void *inject_if);
+void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev);
+
#endif /* __GFX_V9_4_H__ */
diff --git a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
index b70c7b483c24..cc866c367939 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfxhub_v2_0.c
@@ -81,24 +81,31 @@ static void gfxhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
uint64_t value;
- /* Disable AGP. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
- WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
-
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
-
- /* Set default page address. */
- value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
- + adev->vm_manager.vram_base_offset;
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
- (u32)(value >> 12));
- WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
- (u32)(value >> 44));
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * The new L1 policy will block the SRIOV guest from writing
+ * these regs; they will be programmed at the host instead,
+ * so skip programming them here.
+ */
+ /* Disable AGP. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BASE, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_TOP, 0);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_AGP_BOT, 0x00FFFFFF);
+
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+
+ /* Set default page address. */
+ value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start
+ + adev->vm_manager.vram_base_offset;
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
+ (u32)(value >> 12));
+ WREG32_SOC15(GC, 0, mmGCMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
+ (u32)(value >> 44));
+ }
/* Program "protection fault". */
WREG32_SOC15(GC, 0, mmGCVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
@@ -135,6 +142,10 @@ static void gfxhub_v2_0_init_cache_regs(struct amdgpu_device *adev)
{
uint32_t tmp;
+ /* These regs are not accessible for VF, PF will program these in SRIOV */
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* Setup L2 cache */
tmp = RREG32_SOC15(GC, 0, mmGCVM_L2_CNTL);
tmp = REG_SET_FIELD(tmp, GCVM_L2_CNTL, ENABLE_L2_CACHE, 1);
@@ -256,18 +267,6 @@ static void gfxhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int gfxhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * GCMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(GC, 0, mmGCMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
gfxhub_v2_0_init_gart_aperture_regs(adev);
gfxhub_v2_0_init_system_aperture_regs(adev);
@@ -298,9 +297,11 @@ void gfxhub_v2_0_gart_disable(struct amdgpu_device *adev)
ENABLE_ADVANCED_DRIVER_MODEL, 0);
WREG32_SOC15(GC, 0, mmGCMC_VM_MX_L1_TLB_CNTL, tmp);
- /* Setup L2 cache */
- WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
- WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ if (!amdgpu_sriov_vf(adev)) {
+ /* Setup L2 cache */
+ WREG32_FIELD15(GC, 0, GCVM_L2_CNTL, ENABLE_L2_CACHE, 0);
+ WREG32_SOC15(GC, 0, mmGCVM_L2_CNTL3, 0);
+ }
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 90216abf14a4..8606f877478f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -476,13 +476,26 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
{
bool use_semaphore = gmc_v9_0_use_invalidate_semaphore(adev, vmhub);
const unsigned eng = 17;
- u32 j, inv_req, tmp;
+ u32 j, inv_req, inv_req2, tmp;
struct amdgpu_vmhub *hub;
BUG_ON(vmhub >= adev->num_vmhubs);
hub = &adev->vmhub[vmhub];
- inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ if (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, 2);
+ inv_req2 = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ } else {
+ inv_req = gmc_v9_0_get_invalidate_req(vmid, flush_type);
+ inv_req2 = 0;
+ }
/* This is necessary for a HW workaround under SRIOV as well
* as GFXOFF under bare metal
@@ -521,21 +534,27 @@ static void gmc_v9_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
DRM_ERROR("Timeout waiting for sem acquire in VM flush!\n");
}
- WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
+ do {
+ WREG32_NO_KIQ(hub->vm_inv_eng0_req + eng, inv_req);
- /*
- * Issue a dummy read to wait for the ACK register to be cleared
- * to avoid a false ACK due to the new fast GRBM interface.
- */
- if (vmhub == AMDGPU_GFXHUB_0)
- RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
+ /*
+ * Issue a dummy read to wait for the ACK register to
+ * be cleared to avoid a false ACK due to the new fast
+ * GRBM interface.
+ */
+ if (vmhub == AMDGPU_GFXHUB_0)
+ RREG32_NO_KIQ(hub->vm_inv_eng0_req + eng);
- for (j = 0; j < adev->usec_timeout; j++) {
- tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
- if (tmp & (1 << vmid))
- break;
- udelay(1);
- }
+ for (j = 0; j < adev->usec_timeout; j++) {
+ tmp = RREG32_NO_KIQ(hub->vm_inv_eng0_ack + eng);
+ if (tmp & (1 << vmid))
+ break;
+ udelay(1);
+ }
+
+ inv_req = inv_req2;
+ inv_req2 = 0;
+ } while (inv_req);
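The loop structure above serializes up to two flushes: on Vega20 with XGMI a heavy-weight (type 2) flush runs first and the originally requested type second; everywhere else a single pass runs. As pseudocode:

	/* sketch; make_req/issue_and_wait stand in for the register protocol */
	inv_req  = make_req(vmid, vega20_xgmi ? 2 : flush_type);
	inv_req2 = vega20_xgmi ? make_req(vmid, flush_type) : 0;
	do {
		issue_and_wait(inv_req);  /* write req, dummy-read, poll ack bit */
		inv_req = inv_req2;       /* run the second pass only if queued  */
		inv_req2 = 0;
	} while (inv_req);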
/* TODO: It needs to continue working on debugging with semaphore for GFXHUB as well. */
if (use_semaphore)
@@ -577,9 +596,26 @@ static int gmc_v9_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
return -EIO;
if (ring->sched.ready) {
+ /* Vega20+XGMI caches PTEs in TC and TLB. Add a
+ * heavy-weight TLB flush (type 2), which flushes
+ * both. Due to a race condition with concurrent
+ * memory accesses using the same TLB cache line, we
+ * still need a second TLB flush after this.
+ */
+ bool vega20_xgmi_wa = (adev->gmc.xgmi.num_physical_nodes &&
+ adev->asic_type == CHIP_VEGA20);
+ /* 2 dwords flush + 8 dwords fence */
+ unsigned int ndw = kiq->pmf->invalidate_tlbs_size + 8;
+
+ if (vega20_xgmi_wa)
+ ndw += kiq->pmf->invalidate_tlbs_size;
+
spin_lock(&adev->gfx.kiq.ring_lock);
/* 2 dwords flush + 8 dwords fence */
- amdgpu_ring_alloc(ring, kiq->pmf->invalidate_tlbs_size + 8);
+ amdgpu_ring_alloc(ring, ndw);
+ if (vega20_xgmi_wa)
+ kiq->pmf->kiq_invalidate_tlbs(ring,
+ pasid, 2, all_hub);
kiq->pmf->kiq_invalidate_tlbs(ring,
pasid, flush_type, all_hub);
amdgpu_fence_emit_polling(ring, &seq);
@@ -886,32 +922,25 @@ static int gmc_v9_0_late_init(void *handle)
if (r)
return r;
/* Check if ecc is available */
- if (!amdgpu_sriov_vf(adev)) {
- switch (adev->asic_type) {
- case CHIP_VEGA10:
- case CHIP_VEGA20:
- case CHIP_ARCTURUS:
- r = amdgpu_atomfirmware_mem_ecc_supported(adev);
- if (!r) {
- DRM_INFO("ECC is not present.\n");
- if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
- adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
- } else {
- DRM_INFO("ECC is active.\n");
- }
-
- r = amdgpu_atomfirmware_sram_ecc_supported(adev);
- if (!r) {
- DRM_INFO("SRAM ECC is not present.\n");
- } else {
- DRM_INFO("SRAM ECC is active.\n");
- }
- break;
- default:
- break;
- }
+ if (!amdgpu_sriov_vf(adev) && (adev->asic_type == CHIP_VEGA10)) {
+ r = amdgpu_atomfirmware_mem_ecc_supported(adev);
+ if (!r) {
+ DRM_INFO("ECC is not present.\n");
+ if (adev->df.funcs->enable_ecc_force_par_wr_rmw)
+ adev->df.funcs->enable_ecc_force_par_wr_rmw(adev, false);
+ } else
+ DRM_INFO("ECC is active.\n");
+
+ r = amdgpu_atomfirmware_sram_ecc_supported(adev);
+ if (!r)
+ DRM_INFO("SRAM ECC is not present.\n");
+ else
+ DRM_INFO("SRAM ECC is active.\n");
}
+ if (adev->mmhub.funcs && adev->mmhub.funcs->reset_ras_error_count)
+ adev->mmhub.funcs->reset_ras_error_count(adev);
+
r = amdgpu_gmc_ras_late_init(adev);
if (r)
return r;
@@ -1272,6 +1301,19 @@ static void gmc_v9_0_init_golden_registers(struct amdgpu_device *adev)
}
/**
+ * gmc_v9_0_restore_registers - restores regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This restores register values that were saved at suspend.
+ */
+static void gmc_v9_0_restore_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ WREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0, adev->gmc.sdpif_register);
+}
+
+/**
* gmc_v9_0_gart_enable - gart enable
*
* @adev: amdgpu_device pointer
@@ -1377,6 +1419,20 @@ static int gmc_v9_0_hw_init(void *handle)
}
/**
+ * gmc_v9_0_save_registers - saves regs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * This saves register values that may need to be
+ * restored upon resume.
+ */
+static void gmc_v9_0_save_registers(struct amdgpu_device *adev)
+{
+ if (adev->asic_type == CHIP_RAVEN)
+ adev->gmc.sdpif_register = RREG32(mmDCHUBBUB_SDPIF_MMIO_CNTRL_0);
+}
+
+/**
* gmc_v9_0_gart_disable - gart disable
*
* @adev: amdgpu_device pointer
@@ -1412,9 +1468,16 @@ static int gmc_v9_0_hw_fini(void *handle)
static int gmc_v9_0_suspend(void *handle)
{
+ int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- return gmc_v9_0_hw_fini(adev);
+ r = gmc_v9_0_hw_fini(adev);
+ if (r)
+ return r;
+
+ gmc_v9_0_save_registers(adev);
+
+ return 0;
}
static int gmc_v9_0_resume(void *handle)
@@ -1422,6 +1485,7 @@ static int gmc_v9_0_resume(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ gmc_v9_0_restore_registers(adev);
r = gmc_v9_0_hw_init(adev);
if (r)
return r;
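
The ordering of the new hooks matters: registers are captured only after hw_fini() has quiesced the block, and written back before hw_init() reprograms it. Condensed S3 path as wired up above:

    /* suspend */
    gmc_v9_0_hw_fini(adev);
    gmc_v9_0_save_registers(adev);      /* Raven: latch DCHUBBUB_SDPIF_MMIO_CNTRL_0 */

    /* resume */
    gmc_v9_0_restore_registers(adev);   /* write the latched value back */
    gmc_v9_0_hw_init(adev);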
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
index ff2e6e1ccde7..6173951db7b4 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_0.c
@@ -693,7 +693,7 @@ static int jpeg_v2_0_set_clockgating_state(void *handle,
bool enable = (state == AMD_CG_STATE_GATE);
if (enable) {
- if (jpeg_v2_0_is_idle(handle))
+ if (!jpeg_v2_0_is_idle(handle))
return -EBUSY;
jpeg_v2_0_enable_clock_gating(adev);
} else {
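
The fix inverts the idle test: clock gating may only be engaged once the block reports idle, so a busy block must bail out with -EBUSY. The corrected pattern, with hypothetical names standing in for the per-IP helpers:

    if (enable) {
            if (!block_is_idle(handle))     /* hypothetical predicate */
                    return -EBUSY;          /* still busy: refuse to gate */
            block_enable_clock_gating(adev);
    } else {
            block_disable_clock_gating(adev);
    }

The same inversion is applied to jpeg_v2_5, vcn_v1_0, vcn_v2_0 and vcn_v2_5 below.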
diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
index c6d046df4b70..c04c2078a7c1 100644
--- a/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v2_5.c
@@ -477,7 +477,7 @@ static int jpeg_v2_5_set_clockgating_state(void *handle,
continue;
if (enable) {
- if (jpeg_v2_5_is_idle(handle))
+ if (!jpeg_v2_5_is_idle(handle))
return -EBUSY;
jpeg_v2_5_enable_clock_gating(adev, i);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
index 49a3a56ec017..396c2a624de0 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
@@ -747,7 +747,19 @@ static void mmhub_v1_0_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v1_0_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v1_0_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v1_0_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count,
};
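
Both MMHUB variants (and SDMA below) rely on the same read-to-clear behaviour: the EDC counters reset to zero when read, so a bare register-read loop is sufficient. A generic sketch, assuming a soc15_reg_entry table like the ones used above:

    static void example_reset_edc_counters(struct amdgpu_device *adev,
                                           const struct soc15_reg_entry *regs,
                                           unsigned int n)
    {
            unsigned int i;

            if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB))
                    return;
            for (i = 0; i < n; i++)
                    RREG32(SOC15_REG_ENTRY_OFFSET(regs[i])); /* value discarded */
    }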
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
index bde189680521..fb3f228458e5 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v2_0.c
@@ -72,11 +72,18 @@ static void mmhub_v2_0_init_system_aperture_regs(struct amdgpu_device *adev)
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_TOP, 0);
WREG32_SOC15(MMHUB, 0, mmMMMC_VM_AGP_BOT, 0x00FFFFFF);
- /* Program the system aperture low logical page number. */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
- adev->gmc.vram_start >> 18);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
- adev->gmc.vram_end >> 18);
+ if (!amdgpu_sriov_vf(adev)) {
+ /*
+ * The new L1 policy blocks the SRIOV guest from writing
+ * these registers; they are programmed on the host
+ * instead, so skip programming them here.
+ */
+ /* Program the system aperture low logical page number. */
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
+ adev->gmc.vram_start >> 18);
+ WREG32_SOC15(MMHUB, 0, mmMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
+ adev->gmc.vram_end >> 18);
+ }
/* Set default page address. */
value = adev->vram_scratch.gpu_addr - adev->gmc.vram_start +
@@ -247,18 +254,6 @@ static void mmhub_v2_0_program_invalidation(struct amdgpu_device *adev)
int mmhub_v2_0_gart_enable(struct amdgpu_device *adev)
{
- if (amdgpu_sriov_vf(adev)) {
- /*
- * MMMC_VM_FB_LOCATION_BASE/TOP is NULL for VF, becuase they are
- * VF copy registers so vbios post doesn't program them, for
- * SRIOV driver need to program them
- */
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_BASE,
- adev->gmc.vram_start >> 24);
- WREG32_SOC15(MMHUB, 0, mmMMMC_VM_FB_LOCATION_TOP,
- adev->gmc.vram_end >> 24);
- }
-
/* GART Enable. */
mmhub_v2_0_init_gart_aperture_regs(adev);
mmhub_v2_0_init_system_aperture_regs(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
index a5281df8d84f..0d413fabd015 100644
--- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c
@@ -1596,7 +1596,19 @@ static void mmhub_v9_4_query_ras_error_count(struct amdgpu_device *adev,
err_data->ue_count += ded_count;
}
+static void mmhub_v9_4_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ uint32_t i;
+
+ /* read back edc counter registers to reset the counters to 0 */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__MMHUB)) {
+ for (i = 0; i < ARRAY_SIZE(mmhub_v9_4_edc_cnt_regs); i++)
+ RREG32(SOC15_REG_ENTRY_OFFSET(mmhub_v9_4_edc_cnt_regs[i]));
+ }
+}
+
const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = {
.ras_late_init = amdgpu_mmhub_ras_late_init,
.query_ras_error_count = mmhub_v9_4_query_ras_error_count,
+ .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count,
};
diff --git a/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
new file mode 100644
index 000000000000..1b5086c7d4e6
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/mmsch_v2_0.h
@@ -0,0 +1,338 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __MMSCH_V2_0_H__
+#define __MMSCH_V2_0_H__
+
+// addressBlock: uvd0_mmsch_dec
+// base address: 0x1e000
+#define mmMMSCH_UCODE_ADDR 0x0000
+#define mmMMSCH_UCODE_ADDR_BASE_IDX 0
+#define mmMMSCH_UCODE_DATA 0x0001
+#define mmMMSCH_UCODE_DATA_BASE_IDX 0
+#define mmMMSCH_SRAM_ADDR 0x0002
+#define mmMMSCH_SRAM_ADDR_BASE_IDX 0
+#define mmMMSCH_SRAM_DATA 0x0003
+#define mmMMSCH_SRAM_DATA_BASE_IDX 0
+#define mmMMSCH_VF_SRAM_OFFSET 0x0004
+#define mmMMSCH_VF_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_DB_SRAM_OFFSET 0x0005
+#define mmMMSCH_DB_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTX_SRAM_OFFSET 0x0006
+#define mmMMSCH_CTX_SRAM_OFFSET_BASE_IDX 0
+#define mmMMSCH_CTL 0x0007
+#define mmMMSCH_CTL_BASE_IDX 0
+#define mmMMSCH_INTR 0x0008
+#define mmMMSCH_INTR_BASE_IDX 0
+#define mmMMSCH_INTR_ACK 0x0009
+#define mmMMSCH_INTR_ACK_BASE_IDX 0
+#define mmMMSCH_INTR_STATUS 0x000a
+#define mmMMSCH_INTR_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_VMID 0x000b
+#define mmMMSCH_VF_VMID_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_LO 0x000c
+#define mmMMSCH_VF_CTX_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_CTX_ADDR_HI 0x000d
+#define mmMMSCH_VF_CTX_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_CTX_SIZE 0x000e
+#define mmMMSCH_VF_CTX_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_LO 0x000f
+#define mmMMSCH_VF_GPCOM_ADDR_LO_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_ADDR_HI 0x0010
+#define mmMMSCH_VF_GPCOM_ADDR_HI_BASE_IDX 0
+#define mmMMSCH_VF_GPCOM_SIZE 0x0011
+#define mmMMSCH_VF_GPCOM_SIZE_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_HOST 0x0012
+#define mmMMSCH_VF_MAILBOX_HOST_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_RESP 0x0013
+#define mmMMSCH_VF_MAILBOX_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0 0x0014
+#define mmMMSCH_VF_MAILBOX_0_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_0_RESP 0x0015
+#define mmMMSCH_VF_MAILBOX_0_RESP_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1 0x0016
+#define mmMMSCH_VF_MAILBOX_1_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX_1_RESP 0x0017
+#define mmMMSCH_VF_MAILBOX_1_RESP_BASE_IDX 0
+#define mmMMSCH_CNTL 0x001c
+#define mmMMSCH_CNTL_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET0 0x001d
+#define mmMMSCH_NONCACHE_OFFSET0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE0 0x001e
+#define mmMMSCH_NONCACHE_SIZE0_BASE_IDX 0
+#define mmMMSCH_NONCACHE_OFFSET1 0x001f
+#define mmMMSCH_NONCACHE_OFFSET1_BASE_IDX 0
+#define mmMMSCH_NONCACHE_SIZE1 0x0020
+#define mmMMSCH_NONCACHE_SIZE1_BASE_IDX 0
+#define mmMMSCH_PDEBUG_STATUS 0x0021
+#define mmMMSCH_PDEBUG_STATUS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS 0x0022
+#define mmMMSCH_PDEBUG_DATA_32UPPERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS 0x0023
+#define mmMMSCH_PDEBUG_DATA_32LOWERBITS_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EPC 0x0024
+#define mmMMSCH_PDEBUG_EPC_BASE_IDX 0
+#define mmMMSCH_PDEBUG_EXCCAUSE 0x0025
+#define mmMMSCH_PDEBUG_EXCCAUSE_BASE_IDX 0
+#define mmMMSCH_PROC_STATE1 0x0026
+#define mmMMSCH_PROC_STATE1_BASE_IDX 0
+#define mmMMSCH_LAST_MC_ADDR 0x0027
+#define mmMMSCH_LAST_MC_ADDR_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_HI 0x0028
+#define mmMMSCH_LAST_MEM_ACCESS_HI_BASE_IDX 0
+#define mmMMSCH_LAST_MEM_ACCESS_LO 0x0029
+#define mmMMSCH_LAST_MEM_ACCESS_LO_BASE_IDX 0
+#define mmMMSCH_IOV_ACTIVE_FCN_ID 0x002a
+#define mmMMSCH_IOV_ACTIVE_FCN_ID_BASE_IDX 0
+#define mmMMSCH_SCRATCH_0 0x002b
+#define mmMMSCH_SCRATCH_0_BASE_IDX 0
+#define mmMMSCH_SCRATCH_1 0x002c
+#define mmMMSCH_SCRATCH_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0 0x002d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0 0x002e
+#define mmMMSCH_GPUIOV_CMD_CONTROL_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_0 0x002f
+#define mmMMSCH_GPUIOV_CMD_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0 0x0030
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0 0x0031
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0 0x0032
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_0 0x0033
+#define mmMMSCH_GPUIOV_DW6_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_0 0x0034
+#define mmMMSCH_GPUIOV_DW7_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_0 0x0035
+#define mmMMSCH_GPUIOV_DW8_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1 0x0036
+#define mmMMSCH_GPUIOV_SCH_BLOCK_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1 0x0037
+#define mmMMSCH_GPUIOV_CMD_CONTROL_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_1 0x0038
+#define mmMMSCH_GPUIOV_CMD_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1 0x0039
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1 0x003a
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1 0x003b
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_1 0x003c
+#define mmMMSCH_GPUIOV_DW6_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_1 0x003d
+#define mmMMSCH_GPUIOV_DW7_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_1 0x003e
+#define mmMMSCH_GPUIOV_DW8_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT 0x003f
+#define mmMMSCH_GPUIOV_CNTXT_BASE_IDX 0
+#define mmMMSCH_SCRATCH_2 0x0040
+#define mmMMSCH_SCRATCH_2_BASE_IDX 0
+#define mmMMSCH_SCRATCH_3 0x0041
+#define mmMMSCH_SCRATCH_3_BASE_IDX 0
+#define mmMMSCH_SCRATCH_4 0x0042
+#define mmMMSCH_SCRATCH_4_BASE_IDX 0
+#define mmMMSCH_SCRATCH_5 0x0043
+#define mmMMSCH_SCRATCH_5_BASE_IDX 0
+#define mmMMSCH_SCRATCH_6 0x0044
+#define mmMMSCH_SCRATCH_6_BASE_IDX 0
+#define mmMMSCH_SCRATCH_7 0x0045
+#define mmMMSCH_SCRATCH_7_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_0 0x0046
+#define mmMMSCH_VFID_FIFO_HEAD_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_0 0x0047
+#define mmMMSCH_VFID_FIFO_TAIL_0_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_1 0x0048
+#define mmMMSCH_VFID_FIFO_HEAD_1_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_1 0x0049
+#define mmMMSCH_VFID_FIFO_TAIL_1_BASE_IDX 0
+#define mmMMSCH_NACK_STATUS 0x004a
+#define mmMMSCH_NACK_STATUS_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX0_DATA 0x004b
+#define mmMMSCH_VF_MAILBOX0_DATA_BASE_IDX 0
+#define mmMMSCH_VF_MAILBOX1_DATA 0x004c
+#define mmMMSCH_VF_MAILBOX1_DATA_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0 0x004d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0 0x004e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0 0x004f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_0_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1 0x0050
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1 0x0051
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1 0x0052
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_1_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CNTXT_IP 0x0053
+#define mmMMSCH_GPUIOV_CNTXT_IP_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2 0x0054
+#define mmMMSCH_GPUIOV_SCH_BLOCK_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2 0x0055
+#define mmMMSCH_GPUIOV_CMD_CONTROL_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_2 0x0056
+#define mmMMSCH_GPUIOV_CMD_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2 0x0057
+#define mmMMSCH_GPUIOV_VM_BUSY_STATUS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2 0x0058
+#define mmMMSCH_GPUIOV_ACTIVE_FCNS_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2 0x0059
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW6_2 0x005a
+#define mmMMSCH_GPUIOV_DW6_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW7_2 0x005b
+#define mmMMSCH_GPUIOV_DW7_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_DW8_2 0x005c
+#define mmMMSCH_GPUIOV_DW8_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2 0x005d
+#define mmMMSCH_GPUIOV_SCH_BLOCK_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2 0x005e
+#define mmMMSCH_GPUIOV_CMD_STATUS_IP_2_BASE_IDX 0
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2 0x005f
+#define mmMMSCH_GPUIOV_ACTIVE_FCN_ID_IP_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_HEAD_2 0x0060
+#define mmMMSCH_VFID_FIFO_HEAD_2_BASE_IDX 0
+#define mmMMSCH_VFID_FIFO_TAIL_2 0x0061
+#define mmMMSCH_VFID_FIFO_TAIL_2_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_0 0x0062
+#define mmMMSCH_VM_BUSY_STATUS_0_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_1 0x0063
+#define mmMMSCH_VM_BUSY_STATUS_1_BASE_IDX 0
+#define mmMMSCH_VM_BUSY_STATUS_2 0x0064
+#define mmMMSCH_VM_BUSY_STATUS_2_BASE_IDX 0
+
+#define MMSCH_VERSION_MAJOR 2
+#define MMSCH_VERSION_MINOR 0
+#define MMSCH_VERSION (MMSCH_VERSION_MAJOR << 16 | MMSCH_VERSION_MINOR)
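+/* Worked value: MMSCH_VERSION == (2 << 16) | 0 == 0x00020000 */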
+
+enum mmsch_v2_0_command_type {
+ MMSCH_COMMAND__DIRECT_REG_WRITE = 0,
+ MMSCH_COMMAND__DIRECT_REG_POLLING = 2,
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE = 3,
+ MMSCH_COMMAND__INDIRECT_REG_WRITE = 8,
+ MMSCH_COMMAND__END = 0xf
+};
+
+struct mmsch_v2_0_init_header {
+ uint32_t version;
+ uint32_t header_size;
+ uint32_t vcn_init_status;
+ uint32_t vcn_table_offset;
+ uint32_t vcn_table_size;
+};
+
+struct mmsch_v2_0_cmd_direct_reg_header {
+ uint32_t reg_offset : 28;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_indirect_reg_header {
+ uint32_t reg_offset : 20;
+ uint32_t reg_idx_space : 8;
+ uint32_t command_type : 4;
+};
+
+struct mmsch_v2_0_cmd_direct_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+struct mmsch_v2_0_cmd_direct_read_modify_write {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t write_data;
+ uint32_t mask_value;
+};
+
+struct mmsch_v2_0_cmd_direct_polling {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+ uint32_t mask_value;
+ uint32_t wait_value;
+};
+
+struct mmsch_v2_0_cmd_end {
+ struct mmsch_v2_0_cmd_direct_reg_header cmd_header;
+};
+
+struct mmsch_v2_0_cmd_indirect_write {
+ struct mmsch_v2_0_cmd_indirect_reg_header cmd_header;
+ uint32_t reg_value;
+};
+
+static inline void mmsch_v2_0_insert_direct_wt(struct mmsch_v2_0_cmd_direct_write *direct_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t value)
+{
+ direct_wt->cmd_header.reg_offset = reg_offset;
+ direct_wt->reg_value = value;
+ memcpy((void *)init_table, direct_wt, sizeof(struct mmsch_v2_0_cmd_direct_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_rd_mod_wt(struct mmsch_v2_0_cmd_direct_read_modify_write *direct_rd_mod_wt,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t data)
+{
+ direct_rd_mod_wt->cmd_header.reg_offset = reg_offset;
+ direct_rd_mod_wt->mask_value = mask;
+ direct_rd_mod_wt->write_data = data;
+ memcpy((void *)init_table, direct_rd_mod_wt,
+ sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write));
+}
+
+static inline void mmsch_v2_0_insert_direct_poll(struct mmsch_v2_0_cmd_direct_polling *direct_poll,
+ uint32_t *init_table,
+ uint32_t reg_offset,
+ uint32_t mask, uint32_t wait)
+{
+ direct_poll->cmd_header.reg_offset = reg_offset;
+ direct_poll->mask_value = mask;
+ direct_poll->wait_value = wait;
+ memcpy((void *)init_table, direct_poll, sizeof(struct mmsch_v2_0_cmd_direct_polling));
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(reg, mask, data) { \
+ mmsch_v2_0_insert_direct_rd_mod_wt(&direct_rd_mod_wt, \
+ init_table, (reg), \
+ (mask), (data)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_read_modify_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_WT(reg, value) { \
+ mmsch_v2_0_insert_direct_wt(&direct_wt, \
+ init_table, (reg), \
+ (value)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_write)/4; \
+}
+
+#define MMSCH_V2_0_INSERT_DIRECT_POLL(reg, mask, wait) { \
+ mmsch_v2_0_insert_direct_poll(&direct_poll, \
+ init_table, (reg), \
+ (mask), (wait)); \
+ init_table += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+ table_size += sizeof(struct mmsch_v2_0_cmd_direct_polling)/4; \
+}
+
+#endif
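
A hypothetical usage sketch for the table-builder macros above: they assume init_table, table_size and a pre-typed command struct in the caller's scope (as in vcn_v2_0_start_sriov() later in this patch), and append commands back to back:

    uint32_t *init_table = mm_table_cpu_addr;  /* assumed SRIOV MM table mapping */
    uint32_t table_size = 0;
    struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };

    direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
    /* appends one 2-dword entry, advancing init_table and table_size */
    MMSCH_V2_0_INSERT_DIRECT_WT(some_reg_offset, 0x1);  /* offset is hypothetical */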
diff --git a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
index cf557a428298..e08245a446fc 100644
--- a/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
+++ b/drivers/gpu/drm/amd/amdgpu/navi10_ih.c
@@ -32,6 +32,7 @@
#include "soc15_common.h"
#include "navi10_ih.h"
+#define MAX_REARM_RETRY 10
static void navi10_ih_set_interrupt_funcs(struct amdgpu_device *adev);
@@ -284,6 +285,38 @@ static void navi10_ih_decode_iv(struct amdgpu_device *adev,
}
/**
+ * navi10_ih_irq_rearm - rearm IRQ if lost
+ *
+ * @adev: amdgpu_device pointer
+ * @ih: pointer to the IH ring being rearmed
+ *
+ */
+static void navi10_ih_irq_rearm(struct amdgpu_device *adev,
+ struct amdgpu_ih_ring *ih)
+{
+ uint32_t reg_rptr = 0;
+ uint32_t v = 0;
+ uint32_t i = 0;
+
+ if (ih == &adev->irq.ih)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR);
+ else if (ih == &adev->irq.ih1)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING1);
+ else if (ih == &adev->irq.ih2)
+ reg_rptr = SOC15_REG_OFFSET(OSSSYS, 0, mmIH_RB_RPTR_RING2);
+ else
+ return;
+
+ /* Rearm IRQ / re-write doorbell if doorbell write is lost */
+ for (i = 0; i < MAX_REARM_RETRY; i++) {
+ v = RREG32_NO_KIQ(reg_rptr);
+ if ((v < ih->ring_size) && (v != ih->rptr))
+ WDOORBELL32(ih->doorbell_index, ih->rptr);
+ else
+ break;
+ }
+}
+
+/**
* navi10_ih_set_rptr - set the IH ring buffer rptr
*
* @adev: amdgpu_device pointer
@@ -297,6 +330,9 @@ static void navi10_ih_set_rptr(struct amdgpu_device *adev,
/* XXX check if swapping is necessary on BE */
*ih->rptr_cpu = ih->rptr;
WDOORBELL32(ih->doorbell_index, ih->rptr);
+
+ if (amdgpu_sriov_vf(adev))
+ navi10_ih_irq_rearm(adev, ih);
} else
WREG32_SOC15(OSSSYS, 0, mmIH_RB_RPTR, ih->rptr);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
index 65eb378fa035..149d386590df 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c
@@ -318,6 +318,7 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
{
uint32_t bif_doorbell_intr_cntl;
struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);
+ struct ras_err_data err_data = {0, 0, 0, NULL};
bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
if (REG_GET_FIELD(bif_doorbell_intr_cntl,
@@ -332,7 +333,19 @@ static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device
* clear error status after ras_controller_intr according to
* hw team and count ue number for query
*/
- nbio_v7_4_query_ras_error_count(adev, &obj->err_data);
+ nbio_v7_4_query_ras_error_count(adev, &err_data);
+
+ /* log the error counts and print them for awareness */
+ obj->err_data.ue_count += err_data.ue_count;
+ obj->err_data.ce_count += err_data.ce_count;
+
+ if (err_data.ce_count)
+ DRM_INFO("%ld correctable errors detected in %s block\n",
+ obj->err_data.ce_count, adev->nbio.ras_if->name);
+
+ if (err_data.ue_count)
+ DRM_INFO("%ld uncorrectable errors detected in %s block\n",
+ obj->err_data.ue_count, adev->nbio.ras_if->name);
DRM_WARN("RAS controller interrupt triggered by NBIF error\n");
diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c
index 2d1bebdf1603..033cbbca2072 100644
--- a/drivers/gpu/drm/amd/amdgpu/nv.c
+++ b/drivers/gpu/drm/amd/amdgpu/nv.c
@@ -516,7 +516,8 @@ int nv_set_ip_blocks(struct amdgpu_device *adev)
!amdgpu_sriov_vf(adev))
amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
- amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
+ if (!amdgpu_sriov_vf(adev))
+ amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
break;
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
index 36b65797434e..6ff9a9544110 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
+++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h
@@ -31,6 +31,9 @@
#define GFX_CMD_RESERVED_MASK 0x7FF00000
#define GFX_CMD_RESPONSE_MASK 0x80000000
+/* USBC PD FW version retrieval command */
+#define C2PMSG_CMD_GFX_USB_PD_FW_VER 0x2000000
+
/* TEE Gfx Command IDs for the register interface.
* Command ID must be between 0x00010000 and 0x000F0000.
*/
@@ -243,6 +246,7 @@ enum psp_gfx_fw_type {
GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */
GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */
GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */
+ GFX_FW_TYPE_CAP = 62, /* CAP_FW VG */
GFX_FW_TYPE_MAX
};
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
index 0829188c1a5c..0afd610a1263 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "amdgpu_psp.h"
+#include "amdgpu_ras.h"
#include "amdgpu_ucode.h"
#include "soc15_common.h"
#include "psp_v11_0.h"
@@ -65,6 +66,9 @@ MODULE_FIRMWARE("amdgpu/arcturus_ta.bin");
/* memory training timeout define */
#define MEM_TRAIN_SEND_MSG_TIMEOUT_US 3000000
+/* For large FW files the time to complete can be very long */
+#define USBC_PD_POLLING_LIMIT_S 240
+
static int psp_v11_0_init_microcode(struct psp_context *psp)
{
struct amdgpu_device *adev = psp->adev;
@@ -420,7 +424,8 @@ static int psp_v11_0_ring_init(struct psp_context *psp,
struct psp_ring *ring;
struct amdgpu_device *adev = psp->adev;
- psp_v11_0_reroute_ih(psp);
+ if (!amdgpu_sriov_vf(adev))
+ psp_v11_0_reroute_ih(psp);
ring = &psp->km_ring;
@@ -864,6 +869,11 @@ static int psp_v11_0_ras_trigger_error(struct psp_context *psp,
if (ret)
return -EINVAL;
+ /* If err_event_athub occurs, the error injection was successful; however,
+ the return status from the TA is no longer reliable. */
+ if (amdgpu_ras_intr_triggered())
+ return 0;
+
return ras_cmd->ras_status;
}
@@ -1108,6 +1118,82 @@ static void psp_v11_0_ring_set_wptr(struct psp_context *psp, uint32_t value)
WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_67, value);
}
+static int psp_v11_0_load_usbc_pd_fw(struct psp_context *psp, dma_addr_t dma_addr)
+{
+ struct amdgpu_device *adev = psp->adev;
+ uint32_t reg_status;
+ int ret, i = 0;
+
+ /* Write lower 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, lower_32_bits(dma_addr));
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire an interrupt so the PSP can pick up the lower address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x800000);
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Lower address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = %02x...\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ /* Write upper 32-bit address of the PD Controller FW */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36, upper_32_bits(dma_addr));
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (ret)
+ return ret;
+
+ /* Fire an interrupt so the PSP can pick up the upper address */
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, 0x4000000);
+
+ /* The FW load can take a very long time, so poll with a generous limit */
+ do {
+ msleep(1000);
+ reg_status = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35);
+
+ if (reg_status & 0x80000000)
+ goto done;
+
+ } while (++i < USBC_PD_POLLING_LIMIT_S);
+
+ return -ETIME;
+done:
+
+ if ((reg_status & 0xFFFF) != 0) {
+ DRM_ERROR("Upper address load failed - MP0_SMN_C2PMSG_35.Bits [15:0] = x%04x\n",
+ reg_status & 0xFFFF);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int psp_v11_0_read_usbc_pd_fw(struct psp_context *psp, uint32_t *fw_ver)
+{
+ struct amdgpu_device *adev = psp->adev;
+ int ret;
+
+ WREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_35, C2PMSG_CMD_GFX_USB_PD_FW_VER);
+
+ ret = psp_wait_for(psp, SOC15_REG_OFFSET(MP0, 0, mmMP0_SMN_C2PMSG_35),
+ 0x80000000, 0x80000000, false);
+ if (!ret)
+ *fw_ver = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_36);
+
+ return ret;
+}
+
static const struct psp_funcs psp_v11_0_funcs = {
.init_microcode = psp_v11_0_init_microcode,
.bootloader_load_kdb = psp_v11_0_bootloader_load_kdb,
@@ -1132,6 +1218,8 @@ static const struct psp_funcs psp_v11_0_funcs = {
.mem_training = psp_v11_0_memory_training,
.ring_get_wptr = psp_v11_0_ring_get_wptr,
.ring_set_wptr = psp_v11_0_ring_set_wptr,
+ .load_usbc_pd_fw = psp_v11_0_load_usbc_pd_fw,
+ .read_usbc_pd_fw = psp_v11_0_read_usbc_pd_fw
};
void psp_v11_0_set_psp_funcs(struct psp_context *psp)
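
A hypothetical caller sketch for the two new hooks: stage the PD controller firmware in a GPU-visible buffer (amdgpu_bo_create_kernel() assumed for the staging; fw_data and fw_size are placeholders), hand its DMA address to the load hook, then query the version:

    struct amdgpu_bo *bo;
    u64 dma_addr;
    void *cpu;
    uint32_t fw_ver;
    int ret;

    ret = amdgpu_bo_create_kernel(adev, fw_size, PAGE_SIZE,
                                  AMDGPU_GEM_DOMAIN_GTT,
                                  &bo, &dma_addr, &cpu);
    if (!ret) {
            memcpy(cpu, fw_data, fw_size);
            ret = psp_v11_0_load_usbc_pd_fw(&adev->psp, dma_addr);
            if (!ret)
                    ret = psp_v11_0_read_usbc_pd_fw(&adev->psp, &fw_ver);
    }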
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
index 735c43c7daab..43896f4779b0 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c
@@ -44,6 +44,7 @@
MODULE_FIRMWARE("amdgpu/vega10_sos.bin");
MODULE_FIRMWARE("amdgpu/vega10_asd.bin");
+MODULE_FIRMWARE("amdgpu/vega10_cap.bin");
MODULE_FIRMWARE("amdgpu/vega12_sos.bin");
MODULE_FIRMWARE("amdgpu/vega12_asd.bin");
@@ -63,6 +64,7 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
char fw_name[30];
int err = 0;
const struct psp_firmware_header_v1_0 *hdr;
+ struct amdgpu_firmware_info *info = NULL;
DRM_DEBUG("\n");
@@ -112,6 +114,26 @@ static int psp_v3_1_init_microcode(struct psp_context *psp)
adev->psp.asd_start_addr = (uint8_t *)hdr +
le32_to_cpu(hdr->header.ucode_array_offset_bytes);
+ if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_VEGA10) {
+ snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin",
+ chip_name);
+ err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev);
+ if (err)
+ goto out;
+
+ err = amdgpu_ucode_validate(adev->psp.cap_fw);
+ if (err)
+ goto out;
+
+ info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP];
+ info->ucode_id = AMDGPU_UCODE_ID_CAP;
+ info->fw = adev->psp.cap_fw;
+ hdr = (const struct psp_firmware_header_v1_0 *)
+ adev->psp.cap_fw->data;
+ adev->firmware.fw_size += ALIGN(
+ le32_to_cpu(hdr->header.ucode_size_bytes), PAGE_SIZE);
+ }
+
return 0;
out:
if (err) {
@@ -122,6 +144,8 @@ out:
adev->psp.sos_fw = NULL;
release_firmware(adev->psp.asd_fw);
adev->psp.asd_fw = NULL;
+ release_firmware(adev->psp.cap_fw);
+ adev->psp.cap_fw = NULL;
}
return err;
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
index e55884d204bd..9159bd46482b 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c
@@ -1801,13 +1801,9 @@ static int sdma_v4_0_late_init(void *handle)
struct ras_ih_if ih_info = {
.cb = sdma_v4_0_process_ras_data_cb,
};
- int i;
- /* read back edc counter registers to clear the counters */
- if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
- for (i = 0; i < adev->sdma.num_instances; i++)
- RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
- }
+ if (adev->sdma.funcs && adev->sdma.funcs->reset_ras_error_count)
+ adev->sdma.funcs->reset_ras_error_count(adev);
if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init)
return adev->sdma.funcs->ras_late_init(adev, &ih_info);
@@ -2572,10 +2568,22 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev,
return 0;
};
+static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev)
+{
+ int i;
+
+ /* read back edc counter registers to clear the counters */
+ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__SDMA)) {
+ for (i = 0; i < adev->sdma.num_instances; i++)
+ RREG32_SDMA(i, mmSDMA0_EDC_COUNTER);
+ }
+}
+
static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = {
.ras_late_init = amdgpu_sdma_ras_late_init,
.ras_fini = amdgpu_sdma_ras_fini,
.query_ras_error_count = sdma_v4_0_query_ras_error_count,
+ .reset_ras_error_count = sdma_v4_0_reset_ras_error_count,
};
static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
index 67b9830b7c7e..ebfd2cdf4e65 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v5_0.c
@@ -746,11 +746,9 @@ static int sdma_v5_0_gfx_resume(struct amdgpu_device *adev)
sdma_v5_0_enable(adev, true);
}
- r = amdgpu_ring_test_ring(ring);
- if (r) {
- ring->sched.ready = false;
+ r = amdgpu_ring_test_helper(ring);
+ if (r)
return r;
- }
if (adev->mman.buffer_funcs_ring == ring)
amdgpu_ttm_set_buffer_funcs_status(adev, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
index c902f26cf50d..9bffbab35041 100644
--- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
+++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c
@@ -46,8 +46,7 @@
#define I2C_NO_STOP 1
#define I2C_RESTART 2
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
-#define to_eeprom_control(x) container_of(x, struct amdgpu_ras_eeprom_control, eeprom_accessor)
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en)
{
@@ -592,7 +591,8 @@ static uint32_t smu_v11_0_i2c_eeprom_write_data(struct i2c_adapter *control,
static void lock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_lock(i2c)) {
DRM_ERROR("Failed to lock the bus from SMU");
@@ -610,7 +610,8 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags)
static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags)
{
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!smu_v11_0_i2c_bus_unlock(i2c)) {
DRM_ERROR("Failed to unlock the bus from SMU");
@@ -630,7 +631,8 @@ static int smu_v11_0_i2c_eeprom_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
int i, ret;
- struct amdgpu_ras_eeprom_control *control = to_eeprom_control(i2c_adap);
+ struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
+ struct amdgpu_ras_eeprom_control *control = &adev->psp.ras.ras->eeprom_control;
if (!control->bus_locked) {
DRM_ERROR("I2C bus unlocked, stopping transaction!");
@@ -679,7 +681,7 @@ int smu_v11_0_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &smu_v11_0_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops;
res = i2c_add_adapter(control);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c
index 15f3424a1ff7..a40499d51c93 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.c
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.c
@@ -89,6 +89,13 @@
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK 0x00010000L
#define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK 0x00020000L
#define mmHDP_MEM_POWER_CTRL_BASE_IDX 0
+
+/* For Vega20/Arcturus the register offsets changed */
+#define mmROM_INDEX_VG20 0x00e4
+#define mmROM_INDEX_VG20_BASE_IDX 0
+#define mmROM_DATA_VG20 0x00e5
+#define mmROM_DATA_VG20_BASE_IDX 0
+
/*
* Indirect registers accessor
*/
@@ -272,7 +279,12 @@ static u32 soc15_get_config_memsize(struct amdgpu_device *adev)
static u32 soc15_get_xclk(struct amdgpu_device *adev)
{
- return adev->clock.spll.reference_freq;
+ u32 reference_clock = adev->clock.spll.reference_freq;
+
+ if (adev->asic_type == CHIP_RAVEN)
+ return reference_clock / 4;
+
+ return reference_clock;
}
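
A worked example for the Raven quirk, assuming the SPLL reference is stored in 10 kHz units (so 10000 for a 100 MHz reference):

    u32 xclk = soc15_get_xclk(adev);   /* Raven: 10000 / 4 == 2500, i.e. 25 MHz */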
@@ -304,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
{
u32 *dw_ptr;
u32 i, length_dw;
+ uint32_t rom_index_offset;
+ uint32_t rom_data_offset;
if (bios == NULL)
return false;
@@ -316,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
dw_ptr = (u32 *)bios;
length_dw = ALIGN(length_bytes, 4) / 4;
+ switch (adev->asic_type) {
+ case CHIP_VEGA20:
+ case CHIP_ARCTURUS:
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
+ break;
+ default:
+ rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+ rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+ break;
+ }
+
/* set rom index to 0 */
- WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+ WREG32(rom_index_offset, 0);
/* read out the rom data */
for (i = 0; i < length_dw; i++)
- dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+ dw_ptr[i] = RREG32(rom_data_offset);
return true;
}
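
The ROM is read through a classic index/data register pair: a single write selects the starting dword and each data-register read is presumed to auto-advance the index. Reduced to its core, with the offsets chosen by the switch above:

    WREG32(rom_index_offset, 0);                 /* start at dword 0 */
    for (i = 0; i < length_dw; i++)
            dw_ptr[i] = RREG32(rom_data_offset); /* each read advances the index */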
@@ -826,6 +852,15 @@ static bool soc15_need_full_reset(struct amdgpu_device *adev)
/* change this when we implement soft reset */
return true;
}
+
+static void vega20_reset_hdp_ras_error_count(struct amdgpu_device *adev)
+{
+ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__HDP))
+ return;
+ /* read back the HDP RAS counter to reset it to 0 */
+ RREG32_SOC15(HDP, 0, mmHDP_EDC_CNT);
+}
+
static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0,
uint64_t *count1)
{
@@ -993,6 +1028,7 @@ static const struct amdgpu_asic_funcs vega20_asic_funcs =
.get_config_memsize = &soc15_get_config_memsize,
.flush_hdp = &soc15_flush_hdp,
.invalidate_hdp = &soc15_invalidate_hdp,
+ .reset_hdp_ras_error_count = &vega20_reset_hdp_ras_error_count,
.need_full_reset = &soc15_need_full_reset,
.init_doorbell_index = &vega20_doorbell_index_init,
.get_pcie_usage = &vega20_get_pcie_usage,
@@ -1238,6 +1274,10 @@ static int soc15_common_late_init(void *handle)
if (amdgpu_sriov_vf(adev))
xgpu_ai_mailbox_get_irq(adev);
+ if (adev->asic_funcs &&
+ adev->asic_funcs->reset_hdp_ras_error_count)
+ adev->asic_funcs->reset_hdp_ras_error_count(adev);
+
if (adev->nbio.funcs->ras_late_init)
r = adev->nbio.funcs->ras_late_init(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.h b/drivers/gpu/drm/amd/amdgpu/soc15.h
index d0fb7a67c1a3..b03f950c486c 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15.h
@@ -42,6 +42,13 @@ struct soc15_reg_golden {
u32 or_mask;
};
+struct soc15_reg_rlcg {
+ u32 hwip;
+ u32 instance;
+ u32 segment;
+ u32 reg;
+};
+
struct soc15_reg_entry {
uint32_t hwip;
uint32_t inst;
diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
index 19e870c79896..c893c645a4b2 100644
--- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h
+++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h
@@ -70,10 +70,9 @@
} \
} while (0)
-#define AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(a) (amdgpu_sriov_vf((a)) && !amdgpu_sriov_runtime((a)))
#define WREG32_RLC(reg, value) \
do { \
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t i = 0; \
uint32_t retries = 50000; \
uint32_t r0 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0; \
@@ -98,7 +97,7 @@
#define WREG32_SOC15_RLC_SHADOW(ip, inst, reg, value) \
do { \
uint32_t target_reg = adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg;\
- if (AMDGPU_VIRT_SUPPORT_RLC_PRG_REG(adev)) { \
+ if (amdgpu_sriov_fullaccess(adev)) { \
uint32_t r2 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG2; \
uint32_t r3 = adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3; \
uint32_t grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; \
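
amdgpu_sriov_fullaccess() is expected to mirror the macro it replaces, i.e. a VF that currently holds exclusive (full) access. A sketch of the assumed semantics:

    /* assumed equivalent of the removed AMDGPU_VIRT_SUPPORT_RLC_PRG_REG() */
    static inline bool amdgpu_sriov_fullaccess(struct amdgpu_device *adev)
    {
            return amdgpu_sriov_vf(adev) && !amdgpu_sriov_runtime(adev);
    }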
diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
index 793bf70e64b1..14d346321a5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c
@@ -186,6 +186,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -199,6 +203,10 @@ static void umc_v6_1_query_ras_error_count(struct amdgpu_device *adev,
&(err_data->ue_count));
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
@@ -228,7 +236,11 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
SOC15_REG_OFFSET(UMC, 0, mmMCA_UMC_UMC0_MCUMC_ADDRT0);
}
- /* skip error address process if -ENOMEM */
+ mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
+
+ if (mc_umc_status == 0)
+ return;
+
if (!err_data->err_addr) {
/* clear umc status */
WREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4, 0x0ULL);
@@ -236,7 +248,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev,
}
err_rec = &err_data->err_addr[err_data->err_addr_cnt];
- mc_umc_status = RREG64_PCIE((mc_umc_status_addr + umc_reg_offset) * 4);
/* calculate error address if ue/ce error is detected */
if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 &&
@@ -288,6 +299,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
if (rsmu_umc_index_state)
umc_v6_1_disable_umc_index_mode(adev);
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+ DRM_WARN("Fail to disable DF-Cstate.\n");
+
LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) {
umc_reg_offset = get_umc_6_reg_offset(adev,
umc_inst,
@@ -300,6 +315,10 @@ static void umc_v6_1_query_ras_error_address(struct amdgpu_device *adev,
umc_inst);
}
+ if ((adev->asic_type == CHIP_ARCTURUS) &&
+ amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
+ DRM_WARN("Fail to enable DF-Cstate\n");
+
if (rsmu_umc_index_state)
umc_v6_1_enable_umc_index_mode(adev);
}
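
On Arcturus both UMC query paths are now bracketed by DF C-state control, presumably so the data fabric cannot change state mid-walk and disturb the counter reads. The pattern reduces to:

    if (adev->asic_type == CHIP_ARCTURUS &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
            DRM_WARN("Failed to disable DF-Cstate\n");

    /* ... walk the UMC error-count/address registers ... */

    if (adev->asic_type == CHIP_ARCTURUS &&
        amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW))
            DRM_WARN("Failed to enable DF-Cstate\n");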
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 1a24fadd30e2..09b0572b838d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -1207,9 +1207,10 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
struct amdgpu_ring *ring;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1258,13 +1259,14 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.jpeg != new_state->jpeg) {
+ if (adev->vcn.inst[inst_idx].pause_state.jpeg != new_state->jpeg) {
DRM_DEBUG("dpg pause state changed %d:%d -> %d:%d",
- adev->vcn.pause_state.fw_based, adev->vcn.pause_state.jpeg,
+ adev->vcn.inst[inst_idx].pause_state.fw_based,
+ adev->vcn.inst[inst_idx].pause_state.jpeg,
new_state->fw_based, new_state->jpeg);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
@@ -1318,7 +1320,7 @@ static int vcn_v1_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__JPEG_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.jpeg = new_state->jpeg;
+ adev->vcn.inst[inst_idx].pause_state.jpeg = new_state->jpeg;
}
return 0;
@@ -1350,7 +1352,7 @@ static int vcn_v1_0_set_clockgating_state(void *handle,
if (enable) {
/* wait for STATUS to clear */
- if (vcn_v1_0_is_idle(handle))
+ if (!vcn_v1_0_is_idle(handle))
return -EBUSY;
vcn_v1_0_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
index 4f7216788f11..ec8091a661df 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_0.c
@@ -29,6 +29,7 @@
#include "soc15d.h"
#include "amdgpu_pm.h"
#include "amdgpu_psp.h"
+#include "mmsch_v2_0.h"
#include "vcn/vcn_2_0_0_offset.h"
#include "vcn/vcn_2_0_0_sh_mask.h"
@@ -54,7 +55,7 @@ static int vcn_v2_0_set_powergating_state(void *handle,
enum amd_powergating_state state);
static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int inst_idx, struct dpg_pause_state *new_state);
-
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev);
/**
* vcn_v2_0_early_init - set function pointers
*
@@ -67,7 +68,10 @@ static int vcn_v2_0_early_init(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
adev->vcn.num_vcn_inst = 1;
- adev->vcn.num_enc_rings = 2;
+ if (amdgpu_sriov_vf(adev))
+ adev->vcn.num_enc_rings = 1;
+ else
+ adev->vcn.num_enc_rings = 2;
vcn_v2_0_set_dec_ring_funcs(adev);
vcn_v2_0_set_enc_ring_funcs(adev);
@@ -154,7 +158,10 @@ static int vcn_v2_0_sw_init(void *handle)
for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
ring = &adev->vcn.inst->ring_enc[i];
ring->use_doorbell = true;
- ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ if (!amdgpu_sriov_vf(adev))
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 2 + i;
+ else
+ ring->doorbell_index = (adev->doorbell_index.vcn.vcn_ring0_1 << 1) + 1 + i;
sprintf(ring->name, "vcn_enc%d", i);
r = amdgpu_ring_init(adev, ring, 512, &adev->vcn.inst->irq, 0);
if (r)
@@ -163,6 +170,10 @@ static int vcn_v2_0_sw_init(void *handle)
adev->vcn.pause_dpg_mode = vcn_v2_0_pause_dpg_mode;
+ r = amdgpu_virt_alloc_mm_table(adev);
+ if (r)
+ return r;
+
return 0;
}
@@ -178,6 +189,8 @@ static int vcn_v2_0_sw_fini(void *handle)
int r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ amdgpu_virt_free_mm_table(adev);
+
r = amdgpu_vcn_suspend(adev);
if (r)
return r;
@@ -203,6 +216,9 @@ static int vcn_v2_0_hw_init(void *handle)
adev->nbio.funcs->vcn_doorbell_range(adev, ring->use_doorbell,
ring->doorbell_index, 0);
+ if (amdgpu_sriov_vf(adev))
+ vcn_v2_0_start_sriov(adev);
+
r = amdgpu_ring_test_helper(ring);
if (r)
goto done;
@@ -304,6 +320,9 @@ static void vcn_v2_0_mc_resume(struct amdgpu_device *adev)
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
uint32_t offset;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* cache window 0: fw */
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
@@ -448,6 +467,9 @@ static void vcn_v2_0_disable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* UVD disable CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -606,6 +628,9 @@ static void vcn_v2_0_enable_clock_gating(struct amdgpu_device *adev)
{
uint32_t data = 0;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
/* enable UVD CGC */
data = RREG32_SOC15(VCN, 0, mmUVD_CGC_CTRL);
if (adev->cg_flags & AMD_CG_SUPPORT_VCN_MGCG)
@@ -658,6 +683,9 @@ static void vcn_v2_0_disable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
data = (1 << UVD_PGFSM_CONFIG__UVDM_PWR_CONFIG__SHIFT
| 1 << UVD_PGFSM_CONFIG__UVDU_PWR_CONFIG__SHIFT
@@ -705,6 +733,9 @@ static void vcn_v2_0_enable_static_power_gating(struct amdgpu_device *adev)
uint32_t data = 0;
int ret;
+ if (amdgpu_sriov_vf(adev))
+ return;
+
if (adev->pg_flags & AMD_PG_SUPPORT_VCN) {
/* Before power off, this indicator has to be turned on */
data = RREG32_SOC15(VCN, 0, mmUVD_POWER_STATUS);
@@ -1137,9 +1168,9 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1185,7 +1216,7 @@ static int vcn_v2_0_pause_dpg_mode(struct amdgpu_device *adev,
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, 0, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
@@ -1215,9 +1246,12 @@ static int vcn_v2_0_set_clockgating_state(void *handle,
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE);
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
if (enable) {
/* wait for STATUS to clear */
- if (vcn_v2_0_is_idle(handle))
+ if (!vcn_v2_0_is_idle(handle))
return -EBUSY;
vcn_v2_0_enable_clock_gating(adev);
} else {
@@ -1631,6 +1665,9 @@ int vcn_v2_0_dec_ring_test_ring(struct amdgpu_ring *ring)
unsigned i;
int r;
+ if (amdgpu_sriov_vf(adev))
+ return 0;
+
WREG32(adev->vcn.inst[ring->me].external.scratch9, 0xCAFEDEAD);
r = amdgpu_ring_alloc(ring, 4);
if (r)
@@ -1667,6 +1704,11 @@ static int vcn_v2_0_set_powergating_state(void *handle,
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (amdgpu_sriov_vf(adev)) {
+ adev->vcn.cur_state = AMD_PG_STATE_UNGATE;
+ return 0;
+ }
+
if (state == adev->vcn.cur_state)
return 0;
@@ -1680,6 +1722,215 @@ static int vcn_v2_0_set_powergating_state(void *handle,
return ret;
}
+static int vcn_v2_0_start_mmsch(struct amdgpu_device *adev,
+ struct amdgpu_mm_table *table)
+{
+ uint32_t data = 0, loop;
+ uint64_t addr = table->gpu_addr;
+ struct mmsch_v2_0_init_header *header;
+ uint32_t size;
+ int i;
+
+ header = (struct mmsch_v2_0_init_header *)table->cpu_addr;
+ size = header->header_size + header->vcn_table_size;
+
+ /* 1, write to vce_mmsch_vf_ctx_addr_lo/hi register with GPU mc addr
+ * of memory descriptor location
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_LO, lower_32_bits(addr));
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_ADDR_HI, upper_32_bits(addr));
+
+ /* 2, update vmid of descriptor */
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID);
+ data &= ~MMSCH_VF_VMID__VF_CTX_VMID_MASK;
+ /* use domain0 for MM scheduler */
+ data |= (0 << MMSCH_VF_VMID__VF_CTX_VMID__SHIFT);
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_VMID, data);
+
+ /* 3, notify mmsch about the size of this descriptor */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_CTX_SIZE, size);
+
+ /* 4, set resp to zero */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP, 0);
+
+ adev->vcn.inst->ring_dec.wptr = 0;
+ adev->vcn.inst->ring_dec.wptr_old = 0;
+ vcn_v2_0_dec_ring_set_wptr(&adev->vcn.inst->ring_dec);
+
+ for (i = 0; i < adev->vcn.num_enc_rings; ++i) {
+ adev->vcn.inst->ring_enc[i].wptr = 0;
+ adev->vcn.inst->ring_enc[i].wptr_old = 0;
+ vcn_v2_0_enc_ring_set_wptr(&adev->vcn.inst->ring_enc[i]);
+ }
+
+ /* 5, kick off the initialization and wait until
+ * VCE_MMSCH_VF_MAILBOX_RESP becomes non-zero
+ */
+ WREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_HOST, 0x10000001);
+
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop = 1000;
+ while ((data & 0x10000002) != 0x10000002) {
+ udelay(10);
+ data = RREG32_SOC15(UVD, 0, mmMMSCH_VF_MAILBOX_RESP);
+ loop--;
+ if (!loop)
+ break;
+ }
+
+ if (!loop) {
+ DRM_ERROR("failed to init MMSCH, " \
+ "mmMMSCH_VF_MAILBOX_RESP = 0x%08x\n", data);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static int vcn_v2_0_start_sriov(struct amdgpu_device *adev)
+{
+ int r;
+ uint32_t tmp;
+ struct amdgpu_ring *ring;
+ uint32_t offset, size;
+ uint32_t table_size = 0;
+ struct mmsch_v2_0_cmd_direct_write direct_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_read_modify_write direct_rd_mod_wt = { {0} };
+ struct mmsch_v2_0_cmd_direct_polling direct_poll = { {0} };
+ struct mmsch_v2_0_cmd_end end = { {0} };
+ struct mmsch_v2_0_init_header *header;
+ uint32_t *init_table = adev->virt.mm_table.cpu_addr;
+ uint8_t i = 0;
+
+ header = (struct mmsch_v2_0_init_header *)init_table;
+ direct_wt.cmd_header.command_type = MMSCH_COMMAND__DIRECT_REG_WRITE;
+ direct_rd_mod_wt.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_READ_MODIFY_WRITE;
+ direct_poll.cmd_header.command_type =
+ MMSCH_COMMAND__DIRECT_REG_POLLING;
+ end.cmd_header.command_type = MMSCH_COMMAND__END;
+
+ if (header->vcn_table_offset == 0 && header->vcn_table_size == 0) {
+ header->version = MMSCH_VERSION;
+ header->header_size = sizeof(struct mmsch_v2_0_init_header) >> 2;
+
+ header->vcn_table_offset = header->header_size;
+
+ init_table += header->vcn_table_offset;
+
+ size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
+
+ MMSCH_V2_0_INSERT_DIRECT_RD_MOD_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_STATUS),
+ 0xFFFFFFFF, 0x00000004);
+
+ /* mc resume*/
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ tmp = AMDGPU_UCODE_ID_VCN;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ adev->firmware.ucode[tmp].tmr_mc_addr_lo);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ adev->firmware.ucode[tmp].tmr_mc_addr_hi);
+ offset = 0;
+ } else {
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr));
+ offset = size;
+ }
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET0),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE0),
+ size);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET1),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE1),
+ AMDGPU_VCN_STACK_SIZE);
+
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW),
+ lower_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH),
+ upper_32_bits(adev->vcn.inst->gpu_addr + offset +
+ AMDGPU_VCN_STACK_SIZE));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_OFFSET2),
+ 0);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_VCPU_CACHE_SIZE2),
+ AMDGPU_VCN_CONTEXT_SIZE);
+
+ for (r = 0; r < adev->vcn.num_enc_rings; ++r) {
+ ring = &adev->vcn.inst->ring_enc[r];
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_LO),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_BASE_HI),
+ upper_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RB_SIZE),
+ ring->ring_size / 4);
+ }
+
+ ring = &adev->vcn.inst->ring_dec;
+ ring->wptr = 0;
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_LOW),
+ lower_32_bits(ring->gpu_addr));
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i,
+ mmUVD_LMI_RBC_RB_64BIT_BAR_HIGH),
+ upper_32_bits(ring->gpu_addr));
+ /* force RBC into idle state */
+ tmp = order_base_2(ring->ring_size);
+ tmp = REG_SET_FIELD(0, UVD_RBC_RB_CNTL, RB_BUFSZ, tmp);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_BLKSZ, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_FETCH, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_NO_UPDATE, 1);
+ tmp = REG_SET_FIELD(tmp, UVD_RBC_RB_CNTL, RB_RPTR_WR_EN, 1);
+ MMSCH_V2_0_INSERT_DIRECT_WT(
+ SOC15_REG_OFFSET(UVD, i, mmUVD_RBC_RB_CNTL), tmp);
+
+ /* add end packet */
+ tmp = sizeof(struct mmsch_v2_0_cmd_end);
+ memcpy((void *)init_table, &end, tmp);
+ table_size += (tmp / 4);
+ header->vcn_table_size = table_size;
+
+ }
+ return vcn_v2_0_start_mmsch(adev, &adev->virt.mm_table);
+}
+
static const struct amd_ip_funcs vcn_v2_0_ip_funcs = {
.name = "vcn_v2_0",
.early_init = vcn_v2_0_early_init,
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
index 70fae7977f8f..c6363f5ad564 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v2_5.c
@@ -74,29 +74,30 @@ static int amdgpu_ih_clientid_vcns[] = {
static int vcn_v2_5_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_ARCTURUS) {
- u32 harvest;
- int i;
-
- adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
- for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
- harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
- if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
- adev->vcn.harvest_config |= 1 << i;
- }
-
- if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
- AMDGPU_VCN_HARVEST_VCN1))
- /* both instances are harvested, disable the block */
- return -ENOENT;
- } else
- adev->vcn.num_vcn_inst = 1;
if (amdgpu_sriov_vf(adev)) {
adev->vcn.num_vcn_inst = 2;
adev->vcn.harvest_config = 0;
adev->vcn.num_enc_rings = 1;
} else {
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ u32 harvest;
+ int i;
+
+ adev->vcn.num_vcn_inst = VCN25_MAX_HW_INSTANCES_ARCTURUS;
+ for (i = 0; i < adev->vcn.num_vcn_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmCC_UVD_HARVESTING);
+ if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK)
+ adev->vcn.harvest_config |= 1 << i;
+ }
+
+ if (adev->vcn.harvest_config == (AMDGPU_VCN_HARVEST_VCN0 |
+ AMDGPU_VCN_HARVEST_VCN1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else
+ adev->vcn.num_vcn_inst = 1;
+
adev->vcn.num_enc_rings = 2;
}
@@ -1367,9 +1368,9 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
int ret_code;
/* pause/unpause if state is changed */
- if (adev->vcn.pause_state.fw_based != new_state->fw_based) {
+ if (adev->vcn.inst[inst_idx].pause_state.fw_based != new_state->fw_based) {
DRM_DEBUG("dpg pause state changed %d -> %d",
- adev->vcn.pause_state.fw_based, new_state->fw_based);
+ adev->vcn.inst[inst_idx].pause_state.fw_based, new_state->fw_based);
reg_data = RREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE) &
(~UVD_DPG_PAUSE__NJ_PAUSE_DPG_ACK_MASK);
@@ -1407,14 +1408,14 @@ static int vcn_v2_5_pause_dpg_mode(struct amdgpu_device *adev,
RREG32_SOC15(UVD, inst_idx, mmUVD_SCRATCH2) & 0x7FFFFFFF);
SOC15_WAIT_ON_RREG(UVD, inst_idx, mmUVD_POWER_STATUS,
- 0x0, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
+ UVD_PGFSM_CONFIG__UVDM_UVDU_PWR_ON, UVD_POWER_STATUS__UVD_POWER_STATUS_MASK, ret_code);
}
} else {
/* unpause dpg, no need to wait */
reg_data &= ~UVD_DPG_PAUSE__NJ_PAUSE_DPG_REQ_MASK;
WREG32_SOC15(UVD, inst_idx, mmUVD_DPG_PAUSE, reg_data);
}
- adev->vcn.pause_state.fw_based = new_state->fw_based;
+ adev->vcn.inst[inst_idx].pause_state.fw_based = new_state->fw_based;
}
return 0;
@@ -1672,7 +1673,7 @@ static int vcn_v2_5_set_clockgating_state(void *handle,
return 0;
if (enable) {
- if (vcn_v2_5_is_idle(handle))
+ if (!vcn_v2_5_is_idle(handle))
return -EBUSY;
vcn_v2_5_enable_clock_gating(adev);
} else {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 3f0300e53727..0ec5f25adf56 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -127,6 +127,8 @@ static int kfd_open(struct inode *inode, struct file *filep)
return PTR_ERR(process);
if (kfd_is_locked()) {
+ dev_dbg(kfd_device, "kfd is locked!\n"
+ "process %d unreferenced", process->pasid);
kfd_unref_process(process);
return -EAGAIN;
}
@@ -1167,7 +1169,7 @@ static int kfd_ioctl_get_tile_config(struct file *filep,
if (!dev)
return -EINVAL;
- dev->kfd2kgd->get_tile_config(dev->kgd, &config);
+ amdgpu_amdkfd_get_tile_config(dev->kgd, &config);
args->gb_addr_config = config.gb_addr_config;
args->num_banks = config.num_banks;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 2a9e40131735..d5386f15c4a5 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -648,6 +648,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd,
if (kfd->kfd2kgd->get_hive_id)
kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);
+ if (kfd->kfd2kgd->get_unique_id)
+ kfd->unique_id = kfd->kfd2kgd->get_unique_id(kfd->kgd);
+
if (kfd_interrupt_init(kfd)) {
dev_err(kfd_device, "Error initializing interrupts\n");
goto kfd_interrupt_error;
@@ -710,7 +713,7 @@ out:
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
if (kfd->init_complete) {
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
device_queue_manager_uninit(kfd->dqm);
kfd_interrupt_exit(kfd);
kfd_topology_remove_device(kfd);
@@ -731,7 +734,7 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd)
kfd->dqm->ops.pre_reset(kfd->dqm);
- kgd2kfd_suspend(kfd);
+ kgd2kfd_suspend(kfd, false);
kfd_signal_reset_event(kfd);
return 0;
@@ -765,21 +768,23 @@ bool kfd_is_locked(void)
return (atomic_read(&kfd_locked) > 0);
}
-void kgd2kfd_suspend(struct kfd_dev *kfd)
+void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
if (!kfd->init_complete)
return;
- /* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_locked) == 1)
- kfd_suspend_all_processes();
+ /* for runtime suspend, skip locking kfd */
+ if (!run_pm) {
+ /* For first KFD device suspend all the KFD processes */
+ if (atomic_inc_return(&kfd_locked) == 1)
+ kfd_suspend_all_processes();
+ }
kfd->dqm->ops.stop(kfd->dqm);
-
kfd_iommu_suspend(kfd);
}
-int kgd2kfd_resume(struct kfd_dev *kfd)
+int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
int ret, count;
@@ -790,10 +795,13 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_locked);
- WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
- if (count == 0)
- ret = kfd_resume_all_processes();
+ /* for runtime resume, skip unlocking kfd */
+ if (!run_pm) {
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
+ if (count == 0)
+ ret = kfd_resume_all_processes();
+ }
return ret;
}
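To make the run_pm split easier to follow, here is a minimal user-space sketch of the kfd_locked pairing, with process eviction stubbed out; the names are illustrative, not the driver's internals.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int kfd_locked;

static void suspend_all_processes(void) { puts("evict all processes"); }
static void resume_all_processes(void) { puts("restore all processes"); }

/* System suspend (run_pm == false) takes the lock and evicts on the
 * 0 -> 1 transition; runtime suspend (run_pm == true) skips the lock,
 * so user queues are only stopped, never evicted.
 */
static void example_suspend(bool run_pm)
{
	if (!run_pm && atomic_fetch_add(&kfd_locked, 1) + 1 == 1)
		suspend_all_processes();
	/* dqm stop and IOMMU suspend happen here in either case */
}

static void example_resume(bool run_pm)
{
	if (!run_pm && atomic_fetch_sub(&kfd_locked, 1) - 1 == 0)
		resume_all_processes();
}

int main(void)
{
	example_suspend(false); /* system suspend: evicts */
	example_resume(false);
	example_suspend(true);  /* runtime suspend: lock untouched */
	example_resume(true);
	return 0;
}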
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 80d22bf702e8..77ea0f0cb163 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -78,14 +78,14 @@ static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
return true;
return false;
}
-unsigned int get_queues_num(struct device_queue_manager *dqm)
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
{
- return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
+ return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
KGD_MAX_QUEUES);
}
@@ -109,6 +109,11 @@ static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines;
}
+static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
+{
+ return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
+}
+
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
return dqm->dev->device_info->num_sdma_engines
@@ -132,6 +137,22 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
qpd->sh_mem_bases);
}
+void increment_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count++;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count++;
+}
+
+void decrement_queue_count(struct device_queue_manager *dqm,
+ enum kfd_queue_type type)
+{
+ dqm->active_queue_count--;
+ if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+ dqm->active_cp_queue_count--;
+}
+
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
struct kfd_dev *dev = qpd->dqm->dev;
@@ -281,8 +302,6 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct mqd_manager *mqd_mgr;
int retval;
- print_queue(q);
-
dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
@@ -359,12 +378,7 @@ add_queue_to_list:
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
if (q->properties.is_active)
- dqm->queue_count++;
-
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
+ increment_queue_count(dqm, q->properties.type);
/*
* Unconditionally increment this counter, regardless of the queue's
@@ -446,15 +460,13 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
- if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
+ if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- } else {
+ else {
pr_debug("q->properties.type %d is invalid\n",
q->properties.type);
return -EINVAL;
@@ -494,7 +506,7 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
}
qpd->queue_count--;
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
return retval;
}
@@ -563,13 +575,13 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
/*
* check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the
- * dqm->queue_count to determine whether a new runlist must be
+ * dqm->active_queue_count to determine whether a new runlist must be
* uploaded.
*/
if (q->properties.is_active && !prev_active)
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
@@ -618,7 +630,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
continue;
@@ -662,7 +674,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = false;
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
qpd->is_debug ?
@@ -731,7 +743,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
continue;
@@ -786,7 +798,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
continue;
q->properties.is_active = true;
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
}
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -899,16 +911,15 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->next_pipe_to_allocate = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
+ dqm->active_cp_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
}
@@ -924,7 +935,7 @@ static void uninitialize(struct device_queue_manager *dqm)
{
int i;
- WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);
+ WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
@@ -966,8 +977,11 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
int bit;
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- if (dqm->sdma_bitmap == 0)
+ if (dqm->sdma_bitmap == 0) {
+			pr_err("No more SDMA queues to allocate\n");
return -ENOMEM;
+ }
+
bit = __ffs64(dqm->sdma_bitmap);
dqm->sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
@@ -976,8 +990,10 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
q->properties.sdma_queue_id = q->sdma_id /
get_num_sdma_engines(dqm);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- if (dqm->xgmi_sdma_bitmap == 0)
+ if (dqm->xgmi_sdma_bitmap == 0) {
+			pr_err("No more XGMI SDMA queues to allocate\n");
return -ENOMEM;
+ }
bit = __ffs64(dqm->xgmi_sdma_bitmap);
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
q->sdma_id = bit;
@@ -1029,7 +1045,7 @@ static int set_sched_resources(struct device_queue_manager *dqm)
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
/ dqm->dev->shared_resources.num_pipe_per_mec;
- if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
+ if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
continue;
/* only acquire queues from the first MEC */
@@ -1064,9 +1080,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
- dqm->queue_count = dqm->processes_count = 0;
- dqm->sdma_queue_count = 0;
- dqm->xgmi_sdma_queue_count = 0;
+ dqm->active_queue_count = dqm->processes_count = 0;
+ dqm->active_cp_queue_count = 0;
+
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
@@ -1158,7 +1174,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
- dqm->queue_count++;
+ increment_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
dqm_unlock(dqm);
@@ -1172,7 +1188,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
{
dqm_lock(dqm);
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
/*
@@ -1238,13 +1254,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
list_add(&q->list, &qpd->queues_list);
qpd->queue_count++;
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
- dqm->sdma_queue_count++;
- else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
- dqm->xgmi_sdma_queue_count++;
-
if (q->properties.is_active) {
- dqm->queue_count++;
+ increment_queue_count(dqm, q->properties.type);
+
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
}
@@ -1298,20 +1310,6 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
return 0;
}
-static int unmap_sdma_queues(struct device_queue_manager *dqm)
-{
- int i, retval = 0;
-
- for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
- dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
- retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
- KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
- if (retval)
- return retval;
- }
- return retval;
-}
-
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
@@ -1319,7 +1317,7 @@ static int map_queues_cpsch(struct device_queue_manager *dqm)
if (!dqm->sched_running)
return 0;
- if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
+ if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
return 0;
if (dqm->active_runlist)
return 0;
@@ -1349,12 +1347,6 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
if (!dqm->active_runlist)
return retval;
- pr_debug("Before destroying queues, sdma queue count is : %u, xgmi sdma queue count is : %u\n",
- dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);
-
- if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
- unmap_sdma_queues(dqm);
-
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
if (retval)
@@ -1427,18 +1419,15 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
list_del(&q->list);
qpd->queue_count--;
if (q->properties.is_active) {
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
@@ -1648,7 +1637,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
- dqm->queue_count--;
+ decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1656,16 +1645,13 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
- if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
- dqm->sdma_queue_count--;
+ if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
- } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
- dqm->xgmi_sdma_queue_count--;
+ else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
- }
if (q->properties.is_active)
- dqm->queue_count--;
+ decrement_queue_count(dqm, q->properties.type);
dqm->total_queue_count--;
}
@@ -1742,14 +1728,13 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
- (dev->device_info->num_sdma_engines +
- dev->device_info->num_xgmi_sdma_engines) *
+ get_num_all_sdma_engines(dqm) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
- (void *)&(mem_obj->cpu_ptr), true);
+ (void *)&(mem_obj->cpu_ptr), false);
return retval;
}
@@ -1979,7 +1964,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue,
- dqm->dev->shared_resources.queue_bitmap))
+ dqm->dev->shared_resources.cp_queue_bitmap))
continue;
r = dqm->dev->kfd2kgd->hqd_dump(
@@ -1995,8 +1980,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < get_num_sdma_engines(dqm) +
- get_num_xgmi_sdma_engines(dqm); pipe++) {
+ for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
for (queue = 0;
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
queue++) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 871d3b628d2d..50d919f814e9 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -180,9 +180,8 @@ struct device_queue_manager {
struct list_head queues;
unsigned int saved_flags;
unsigned int processes_count;
- unsigned int queue_count;
- unsigned int sdma_queue_count;
- unsigned int xgmi_sdma_queue_count;
+ unsigned int active_queue_count;
+ unsigned int active_cp_queue_count;
unsigned int total_queue_count;
unsigned int next_pipe_to_allocate;
unsigned int *allocated_queues;
@@ -219,7 +218,7 @@ void device_queue_manager_init_v10_navi10(
struct device_queue_manager_asic_ops *asic_ops);
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd);
-unsigned int get_queues_num(struct device_queue_manager *dqm);
+unsigned int get_cp_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 1f8365575b12..15476fca8fa6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -187,7 +187,7 @@ static int create_signal_event(struct file *devkfd,
if (p->signal_mapped_size &&
p->signal_event_count == p->signal_mapped_size / 8) {
if (!p->signal_event_limit_reached) {
- pr_warn("Signal event wasn't created because limit was reached\n");
+ pr_debug("Signal event wasn't created because limit was reached\n");
p->signal_event_limit_reached = true;
}
return -ENOSPC;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
index bb77b8890e77..78714f9a8b11 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_flat_memory.c
@@ -316,7 +316,7 @@ static void kfd_init_apertures_vi(struct kfd_process_device *pdd, uint8_t id)
{
/*
* node id couldn't be 0 - the three MSB bits of
- * aperture shoudn't be 0
+ * aperture shouldn't be 0
*/
pdd->lds_base = MAKE_LDS_APP_BASE_VI();
pdd->lds_limit = MAKE_LDS_APP_LIMIT(pdd->lds_base);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 436b7f518979..48cda3073b70 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -87,9 +87,21 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
int retval;
struct kfd_mem_obj *mqd_mem_obj = NULL;
- /* From V9, for CWSR, the control stack is located on the next page
- * boundary after the mqd, we will use the gtt allocation function
- * instead of sub-allocation function.
+	/* For V9 only, due to a HW bug, the control stack of a user mode
+	 * compute queue needs to be allocated just behind the page boundary
+	 * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
+	 * the first page of the buffer serves the regular MQD purpose and
+	 * the remainder holds the control stack. Although the two parts are
+	 * in the same buffer object, they need different memory types: the
+	 * MQD part needs UC (uncached) as usual, while the control stack
+	 * needs NC (non-coherent), unlike the UC type used when the control
+	 * stack is allocated in user space.
+	 *
+	 * Because of all this, we use the gtt allocation function instead
+	 * of the sub-allocation function for this enlarged MQD buffer. And
+	 * to achieve two memory types in a single buffer object, we pass the
+	 * special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to instruct the
+	 * amdgpu memory functions to do so.
+	 */
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
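As a concrete reading of the comment above, here is a hedged sketch of the enlarged single-BO layout; the page size is a placeholder and the control stack size would come from the MQD manager in practice.

#include <stddef.h>

#define EXAMPLE_PAGE_SIZE 4096u /* stand-in for the kernel's PAGE_SIZE */

struct enlarged_mqd_layout {
	size_t mqd_offset;       /* regular MQD: page 0 of the BO */
	size_t ctl_stack_offset; /* control stack: just past the boundary */
	size_t total_size;       /* one GTT BO covers both parts */
};

static struct enlarged_mqd_layout layout_for(size_t ctl_stack_size)
{
	struct enlarged_mqd_layout l = {
		.mqd_offset = 0,
		.ctl_stack_offset = EXAMPLE_PAGE_SIZE,
		.total_size = EXAMPLE_PAGE_SIZE + ctl_stack_size,
	};

	return l;
}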
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index dc406e6dee23..efdb75e7677b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -47,9 +47,8 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
struct kfd_dev *dev = pm->dqm->dev;
process_count = pm->dqm->processes_count;
- queue_count = pm->dqm->queue_count;
- compute_queue_count = queue_count - pm->dqm->sdma_queue_count -
- pm->dqm->xgmi_sdma_queue_count;
+ queue_count = pm->dqm->active_queue_count;
+ compute_queue_count = pm->dqm->active_cp_queue_count;
/* check if there is over subscription
* Note: the arbitration between the number of VMIDs and
@@ -62,7 +61,7 @@ static void pm_calc_rlib_size(struct packet_manager *pm,
max_proc_per_quantum = dev->max_proc_per_quantum;
if ((process_count > max_proc_per_quantum) ||
- compute_queue_count > get_queues_num(pm->dqm)) {
+ compute_queue_count > get_cp_queues_num(pm->dqm)) {
*over_subscription = true;
pr_debug("Over subscribed runlist\n");
}
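With the renamed counters, the oversubscription test reduces to two comparisons. A standalone restatement with illustrative parameter names:

#include <stdbool.h>

/* The runlist is oversubscribed when more processes than VMIDs, or
 * more active CP queues than HQD slots, have to be scheduled.
 */
static bool over_subscribed(unsigned int process_count,
			    unsigned int active_cp_queues,
			    unsigned int max_processes,
			    unsigned int cp_queue_slots)
{
	return process_count > max_processes ||
	       active_cp_queues > cp_queue_slots;
}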
@@ -141,7 +140,7 @@ static int pm_create_runlist_ib(struct packet_manager *pm,
pm->ib_size_bytes = alloc_size_bytes;
pr_debug("Building runlist ib process count: %d queues count %d\n",
- pm->dqm->processes_count, pm->dqm->queue_count);
+ pm->dqm->processes_count, pm->dqm->active_queue_count);
/* build the run list ib packet */
list_for_each_entry(cur, queues, list) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 6af1b5881f43..4a3049841086 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -41,6 +41,7 @@
#include <drm/drm_drv.h>
#include <drm/drm_device.h>
#include <kgd_kfd_interface.h>
+#include <linux/swap.h>
#include "amd_shared.h"
@@ -293,6 +294,9 @@ struct kfd_dev {
/* xGMI */
uint64_t hive_id;
+
+ /* UUID */
+ uint64_t unique_id;
bool pci_atomic_requested;
@@ -502,6 +506,9 @@ struct queue {
struct kfd_process *process;
struct kfd_dev *device;
void *gws;
+
+ /* procfs */
+ struct kobject kobj;
};
/*
@@ -646,6 +653,7 @@ struct kfd_process_device {
* function.
*/
bool already_dequeued;
+ bool runtime_inuse;
/* Is this process/pasid bound to this device? (amd_iommu_bind_pasid) */
enum kfd_pdd_bound bound;
@@ -729,6 +737,7 @@ struct kfd_process {
/* Kobj for our procfs */
struct kobject *kobj;
+ struct kobject *kobj_queues;
struct attribute attr_pasid;
};
@@ -835,6 +844,8 @@ extern struct device *kfd_device;
/* KFD's procfs */
void kfd_procfs_init(void);
void kfd_procfs_shutdown(void);
+int kfd_procfs_add_queue(struct queue *q);
+void kfd_procfs_del_queue(struct queue *q);
/* Topology */
int kfd_topology_init(void);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 25b90f70aecd..fe0cd49d4ea7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -31,6 +31,7 @@
#include <linux/compat.h>
#include <linux/mman.h>
#include <linux/file.h>
+#include <linux/pm_runtime.h>
#include "amdgpu_amdkfd.h"
#include "amdgpu.h"
@@ -132,6 +133,88 @@ void kfd_procfs_shutdown(void)
}
}
+static ssize_t kfd_procfs_queue_show(struct kobject *kobj,
+ struct attribute *attr, char *buffer)
+{
+ struct queue *q = container_of(kobj, struct queue, kobj);
+
+ if (!strcmp(attr->name, "size"))
+ return snprintf(buffer, PAGE_SIZE, "%llu",
+ q->properties.queue_size);
+ else if (!strcmp(attr->name, "type"))
+ return snprintf(buffer, PAGE_SIZE, "%d", q->properties.type);
+ else if (!strcmp(attr->name, "gpuid"))
+ return snprintf(buffer, PAGE_SIZE, "%u", q->device->id);
+ else
+ pr_err("Invalid attribute");
+
+ return 0;
+}
+
+static struct attribute attr_queue_size = {
+ .name = "size",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_type = {
+ .name = "type",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute attr_queue_gpuid = {
+ .name = "gpuid",
+ .mode = KFD_SYSFS_FILE_MODE
+};
+
+static struct attribute *procfs_queue_attrs[] = {
+ &attr_queue_size,
+ &attr_queue_type,
+ &attr_queue_gpuid,
+ NULL
+};
+
+static const struct sysfs_ops procfs_queue_ops = {
+ .show = kfd_procfs_queue_show,
+};
+
+static struct kobj_type procfs_queue_type = {
+ .sysfs_ops = &procfs_queue_ops,
+ .default_attrs = procfs_queue_attrs,
+};
+
+int kfd_procfs_add_queue(struct queue *q)
+{
+ struct kfd_process *proc;
+ int ret;
+
+ if (!q || !q->process)
+ return -EINVAL;
+ proc = q->process;
+
+ /* Create proc/<pid>/queues/<queue id> folder */
+ if (!proc->kobj_queues)
+ return -EFAULT;
+ ret = kobject_init_and_add(&q->kobj, &procfs_queue_type,
+ proc->kobj_queues, "%u", q->properties.queue_id);
+ if (ret < 0) {
+ pr_warn("Creating proc/<pid>/queues/%u failed",
+ q->properties.queue_id);
+ kobject_put(&q->kobj);
+ return ret;
+ }
+
+ return 0;
+}
+
+void kfd_procfs_del_queue(struct queue *q)
+{
+ if (!q)
+ return;
+
+ kobject_del(&q->kobj);
+ kobject_put(&q->kobj);
+}
+
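For reference, a small user-space reader for the queue attributes exported above; the sysfs root in the path is an assumption about where the KFD proc kobject is mounted, so adjust it for a real system.

#include <stdio.h>

static void dump_queue_attr(int pid, unsigned int qid, const char *attr)
{
	char path[128];
	char buf[64];
	FILE *f;

	/* assumed location of the KFD "proc" kobject */
	snprintf(path, sizeof(path),
		 "/sys/class/kfd/kfd/proc/%d/queues/%u/%s", pid, qid, attr);
	f = fopen(path, "r");
	if (!f)
		return;
	if (fgets(buf, sizeof(buf), f))
		printf("%s = %s\n", attr, buf);
	fclose(f);
}

int main(void)
{
	/* hypothetical pid and queue id */
	dump_queue_attr(1234, 0, "size");
	dump_queue_attr(1234, 0, "type");
	dump_queue_attr(1234, 0, "gpuid");
	return 0;
}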
int kfd_process_create_wq(void)
{
if (!kfd_process_wq)
@@ -244,10 +327,10 @@ err_alloc_mem:
static int kfd_process_device_reserve_ib_mem(struct kfd_process_device *pdd)
{
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
- ALLOC_MEM_FLAGS_WRITABLE |
- ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT |
+ KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE |
+ KFD_IOC_ALLOC_MEM_FLAGS_WRITABLE |
+ KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -323,6 +406,11 @@ struct kfd_process *kfd_create_process(struct file *filep)
if (ret)
pr_warn("Creating pasid for pid %d failed",
(int)process->lead_thread->pid);
+
+ process->kobj_queues = kobject_create_and_add("queues",
+ process->kobj);
+ if (!process->kobj_queues)
+ pr_warn("Creating KFD proc/queues folder failed");
}
out:
if (!IS_ERR(process))
@@ -440,6 +528,16 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
kfree(pdd->qpd.doorbell_bitmap);
idr_destroy(&pdd->alloc_idr);
+ /*
+ * before destroying pdd, make sure to report availability
+ * for auto suspend
+ */
+ if (pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(pdd->dev->ddev->dev);
+ pm_runtime_put_autosuspend(pdd->dev->ddev->dev);
+ pdd->runtime_inuse = false;
+ }
+
kfree(pdd);
}
}
@@ -457,6 +555,9 @@ static void kfd_process_wq_release(struct work_struct *work)
/* Remove the procfs files */
if (p->kobj) {
sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
kobject_del(p->kobj);
kobject_put(p->kobj);
p->kobj = NULL;
@@ -540,6 +641,11 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
/* Indicate to other users that MM is no longer valid */
p->mm = NULL;
+ /* Signal the eviction fence after user mode queues are
+ * destroyed. This allows any BOs to be freed without
+ * triggering pointless evictions or waiting for fences.
+ */
+ dma_fence_signal(p->ef);
mutex_unlock(&p->mutex);
@@ -591,8 +697,9 @@ static int kfd_process_device_init_cwsr_dgpu(struct kfd_process_device *pdd)
{
struct kfd_dev *dev = pdd->dev;
struct qcm_process_device *qpd = &pdd->qpd;
- uint32_t flags = ALLOC_MEM_FLAGS_GTT |
- ALLOC_MEM_FLAGS_NO_SUBSTITUTE | ALLOC_MEM_FLAGS_EXECUTABLE;
+ uint32_t flags = KFD_IOC_ALLOC_MEM_FLAGS_GTT
+ | KFD_IOC_ALLOC_MEM_FLAGS_NO_SUBSTITUTE
+ | KFD_IOC_ALLOC_MEM_FLAGS_EXECUTABLE;
void *kaddr;
int ret;
@@ -754,6 +861,7 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_dev *dev,
pdd->process = p;
pdd->bound = PDD_UNBOUND;
pdd->already_dequeued = false;
+ pdd->runtime_inuse = false;
list_add(&pdd->per_device_list, &p->per_device_data);
/* Init idr used for memory handle translation */
@@ -843,15 +951,41 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
+	/*
+	 * Signal the runtime-PM system to auto-resume, and prevent
+	 * further runtime suspend once a device pdd is created, until
+	 * the pdd is destroyed.
+	 */
+ if (!pdd->runtime_inuse) {
+ err = pm_runtime_get_sync(dev->ddev->dev);
+ if (err < 0)
+ return ERR_PTR(err);
+ }
+
err = kfd_iommu_bind_process_to_device(pdd);
if (err)
- return ERR_PTR(err);
+ goto out;
err = kfd_process_device_init_vm(pdd, NULL);
if (err)
- return ERR_PTR(err);
+ goto out;
+
+ /*
+ * make sure that runtime_usage counter is incremented just once
+ * per pdd
+ */
+ pdd->runtime_inuse = true;
return pdd;
+
+out:
+ /* balance runpm reference count and exit with error */
+ if (!pdd->runtime_inuse) {
+ pm_runtime_mark_last_busy(dev->ddev->dev);
+ pm_runtime_put_autosuspend(dev->ddev->dev);
+ }
+
+ return ERR_PTR(err);
}
struct kfd_process_device *kfd_get_first_process_device_data(
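The get/put discipline above follows the standard runtime-PM pairing. A condensed restatement with stub types and an illustrative bind step; the pm_runtime_* calls are the real kernel API, everything else is a sketch.

#include <linux/pm_runtime.h>

struct example_pdd {
	struct device *dev;
	bool runtime_inuse;
};

static int example_do_bind(struct example_pdd *pdd)
{
	return 0; /* stub for iommu bind + vm init */
}

/* Take at most one runtime-PM reference per pdd lifetime; the error
 * path drops the reference only if this call was the one to take it.
 */
static int example_bind(struct example_pdd *pdd)
{
	int err;

	if (!pdd->runtime_inuse) {
		err = pm_runtime_get_sync(pdd->dev); /* resumes if suspended */
		if (err < 0)
			return err;
	}

	err = example_do_bind(pdd);
	if (err)
		goto out_put;

	pdd->runtime_inuse = true; /* dropped when the pdd is destroyed */
	return 0;

out_put:
	if (!pdd->runtime_inuse) {
		pm_runtime_mark_last_busy(pdd->dev);
		pm_runtime_put_autosuspend(pdd->dev);
	}
	return err;
}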
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 31fcd1b51f00..084c35f55d59 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -241,23 +241,18 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
case KFD_QUEUE_TYPE_SDMA_XGMI:
- if ((type == KFD_QUEUE_TYPE_SDMA && dev->dqm->sdma_queue_count
- >= get_num_sdma_queues(dev->dqm)) ||
- (type == KFD_QUEUE_TYPE_SDMA_XGMI &&
- dev->dqm->xgmi_sdma_queue_count
- >= get_num_xgmi_sdma_queues(dev->dqm))) {
- pr_debug("Over-subscription is not allowed for SDMA.\n");
- retval = -EPERM;
- goto err_create_queue;
- }
-
+		/* SDMA queues are always allocated statically no matter
+		 * which scheduler mode is used. There is also no need to
+		 * check whether an SDMA queue can be allocated here, because
+		 * allocate_sdma_queue() in create_queue() performs the
+		 * corresponding check.
+		 */
retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
if (retval != 0)
goto err_create_queue;
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
@@ -266,7 +261,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if ((dev->dqm->sched_policy ==
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
- (dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
+ (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
@@ -278,7 +273,6 @@ int pqm_create_queue(struct process_queue_manager *pqm,
pqn->q = q;
pqn->kq = NULL;
retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd);
- pr_debug("DQM returned %d for create_queue\n", retval);
print_queue(q);
break;
case KFD_QUEUE_TYPE_DIQ:
@@ -299,7 +293,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
}
if (retval != 0) {
- pr_err("Pasid 0x%x DQM create queue %d failed. ret %d\n",
+ pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
pqm->process->pasid, type, retval);
goto err_create_queue;
}
@@ -322,12 +316,16 @@ int pqm_create_queue(struct process_queue_manager *pqm,
if (q) {
pr_debug("PQM done creating queue\n");
+ kfd_procfs_add_queue(q);
print_queue_properties(&q->properties);
}
return retval;
err_create_queue:
+ uninit_queue(q);
+ if (kq)
+ kernel_queue_uninit(kq, false);
kfree(pqn);
err_allocate_pqn:
/* check if queues list is empty unregister process from device */
@@ -378,6 +376,7 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
}
if (pqn->q) {
+ kfd_procfs_del_queue(pqn->q);
dqm = pqn->q->device->dqm;
retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
if (retval) {
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
index 203c823d65f1..aa0bfa78a667 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c
@@ -490,6 +490,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
dev->node_props.num_sdma_queues_per_engine);
sysfs_show_32bit_prop(buffer, "num_cp_queues",
dev->node_props.num_cp_queues);
+ sysfs_show_64bit_prop(buffer, "unique_id",
+ dev->node_props.unique_id);
if (dev->gpu) {
log_max_watch_addr =
@@ -1318,7 +1320,8 @@ int kfd_topology_add_device(struct kfd_dev *gpu)
dev->node_props.num_gws = (hws_gws_support &&
dev->gpu->dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) ?
amdgpu_amdkfd_get_num_gws(dev->gpu->kgd) : 0;
- dev->node_props.num_cp_queues = get_queues_num(dev->gpu->dqm);
+ dev->node_props.num_cp_queues = get_cp_queues_num(dev->gpu->dqm);
+ dev->node_props.unique_id = gpu->unique_id;
kfd_fill_mem_clk_max_info(dev);
kfd_fill_iolink_non_crat_info(dev);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
index 74e9b1682af8..46eeecaf1b68 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.h
@@ -54,6 +54,7 @@
struct kfd_node_properties {
uint64_t hive_id;
+ uint64_t unique_id;
uint32_t cpu_cores_count;
uint32_t simd_count;
uint32_t mem_banks_count;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 279541517a99..a4256780e70e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -98,6 +98,9 @@ MODULE_FIRMWARE(FIRMWARE_RENOIR_DMUB);
#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"
MODULE_FIRMWARE(FIRMWARE_RAVEN_DMCU);
+#define FIRMWARE_NAVI12_DMCU "amdgpu/navi12_dmcu.bin"
+MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
+
/* Number of bytes in PSP header for firmware. */
#define PSP_HEADER_BYTES 0x100
@@ -129,9 +132,6 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev);
/* removes and deallocates the drm structures, created by the above function */
static void amdgpu_dm_destroy_drm_device(struct amdgpu_display_manager *dm);
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector);
-
static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm,
struct drm_plane *plane,
unsigned long possible_crtcs,
@@ -383,8 +383,8 @@ static void dm_pflip_high_irq(void *interrupt_params)
* of pageflip completion, so last_flip_vblank is the forbidden count
* for queueing new pageflips if vsync + VRR is enabled.
*/
- amdgpu_crtc->last_flip_vblank = amdgpu_get_vblank_counter_kms(adev->ddev,
- amdgpu_crtc->crtc_id);
+ amdgpu_crtc->last_flip_vblank =
+ amdgpu_get_vblank_counter_kms(&amdgpu_crtc->base);
amdgpu_crtc->pflip_status = AMDGPU_FLIP_NONE;
spin_unlock_irqrestore(&adev->ddev->event_lock, flags);
@@ -407,8 +407,9 @@ static void dm_vupdate_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling is done here after end of front-porch in
* vrr mode, as vblank timestamping will give valid results
@@ -458,8 +459,9 @@ static void dm_crtc_high_irq(void *interrupt_params)
if (acrtc) {
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n",
+ acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
/* Core vblank handling at start of front-porch is only possible
* in non-vrr mode, as only there vblank timestamping will give
@@ -522,8 +524,8 @@ static void dm_dcn_crtc_high_irq(void *interrupt_params)
acrtc_state = to_dm_crtc_state(acrtc->base.state);
- DRM_DEBUG_DRIVER("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
- amdgpu_dm_vrr_active(acrtc_state));
+ DRM_DEBUG_VBL("crtc:%d, vupdate-vrr:%d\n", acrtc->crtc_id,
+ amdgpu_dm_vrr_active(acrtc_state));
amdgpu_dm_crtc_handle_crc_irq(&acrtc->base);
drm_crtc_handle_vblank(&acrtc->base);
@@ -801,10 +803,20 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
fw_bss_data_size = le32_to_cpu(hdr->bss_data_bytes);
- memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
- fw_inst_const_size);
+	/* If adev->firmware.load_type == AMDGPU_FW_LOAD_PSP,
+	 * amdgpu_ucode_init_single_fw loads the fw_inst_const part of the
+	 * dmub firmware into cw0; otherwise, the firmware backdoor load
+	 * is done here in dm_dmub_hw_init.
+	 */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ memcpy(fb_info->fb[DMUB_WINDOW_0_INST_CONST].cpu_addr, fw_inst_const,
+ fw_inst_const_size);
+ }
+
memcpy(fb_info->fb[DMUB_WINDOW_2_BSS_DATA].cpu_addr, fw_bss_data,
fw_bss_data_size);
+
+ /* Copy firmware bios info into FB memory. */
memcpy(fb_info->fb[DMUB_WINDOW_3_VBIOS].cpu_addr, adev->bios,
adev->bios_size);
@@ -823,6 +835,10 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev)
hw_params.fb_base = adev->gmc.fb_start;
hw_params.fb_offset = adev->gmc.aper_base;
+ /* backdoor load firmware and trigger dmub running */
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ hw_params.load_inst_const = true;
+
if (dmcu)
hw_params.psp_version = dmcu->psp_version;
@@ -885,7 +901,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
init_data.asic_id.chip_family = adev->family;
- init_data.asic_id.pci_revision_id = adev->rev_id;
+ init_data.asic_id.pci_revision_id = adev->pdev->revision;
init_data.asic_id.hw_internal_rev = adev->external_rev_id;
init_data.asic_id.vram_width = adev->gmc.vram_width;
@@ -960,7 +976,7 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
#ifdef CONFIG_DRM_AMD_DC_HDCP
if (adev->asic_type >= CHIP_RAVEN) {
- adev->dm.hdcp_workqueue = hdcp_create_workqueue(&adev->psp, &init_params.cp_psp, adev->dm.dc);
+ adev->dm.hdcp_workqueue = hdcp_create_workqueue(adev, &init_params.cp_psp, adev->dm.dc);
if (!adev->dm.hdcp_workqueue)
DRM_ERROR("amdgpu: failed to initialize hdcp_workqueue.\n");
@@ -991,11 +1007,6 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
goto error;
}
-#if defined(CONFIG_DEBUG_FS)
- if (dtn_debugfs_init(adev))
- DRM_ERROR("amdgpu: failed initialize dtn debugfs support.\n");
-#endif
-
DRM_DEBUG_DRIVER("KMS initialized.\n");
return 0;
@@ -1079,9 +1090,11 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
case CHIP_VEGA20:
case CHIP_NAVI10:
case CHIP_NAVI14:
- case CHIP_NAVI12:
case CHIP_RENOIR:
return 0;
+ case CHIP_NAVI12:
+ fw_name_dmcu = FIRMWARE_NAVI12_DMCU;
+ break;
case CHIP_RAVEN:
if (ASICREV_IS_PICASSO(adev->external_rev_id))
fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
@@ -1192,22 +1205,21 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
return 0;
}
- if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
- DRM_WARN("Only PSP firmware loading is supported for DMUB\n");
- return 0;
- }
-
hdr = (const struct dmcub_firmware_header_v1_0 *)adev->dm.dmub_fw->data;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
- AMDGPU_UCODE_ID_DMCUB;
- adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw = adev->dm.dmub_fw;
- adev->firmware.fw_size +=
- ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].ucode_id =
+ AMDGPU_UCODE_ID_DMCUB;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_DMCUB].fw =
+ adev->dm.dmub_fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->inst_const_bytes), PAGE_SIZE);
- DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
- adev->dm.dmcub_fw_version);
+ DRM_INFO("Loading DMUB firmware via PSP: version=0x%08X\n",
+ adev->dm.dmcub_fw_version);
+ }
+
+ adev->dm.dmcub_fw_version = le32_to_cpu(hdr->header.ucode_version);
adev->dm.dmub_srv = kzalloc(sizeof(*adev->dm.dmub_srv), GFP_KERNEL);
dmub_srv = adev->dm.dmub_srv;
@@ -1422,6 +1434,73 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend)
drm_kms_helper_hotplug_event(dev);
}
+static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = &adev->smu;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+	/* This interface is for dGPU Navi1x. The Linux dc-pplib interface
+	 * depends on the Windows driver dc implementation.
+	 * For Navi1x, the clock settings of the dcn watermarks are fixed;
+	 * the settings should be passed to smu during boot up and on resume
+	 * from s3.
+	 * boot up: dc calculates the dcn watermark clock settings within
+	 * dc_create and dcn20_resource_construct,
+	 * then calls the pplib functions below to pass the settings to smu:
+	 * smu_set_watermarks_for_clock_ranges
+	 * smu_set_watermarks_table
+	 * navi10_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Renoir, the clock settings of the dcn watermarks are also
+	 * fixed values. dc has implemented a different flow for the Windows
+	 * driver:
+	 * dc_hardware_init / dc_set_power_state
+	 * dcn10_init_hw
+	 * notify_wm_ranges
+	 * set_wm_ranges
+	 * -- Linux
+	 * smu_set_watermarks_for_clock_ranges
+	 * renoir_set_watermarks_table
+	 * smu_write_watermarks_table
+	 *
+	 * For Linux,
+	 * dc_hardware_init -> amdgpu_dm_init
+	 * dc_set_power_state --> dm_resume
+	 *
+	 * Therefore, this function applies to navi10/12/14 but not to
+	 * Renoir.
+	 */
+	switch (adev->asic_type) {
+ case CHIP_NAVI10:
+ case CHIP_NAVI14:
+ case CHIP_NAVI12:
+ break;
+ default:
+ return 0;
+ }
+
+ mutex_lock(&smu->mutex);
+
+ /* pass data to smu controller */
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+
+ if (ret) {
+ mutex_unlock(&smu->mutex);
+ DRM_ERROR("Failed to update WMTABLE!\n");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ mutex_unlock(&smu->mutex);
+
+ return 0;
+}
+
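The bitmap gating above is a write-once handshake between DC and SMU. A tiny sketch of the predicate; the flag values are assumptions, since the real WATERMARKS_* definitions live in the SMU headers.

#include <stdbool.h>

#define WM_EXIST  (1u << 0) /* dc has produced watermark settings */
#define WM_LOADED (1u << 1) /* smu has already consumed them */

/* Write the table only when fresh settings exist that the SMU has not
 * loaded yet; on a successful write the caller sets WM_LOADED.
 */
static bool watermarks_need_write(unsigned int bitmap)
{
	return (bitmap & WM_EXIST) && !(bitmap & WM_LOADED);
}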
/**
* dm_hw_init() - Initialize DC device
* @handle: The base driver device containing the amdgpu_dm device.
@@ -1700,6 +1779,8 @@ static int dm_resume(void *handle)
amdgpu_dm_irq_resume_late(adev);
+ amdgpu_dm_smu_write_watermarks_table(adev);
+
return 0;
}
@@ -1758,8 +1839,63 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = {
.atomic_commit_tail = amdgpu_dm_atomic_commit_tail
};
-static void
-amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
+static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector)
+{
+ u32 max_cll, min_cll, max, min, q, r;
+ struct amdgpu_dm_backlight_caps *caps;
+ struct amdgpu_display_manager *dm;
+ struct drm_connector *conn_base;
+ struct amdgpu_device *adev;
+ static const u8 pre_computed_values[] = {
+ 50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
+ 71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};
+
+ if (!aconnector || !aconnector->dc_link)
+ return;
+
+ conn_base = &aconnector->base;
+ adev = conn_base->dev->dev_private;
+ dm = &adev->dm;
+ caps = &dm->backlight_caps;
+ caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps;
+ caps->aux_support = false;
+ max_cll = conn_base->hdr_sink_metadata.hdmi_type1.max_cll;
+ min_cll = conn_base->hdr_sink_metadata.hdmi_type1.min_cll;
+
+ if (caps->ext_caps->bits.oled == 1 ||
+ caps->ext_caps->bits.sdr_aux_backlight_control == 1 ||
+ caps->ext_caps->bits.hdr_aux_backlight_control == 1)
+ caps->aux_support = true;
+
+	/* From the specification (CTA-861-G), the maximum luminance is
+	 * calculated as:
+	 * Luminance = 50*2**(CV/32)
+	 * where CV is a one-byte value.
+	 * Evaluating this expression directly would require floating point
+	 * precision; to avoid that complexity, we take advantage of the fact
+	 * that CV is divided by a constant. From Euclid's division algorithm,
+	 * we know that CV can be written as CV = 32*q + r. Substituting CV
+	 * in the Luminance expression gives 50*(2**q)*(2**(r/32)), so we
+	 * only need to pre-compute the values of 50*2**(r/32). For
+	 * pre-computing them we used the following Ruby line:
+	 * (0...32).each {|cv| puts (50*2**(cv/32.0)).round}
+	 * The results of the above expression can be verified against
+	 * pre_computed_values.
+	 */
+ q = max_cll >> 5;
+ r = max_cll % 32;
+ max = (1 << q) * pre_computed_values[r];
+
+ // min luminance: maxLum * (CV/255)^2 / 100
+ q = DIV_ROUND_CLOSEST(min_cll, 255);
+ min = max * DIV_ROUND_CLOSEST((q * q), 100);
+
+ caps->aux_max_input_signal = max;
+ caps->aux_min_input_signal = min;
+}
+
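A worked instance of the CTA-861-G math above, reusing the same lookup table: max_cll = 65 gives q = 2 and r = 1, so max = (1 << 2) * 51 = 204 nits, against the exact 50*2**(65/32) of roughly 204.4.

#include <stdio.h>

static const unsigned char pre_computed_values[] = {
	50, 51, 52, 53, 55, 56, 57, 58, 59, 61, 62, 63, 65, 66, 68, 69,
	71, 72, 74, 75, 77, 79, 81, 82, 84, 86, 88, 90, 92, 94, 96, 98};

int main(void)
{
	unsigned int max_cll = 65;     /* sample CV from HDR sink metadata */
	unsigned int q = max_cll >> 5; /* CV / 32 */
	unsigned int r = max_cll % 32; /* CV mod 32 */
	unsigned int max = (1u << q) * pre_computed_values[r];

	printf("max luminance ~= %u nits\n", max); /* prints 204 */
	return 0;
}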
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector)
{
struct drm_connector *connector = &aconnector->base;
struct drm_device *dev = connector->dev;
@@ -1872,7 +2008,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->edid);
}
amdgpu_dm_update_freesync_caps(connector, aconnector->edid);
-
+ update_connector_ext_caps(aconnector);
} else {
drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_update_freesync_caps(connector, NULL);
@@ -1911,7 +2047,7 @@ static void handle_hpd_irq(void *param)
mutex_lock(&aconnector->hpd_lock);
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN)
+ if (adev->dm.hdcp_workqueue)
hdcp_reset_display(adev->dm.hdcp_workqueue, aconnector->dc_link->link_index);
#endif
if (aconnector->fake_enable)
@@ -2088,8 +2224,10 @@ static void handle_hpd_rx_irq(void *param)
}
}
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ)
- hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ if (hpd_irq_data.bytes.device_service_irq.bits.CP_IRQ) {
+ if (adev->dm.hdcp_workqueue)
+ hdcp_handle_cpirq(adev->dm.hdcp_workqueue, aconnector->base.index);
+ }
#endif
if ((dc_link->cur_link_settings.lane_count != LANE_COUNT_UNKNOWN) ||
(dc_link->type == dc_connection_mst_branch))
@@ -2484,6 +2622,7 @@ static int amdgpu_dm_mode_config_init(struct amdgpu_device *adev)
#define AMDGPU_DM_DEFAULT_MIN_BACKLIGHT 12
#define AMDGPU_DM_DEFAULT_MAX_BACKLIGHT 255
+#define AUX_BL_DEFAULT_TRANSITION_TIME_MS 50
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) ||\
defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
@@ -2498,9 +2637,11 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
amdgpu_acpi_get_backlight_caps(dm->adev, &caps);
if (caps.caps_valid) {
+ dm->backlight_caps.caps_valid = true;
+ if (caps.aux_support)
+ return;
dm->backlight_caps.min_input_signal = caps.min_input_signal;
dm->backlight_caps.max_input_signal = caps.max_input_signal;
- dm->backlight_caps.caps_valid = true;
} else {
dm->backlight_caps.min_input_signal =
AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
@@ -2508,40 +2649,95 @@ static void amdgpu_dm_update_backlight_caps(struct amdgpu_display_manager *dm)
AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
}
#else
+ if (dm->backlight_caps.aux_support)
+ return;
+
dm->backlight_caps.min_input_signal = AMDGPU_DM_DEFAULT_MIN_BACKLIGHT;
dm->backlight_caps.max_input_signal = AMDGPU_DM_DEFAULT_MAX_BACKLIGHT;
#endif
}
+static int set_backlight_via_aux(struct dc_link *link, uint32_t brightness)
+{
+ bool rc;
+
+ if (!link)
+ return 1;
+
+ rc = dc_link_set_backlight_level_nits(link, true, brightness,
+ AUX_BL_DEFAULT_TRANSITION_TIME_MS);
+
+ return rc ? 0 : 1;
+}
+
+static u32 convert_brightness(const struct amdgpu_dm_backlight_caps *caps,
+ const uint32_t user_brightness)
+{
+ u32 min, max, conversion_pace;
+ u32 brightness = user_brightness;
+
+ if (!caps)
+ goto out;
+
+ if (!caps->aux_support) {
+ max = caps->max_input_signal;
+ min = caps->min_input_signal;
+ /*
+ * The brightness input is in the range 0-255
+ * It needs to be rescaled to be between the
+ * requested min and max input signal
+ * It also needs to be scaled up by 0x101 to
+ * match the DC interface which has a range of
+ * 0 to 0xffff
+ */
+ conversion_pace = 0x101;
+ brightness =
+ user_brightness
+ * conversion_pace
+ * (max - min)
+ / AMDGPU_MAX_BL_LEVEL
+ + min * conversion_pace;
+ } else {
+ /* TODO
+ * We are doing a linear interpolation here, which is OK but
+ * does not provide the optimal result. We probably want
+ * something close to the Perceptual Quantizer (PQ) curve.
+ */
+ max = caps->aux_max_input_signal;
+ min = caps->aux_min_input_signal;
+
+ brightness = (AMDGPU_MAX_BL_LEVEL - user_brightness) * min
+ + user_brightness * max;
+		// Multiply the value by 1000 since we use millinits
+ brightness *= 1000;
+ brightness = DIV_ROUND_CLOSEST(brightness, AMDGPU_MAX_BL_LEVEL);
+ }
+
+out:
+ return brightness;
+}
+
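A quick sanity check of the non-AUX rescale path in convert_brightness(), assuming AMDGPU_MAX_BL_LEVEL is 255 and the ACPI defaults of min 12 and max 255: both endpoints land exactly on DC's 0..0xffff range.

#include <assert.h>
#include <stdint.h>

/* Same arithmetic as the !aux_support branch above. */
static uint32_t rescale(uint32_t user, uint32_t min, uint32_t max)
{
	return user * 0x101 * (max - min) / 255 + min * 0x101;
}

int main(void)
{
	assert(rescale(0, 12, 255) == 12 * 0x101); /* floor: 3084 */
	assert(rescale(255, 12, 255) == 0xffff);   /* full scale */
	return 0;
}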
static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
{
struct amdgpu_display_manager *dm = bl_get_data(bd);
struct amdgpu_dm_backlight_caps caps;
- uint32_t brightness = bd->props.brightness;
+ struct dc_link *link = NULL;
+ u32 brightness;
+ bool rc;
amdgpu_dm_update_backlight_caps(dm);
caps = dm->backlight_caps;
- /*
- * The brightness input is in the range 0-255
- * It needs to be rescaled to be between the
- * requested min and max input signal
- *
- * It also needs to be scaled up by 0x101 to
- * match the DC interface which has a range of
- * 0 to 0xffff
- */
- brightness =
- brightness
- * 0x101
- * (caps.max_input_signal - caps.min_input_signal)
- / AMDGPU_MAX_BL_LEVEL
- + caps.min_input_signal * 0x101;
-
- if (dc_link_set_backlight_level(dm->backlight_link,
- brightness, 0))
- return 0;
- else
- return 1;
+
+ link = (struct dc_link *)dm->backlight_link;
+
+ brightness = convert_brightness(&caps, bd->props.brightness);
+ // Change brightness based on AUX property
+ if (caps.aux_support)
+ return set_backlight_via_aux(link, brightness);
+
+ rc = dc_link_set_backlight_level(dm->backlight_link, brightness, 0);
+
+ return rc ? 0 : 1;
}
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
@@ -2826,6 +3022,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+ /* No userspace support. */
+ dm->dc->debug.disable_tri_buf = true;
+
return 0;
fail:
kfree(aencoder);
@@ -4117,9 +4316,22 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector,
struct dmcu *dmcu = core_dc->res_pool->dmcu;
stream->psr_version = dmcu->dmcu_version.psr_version;
- mod_build_vsc_infopacket(stream,
- &stream->vsc_infopacket,
- &stream->use_vsc_sdp_for_colorimetry);
+
+		//
+		// Decide whether the stream supports VSC SDP colorimetry
+		// before building the VSC info packet.
+		//
+ stream->use_vsc_sdp_for_colorimetry = false;
+ if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
+ stream->use_vsc_sdp_for_colorimetry =
+ aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
+ } else {
+ if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+ stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) {
+ stream->use_vsc_sdp_for_colorimetry = true;
+ }
+ }
+ mod_build_vsc_infopacket(stream, &stream->vsc_infopacket);
}
}
finish:
@@ -4269,8 +4481,10 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
.verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.get_crc_sources = amdgpu_dm_crtc_get_crc_sources,
+ .get_vblank_counter = amdgpu_get_vblank_counter_kms,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static enum drm_connector_status
@@ -4491,6 +4705,19 @@ amdgpu_dm_connector_atomic_duplicate_state(struct drm_connector *connector)
return &new_state->base;
}
+static int
+amdgpu_dm_connector_late_register(struct drm_connector *connector)
+{
+ struct amdgpu_dm_connector *amdgpu_dm_connector =
+ to_amdgpu_dm_connector(connector);
+
+#if defined(CONFIG_DEBUG_FS)
+ connector_debugfs_init(amdgpu_dm_connector);
+#endif
+
+ return 0;
+}
+
static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.reset = amdgpu_dm_connector_funcs_reset,
.detect = amdgpu_dm_connector_detect,
@@ -4500,6 +4727,7 @@ static const struct drm_connector_funcs amdgpu_dm_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_set_property = amdgpu_dm_connector_atomic_set_property,
.atomic_get_property = amdgpu_dm_connector_atomic_get_property,
+ .late_register = amdgpu_dm_connector_late_register,
.early_unregister = amdgpu_dm_connector_unregister
};
@@ -4876,7 +5104,8 @@ static bool dm_crtc_helper_mode_fixup(struct drm_crtc *crtc,
static const struct drm_crtc_helper_funcs amdgpu_dm_crtc_helper_funcs = {
.disable = dm_crtc_helper_disable,
.atomic_check = dm_crtc_helper_atomic_check,
- .mode_fixup = dm_crtc_helper_mode_fixup
+ .mode_fixup = dm_crtc_helper_mode_fixup,
+ .get_scanout_position = amdgpu_crtc_get_scanout_position,
};
static void dm_encoder_helper_disable(struct drm_encoder *encoder)
@@ -5702,7 +5931,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm,
drm_connector_attach_vrr_capable_property(
&aconnector->base);
#ifdef CONFIG_DRM_AMD_DC_HDCP
- if (adev->asic_type >= CHIP_RAVEN)
+ if (adev->dm.hdcp_workqueue)
drm_connector_attach_content_protection_property(&aconnector->base, true);
#endif
}
@@ -5839,13 +6068,6 @@ static int amdgpu_dm_connector_init(struct amdgpu_display_manager *dm,
drm_connector_attach_encoder(
&aconnector->base, &aencoder->base);
- drm_connector_register(&aconnector->base);
-#if defined(CONFIG_DEBUG_FS)
- connector_debugfs_init(aconnector);
- aconnector->debugfs_dpcd_address = 0;
- aconnector->debugfs_dpcd_size = 0;
-#endif
-
if (connector_type == DRM_MODE_CONNECTOR_DisplayPort
|| connector_type == DRM_MODE_CONNECTOR_eDP)
amdgpu_dm_initialize_dp_connector(dm, aconnector);
@@ -6331,7 +6553,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
uint32_t target_vblank, last_flip_vblank;
bool vrr_active = amdgpu_dm_vrr_active(acrtc_state);
bool pflip_present = false;
- bool swizzle = true;
struct {
struct dc_surface_update surface_updates[MAX_SURFACES];
struct dc_plane_info plane_infos[MAX_SURFACES];
@@ -6377,9 +6598,6 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
dc_plane = dm_new_plane_state->dc_state;
- if (dc_plane && !dc_plane->tiling_info.gfx9.swizzle)
- swizzle = false;
-
bundle->surface_updates[planes_count].surface = dc_plane;
if (new_pcrtc_state->color_mgmt_changed) {
bundle->surface_updates[planes_count].gamma = dc_plane->gamma_correction;
@@ -6480,7 +6698,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
* clients using the GLX_OML_sync_control extension or
* DRI3/Present extension with defined target_msc.
*/
- last_flip_vblank = amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id);
+ last_flip_vblank = amdgpu_get_vblank_counter_kms(pcrtc);
}
else {
/* For variable refresh rate mode only:
@@ -6509,7 +6727,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
& (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(int)(target_vblank -
- amdgpu_get_vblank_counter_kms(dm->ddev, acrtc_attach->crtc_id)) > 0)) {
+ amdgpu_get_vblank_counter_kms(pcrtc)) > 0)) {
usleep_range(1000, 1100);
}
@@ -6588,8 +6806,7 @@ static void amdgpu_dm_commit_planes(struct drm_atomic_state *state,
amdgpu_dm_link_setup_psr(acrtc_state->stream);
else if ((acrtc_state->update_type == UPDATE_TYPE_FAST) &&
acrtc_state->stream->link->psr_feature_enabled &&
- !acrtc_state->stream->link->psr_allow_active &&
- swizzle) {
+ !acrtc_state->stream->link->psr_allow_active) {
amdgpu_dm_psr_enable(acrtc_state->stream);
}
@@ -8408,7 +8625,6 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
/* Calculate number of static frames before generating interrupt to
* enter PSR.
*/
- unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
// Init fail safe of 2 frames static
unsigned int num_frames_static = 2;
@@ -8423,8 +8639,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
* Calculate number of frames such that at least 30 ms of time has
* passed.
*/
- if (vsync_rate_hz != 0)
+ if (vsync_rate_hz != 0) {
+ unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;
num_frames_static = (30000 / frame_time_microsec) + 1;
+ }
params.triggers.cursor_update = true;
params.triggers.overlay_update = true;
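For reference, the static-frame arithmetic from the hunk above as a standalone sketch (plain C, illustrative refresh rates; the rounding behavior is the point — the +1 keeps the covered static period at or above 30 ms):

#include <stdio.h>

/* Sketch of the "at least 30 ms of static frames" rule used by
 * amdgpu_dm_psr_enable() above; the refresh rates are made up. */
int main(void)
{
	unsigned int rates[] = { 48, 60, 75, 144 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		unsigned int vsync_rate_hz = rates[i];
		unsigned int num_frames_static = 2; /* fail-safe default */

		if (vsync_rate_hz != 0) {
			unsigned int frame_time_microsec = 1000000 / vsync_rate_hz;

			/* +1 compensates for integer truncation, so
			 * num_frames_static * frame_time exceeds 30000 us */
			num_frames_static = (30000 / frame_time_microsec) + 1;
		}
		printf("%3u Hz -> %u static frames\n", vsync_rate_hz,
		       num_frames_static);
	}
	return 0;
}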
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index 7ea9acb0358d..5cab3e65d992 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -90,15 +90,41 @@ struct dm_comressor_info {
};
/**
- * struct amdgpu_dm_backlight_caps - Usable range of backlight values from ACPI
- * @min_input_signal: minimum possible input in range 0-255
- * @max_input_signal: maximum possible input in range 0-255
- * @caps_valid: true if these values are from the ACPI interface
+ * struct amdgpu_dm_backlight_caps - Information about backlight
+ *
+ * Describes the backlight support for ACPI or eDP AUX.
*/
struct amdgpu_dm_backlight_caps {
+ /**
+ * @ext_caps: Keep the data struct with all the information about the
+ * display support for HDR.
+ */
+ union dpcd_sink_ext_caps *ext_caps;
+ /**
+ * @aux_min_input_signal: Min brightness value supported by the display
+ */
+ u32 aux_min_input_signal;
+ /**
+ * @aux_max_input_signal: Max brightness value supported by the display
+ * in nits.
+ */
+ u32 aux_max_input_signal;
+ /**
+ * @min_input_signal: minimum possible input in range 0-255.
+ */
int min_input_signal;
+ /**
+ * @max_input_signal: maximum possible input in range 0-255.
+ */
int max_input_signal;
+ /**
+ * @caps_valid: true if these values are from the ACPI interface.
+ */
bool caps_valid;
+ /**
+ * @aux_support: Describes if the display supports AUX backlight.
+ */
+ bool aux_support;
};
/**
@@ -457,6 +483,9 @@ int amdgpu_dm_update_crtc_color_mgmt(struct dm_crtc_state *crtc);
int amdgpu_dm_update_plane_color_mgmt(struct dm_crtc_state *crtc,
struct dc_plane_state *dc_plane_state);
+void amdgpu_dm_update_connector_after_detect(
+ struct amdgpu_dm_connector *aconnector);
+
extern const struct drm_encoder_helper_funcs amdgpu_dm_encoder_helper_funcs;
#endif /* __AMDGPU_DM_H__ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
index f81d3439ee8c..0461fecd68db 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c
@@ -32,6 +32,19 @@
#include "amdgpu_dm.h"
#include "amdgpu_dm_debugfs.h"
#include "dm_helpers.h"
+#include "dmub/inc/dmub_srv.h"
+
+struct dmub_debugfs_trace_header {
+ uint32_t entry_count;
+ uint32_t reserved[3];
+};
+
+struct dmub_debugfs_trace_entry {
+ uint32_t trace_code;
+ uint32_t tick_count;
+ uint32_t param0;
+ uint32_t param1;
+};
/* function description
* get/ set DP configuration: lane_count, link_rate, spread_spectrum
@@ -675,6 +688,73 @@ static ssize_t dp_phy_test_pattern_debugfs_write(struct file *f, const char __us
return bytes_from_user;
}
+/**
+ * Returns the DMCUB tracebuffer contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_tracebuffer
+ */
+static int dmub_tracebuffer_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ struct dmub_debugfs_trace_entry *entries;
+ uint8_t *tbuf_base;
+ uint32_t tbuf_size, max_entries, num_entries, i;
+
+ if (!fb_info)
+ return 0;
+
+ tbuf_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].cpu_addr;
+ if (!tbuf_base)
+ return 0;
+
+ tbuf_size = fb_info->fb[DMUB_WINDOW_5_TRACEBUFF].size;
+ max_entries = (tbuf_size - sizeof(struct dmub_debugfs_trace_header)) /
+ sizeof(struct dmub_debugfs_trace_entry);
+
+ num_entries =
+ ((struct dmub_debugfs_trace_header *)tbuf_base)->entry_count;
+
+ num_entries = min(num_entries, max_entries);
+
+ entries = (struct dmub_debugfs_trace_entry *)(tbuf_base +
+ sizeof(struct dmub_debugfs_trace_header));
+
+ for (i = 0; i < num_entries; ++i) {
+ struct dmub_debugfs_trace_entry *entry = &entries[i];
+
+ seq_printf(m,
+ "trace_code=%u tick_count=%u param0=%u param1=%u\n",
+ entry->trace_code, entry->tick_count, entry->param0,
+ entry->param1);
+ }
+
+ return 0;
+}
+
+/**
+ * Returns the DMCUB firmware state contents.
+ * Example usage: cat /sys/kernel/debug/dri/0/amdgpu_dm_dmub_fw_state
+ */
+static int dmub_fw_state_show(struct seq_file *m, void *data)
+{
+ struct amdgpu_device *adev = m->private;
+ struct dmub_srv_fb_info *fb_info = adev->dm.dmub_fb_info;
+ uint8_t *state_base;
+ uint32_t state_size;
+
+ if (!fb_info)
+ return 0;
+
+ state_base = (uint8_t *)fb_info->fb[DMUB_WINDOW_6_FW_STATE].cpu_addr;
+ if (!state_base)
+ return 0;
+
+ state_size = fb_info->fb[DMUB_WINDOW_6_FW_STATE].size;
+
+ return seq_write(m, state_base, state_size);
+}
+
/*
* Returns the current and maximum output bpc for the connector.
* Example usage: cat /sys/kernel/debug/dri/0/DP-1/output_bpc
@@ -880,6 +960,8 @@ static ssize_t dp_dpcd_data_read(struct file *f, char __user *buf,
return read_size - r;
}
+DEFINE_SHOW_ATTRIBUTE(dmub_fw_state);
+DEFINE_SHOW_ATTRIBUTE(dmub_tracebuffer);
DEFINE_SHOW_ATTRIBUTE(output_bpc);
DEFINE_SHOW_ATTRIBUTE(vrr_range);
@@ -1008,6 +1090,9 @@ void connector_debugfs_init(struct amdgpu_dm_connector *connector)
debugfs_create_file_unsafe("force_yuv420_output", 0644, dir, connector,
&force_yuv420_output_fops);
+ connector->debugfs_dpcd_address = 0;
+ connector->debugfs_dpcd_size = 0;
+
}
/*
@@ -1188,5 +1273,11 @@ int dtn_debugfs_init(struct amdgpu_device *adev)
debugfs_create_file_unsafe("amdgpu_dm_visual_confirm", 0644, root, adev,
&visual_confirm_fops);
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_tracebuffer", 0644, root,
+ adev, &dmub_tracebuffer_fops);
+
+ debugfs_create_file_unsafe("amdgpu_dm_dmub_fw_state", 0644, root,
+ adev, &dmub_fw_state_fops);
+
return 0;
}
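A minimal standalone sketch of the buffer layout dmub_tracebuffer_show() walks above — a small header followed by fixed-size entries, with the advertised entry count clamped to what the buffer can actually hold. The buffer contents and sizes here are fabricated for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct trace_header { uint32_t entry_count; uint32_t reserved[3]; };
struct trace_entry  { uint32_t trace_code, tick_count, param0, param1; };

int main(void)
{
	uint8_t buf[64] = {0};	/* stand-in for the DMUB tracebuffer window */
	struct trace_header hdr = { .entry_count = 2 };
	struct trace_entry ent[2] = { {1, 100, 0, 0}, {2, 250, 7, 9} };
	uint32_t max, n, i;
	struct trace_entry *e;

	memcpy(buf, &hdr, sizeof(hdr));
	memcpy(buf + sizeof(hdr), ent, sizeof(ent));

	/* Clamp to buffer capacity, exactly as the driver does */
	max = (sizeof(buf) - sizeof(hdr)) / sizeof(struct trace_entry);
	n = ((struct trace_header *)buf)->entry_count;
	if (n > max)
		n = max;

	e = (struct trace_entry *)(buf + sizeof(hdr));
	for (i = 0; i < n; i++)
		printf("trace_code=%u tick_count=%u param0=%u param1=%u\n",
		       e[i].trace_code, e[i].tick_count, e[i].param0,
		       e[i].param1);
	return 0;
}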
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
index 0acd3409dd6c..5b70ed3cdb88 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c
@@ -28,6 +28,13 @@
#include "amdgpu_dm.h"
#include "dm_helpers.h"
#include <drm/drm_hdcp.h>
+#include "hdcp_psp.h"
+
+/*
+ * If the SRM version being loaded is less than or equal to the
+ * currently loaded SRM, PSP will return 0xFFFF as the version
+ */
+#define PSP_SRM_VERSION_MAX 0xFFFF
static bool
lp_write_i2c(void *handle, uint32_t address, const uint8_t *data, uint32_t size)
@@ -67,6 +74,59 @@ lp_read_dpcd(void *handle, uint32_t address, uint8_t *data, uint32_t size)
return dm_helpers_dp_read_dpcd(link->ctx, link, address, data, size);
}
+static uint8_t *psp_get_srm(struct psp_context *psp, uint32_t *srm_version, uint32_t *srm_size)
+{
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return NULL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_GET_SRM;
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
+ return NULL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_get_srm.srm_version;
+ *srm_size = hdcp_cmd->out_msg.hdcp_get_srm.srm_buf_size;
+
+ return hdcp_cmd->out_msg.hdcp_get_srm.srm_buf;
+}
+
+static int psp_set_srm(struct psp_context *psp, uint8_t *srm, uint32_t srm_size, uint32_t *srm_version)
+{
+ struct ta_hdcp_shared_memory *hdcp_cmd;
+
+ if (!psp->hdcp_context.hdcp_initialized) {
+ DRM_WARN("Failed to get hdcp srm. HDCP TA is not initialized.");
+ return -EINVAL;
+ }
+
+ hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
+ memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
+
+ memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
+ hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
+ hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
+
+ psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
+
+ if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS || hdcp_cmd->out_msg.hdcp_set_srm.valid_signature != 1 ||
+ hdcp_cmd->out_msg.hdcp_set_srm.srm_version == PSP_SRM_VERSION_MAX)
+ return -EINVAL;
+
+ *srm_version = hdcp_cmd->out_msg.hdcp_set_srm.srm_version;
+ return 0;
+}
+
static void process_output(struct hdcp_workqueue *hdcp_work)
{
struct mod_hdcp_output output = hdcp_work->output;
@@ -88,6 +148,18 @@ static void process_output(struct hdcp_workqueue *hdcp_work)
schedule_delayed_work(&hdcp_work->property_validate_dwork, msecs_to_jiffies(0));
}
+static void link_lock(struct hdcp_workqueue *work, bool lock)
+{
+ int i = 0;
+
+ for (i = 0; i < work->max_link; i++) {
+ if (lock)
+ mutex_lock(&work[i].mutex);
+ else
+ mutex_unlock(&work[i].mutex);
+ }
+}
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
unsigned int link_index,
struct amdgpu_dm_connector *aconnector,
@@ -112,6 +184,13 @@ void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
if (enable_encryption) {
+ /* Explicitly set the saved SRM, as the sysfs call will arrive after we have
+ * already enabled HDCP (S3 resume case)
+ */
+ if (hdcp_work->srm_size > 0)
+ psp_set_srm(hdcp_work->hdcp.config.psp.handle, hdcp_work->srm, hdcp_work->srm_size,
+ &hdcp_work->srm_version);
+
display->adjust.disable = 0;
if (content_type == DRM_MODE_HDCP_CONTENT_TYPE0)
hdcp_w->link.adjust.hdcp2.force_type = MOD_HDCP_FORCE_TYPE_0;
@@ -301,8 +380,9 @@ void hdcp_destroy(struct hdcp_workqueue *hdcp_work)
cancel_delayed_work_sync(&hdcp_work[i].watchdog_timer_dwork);
}
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
-
}
static void update_config(void *handle, struct cp_psp_stream_config *config)
@@ -332,26 +412,170 @@ static void update_config(void *handle, struct cp_psp_stream_config *config)
link->dig_be = config->link_enc_inst;
link->ddc_line = aconnector->dc_link->ddc_hw_inst + 1;
link->dp.rev = aconnector->dc_link->dpcd_caps.dpcd_rev.raw;
+ link->dp.mst_supported = config->mst_supported;
display->adjust.disable = 1;
link->adjust.auth_delay = 2;
hdcp_update_display(hdcp_work, link_index, aconnector, DRM_MODE_HDCP_CONTENT_TYPE0, false);
}
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc)
+
+/* NOTE: From the usermode's perspective you only need to call write *ONCE*; the kernel
+ * will automatically call it once or twice depending on the size
+ *
+ * call: "cat file > /sys/class/drm/card0/device/hdcp_srm" from usermode no matter what the size is
+ *
+ * The kernel can only send PAGE_SIZE at once and since MAX_SRM_FILE(5120) > PAGE_SIZE(4096),
+ * srm_data_write can be called multiple times.
+ *
+ * The sysfs interface doesn't tell us the total size we will get, so we send partial SRMs to PSP and on
+ * the last call we send the full SRM. PSP will fail on every call before the last.
+ *
+ * This means we don't know if the SRM is good until the last call, and because of this limitation we
+ * cannot throw errors early, as that would stop the kernel from writing to sysfs
+ *
+ * Example 1:
+ * Good SRM size = 5096
+ * first call to write 4096 -> PSP fails
+ * Second call to write 1000 -> PSP Pass -> SRM is set
+ *
+ * Example 2:
+ * Bad SRM size = 4096
+ * first call to write 4096 -> PSP fails (This is the same as above, but we don't know if this
+ * is the last call)
+ *
+ * Solution?:
+ * 1: Parse the SRM? -> It is signed so we don't know the EOF
+ * 2: We can have another sysfs that passes the size before calling set. -> simpler solution
+ * below
+ *
+ * Easy Solution:
+ * Always call get after Set to verify if set was successful.
+ * +----------------------+
+ * | Why it works: |
+ * +----------------------+
+ * PSP will only update its SRM if it is older than the one we are trying to load.
+ * Always do set first, then get.
+ * -if we try to "1. SET" an older version PSP will reject it and we can "2. GET" the newer
+ * version and save it
+ *
+ * -if we try to "1. SET" a newer version PSP will accept it and we can "2. GET" the
+ * same(newer) version back and save it
+ *
+ * -if we try to "1. SET" a newer version and PSP rejects it. That means the format is
+ * incorrect/corrupted and we should correct our SRM by getting it from PSP
+ */
+static ssize_t srm_data_write(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint32_t srm_version = 0;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+ link_lock(work, true);
+
+ memcpy(work->srm_temp + pos, buffer, count);
+
+ if (!psp_set_srm(work->hdcp.config.psp.handle, work->srm_temp, pos + count, &srm_version)) {
+ DRM_DEBUG_DRIVER("HDCP SRM SET version 0x%X", srm_version);
+ memcpy(work->srm, work->srm_temp, pos + count);
+ work->srm_size = pos + count;
+ work->srm_version = srm_version;
+ }
+
+ link_lock(work, false);
+
+ return count;
+}
+
+static ssize_t srm_data_read(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buffer,
+ loff_t pos, size_t count)
+{
+ struct hdcp_workqueue *work;
+ uint8_t *srm = NULL;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ size_t ret = count;
+
+ work = container_of(bin_attr, struct hdcp_workqueue, attr);
+
+ link_lock(work, true);
+
+ srm = psp_get_srm(work->hdcp.config.psp.handle, &srm_version, &srm_size);
+
+ if (!srm) {
+ link_lock(work, false);
+ return -EINVAL;
+ }
+
+ if (pos >= srm_size) {
+ ret = 0;
+ goto ret;
+ }
+
+ if (srm_size - pos < count) {
+ memcpy(buffer, srm + pos, srm_size - pos);
+ ret = srm_size - pos;
+ goto ret;
+ }
+
+ memcpy(buffer, srm + pos, count);
+
+ret:
+ link_lock(work, false);
+ return ret;
+}
+
+/* From the HDCP spec (5. Renewability), the SRM needs to be stored in non-volatile memory.
+ *
+ * For example,
+ * if Application "A" sets the SRM (ver 2) and we reboot/suspend and later when Application "B"
+ * needs to use HDCP, the version in PSP should be SRM(ver 2). So SRM should be persistent
+ * across boot/reboots/suspend/resume/shutdown
+ *
+ * Currently when the system goes down (suspend/shutdown) the SRM is cleared from PSP. For HDCP we need
+ * to make the SRM persistent.
+ *
+ * -PSP owns the checking of SRM but doesn't have the ability to store it in a non-volatile memory.
+ * -The kernel cannot write to the file systems.
+ * -So we need usermode to do this for us, which is why an interface for usermode is needed
+ *
+ *
+ *
+ * Usermode can read/write to/from PSP using the sysfs interface
+ * For example:
+ * to save SRM from PSP to storage: cat /sys/class/drm/card0/device/hdcp_srm > srmfile
+ * to load from storage to PSP: cat srmfile > /sys/class/drm/card0/device/hdcp_srm
+ */
+static const struct bin_attribute data_attr = {
+ .attr = {.name = "hdcp_srm", .mode = 0664},
+ .size = PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, /* Limit SRM size */
+ .write = srm_data_write,
+ .read = srm_data_read,
+};
+
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc)
{
int max_caps = dc->caps.max_links;
- struct hdcp_workqueue *hdcp_work = kzalloc(max_caps*sizeof(*hdcp_work), GFP_KERNEL);
+ struct hdcp_workqueue *hdcp_work;
int i = 0;
+ hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL);
if (hdcp_work == NULL)
+ return NULL;
+
+ hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL);
+
+ if (hdcp_work->srm == NULL)
+ goto fail_alloc_context;
+
+ hdcp_work->srm_temp = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm_temp), GFP_KERNEL);
+
+ if (hdcp_work->srm_temp == NULL)
goto fail_alloc_context;
hdcp_work->max_link = max_caps;
for (i = 0; i < max_caps; i++) {
-
mutex_init(&hdcp_work[i].mutex);
INIT_WORK(&hdcp_work[i].cpirq_work, event_cpirq);
@@ -360,7 +584,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
INIT_DELAYED_WORK(&hdcp_work[i].watchdog_timer_dwork, event_watchdog_timer);
INIT_DELAYED_WORK(&hdcp_work[i].property_validate_dwork, event_property_validate);
- hdcp_work[i].hdcp.config.psp.handle = psp_context;
+ hdcp_work[i].hdcp.config.psp.handle = &adev->psp;
hdcp_work[i].hdcp.config.ddc.handle = dc_get_link_at_index(dc, i);
hdcp_work[i].hdcp.config.ddc.funcs.write_i2c = lp_write_i2c;
hdcp_work[i].hdcp.config.ddc.funcs.read_i2c = lp_read_i2c;
@@ -371,9 +595,17 @@ struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *c
cp_psp->funcs.update_stream_config = update_config;
cp_psp->handle = hdcp_work;
+ /* File created at /sys/class/drm/card0/device/hdcp_srm */
+ hdcp_work[0].attr = data_attr;
+
+ if (sysfs_create_bin_file(&adev->dev->kobj, &hdcp_work[0].attr))
+ DRM_WARN("Failed to create device file hdcp_srm");
+
return hdcp_work;
fail_alloc_context:
+ kfree(hdcp_work->srm);
+ kfree(hdcp_work->srm_temp);
kfree(hdcp_work);
return NULL;
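To make the set-then-verify rule from the comments above concrete, here is a hypothetical userspace sketch against the hdcp_srm node; the sysfs path and the 5120-byte cap mirror this patch, while the srmfile name is an assumption of the example:

#include <stdio.h>

int main(void)
{
	const char *node = "/sys/class/drm/card0/device/hdcp_srm";
	static unsigned char srm[5120], back[5120];
	FILE *f, *s;
	size_t n, m;

	/* Load the SRM blob from local storage (file name is illustrative) */
	f = fopen("srmfile", "rb");
	n = f ? fread(srm, 1, sizeof(srm), f) : 0;
	if (f)
		fclose(f);
	if (!n)
		return 1;

	/* 1. SET: hand the SRM to PSP through sysfs */
	s = fopen(node, "wb");
	if (!s || fwrite(srm, 1, n, s) != n)
		return 1;
	fclose(s);

	/* 2. GET: PSP keeps the newest valid SRM, so reading back shows
	 * whether our SET took effect or a newer one was retained */
	s = fopen(node, "rb");
	m = s ? fread(back, 1, sizeof(back), s) : 0;
	if (s)
		fclose(s);
	printf("wrote %zu bytes, read back %zu bytes\n", n, m);
	return 0;
}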
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
index 6abde86bce4a..5159b3a5e5b0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.h
@@ -30,6 +30,7 @@
#include "hdcp.h"
#include "dc.h"
#include "dm_cp_psp.h"
+#include "amdgpu.h"
struct mod_hdcp;
struct mod_hdcp_link;
@@ -52,6 +53,12 @@ struct hdcp_workqueue {
enum mod_hdcp_encryption_status encryption_status;
uint8_t max_link;
+
+ uint8_t *srm;
+ uint8_t *srm_temp;
+ uint32_t srm_version;
+ uint32_t srm_size;
+ struct bin_attribute attr;
};
void hdcp_update_display(struct hdcp_workqueue *hdcp_work,
@@ -64,6 +71,6 @@ void hdcp_reset_display(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_handle_cpirq(struct hdcp_workqueue *work, unsigned int link_index);
void hdcp_destroy(struct hdcp_workqueue *work);
-struct hdcp_workqueue *hdcp_create_workqueue(void *psp_context, struct cp_psp *cp_psp, struct dc *dc);
+struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct cp_psp *cp_psp, struct dc *dc);
#endif /* AMDGPU_DM_AMDGPU_DM_HDCP_H_ */
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
index 318b474ff20e..c20fb08c450b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c
@@ -400,8 +400,8 @@ bool dm_helpers_dp_mst_start_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return false;
+ DRM_ERROR("Failed to find connector for link!");
+ return false;
}
if (boot) {
@@ -423,8 +423,8 @@ void dm_helpers_dp_mst_stop_top_mgr(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
- return;
+ DRM_ERROR("Failed to find connector for link!");
+ return;
}
DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n",
@@ -445,7 +445,7 @@ bool dm_helpers_dp_read_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -463,7 +463,7 @@ bool dm_helpers_dp_write_dpcd(
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -483,7 +483,7 @@ bool dm_helpers_submit_i2c(
bool result;
if (!aconnector) {
- DRM_ERROR("Failed to found connector for link!");
+ DRM_ERROR("Failed to find connector for link!");
return false;
}
@@ -538,7 +538,7 @@ bool dm_helpers_is_dp_sink_present(struct dc_link *link)
struct amdgpu_dm_connector *aconnector = link->priv;
if (!aconnector) {
- BUG_ON("Failed to found connector for link!");
+ BUG_ON("Failed to find connector for link!");
return true;
}
@@ -580,6 +580,20 @@ enum dc_edid_status dm_helpers_read_local_edid(
/* We don't need the original edid anymore */
kfree(edid);
+ /* connector->display_info will be parsed from the EDID and saved
+ * into drm_connector->display_info by the call stack
+ * below:
+ * drm_parse_ycbcr420_deep_color_info
+ * drm_parse_hdmi_forum_vsdb
+ * drm_parse_cea_ext
+ * drm_add_display_info
+ * drm_connector_update_edid_property
+ *
+ * drm_connector->display_info will be used by amdgpu_dm funcs,
+ * like fill_stream_properties_from_drm_display_mode
+ */
+ amdgpu_dm_update_connector_after_detect(aconnector);
+
edid_status = dm_helpers_parse_edid_caps(
ctx,
&sink->dc_edid,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 5672f7765919..e8208df420d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -154,15 +154,18 @@ amdgpu_dm_mst_connector_late_register(struct drm_connector *connector)
{
struct amdgpu_dm_connector *amdgpu_dm_connector =
to_amdgpu_dm_connector(connector);
- struct drm_dp_mst_port *port = amdgpu_dm_connector->port;
+ int r;
+
+ amdgpu_dm_connector->dm_dp_aux.aux.dev = connector->kdev;
+ r = drm_dp_aux_register(&amdgpu_dm_connector->dm_dp_aux.aux);
+ if (r)
+ return r;
#if defined(CONFIG_DEBUG_FS)
connector_debugfs_init(amdgpu_dm_connector);
- amdgpu_dm_connector->debugfs_dpcd_address = 0;
- amdgpu_dm_connector->debugfs_dpcd_size = 0;
#endif
- return drm_dp_mst_connector_late_register(connector, port);
+ return r;
}
static void
@@ -204,7 +207,7 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto
if (!dc_dsc_parse_dsc_dpcd(aconnector->dc_link->ctx->dc,
dsc_caps, NULL,
- &dc_sink->sink_dsc_caps.dsc_dec_caps))
+ &dc_sink->dsc_caps.dsc_dec_caps))
return false;
return true;
@@ -259,8 +262,8 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector)
#if defined(CONFIG_DRM_AMD_DC_DCN)
if (!validate_dsc_caps_on_connector(aconnector))
- memset(&aconnector->dc_sink->sink_dsc_caps,
- 0, sizeof(aconnector->dc_sink->sink_dsc_caps));
+ memset(&aconnector->dc_sink->dsc_caps,
+ 0, sizeof(aconnector->dc_sink->dsc_caps));
#endif
}
}
@@ -437,9 +440,6 @@ dm_dp_add_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector)
{
- struct amdgpu_dm_connector *master = container_of(mgr, struct amdgpu_dm_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct amdgpu_device *adev = dev->dev_private;
struct amdgpu_dm_connector *aconnector = to_amdgpu_dm_connector(connector);
DRM_INFO("DM_MST: Disabling connector: %p [id: %d] [master: %p]\n",
@@ -451,42 +451,26 @@ static void dm_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
aconnector->dc_sink);
dc_sink_release(aconnector->dc_sink);
aconnector->dc_sink = NULL;
+ aconnector->dc_link->cur_link_settings.lane_count = 0;
}
drm_connector_unregister(connector);
- if (adev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&adev->mode_info.rfbdev->helper, connector);
drm_connector_put(connector);
}
-static void dm_dp_mst_register_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct amdgpu_device *adev = dev->dev_private;
-
- if (adev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&adev->mode_info.rfbdev->helper, connector);
- else
- DRM_ERROR("adev->mode_info.rfbdev is NULL\n");
-
- drm_connector_register(connector);
-}
-
static const struct drm_dp_mst_topology_cbs dm_mst_cbs = {
.add_connector = dm_dp_add_mst_connector,
.destroy_connector = dm_dp_destroy_mst_connector,
- .register_connector = dm_dp_mst_register_connector
};
void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
struct amdgpu_dm_connector *aconnector)
{
aconnector->dm_dp_aux.aux.name = "dmdc";
- aconnector->dm_dp_aux.aux.dev = aconnector->base.kdev;
aconnector->dm_dp_aux.aux.transfer = dm_dp_aux_transfer;
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
- drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ drm_dp_aux_init(&aconnector->dm_dp_aux.aux);
drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
&aconnector->base);
@@ -547,7 +531,7 @@ static void set_dsc_configs_from_fairness_vars(struct dsc_mst_fairness_params *p
memset(&params[i].timing->dsc_cfg, 0, sizeof(params[i].timing->dsc_cfg));
if (vars[i].dsc_enabled && dc_dsc_compute_config(
params[i].sink->ctx->dc->res_pool->dscs[0],
- &params[i].sink->sink_dsc_caps.dsc_dec_caps,
+ &params[i].sink->dsc_caps.dsc_dec_caps,
params[i].sink->ctx->dc->debug.dsc_min_slice_height_override,
0,
params[i].timing,
@@ -568,7 +552,7 @@ static int bpp_x16_from_pbn(struct dsc_mst_fairness_params param, int pbn)
kbps = div_u64((u64)pbn * 994 * 8 * 54, 64);
dc_dsc_compute_config(
param.sink->ctx->dc->res_pool->dscs[0],
- &param.sink->sink_dsc_caps.dsc_dec_caps,
+ &param.sink->dsc_caps.dsc_dec_caps,
param.sink->ctx->dc->debug.dsc_min_slice_height_override,
(int) kbps, param.timing, &dsc_config);
@@ -765,14 +749,14 @@ static bool compute_mst_dsc_configs_for_link(struct drm_atomic_state *state,
params[count].sink = stream->sink;
aconnector = (struct amdgpu_dm_connector *)stream->dm_stream_context;
params[count].port = aconnector->port;
- params[count].compression_possible = stream->sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported;
+ params[count].compression_possible = stream->sink->dsc_caps.dsc_dec_caps.is_dsc_supported;
dc_dsc_get_policy_for_timing(params[count].timing, &dsc_policy);
if (!dc_dsc_compute_bandwidth_range(
stream->sink->ctx->dc->res_pool->dscs[0],
stream->sink->ctx->dc->debug.dsc_min_slice_height_override,
dsc_policy.min_target_bpp,
dsc_policy.max_target_bpp,
- &stream->sink->sink_dsc_caps.dsc_dec_caps,
+ &stream->sink->dsc_caps.dsc_dec_caps,
&stream->timing, &params[count].bw_range))
params[count].bw_range.stream_kbps = dc_bandwidth_in_kbps_from_timing(&stream->timing);
@@ -854,7 +838,7 @@ bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state,
if (!aconnector || !aconnector->dc_sink)
continue;
- if (!aconnector->dc_sink->sink_dsc_caps.dsc_dec_caps.is_dsc_supported)
+ if (!aconnector->dc_sink->dsc_caps.dsc_dec_caps.is_dsc_supported)
continue;
if (computed_streams[i])
diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
index 2f1c9584ac32..37fa7b48250e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c
@@ -267,7 +267,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
case OBJECT_TYPE_CONNECTOR:
case OBJECT_TYPE_GENERIC:
/* Both Generic and Connector Object ID
@@ -280,7 +280,7 @@ static struct atom_display_object_path_v2 *get_bios_object(
&& id.enum_id == obj_id.enum_id)
return &bp->object_info_tbl.v1_4->display_path[i];
}
- /* fall through */
+ fallthrough;
default:
return NULL;
}
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
index 629a07a2719b..8edc2506d49e 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c
@@ -221,8 +221,8 @@ static void init_transmitter_control(struct bios_parser *bp)
uint8_t frev;
uint8_t crev;
- if (BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev) == false)
- BREAK_TO_DEBUGGER();
+ BIOS_CMD_TABLE_REVISION(dig1transmittercontrol, frev, crev);
+
switch (crev) {
case 6:
bp->cmd_tbl.transmitter_control = transmitter_control_v1_6;
@@ -711,10 +711,6 @@ static void enable_disp_power_gating_dmcub(
power_gating.header.sub_type = DMUB_CMD__VBIOS_ENABLE_DISP_POWER_GATING;
power_gating.power_gating.pwr = *pwr;
- /* ATOM_ENABLE is old API in DMUB */
- if (power_gating.power_gating.pwr.enable == ATOM_ENABLE)
- power_gating.power_gating.pwr.enable = ATOM_INIT;
-
dc_dmub_srv_cmd_queue(dmcub, &power_gating.header);
dc_dmub_srv_cmd_execute(dmcub);
dc_dmub_srv_wait_idle(dmcub);
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 7388c987c595..204d7942a6e5 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -53,25 +53,18 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
+ case DCE_VERSION_12_0:
+ case DCE_VERSION_12_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
#if defined(CONFIG_DRM_AMD_DC_DCN)
case DCN_VERSION_1_0:
case DCN_VERSION_1_01:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
-#endif
-
case DCN_VERSION_2_0:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
case DCN_VERSION_2_1:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
- case DCE_VERSION_12_0:
- case DCE_VERSION_12_1:
- *h = dal_cmd_tbl_helper_dce112_get_table2();
- return true;
+#endif
default:
/* Unsupported DCE */
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
index 5d081c42e81b..2c6db379afae 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c
@@ -3265,33 +3265,33 @@ bool bw_calcs(struct dc_context *ctx,
bw_fixed_to_int(bw_mul(data->
stutter_exit_watermark[9], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[0].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[4], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[1].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[5], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[2].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[6], bw_int_to_fixed(1000)));
- if (ctx->dc->caps.max_slave_planes) {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ calcs_output->stutter_entry_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[0], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[4], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[1].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[1], bw_int_to_fixed(1000)));
- } else {
- calcs_output->stutter_entry_wm_ns[3].b_mark =
+ stutter_entry_watermark[5], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[2].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[7], bw_int_to_fixed(1000)));
- calcs_output->stutter_entry_wm_ns[4].b_mark =
+ stutter_entry_watermark[6], bw_int_to_fixed(1000)));
+ if (ctx->dc->caps.max_slave_planes) {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[0], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[1], bw_int_to_fixed(1000)));
+ } else {
+ calcs_output->stutter_entry_wm_ns[3].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[7], bw_int_to_fixed(1000)));
+ calcs_output->stutter_entry_wm_ns[4].b_mark =
+ bw_fixed_to_int(bw_mul(data->
+ stutter_entry_watermark[8], bw_int_to_fixed(1000)));
+ }
+ calcs_output->stutter_entry_wm_ns[5].b_mark =
bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[8], bw_int_to_fixed(1000)));
- }
- calcs_output->stutter_entry_wm_ns[5].b_mark =
- bw_fixed_to_int(bw_mul(data->
- stutter_entry_watermark[9], bw_int_to_fixed(1000)));
+ stutter_entry_watermark[9], bw_int_to_fixed(1000)));
calcs_output->urgent_wm_ns[0].b_mark =
bw_fixed_to_int(bw_mul(data->
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 1a37550731de..3960a8db94cb 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -703,11 +703,24 @@ static void hack_bounding_box(struct dcn_bw_internal_vars *v,
}
-unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev)
+unsigned int get_highest_allowed_voltage_level(uint32_t hw_internal_rev, uint32_t pci_revision_id)
{
- /* for dali & pollock, the highest voltage level we want is 0 */
- if (ASICREV_IS_POLLOCK(hw_internal_rev) || ASICREV_IS_DALI(hw_internal_rev))
- return 0;
+ /* for low power RV2 variants, the highest voltage level we want is 0 */
+ if (ASICREV_IS_RAVEN2(hw_internal_rev))
+ switch (pci_revision_id) {
+ case PRID_DALI_DE:
+ case PRID_DALI_DF:
+ case PRID_DALI_E3:
+ case PRID_DALI_E4:
+ case PRID_POLLOCK_94:
+ case PRID_POLLOCK_95:
+ case PRID_POLLOCK_E9:
+ case PRID_POLLOCK_EA:
+ case PRID_POLLOCK_EB:
+ return 0;
+ default:
+ break;
+ }
/* we are ok with all levels */
return 4;
@@ -1277,7 +1290,9 @@ bool dcn_validate_bandwidth(
PERFORMANCE_TRACE_END();
BW_VAL_TRACE_FINISH();
- if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(dc->ctx->asic_id.hw_internal_rev))
+ if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level(
+ dc->ctx->asic_id.hw_internal_rev,
+ dc->ctx->asic_id.pci_revision_id))
return true;
else
return false;
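A condensed sketch of the gating added above; the numeric revision IDs are assumed from the PRID_* macro names (e.g. PRID_DALI_DE = 0xDE) and may not match the real headers:

#include <stdio.h>

/* Low-power Raven2 variants, identified by PCI revision ID, are capped
 * at voltage level 0; everything else may use up to level 4. */
static unsigned int highest_level(int is_raven2, unsigned int pci_rev)
{
	if (is_raven2) {
		switch (pci_rev) {
		case 0xDE: case 0xDF: case 0xE3: case 0xE4:	/* Dali */
		case 0x94: case 0x95: case 0xE9:
		case 0xEA: case 0xEB:				/* Pollock */
			return 0;
		default:
			break;
		}
	}
	return 4;
}

int main(void)
{
	printf("Pollock E9 -> %u, other RV2 -> %u\n",
	       highest_level(1, 0xE9), highest_level(1, 0x42));
	return 0;
}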
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
index 3cd283195091..c0f6a8c7de7d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/Makefile
@@ -87,6 +87,12 @@ AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN20)
###############################################################################
CLK_MGR_DCN21 = rn_clk_mgr.o rn_clk_mgr_vbios_smu.o
+# prevent build errors regarding soft-float vs hard-float FP ABI tags
+# this code is currently unused on ppc64, as it applies to Renoir APUs only
+ifdef CONFIG_PPC64
+CFLAGS_$(AMDDALPATH)/dc/clk_mgr/dcn21/rn_clk_mgr.o := $(call cc-option,-mno-gnu-attribute)
+endif
+
AMD_DAL_CLK_MGR_DCN21 = $(addprefix $(AMDDALPATH)/dc/clk_mgr/dcn21/,$(CLK_MGR_DCN21))
AMD_DISPLAY_FILES += $(AMD_DAL_CLK_MGR_DCN21)
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
index a78e5c74c79c..8ec2dfe45d40 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/clk_mgr.c
@@ -63,6 +63,25 @@ int clk_mgr_helper_get_active_display_cnt(
return display_count;
}
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i, total_plane_count;
+
+ total_plane_count = 0;
+ for (i = 0; i < context->stream_count; i++) {
+ const struct dc_stream_status stream_status = context->stream_status[i];
+
+ /*
+ * Sum up plane_count for all streams (active and virtual).
+ */
+ total_plane_count += stream_status.plane_count;
+ }
+
+ return total_plane_count;
+}
+
void clk_mgr_exit_optimized_pwr_state(const struct dc *dc, struct clk_mgr *clk_mgr)
{
struct dc_link *edp_link = get_edp_link(dc);
@@ -134,13 +153,6 @@ struct clk_mgr *dc_clk_mgr_create(struct dc_context *ctx, struct pp_smu_funcs *p
#if defined(CONFIG_DRM_AMD_DC_DCN)
case FAMILY_RV:
- if (ASICREV_IS_DALI(asic_id.hw_internal_rev) ||
- ASICREV_IS_POLLOCK(asic_id.hw_internal_rev)) {
- /* TEMP: this check has to come before ASICREV_IS_RENOIR */
- /* which also incorrectly returns true for Dali/Pollock*/
- rv2_clk_mgr_construct(ctx, clk_mgr, pp_smu);
- break;
- }
if (ASICREV_IS_RENOIR(asic_id.hw_internal_rev)) {
rn_clk_mgr_construct(ctx, clk_mgr, pp_smu, dccg);
break;
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
index 495f01e9f2ca..55d09adbf0d9 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c
@@ -115,12 +115,11 @@ void dcn20_update_clocks_update_dpp_dto(struct clk_mgr_internal *clk_mgr,
dpp_inst = i;
dppclk_khz = context->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
- prev_dppclk_khz = clk_mgr->base.ctx->dc->current_state->res_ctx.pipe_ctx[i].plane_res.bw.dppclk_khz;
+ prev_dppclk_khz = clk_mgr->dccg->pipe_dppclk_khz[i];
- if (safe_to_lower || prev_dppclk_khz < dppclk_khz) {
+ if (safe_to_lower || prev_dppclk_khz < dppclk_khz)
clk_mgr->dccg->funcs->update_dpp_dto(
clk_mgr->dccg, dpp_inst, dppclk_khz);
- }
}
}
@@ -158,6 +157,8 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
bool dpp_clock_lowered = false;
struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu;
bool force_reset = false;
+ bool p_state_change_support;
+ int total_plane_count;
if (dc->work_arounds.skip_clock_update)
return;
@@ -213,9 +214,11 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base,
pp_smu->set_hard_min_socclk_by_freq(&pp_smu->pp_smu, clk_mgr_base->clks.socclk_khz / 1000);
}
- if (should_update_pstate_support(safe_to_lower, new_clocks->p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
+ total_plane_count = clk_mgr_helper_get_active_plane_cnt(dc, context);
+ p_state_change_support = new_clocks->p_state_change_support || (total_plane_count == 0);
+ if (should_update_pstate_support(safe_to_lower, p_state_change_support, clk_mgr_base->clks.p_state_change_support)) {
clk_mgr_base->clks.prev_p_state_change_support = clk_mgr_base->clks.p_state_change_support;
- clk_mgr_base->clks.p_state_change_support = new_clocks->p_state_change_support;
+ clk_mgr_base->clks.p_state_change_support = p_state_change_support;
if (pp_smu && pp_smu->set_pstate_handshake_support)
pp_smu->set_pstate_handshake_support(&pp_smu->pp_smu, clk_mgr_base->clks.p_state_change_support);
}
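The plane-count interaction added above, condensed into a standalone sketch: when every stream is blanked the summed plane count is zero, and p-state switching is forced on regardless of what the new clock state reports. The stream data is invented:

#include <stdbool.h>
#include <stdio.h>

struct stream_status { int plane_count; };

int main(void)
{
	/* two streams, both currently without planes (blanked) */
	struct stream_status status[] = { {0}, {0} };
	int i, total_plane_count = 0;
	bool new_support = false; /* what new_clocks would report */
	bool p_state_change_support;

	for (i = 0; i < 2; i++)
		total_plane_count += status[i].plane_count;

	/* no active planes -> nothing can underflow, so allow p-state */
	p_state_change_support = new_support || (total_plane_count == 0);
	printf("planes=%d support=%d\n", total_plane_count,
	       p_state_change_support);
	return 0;
}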
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
index 7ae4c06232dd..ab267ddd4abe 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c
@@ -46,6 +46,7 @@
/* Constants */
#define LPDDR_MEM_RETRAIN_LATENCY 4.977 /* Number obtained from LPDDR4 Training Counter Requirement doc */
+#define SMU_VER_55_51_0 0x373300 /* SMU Version that is able to set DISPCLK below 100MHz */
/* Macros */
@@ -151,6 +152,12 @@ void rn_update_clocks(struct clk_mgr *clk_mgr_base,
rn_vbios_smu_set_min_deep_sleep_dcfclk(clk_mgr, clk_mgr_base->clks.dcfclk_deep_sleep_khz);
}
+ // Workaround: limit dppclk to 100 MHz to avoid underflow when switching from a lower-resolution eDP panel to an eDP-plus-4K-monitor configuration.
+ if (!IS_DIAG_DC(dc->ctx->dce_environment)) {
+ if (new_clocks->dppclk_khz < 100000)
+ new_clocks->dppclk_khz = 100000;
+ }
+
if (should_set_clock(safe_to_lower, new_clocks->dppclk_khz, clk_mgr->base.clks.dppclk_khz)) {
if (clk_mgr->base.clks.dppclk_khz > new_clocks->dppclk_khz)
dpp_clock_lowered = true;
@@ -399,7 +406,7 @@ void rn_init_clocks(struct clk_mgr *clk_mgr)
clk_mgr->clks.pwr_state = DCN_PWR_STATE_UNKNOWN;
}
-void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
+static void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_range_sets *ranges)
{
int i, num_valid_sets;
@@ -412,19 +419,19 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
ranges->reader_wm_sets[num_valid_sets].wm_inst = bw_params->wm_table.entries[i].wm_inst;
ranges->reader_wm_sets[num_valid_sets].wm_type = bw_params->wm_table.entries[i].wm_type;
- /* We will not select WM based on dcfclk, so leave it as unconstrained */
- ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
- ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
- /* fclk wil be used to select WM*/
+ /* We will not select WM based on fclk, so leave it as unconstrained */
+ ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MIN;
+ ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = PP_SMU_WM_SET_RANGE_CLK_UNCONSTRAINED_MAX;
+ /* dcfclk will be used to select WM */
if (ranges->reader_wm_sets[num_valid_sets].wm_type == WM_TYPE_PSTATE_CHG) {
if (i == 0)
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = 0;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = 0;
else {
/* add 1 to make it non-overlapping with next lvl */
- ranges->reader_wm_sets[num_valid_sets].min_fill_clk_mhz = bw_params->clk_table.entries[i - 1].fclk_mhz + 1;
+ ranges->reader_wm_sets[num_valid_sets].min_drain_clk_mhz = bw_params->clk_table.entries[i - 1].dcfclk_mhz + 1;
}
- ranges->reader_wm_sets[num_valid_sets].max_fill_clk_mhz = bw_params->clk_table.entries[i].fclk_mhz;
+ ranges->reader_wm_sets[num_valid_sets].max_drain_clk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz;
} else {
/* unconstrained for memory retraining */
@@ -459,16 +466,15 @@ void build_watermark_ranges(struct clk_bw_params *bw_params, struct pp_smu_wm_ra
static void rn_notify_wm_ranges(struct clk_mgr *clk_mgr_base)
{
struct dc_debug_options *debug = &clk_mgr_base->ctx->dc->debug;
- struct pp_smu_wm_range_sets ranges = {0};
struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
struct pp_smu_funcs *pp_smu = clk_mgr->pp_smu;
if (!debug->disable_pplib_wm_range) {
- build_watermark_ranges(clk_mgr_base->bw_params, &ranges);
+ build_watermark_ranges(clk_mgr_base->bw_params, &clk_mgr_base->ranges);
/* Notify PP Lib/SMU which Watermarks to use for which clock ranges */
if (pp_smu && pp_smu->rn_funcs.set_wm_ranges)
- pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &ranges);
+ pp_smu->rn_funcs.set_wm_ranges(&pp_smu->rn_funcs.pp_smu, &clk_mgr_base->ranges);
}
}
@@ -498,7 +504,7 @@ static struct clk_mgr_funcs dcn21_funcs = {
.notify_wm_ranges = rn_notify_wm_ranges
};
-struct clk_bw_params rn_bw_params = {
+static struct clk_bw_params rn_bw_params = {
.vram_type = Ddr4MemType,
.num_channels = 1,
.clk_table = {
@@ -538,7 +544,7 @@ struct clk_bw_params rn_bw_params = {
};
-struct wm_table ddr4_wm_table = {
+static struct wm_table ddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -575,7 +581,7 @@ struct wm_table ddr4_wm_table = {
}
};
-struct wm_table lpddr4_wm_table = {
+static struct wm_table lpddr4_wm_table = {
.entries = {
{
.wm_inst = WM_A,
@@ -715,6 +721,13 @@ void rn_clk_mgr_construct(
} else {
struct clk_log_info log_info = {0};
+ clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr);
+
+ /* SMU version 55.51.0 and newer no longer have the issue
+ * that required limiting the minimum dispclk */
+ if (clk_mgr->smu_ver >= SMU_VER_55_51_0)
+ debug->min_disp_clk_khz = 0;
+
/* TODO: Check we get what we expect during bringup */
clk_mgr->base.dentist_vco_freq_khz = get_vco_frequency_from_reg(clk_mgr);
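For reference, decoding the SMU_VER_55_51_0 constant used above, assuming the version word packs major.minor.patch one byte each (0x37 = 55, 0x33 = 51, 0x00 = 0) — the packing convention is inferred from the constant's name, not from the SMU interface headers:

#include <stdio.h>

int main(void)
{
	unsigned int smu_ver = 0x373300; /* SMU_VER_55_51_0 */
	unsigned int major = (smu_ver >> 16) & 0xff;
	unsigned int minor = (smu_ver >> 8) & 0xff;
	unsigned int patch = smu_ver & 0xff;

	printf("SMU %u.%u.%u -> min_disp_clk limit %s\n", major, minor,
	       patch, smu_ver >= 0x373300 ? "lifted" : "kept");
	return 0;
}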
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 04441dbcba76..2ffb22177df9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -701,7 +701,7 @@ static bool dc_construct(struct dc *dc,
dc_ctx->created_bios = true;
}
-
+ dc->vendor_signature = init_params->vendor_signature;
/* Create GPIO service */
dc_ctx->gpio_service = dal_gpio_service_create(
@@ -761,6 +761,28 @@ static bool disable_all_writeback_pipes_for_stream(
return true;
}
+void apply_ctx_interdependent_lock(struct dc *dc, struct dc_state *context, struct dc_stream_state *stream, bool lock)
+{
+ int i = 0;
+
+ /* Check whether the interdependent update hook exists; the per-pipe fallback covers the DCE110 case */
+ if (dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, lock);
+ else {
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
+
+ // Copied conditions that were previously in dce110_apply_ctx_for_surface
+ if (stream == pipe_ctx->stream) {
+ if (!pipe_ctx->top_pipe &&
+ (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, lock);
+ }
+ }
+ }
+}
+
static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
{
int i, j;
@@ -786,11 +808,20 @@ static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
if (should_disable && old_stream) {
dc_rem_all_planes_for_stream(dc, old_stream, dangling_context);
disable_all_writeback_pipes_for_stream(dc, old_stream, dangling_context);
- if (dc->hwss.apply_ctx_for_surface)
+
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, true);
dc->hwss.apply_ctx_for_surface(dc, old_stream, 0, dangling_context);
+ apply_ctx_interdependent_lock(dc, dc->current_state, old_stream, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, true);
+ dc->hwss.program_front_end_for_ctx(dc, dangling_context);
+ dc->hwss.interdependent_update_lock(dc, dc->current_state, false);
+ dc->hwss.post_unlock_program_front_end(dc, dangling_context);
+ }
}
- if (dc->hwss.program_front_end_for_ctx)
- dc->hwss.program_front_end_for_ctx(dc, dangling_context);
}
current_ctx = dc->current_state;
@@ -1210,16 +1241,19 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
/* re-program planes for existing stream, in case we need to
* free up plane resource for later use
*/
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
for (i = 0; i < context->stream_count; i++) {
if (context->streams[i]->mode_changed)
continue;
-
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context); /* use new pipe config in new context */
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
}
+ }
/* Program hardware */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
@@ -1238,19 +1272,27 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
}
/* Program all planes within new context*/
- if (dc->hwss.program_front_end_for_ctx)
+ if (dc->hwss.program_front_end_for_ctx) {
+ dc->hwss.interdependent_update_lock(dc, context, true);
dc->hwss.program_front_end_for_ctx(dc, context);
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
for (i = 0; i < context->stream_count; i++) {
const struct dc_link *link = context->streams[i]->link;
if (!context->streams[i]->mode_changed)
continue;
- if (dc->hwss.apply_ctx_for_surface)
+ if (dc->hwss.apply_ctx_for_surface) {
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], true);
dc->hwss.apply_ctx_for_surface(
dc, context->streams[i],
context->stream_status[i].plane_count,
context);
+ apply_ctx_interdependent_lock(dc, context, context->streams[i], false);
+ dc->hwss.post_unlock_program_front_end(dc, context);
+ }
/*
* enable stereo
@@ -1318,18 +1360,12 @@ bool dc_commit_state(struct dc *dc, struct dc_state *context)
return (result == DC_OK);
}
-bool dc_is_hw_initialized(struct dc *dc)
-{
- struct dc_bios *dcb = dc->ctx->dc_bios;
- return dcb->funcs->is_accelerated_mode(dcb);
-}
-
bool dc_post_update_surfaces_to_stream(struct dc *dc)
{
int i;
struct dc_state *context = dc->current_state;
- if (!dc->optimized_required || dc->optimize_seamless_boot_streams > 0)
+ if ((!dc->optimized_required) || dc->optimize_seamless_boot_streams > 0)
return true;
post_surface_trace(dc);
@@ -1341,9 +1377,11 @@ bool dc_post_update_surfaces_to_stream(struct dc *dc)
dc->hwss.disable_plane(dc, &context->res_ctx.pipe_ctx[i]);
}
+ dc->hwss.optimize_bandwidth(dc, context);
+
dc->optimized_required = false;
+ dc->wm_optimized_required = false;
- dc->hwss.optimize_bandwidth(dc, context);
return true;
}
@@ -1734,14 +1772,15 @@ static enum surface_update_type check_update_surfaces_for_stream(
if (stream_update->wb_update)
su_flags->bits.wb_update = 1;
+
+ if (stream_update->dsc_config)
+ su_flags->bits.dsc_changed = 1;
+
if (su_flags->raw != 0)
overall_type = UPDATE_TYPE_FULL;
if (stream_update->output_csc_transform || stream_update->output_color_space)
su_flags->bits.out_csc = 1;
-
- if (stream_update->dsc_config)
- overall_type = UPDATE_TYPE_FULL;
}
for (i = 0 ; i < surface_count; i++) {
@@ -1776,8 +1815,11 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
type = check_update_surfaces_for_stream(dc, updates, surface_count, stream_update, stream_status);
if (type == UPDATE_TYPE_FULL) {
- if (stream_update)
+ if (stream_update) {
+ uint32_t dsc_changed = stream_update->stream->update_flags.bits.dsc_changed;
stream_update->stream->update_flags.raw = 0xFFFFFFFF;
+ stream_update->stream->update_flags.bits.dsc_changed = dsc_changed;
+ }
for (i = 0; i < surface_count; i++)
updates[i].surface->update_flags.raw = 0xFFFFFFFF;
}
@@ -1790,7 +1832,8 @@ enum surface_update_type dc_check_update_surfaces_for_stream(
// Else we fallback to mem compare.
} else if (memcmp(&dc->current_state->bw_ctx.bw.dcn.clk, &dc->clk_mgr->clks, offsetof(struct dc_clocks, prev_p_state_change_support)) != 0) {
dc->optimized_required = true;
- }
+ } else if (dc->wm_optimized_required)
+ dc->optimized_required = true;
}
return type;
@@ -1829,6 +1872,8 @@ static void copy_surface_update_to_plane(
surface->time.index++;
if (surface->time.index >= DC_PLANE_UPDATE_TIMES_MAX)
surface->time.index = 0;
+
+ surface->triplebuffer_flips = srf_update->flip_addr->triplebuffer_flips;
}
if (srf_update->scaling_info) {
@@ -2093,18 +2138,14 @@ static void commit_planes_do_stream_update(struct dc *dc,
}
}
- if (stream_update->dsc_config && dc->hwss.pipe_control_lock_global) {
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, true);
- dp_update_dsc_config(pipe_ctx);
- dc->hwss.pipe_control_lock_global(dc, pipe_ctx, false);
- }
/* Full fe update*/
if (update_type == UPDATE_TYPE_FAST)
continue;
- if (stream_update->dpms_off) {
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
+ if (stream_update->dsc_config)
+ dp_update_dsc_config(pipe_ctx);
+ if (stream_update->dpms_off) {
if (*stream_update->dpms_off) {
core_link_disable_stream(pipe_ctx);
/* for dpms, keep acquired resources*/
@@ -2118,8 +2159,6 @@ static void commit_planes_do_stream_update(struct dc *dc,
core_link_enable_stream(dc->current_state, pipe_ctx);
}
-
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@ -2175,6 +2214,32 @@ static void commit_planes_for_stream(struct dc *dc,
context_clock_trace(dc, context);
}
+ for (j = 0; j < dc->res_pool->pipe_count; j++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[j];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream &&
+ pipe_ctx->stream == stream) {
+ top_pipe_to_program = pipe_ctx;
+ }
+ }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable)
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable(
+ top_pipe_to_program->stream_res.tg);
+
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, true);
+ else
+ /* Lock the top pipe while updating plane addrs, since freesync requires
+ * plane addr update event triggers to be synchronized.
+ * top_pipe_to_program is expected to never be NULL
+ */
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
+
// Stream updates
if (stream_update)
commit_planes_do_stream_update(dc, stream, stream_update, update_type, context);
@@ -2189,6 +2254,12 @@ static void commit_planes_for_stream(struct dc *dc,
if (dc->hwss.program_front_end_for_ctx)
dc->hwss.program_front_end_for_ctx(dc, context);
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
+ dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
+
+ dc->hwss.post_unlock_program_front_end(dc, context);
return;
}
@@ -2224,8 +2295,6 @@ static void commit_planes_for_stream(struct dc *dc,
pipe_ctx->stream == stream) {
struct dc_stream_status *stream_status = NULL;
- top_pipe_to_program = pipe_ctx;
-
if (!pipe_ctx->plane_state)
continue;
@@ -2270,12 +2339,6 @@ static void commit_planes_for_stream(struct dc *dc,
// Update Type FAST, Surface updates
if (update_type == UPDATE_TYPE_FAST) {
- /* Lock the top pipe while updating plane addrs, since freesync requires
- * plane addr update event triggers to be synchronized.
- * top_pipe_to_program is expected to never be NULL
- */
- dc->hwss.pipe_control_lock(dc, top_pipe_to_program, true);
-
if (dc->hwss.set_flip_control_gsl)
for (i = 0; i < surface_count; i++) {
struct dc_plane_state *plane_state = srf_updates[i].surface;
@@ -2317,9 +2380,30 @@ static void commit_planes_for_stream(struct dc *dc,
dc->hwss.update_plane_addr(dc, pipe_ctx);
}
}
+ }
+ if ((update_type != UPDATE_TYPE_FAST) && dc->hwss.interdependent_update_lock)
+ dc->hwss.interdependent_update_lock(dc, context, false);
+ else
dc->hwss.pipe_control_lock(dc, top_pipe_to_program, false);
- }
+
+ if ((update_type != UPDATE_TYPE_FAST) && stream->update_flags.bits.dsc_changed)
+ if (top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_enable) {
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ top_pipe_to_program->stream_res.tg->funcs->wait_for_state(
+ top_pipe_to_program->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ top_pipe_to_program->stream_res.tg->funcs->lock_doublebuffer_disable(
+ top_pipe_to_program->stream_res.tg);
+ }
+
+ if (update_type != UPDATE_TYPE_FAST)
+ dc->hwss.post_unlock_program_front_end(dc, context);
// Fire manual trigger only when bottom plane is flipped
for (j = 0; j < dc->res_pool->pipe_count; j++) {
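The locking order this dc.c rework converges on — take the interdependent (or top-pipe) lock, program the front end, release, then run the post-unlock work — as a hedged standalone sketch; the stand-in hooks below only mimic the dc->hwss function-pointer shape:

#include <stdio.h>

struct hwss {
	void (*update_lock)(int on);
	void (*program_front_end)(void);
	void (*post_unlock)(void);
};

static void update_lock(int on)     { printf("lock=%d\n", on); }
static void program_front_end(void) { printf("program front end\n"); }
static void post_unlock(void)       { printf("post-unlock work\n"); }

int main(void)
{
	struct hwss hwss = { update_lock, program_front_end, post_unlock };

	hwss.update_lock(1);		/* lock all interdependent pipes */
	hwss.program_front_end();	/* program planes in the new context */
	hwss.update_lock(0);		/* release before deferred work */
	hwss.post_unlock();		/* work that must run unlocked */
	return 0;
}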
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a09119c10d7c..67cfff1586e9 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -45,7 +45,7 @@
#include "dpcd_defs.h"
#include "dmcu.h"
#include "hw/clk_mgr.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define DC_LOGGER_INIT(logger)
@@ -585,20 +585,23 @@ static void read_current_link_settings_on_detect(struct dc_link *link)
LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED;
}
-static bool detect_dp(
- struct dc_link *link,
- struct display_sink_capability *sink_caps,
- bool *converter_disable_audio,
- struct audio_support *audio_support,
- enum dc_detect_reason reason)
+static bool detect_dp(struct dc_link *link,
+ struct display_sink_capability *sink_caps,
+ bool *converter_disable_audio,
+ struct audio_support *audio_support,
+ enum dc_detect_reason reason)
{
bool boot = false;
+
sink_caps->signal = link_detect_sink(link, reason);
sink_caps->transaction_type =
get_ddc_transaction_type(sink_caps->signal);
if (sink_caps->transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
+
+ dpcd_set_source_specific_data(link);
+
if (!detect_dp_sink_caps(link))
return false;
@@ -606,9 +609,8 @@ static bool detect_dp(
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST;
link->type = dc_connection_mst_branch;
- dal_ddc_service_set_transaction_type(
- link->ddc,
- sink_caps->transaction_type);
+ dal_ddc_service_set_transaction_type(link->ddc,
+ sink_caps->transaction_type);
/*
* This call will initiate MST topology discovery. Which
@@ -637,13 +639,10 @@ static bool detect_dp(
if (reason == DETECT_REASON_BOOT)
boot = true;
- dm_helpers_dp_update_branch_info(
- link->ctx,
- link);
+ dm_helpers_dp_update_branch_info(link->ctx, link);
- if (!dm_helpers_dp_mst_start_top_mgr(
- link->ctx,
- link, boot)) {
+ if (!dm_helpers_dp_mst_start_top_mgr(link->ctx,
+ link, boot)) {
/* MST not supported */
link->type = dc_connection_single;
sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT;
@@ -651,7 +650,7 @@ static bool detect_dp(
}
if (link->type != dc_connection_mst_branch &&
- is_dp_active_dongle(link)) {
+ is_dp_active_dongle(link)) {
/* DP active dongles */
link->type = dc_connection_active_dongle;
if (!link->dpcd_caps.sink_count.bits.SINK_COUNT) {
@@ -662,14 +661,15 @@ static bool detect_dp(
return true;
}
- if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER)
+ if (link->dpcd_caps.dongle_type !=
+ DISPLAY_DONGLE_DP_HDMI_CONVERTER)
*converter_disable_audio = true;
}
} else {
/* DP passive dongles */
sink_caps->signal = dp_passive_dongle_detection(link->ddc,
- sink_caps,
- audio_support);
+ sink_caps,
+ audio_support);
}
return true;
@@ -769,8 +769,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
if ((link->connector_signal == SIGNAL_TYPE_LVDS ||
link->connector_signal == SIGNAL_TYPE_EDP) &&
- link->local_sink)
+ link->local_sink) {
+
+ // need to rewrite the OUI and brightness in the resume case
+ if (link->connector_signal == SIGNAL_TYPE_EDP) {
+ dpcd_set_source_specific_data(link);
+ dc_link_set_default_brightness_aux(link); //TODO: use cached
+ }
+
return true;
+ }
if (false == dc_link_detect_sink(link, &new_connection_type)) {
BREAK_TO_DEBUGGER();
@@ -818,6 +826,10 @@ static bool dc_link_detect_helper(struct dc_link *link,
}
case SIGNAL_TYPE_EDP: {
+ read_current_link_settings_on_detect(link);
+
+ dpcd_set_source_specific_data(link);
+
detect_edp_sink_caps(link);
read_current_link_settings_on_detect(link);
sink_caps.transaction_type = DDC_TRANSACTION_TYPE_I2C_OVER_AUX;
@@ -958,10 +970,16 @@ static bool dc_link_detect_helper(struct dc_link *link,
break;
}
+ if (link->local_sink->edid_caps.panel_patch.disable_fec)
+ link->ctx->dc->debug.disable_fec = true;
+
// Check if edid is the same
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
+ if (sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ link->ctx->dc->debug.hdmi20_disable = true;
+
if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
/*
@@ -1480,9 +1498,8 @@ static void enable_stream_features(struct pipe_ctx *pipe_ctx)
}
}
-static enum dc_status enable_link_dp(
- struct dc_state *state,
- struct pipe_ctx *pipe_ctx)
+static enum dc_status enable_link_dp(struct dc_state *state,
+ struct pipe_ctx *pipe_ctx)
{
struct dc_stream_state *stream = pipe_ctx->stream;
enum dc_status status;
@@ -1492,6 +1509,7 @@ static enum dc_status enable_link_dp(
bool fec_enable;
int i;
bool apply_seamless_boot_optimization = false;
+ uint32_t bl_oled_enable_delay = 50; // in ms
// check for seamless boot
for (i = 0; i < state->stream_count; i++) {
@@ -1513,31 +1531,45 @@ static enum dc_status enable_link_dp(
pipe_ctx->stream_res.pix_clk_params.requested_sym_clk =
link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ;
if (state->clk_mgr && !apply_seamless_boot_optimization)
- state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false);
+ state->clk_mgr->funcs->update_clocks(state->clk_mgr,
+ state, false);
+
+ // during mode switch we do DP_SET_POWER off then on, and OUI is lost
+ dpcd_set_source_specific_data(link);
skip_video_pattern = true;
if (link_settings.link_rate == LINK_RATE_LOW)
- skip_video_pattern = false;
-
- if (perform_link_training_with_retries(
- &link_settings,
- skip_video_pattern,
- LINK_TRAINING_ATTEMPTS,
- pipe_ctx,
- pipe_ctx->stream->signal)) {
+ skip_video_pattern = false;
+
+ if (perform_link_training_with_retries(&link_settings,
+ skip_video_pattern,
+ LINK_TRAINING_ATTEMPTS,
+ pipe_ctx,
+ pipe_ctx->stream->signal)) {
link->cur_link_settings = link_settings;
status = DC_OK;
- }
- else
+ } else {
status = DC_FAIL_DP_LINK_TRAINING;
+ }
- if (link->preferred_training_settings.fec_enable != NULL)
+ if (link->preferred_training_settings.fec_enable)
fec_enable = *link->preferred_training_settings.fec_enable;
else
fec_enable = true;
dp_set_fec_enable(link, fec_enable);
+
+ // during mode set we do DP_SET_POWER off then on, so earlier AUX writes are lost
+ if (link->dpcd_sink_ext_caps.bits.oled == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1) {
+ dc_link_set_default_brightness_aux(link); // TODO: use cached if known
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ msleep(bl_oled_enable_delay);
+ dc_link_backlight_enable_aux(link, true);
+ }
+
return status;
}
@@ -1733,8 +1765,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1752,8 +1783,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1765,8 +1795,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1786,8 +1815,7 @@ static void write_i2c_retimer_setting(
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
* needs to be set to 1 on every 0xA-0xC write.
@@ -1805,8 +1833,7 @@ static void write_i2c_retimer_setting(
pipe_ctx->stream->link->ddc,
slave_address, &offset, 1, &value, 1);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
buffer[0] = offset;
@@ -1818,8 +1845,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
}
}
@@ -1837,8 +1863,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1849,8 +1874,7 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1861,10 +1885,14 @@ static void write_i2c_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set retimer failed");
}
static void write_i2c_default_retimer_setting(
@@ -1889,8 +1917,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1901,8 +1928,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0B to 0xDA or 0xD8 */
buffer[0] = 0x0B;
@@ -1913,8 +1939,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1925,8 +1950,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0C to 0x1D or 0x91 */
buffer[0] = 0x0C;
@@ -1937,8 +1961,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x0A to 0x17 */
buffer[0] = 0x0A;
@@ -1949,8 +1972,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
if (is_vga_mode) {
@@ -1965,8 +1987,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val = 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0x00 to 0x23 */
buffer[0] = 0x00;
@@ -1977,8 +1998,7 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
/* Write offset 0xff to 0x00 */
buffer[0] = 0xff;
@@ -1989,9 +2009,13 @@ static void write_i2c_default_retimer_setting(
offset = 0x%x, reg_val= 0x%x, i2c_success = %d end here\n",
slave_address, buffer[0], buffer[1], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ goto i2c_write_fail;
}
+
+ return;
+
+i2c_write_fail:
+ DC_LOG_DEBUG("Set default retimer failed");
}
static void write_i2c_redriver_setting(
@@ -2020,8 +2044,7 @@ static void write_i2c_redriver_setting(
slave_address, buffer[3], buffer[4], buffer[5], buffer[6], i2c_success?1:0);
if (!i2c_success)
- /* Write failure */
- ASSERT(i2c_success);
+ DC_LOG_DEBUG("Set redriver failed");
}
static void disable_link(struct dc_link *link, enum signal_type signal)
@@ -2400,8 +2423,8 @@ bool dc_link_set_psr_allow_active(struct dc_link *link, bool allow_active, bool
struct dmcu *dmcu = dc->res_pool->dmcu;
struct dmub_psr *psr = dc->res_pool->psr;
- if ((psr != NULL) && link->psr_feature_enabled)
- psr->funcs->set_psr_enable(psr, allow_active);
+ if (psr != NULL && link->psr_feature_enabled)
+ psr->funcs->psr_enable(psr, allow_active);
else if ((dmcu != NULL && dmcu->funcs->is_dmcu_initialized(dmcu)) && link->psr_feature_enabled)
dmcu->funcs->set_psr_enable(dmcu, allow_active, wait);
@@ -2417,7 +2440,7 @@ bool dc_link_get_psr_state(const struct dc_link *link, uint32_t *psr_state)
struct dmub_psr *psr = dc->res_pool->psr;
if (psr != NULL && link->psr_feature_enabled)
- psr->funcs->get_psr_state(psr_state);
+ psr->funcs->psr_get_state(psr, psr_state);
else if (dmcu != NULL && link->psr_feature_enabled)
dmcu->funcs->get_psr_state(dmcu, psr_state);
@@ -2589,7 +2612,7 @@ bool dc_link_setup_psr(struct dc_link *link,
psr_context->frame_delay = 0;
if (psr)
- link->psr_feature_enabled = psr->funcs->setup_psr(psr, link, psr_context);
+ link->psr_feature_enabled = psr->funcs->psr_copy_settings(psr, link, psr_context);
else
link->psr_feature_enabled = dmcu->funcs->setup_psr(dmcu, link, psr_context);
@@ -2922,10 +2945,13 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off)
memset(&config, 0, sizeof(config));
config.otg_inst = (uint8_t) pipe_ctx->stream_res.tg->inst;
- config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->id;
+ /*stream_enc_inst*/
+ config.stream_enc_inst = (uint8_t) pipe_ctx->stream_res.stream_enc->stream_enc_inst;
config.link_enc_inst = pipe_ctx->stream->link->link_enc_hw_inst;
config.dpms_off = dpms_off;
config.dm_stream_ctx = pipe_ctx->stream->dm_stream_context;
+ config.mst_supported = (pipe_ctx->stream->signal ==
+ SIGNAL_TYPE_DISPLAY_PORT_MST);
cp_psp->funcs.update_stream_config(cp_psp->handle, &config);
}
}
@@ -3061,6 +3087,9 @@ void core_link_enable_stream(
dc->hwss.unblank_stream(pipe_ctx,
&pipe_ctx->stream->link->cur_link_settings);
+ if (stream->sink_patches.delay_ignore_msa > 0)
+ msleep(stream->sink_patches.delay_ignore_msa);
+
if (dc_is_dp_signal(pipe_ctx->stream->signal))
enable_stream_features(pipe_ctx);
#if defined(CONFIG_DRM_AMD_DC_HDCP)
@@ -3373,7 +3402,7 @@ uint32_t dc_link_bandwidth_kbps(
link_bw_kbps *= 8; /* 8 bits per byte*/
link_bw_kbps *= link_setting->lane_count;
- if (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
+ if (dc_link_is_fec_supported(link) && !link->dc->debug.disable_fec) {
/* Account for FEC overhead.
* We have to do it based on caps,
* and not based on FEC being set ready,
@@ -3417,3 +3446,11 @@ void dc_link_overwrite_extended_receiver_cap(
dp_overwrite_extended_receiver_cap(link);
}
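+
+/* FEC is supported only on DP signals where both the link encoder and the
+ * sink's DPCD advertise it, and never on FPGA (Maximus) environments.
+ */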
+bool dc_link_is_fec_supported(const struct dc_link *link)
+{
+ return (dc_is_dp_signal(link->connector_signal) &&
+ link->link_enc->features.fec_supported &&
+ link->dpcd_caps.fec_cap.bits.FEC_CAPABLE &&
+ !IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment));
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index a49c10d5df26..256889eed93e 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -126,22 +126,16 @@ struct aux_payloads {
struct vector payloads;
};
-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
+static bool dal_ddc_i2c_payloads_create(
+ struct dc_context *ctx,
+ struct i2c_payloads *payloads,
+ uint32_t count)
{
- struct i2c_payloads *payloads;
-
- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
-
- if (!payloads)
- return NULL;
-
if (dal_vector_construct(
&payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
- return payloads;
-
- kfree(payloads);
- return NULL;
+ return true;
+ return false;
}
static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
@@ -154,14 +148,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
return p->payloads.count;
}
-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
+static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
{
- if (!p || !*p)
+ if (!p)
return;
- dal_vector_destruct(&(*p)->payloads);
- kfree(*p);
- *p = NULL;
+ dal_vector_destruct(&p->payloads);
}
#define DDC_MIN(a, b) (((a) < (b)) ? (a) : (b))
@@ -524,9 +516,13 @@ bool dal_ddc_service_query_ddc_data(
uint32_t payloads_num = write_payloads + read_payloads;
+
if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
return false;
+ if (!payloads_num)
+ return false;
+
/*TODO: len of payload data for i2c and aux is uint8!!!!,
* but we want to read 256 over i2c!!!!*/
if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
@@ -557,23 +553,25 @@ bool dal_ddc_service_query_ddc_data(
ret = dal_ddc_submit_aux_command(ddc, &payload);
}
} else {
- struct i2c_payloads *payloads =
- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
+ struct i2c_command command = {0};
+ struct i2c_payloads payloads;
- struct i2c_command command = {
- .payloads = dal_ddc_i2c_payloads_get(payloads),
- .number_of_payloads = 0,
- .engine = DDC_I2C_COMMAND_ENGINE,
- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
+ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
+ return false;
+
+ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
+ command.number_of_payloads = 0;
+ command.engine = DDC_I2C_COMMAND_ENGINE;
+ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
dal_ddc_i2c_payloads_add(
- payloads, address, write_size, write_buf, true);
+ &payloads, address, write_size, write_buf, true);
dal_ddc_i2c_payloads_add(
- payloads, address, read_size, read_buf, false);
+ &payloads, address, read_size, read_buf, false);
command.number_of_payloads =
- dal_ddc_i2c_payloads_get_count(payloads);
+ dal_ddc_i2c_payloads_get_count(&payloads);
ret = dm_helpers_submit_i2c(
ddc->ctx,
@@ -686,6 +684,10 @@ void dal_ddc_service_write_scdc_data(struct ddc_service *ddc_service,
uint8_t write_buffer[2] = {0};
/*Lower than 340 Scramble bit from SCDC caps*/
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &sink_version, sizeof(sink_version));
if (sink_version == 1) {
@@ -715,6 +717,10 @@ void dal_ddc_service_read_scdc_data(struct ddc_service *ddc_service)
uint8_t offset = HDMI_SCDC_TMDS_CONFIG;
uint8_t tmds_config = 0;
+ if (ddc_service->link->local_sink &&
+ ddc_service->link->local_sink->edid_caps.panel_patch.skip_scdc_overwrite)
+ return;
+
dal_ddc_service_query_ddc_data(ddc_service, slave_address, &offset,
sizeof(offset), &tmds_config, sizeof(tmds_config));
if (tmds_config & 0x1) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index cb731c1d30b1..7cbb1efb4f68 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -945,6 +945,17 @@ static enum link_training_result perform_channel_equalization_sequence(
}
#define TRAINING_AUX_RD_INTERVAL 100 //us
+static void start_clock_recovery_pattern_early(struct dc_link *link,
+ struct link_training_settings *lt_settings,
+ uint32_t offset)
+{
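+ /* lt_early_cr_pattern workaround: transmit TPS1 with the current HW lane
+ * settings ahead of the standard LT sequence, then give the receiver
+ * 400us to settle before clock recovery polling begins.
+ */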
+ DC_LOG_HW_LINK_TRAINING("%s\n GPU sends TPS1. Wait 400us.\n",
+ __func__);
+ dp_set_hw_training_pattern(link, DP_TRAINING_PATTERN_SEQUENCE_1, offset);
+ dp_set_hw_lane_settings(link, lt_settings, offset);
+ udelay(400);
+}
+
static enum link_training_result perform_clock_recovery_sequence(
struct dc_link *link,
struct link_training_settings *lt_settings,
@@ -962,7 +973,8 @@ static enum link_training_result perform_clock_recovery_sequence(
retries_cr = 0;
retry_count = 0;
- dp_set_hw_training_pattern(link, tr_pattern, offset);
+ if (!link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ dp_set_hw_training_pattern(link, tr_pattern, offset);
/* najeeb - The synaptics MST hub can put the LT in
* infinite loop by switching the VS
@@ -1434,6 +1446,13 @@ enum link_training_result dc_link_dp_perform_link_training(
&link->preferred_training_settings,
&lt_settings);
+ /* Configure lttpr mode */
+ if (!link->is_lttpr_mode_transparent)
+ configure_lttpr_mode(link);
+
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
+
/* 1. set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1445,8 +1464,6 @@ enum link_training_result dc_link_dp_perform_link_training(
dp_set_fec_ready(link, fec_enable);
if (!link->is_lttpr_mode_transparent) {
- /* Configure lttpr mode */
- configure_lttpr_mode(link);
/* 2. perform link training (set link training done
* to false is done as well)
@@ -1654,6 +1671,8 @@ enum link_training_result dc_link_dp_sync_lt_attempt(
dp_set_panel_mode(link, panel_mode);
/* Attempt to train with given link training settings */
+ if (link->ctx->dc->work_arounds.lt_early_cr_pattern)
+ start_clock_recovery_pattern_early(link, &lt_settings, DPRX);
/* Set link rate, lane count and spread. */
dpcd_set_link_settings(link, &lt_settings);
@@ -1892,6 +1911,16 @@ bool dp_verify_link_cap(
/* disable PHY done possible by BIOS, will be done by driver itself */
dp_disable_link_phy(link, link->connector_signal);
+ dp_cs_id = get_clock_source_id(link);
+
+ /* link training starts with the maximum common settings
+ * supported by both sink and ASIC.
+ */
+ initial_link_settings = get_common_supported_link_settings(
+ *known_limit_link_setting,
+ max_link_cap);
+ cur_link_setting = initial_link_settings;
+
/* Temporary Renoir-specific workaround for SWDEV-215184;
* PHY will sometimes be in bad state on hotplugging display from certain USB-C dongle,
* so add extra cycle of enabling and disabling the PHY before first link training.
@@ -1902,15 +1931,6 @@ bool dp_verify_link_cap(
dp_disable_link_phy(link, link->connector_signal);
}
- dp_cs_id = get_clock_source_id(link);
-
- /* link training starts with the maximum common settings
- * supported by both sink and ASIC.
- */
- initial_link_settings = get_common_supported_link_settings(
- *known_limit_link_setting,
- max_link_cap);
- cur_link_setting = initial_link_settings;
do {
skip_video_pattern = true;
@@ -2654,9 +2674,12 @@ static void dp_test_send_link_test_pattern(struct dc_link *link)
break;
}
- test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
- DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
+ if (dpcd_test_params.bits.CLR_FORMAT == 0)
+ test_pattern_color_space = DP_TEST_PATTERN_COLOR_SPACE_RGB;
+ else
+ test_pattern_color_space = dpcd_test_params.bits.YCBCR_COEFS ?
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR709 :
+ DP_TEST_PATTERN_COLOR_SPACE_YCBCR601;
dc_link_dp_set_test_pattern(
link,
@@ -3165,6 +3188,23 @@ static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data,
link->wa_flags.dp_keep_receiver_powered = false;
}
+/* Read additional sink caps defined in the source-specific DPCD area.
+ * Currently this only reads the SinkCapability address (DP_SOURCE_SINK_CAP).
+ */
+static bool dpcd_read_sink_ext_caps(struct dc_link *link)
+{
+ uint8_t dpcd_data;
+
+ if (!link)
+ return false;
+
+ if (core_link_read_dpcd(link, DP_SOURCE_SINK_CAP, &dpcd_data, 1) != DC_OK)
+ return false;
+
+ link->dpcd_sink_ext_caps.raw = dpcd_data;
+ return true;
+}
+
static bool retrieve_link_cap(struct dc_link *link)
{
/* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16,
@@ -3401,6 +3441,17 @@ static bool retrieve_link_cap(struct dc_link *link)
sink_id.ieee_device_id,
sizeof(sink_id.ieee_device_id));
+ /* Quirk Apple MBP 2017 15" Retina panel: Wrong DP_MAX_LINK_RATE */
+ {
+ uint8_t str_mbp_2017[] = { 101, 68, 21, 101, 98, 97 };
+
+ if ((link->dpcd_caps.sink_dev_id == 0x0010fa) &&
+ !memcmp(link->dpcd_caps.sink_dev_id_str, str_mbp_2017,
+ sizeof(str_mbp_2017))) {
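+ /* 0x0c encodes 3.24 Gbps (12 x 0.27 Gbps) in DPCD link-rate units */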
+ link->reported_link_cap.link_rate = 0x0c;
+ }
+ }
+
core_link_read_dpcd(
link,
DP_SINK_HW_REVISION_START,
@@ -3437,6 +3488,9 @@ static bool retrieve_link_cap(struct dc_link *link)
sizeof(link->dpcd_caps.dsc_caps.dsc_ext_caps.raw));
}
+ if (!dpcd_read_sink_ext_caps(link))
+ link->dpcd_sink_ext_caps.raw = 0;
+
/* Connectivity log: detection */
CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: ");
@@ -3589,6 +3643,8 @@ void detect_edp_sink_caps(struct dc_link *link)
}
}
link->verified_link_cap = link->reported_link_cap;
+
+ dc_link_set_default_brightness_aux(link);
}
void dc_link_dp_enable_hpd(const struct dc_link *link)
@@ -3680,7 +3736,8 @@ static void set_crtc_test_pattern(struct dc_link *link,
struct pipe_ctx *odm_pipe;
enum controller_dp_color_space controller_color_space;
int opp_cnt = 1;
- int count;
+ int offset = 0;
+ int dpg_width = width;
switch (test_pattern_color_space) {
case DP_TEST_PATTERN_COLOR_SPACE_RGB:
@@ -3702,33 +3759,30 @@ static void set_crtc_test_pattern(struct dc_link *link,
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
+ dpg_width = width / opp_cnt;
+ offset = dpg_width;
- width /= opp_cnt;
+ opp->funcs->opp_set_disp_pattern_generator(opp,
+ controller_test_pattern,
+ controller_color_space,
+ color_depth,
+ NULL,
+ dpg_width,
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
-
odm_opp->funcs->opp_program_bit_depth_reduction(odm_opp, &params);
odm_opp->funcs->opp_set_disp_pattern_generator(odm_opp,
controller_test_pattern,
controller_color_space,
color_depth,
NULL,
- width,
- height);
- }
- opp->funcs->opp_set_disp_pattern_generator(opp,
- controller_test_pattern,
- controller_color_space,
- color_depth,
- NULL,
- width,
- height);
- /* wait for dpg to blank pixel data with test pattern */
- for (count = 0; count < 1000; count++) {
- if (opp->funcs->dpg_is_blanked(opp))
- break;
- udelay(100);
+ dpg_width,
+ height,
+ offset);
+ offset += dpg_width; /* advance one ODM segment per pipe */
}
}
}
@@ -3746,11 +3800,12 @@ static void set_crtc_test_pattern(struct dc_link *link,
else if (opp->funcs->opp_set_disp_pattern_generator) {
struct pipe_ctx *odm_pipe;
int opp_cnt = 1;
+ int dpg_width = width;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe)
opp_cnt++;
- width /= opp_cnt;
+ dpg_width = width / opp_cnt;
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
struct output_pixel_processor *odm_opp = odm_pipe->stream_res.opp;
@@ -3760,16 +3815,18 @@ static void set_crtc_test_pattern(struct dc_link *link,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
opp->funcs->opp_set_disp_pattern_generator(opp,
CONTROLLER_DP_TEST_PATTERN_VIDEOMODE,
CONTROLLER_DP_COLOR_SPACE_UDEFINED,
color_depth,
NULL,
- width,
- height);
+ dpg_width,
+ height,
+ 0);
}
}
break;
@@ -3947,6 +4004,11 @@ bool dc_link_dp_set_test_pattern(
default:
break;
}
+
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_enable(
+ pipe_ctx->stream_res.tg);
+ pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
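+ /* MSA, VSC SDP and CRTC pattern programming below happen under this
+ * lock so the changes take effect together.
+ */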
/* update MSA to requested color space */
pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute(pipe_ctx->stream_res.stream_enc,
&pipe_ctx->stream->timing,
@@ -3954,9 +4016,27 @@ bool dc_link_dp_set_test_pattern(
pipe_ctx->stream->use_vsc_sdp_for_colorimetry,
link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP);
+ if (pipe_ctx->stream->use_vsc_sdp_for_colorimetry) {
+ if (test_pattern == DP_TEST_PATTERN_COLOR_SQUARES_CEA)
+ pipe_ctx->stream->vsc_infopacket.sb[17] |= (1 << 7); // sb17 bit 7 Dynamic Range: 0 = VESA range, 1 = CTA range
+ else
+ pipe_ctx->stream->vsc_infopacket.sb[17] &= ~(1 << 7);
+ resource_build_info_frame(pipe_ctx);
+ link->dc->hwss.update_info_frame(pipe_ctx);
+ }
+
/* CRTC Patterns */
set_crtc_test_pattern(link, pipe_ctx, test_pattern, test_pattern_color_space);
-
+ pipe_ctx->stream_res.tg->funcs->unlock(pipe_ctx->stream_res.tg);
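+ /* wait a full frame (VACTIVE -> VBLANK -> VACTIVE) so the double-buffered
+ * updates latch before dropping the double-buffer lock below
+ */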
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VBLANK);
+ pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg,
+ CRTC_STATE_VACTIVE);
+ if (pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable)
+ pipe_ctx->stream_res.tg->funcs->lock_doublebuffer_disable(
+ pipe_ctx->stream_res.tg);
/* Set Test Pattern state */
link->test_pattern_enabled = true;
}
@@ -4086,8 +4166,7 @@ void dp_set_fec_ready(struct dc_link *link, bool ready)
struct link_encoder *link_enc = link->link_enc;
uint8_t fec_config = 0;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_ready &&
@@ -4122,8 +4201,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
{
struct link_encoder *link_enc = link->link_enc;
- if (link->dc->debug.disable_fec ||
- IS_FPGA_MAXIMUS_DC(link->ctx->dce_environment))
+ if (!dc_link_is_fec_supported(link) || link->dc->debug.disable_fec)
return;
if (link_enc->funcs->fec_set_enable &&
@@ -4146,3 +4224,148 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
}
}
+void dpcd_set_source_specific_data(struct dc_link *link)
+{
+ const uint32_t post_oui_delay = 30; // 30ms
+
+ if (!link->dc->vendor_signature.is_valid) {
+ struct dpcd_amd_signature amd_signature;
+ amd_signature.AMD_IEEE_TxSignature_byte1 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte2 = 0x0;
+ amd_signature.AMD_IEEE_TxSignature_byte3 = 0x1A;
+ amd_signature.device_id_byte1 =
+ (uint8_t)(link->ctx->asic_id.chip_id);
+ amd_signature.device_id_byte2 =
+ (uint8_t)(link->ctx->asic_id.chip_id >> 8);
+ memset(&amd_signature.zero, 0, 4);
+ amd_signature.dce_version =
+ (uint8_t)(link->ctx->dce_version);
+ amd_signature.dal_version_byte1 = 0x0; // TODO: source the DAL version if needed
+ amd_signature.dal_version_byte2 = 0x0; // TODO: source the DAL version if needed
+
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ (uint8_t *)(&amd_signature),
+ sizeof(amd_signature));
+
+ } else {
+ core_link_write_dpcd(link, DP_SOURCE_OUI,
+ link->dc->vendor_signature.data.raw,
+ sizeof(link->dc->vendor_signature.data.raw));
+ }
+
+ // Sink may need to configure internals based on vendor, so allow some
+ // time before proceeding with possibly vendor specific transactions
+ msleep(post_oui_delay);
+}
+
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms)
+{
+ struct dpcd_source_backlight_set dpcd_backlight_set;
+ uint8_t backlight_control = isHDR ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ // OLEDs have no PWM, they can only use AUX
+ if (link->dpcd_sink_ext_caps.bits.oled == 1)
+ backlight_control = 1;
+
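+ /* these casts fill the byte-wise DPCD structs in host byte order and
+ * therefore assume a little-endian host
+ */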
+ *(uint32_t *)&dpcd_backlight_set.backlight_level_millinits = backlight_millinits;
+ *(uint16_t *)&dpcd_backlight_set.backlight_transition_time_ms = (uint16_t)transition_time_in_ms;
+
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *)(&dpcd_backlight_set),
+ sizeof(dpcd_backlight_set)) != DC_OK)
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_CONTROL,
+ &backlight_control, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits_avg,
+ uint32_t *backlight_millinits_peak)
+{
+ union dpcd_source_backlight_get dpcd_backlight_get;
+
+ memset(&dpcd_backlight_get, 0, sizeof(union dpcd_source_backlight_get));
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_CURRENT_PEAK,
+ dpcd_backlight_get.raw,
+ sizeof(union dpcd_source_backlight_get)))
+ return false;
+
+ *backlight_millinits_avg =
+ dpcd_backlight_get.bytes.backlight_millinits_avg;
+ *backlight_millinits_peak =
+ dpcd_backlight_get.bytes.backlight_millinits_peak;
+
+ /* On unsupported panels the DPCD read usually succeeds but returns 0 */
+ if (*backlight_millinits_avg == 0 ||
+ *backlight_millinits_avg > *backlight_millinits_peak)
+ return false;
+
+ return true;
+}
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable)
+{
+ uint8_t backlight_enable = enable ? 1 : 0;
+
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
+ &backlight_enable, 1) != DC_OK)
+ return false;
+
+ return true;
+}
+
+// We read the default from 0x320 because we expect the BIOS to have written it
+// there; the regular get_backlight_nit path reads the panel-set value at 0x326.
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits)
+{
+ if (!link || (link->connector_signal != SIGNAL_TYPE_EDP &&
+ link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
+ return false;
+
+ if (!core_link_read_dpcd(link, DP_SOURCE_BACKLIGHT_LEVEL,
+ (uint8_t *) backlight_millinits,
+ sizeof(uint32_t)))
+ return false;
+
+ return true;
+}
+
+bool dc_link_set_default_brightness_aux(struct dc_link *link)
+{
+ uint32_t default_backlight;
+
+ if (link &&
+ (link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)) {
+ if (!dc_link_read_default_bl_aux(link, &default_backlight))
+ default_backlight = 150000;
+ // readings below 5 nits or above 5000 nits are likely a bad readback
+ if (default_backlight < 5000 || default_backlight > 5000000)
+ default_backlight = 150000; // 150 nits
+
+ return dc_link_set_backlight_level_nits(link, true,
+ default_backlight, 0);
+ }
+ return false;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
index ddb855045767..51e0ee6e7695 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c
@@ -153,18 +153,19 @@ bool edp_receiver_ready_T9(struct dc_link *link)
unsigned char edpRev = 0;
enum dc_status result = DC_OK;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- do {
- sinkstatus = 1;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 0)
- break;
- if (result != DC_OK)
- break;
- udelay(100); //MAx T9
- } while (++tries < 50);
+
+ /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ do {
+ sinkstatus = 1;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 0)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(100); // Max T9
+ } while (++tries < 50);
+ }
if (link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_delay_backlight_off * 1000);
@@ -183,21 +184,22 @@ bool edp_receiver_ready_T7(struct dc_link *link)
unsigned long long time_taken_in_ns = 0;
result = core_link_read_dpcd(link, DP_EDP_DPCD_REV, &edpRev, sizeof(edpRev));
- if (result == DC_OK && edpRev < DP_EDP_12)
- return true;
- /* start from eDP version 1.2, SINK_STAUS indicate the sink is ready.*/
- enter_timestamp = dm_get_timestamp(link->ctx);
- do {
- sinkstatus = 0;
- result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
- if (sinkstatus == 1)
- break;
- if (result != DC_OK)
- break;
- udelay(25);
- finish_timestamp = dm_get_timestamp(link->ctx);
- time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
- } while (time_taken_in_ns < 50 * 1000000); //MAx T7 is 50ms
+
+ if (result == DC_OK && edpRev >= DP_EDP_12) {
+ /* Starting from eDP version 1.2, SINK_STATUS indicates the sink is ready. */
+ enter_timestamp = dm_get_timestamp(link->ctx);
+ do {
+ sinkstatus = 0;
+ result = core_link_read_dpcd(link, DP_SINK_STATUS, &sinkstatus, sizeof(sinkstatus));
+ if (sinkstatus == 1)
+ break;
+ if (result != DC_OK)
+ break;
+ udelay(25);
+ finish_timestamp = dm_get_timestamp(link->ctx);
+ time_taken_in_ns = dm_get_elapse_time_in_ns(link->ctx, finish_timestamp, enter_timestamp);
+ } while (time_taken_in_ns < 50 * 1000000); // Max T7 is 50ms
+ }
if (link->local_sink->edid_caps.panel_patch.extra_t7_ms > 0)
udelay(link->local_sink->edid_caps.panel_patch.extra_t7_ms * 1000);
@@ -429,6 +431,7 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
ASSERT(dsc_cfg.dc_dsc_cfg.num_slices_h % opp_cnt == 0);
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -533,6 +536,7 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable)
dsc_cfg.pic_height = stream->timing.v_addressable + stream->timing.v_border_top + stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
DC_LOG_DSC(" ");
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index a0eb9e533a61..75c7ce4c7581 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -46,12 +46,12 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
+#include "dce120/dce120_resource.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/dcn10_resource.h"
-#endif
#include "dcn20/dcn20_resource.h"
#include "dcn21/dcn21_resource.h"
-#include "dce120/dce120_resource.h"
+#endif
#define DC_LOGGER_INIT(logger)
@@ -532,6 +532,51 @@ static inline void get_vp_scan_direction(
*flip_horz_scan_dir = !*flip_horz_scan_dir;
}
+int get_num_odm_splits(struct pipe_ctx *pipe)
+{
+ int odm_split_count = 0;
+ struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
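+
+ /* count ODM neighbours in both directions; the result excludes this pipe */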
+ while (next_pipe) {
+ odm_split_count++;
+ next_pipe = next_pipe->next_odm_pipe;
+ }
+ pipe = pipe->prev_odm_pipe;
+ while (pipe) {
+ odm_split_count++;
+ pipe = pipe->prev_odm_pipe;
+ }
+ return odm_split_count;
+}
+
+static void calculate_split_count_and_index(struct pipe_ctx *pipe_ctx, int *split_count, int *split_idx)
+{
+ *split_count = get_num_odm_splits(pipe_ctx);
+ *split_idx = 0;
+ if (*split_count == 0) {
+ /*Check for mpc split*/
+ struct pipe_ctx *split_pipe = pipe_ctx->top_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_idx)++;
+ (*split_count)++;
+ split_pipe = split_pipe->top_pipe;
+ }
+ split_pipe = pipe_ctx->bottom_pipe;
+ while (split_pipe && split_pipe->plane_state == pipe_ctx->plane_state) {
+ (*split_count)++;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ } else {
+ /*Get odm split index*/
+ struct pipe_ctx *split_pipe = pipe_ctx->prev_odm_pipe;
+
+ while (split_pipe) {
+ (*split_idx)++;
+ split_pipe = split_pipe->prev_odm_pipe;
+ }
+ }
+}
+
static void calculate_viewport(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
@@ -541,16 +586,16 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
struct rect clip, dest;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
+ int split_count = 0;
+ int split_idx = 0;
bool orthogonal_rotation, flip_y_start, flip_x_start;
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
if (stream->view_format == VIEW_3D_FORMAT_SIDE_BY_SIDE ||
stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM) {
- pri_split = false;
- sec_split = false;
+ split_count = 0;
+ split_idx = 0;
}
/* The actual clip is an intersection between stream
@@ -609,23 +654,32 @@ static void calculate_viewport(struct pipe_ctx *pipe_ctx)
data->viewport.height = clip.height * surf_src.height / dest.height;
/* Handle split */
- if (pri_split || sec_split) {
+ if (split_count) {
+ /* extra pixels in the division remainder need to go to the pipes whose
+ * split index exceeds the "extra pixel index minus one" (epimo),
+ * defined here as:
+ */
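+ /* e.g. a 100-pixel span over 3 pipes (split_count = 2): base 33,
+ * remainder 1, epimo = 2 - 1 = 1, so only split_idx 2 gets an extra
+ * pixel: 33 + 33 + 34 = 100
+ */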
+ int epimo = 0;
+
if (orthogonal_rotation) {
- if (flip_y_start != pri_split)
- data->viewport.height /= 2;
- else {
- data->viewport.y += data->viewport.height / 2;
- /* Ceil offset pipe */
- data->viewport.height = (data->viewport.height + 1) / 2;
- }
+ if (flip_y_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.height % (split_count + 1);
+
+ data->viewport.y += (data->viewport.height / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.y += split_idx - epimo - 1;
+ data->viewport.height = data->viewport.height / (split_count + 1) + (split_idx > epimo ? 1 : 0);
} else {
- if (flip_x_start != pri_split)
- data->viewport.width /= 2;
- else {
- data->viewport.x += data->viewport.width / 2;
- /* Ceil offset pipe */
- data->viewport.width = (data->viewport.width + 1) / 2;
- }
+ if (flip_x_start)
+ split_idx = split_count - split_idx;
+
+ epimo = split_count - data->viewport.width % (split_count + 1);
+
+ data->viewport.x += (data->viewport.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->viewport.x += split_idx - epimo - 1;
+ data->viewport.width = data->viewport.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
}
}
@@ -644,58 +698,58 @@ static void calculate_recout(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect surf_clip = plane_state->clip_rect;
- bool pri_split = pipe_ctx->bottom_pipe &&
- pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state;
- bool sec_split = pipe_ctx->top_pipe &&
- pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state;
- bool top_bottom_split = stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
-
- pipe_ctx->plane_res.scl_data.recout.x = stream->dst.x;
+ bool pri_split_tb = pipe_ctx->bottom_pipe &&
+ pipe_ctx->bottom_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ bool sec_split_tb = pipe_ctx->top_pipe &&
+ pipe_ctx->top_pipe->plane_state == pipe_ctx->plane_state &&
+ stream->view_format == VIEW_3D_FORMAT_TOP_AND_BOTTOM;
+ int split_count = 0;
+ int split_idx = 0;
+
+ calculate_split_count_and_index(pipe_ctx, &split_count, &split_idx);
+
+ data->recout.x = stream->dst.x;
if (stream->src.x < surf_clip.x)
- pipe_ctx->plane_res.scl_data.recout.x += (surf_clip.x
- - stream->src.x) * stream->dst.width
+ data->recout.x += (surf_clip.x - stream->src.x) * stream->dst.width
/ stream->src.width;
- pipe_ctx->plane_res.scl_data.recout.width = surf_clip.width *
- stream->dst.width / stream->src.width;
- if (pipe_ctx->plane_res.scl_data.recout.width + pipe_ctx->plane_res.scl_data.recout.x >
- stream->dst.x + stream->dst.width)
- pipe_ctx->plane_res.scl_data.recout.width =
- stream->dst.x + stream->dst.width
- - pipe_ctx->plane_res.scl_data.recout.x;
+ data->recout.width = surf_clip.width * stream->dst.width / stream->src.width;
+ if (data->recout.width + data->recout.x > stream->dst.x + stream->dst.width)
+ data->recout.width = stream->dst.x + stream->dst.width - data->recout.x;
- pipe_ctx->plane_res.scl_data.recout.y = stream->dst.y;
+ data->recout.y = stream->dst.y;
if (stream->src.y < surf_clip.y)
- pipe_ctx->plane_res.scl_data.recout.y += (surf_clip.y
- - stream->src.y) * stream->dst.height
+ data->recout.y += (surf_clip.y - stream->src.y) * stream->dst.height
/ stream->src.height;
- pipe_ctx->plane_res.scl_data.recout.height = surf_clip.height *
- stream->dst.height / stream->src.height;
- if (pipe_ctx->plane_res.scl_data.recout.height + pipe_ctx->plane_res.scl_data.recout.y >
- stream->dst.y + stream->dst.height)
- pipe_ctx->plane_res.scl_data.recout.height =
- stream->dst.y + stream->dst.height
- - pipe_ctx->plane_res.scl_data.recout.y;
+ data->recout.height = surf_clip.height * stream->dst.height / stream->src.height;
+ if (data->recout.height + data->recout.y > stream->dst.y + stream->dst.height)
+ data->recout.height = stream->dst.y + stream->dst.height - data->recout.y;
/* Handle h & v split, handle rotation using viewport */
- if (sec_split && top_bottom_split) {
- pipe_ctx->plane_res.scl_data.recout.y +=
- pipe_ctx->plane_res.scl_data.recout.height / 2;
+ if (sec_split_tb) {
+ data->recout.y += data->recout.height / 2;
/* Floor primary pipe, ceil 2ndary pipe */
- pipe_ctx->plane_res.scl_data.recout.height =
- (pipe_ctx->plane_res.scl_data.recout.height + 1) / 2;
- } else if (pri_split && top_bottom_split)
- pipe_ctx->plane_res.scl_data.recout.height /= 2;
- else if (sec_split) {
- pipe_ctx->plane_res.scl_data.recout.x +=
- pipe_ctx->plane_res.scl_data.recout.width / 2;
- /* Ceil offset pipe */
- pipe_ctx->plane_res.scl_data.recout.width =
- (pipe_ctx->plane_res.scl_data.recout.width + 1) / 2;
- } else if (pri_split)
- pipe_ctx->plane_res.scl_data.recout.width /= 2;
+ data->recout.height = (data->recout.height + 1) / 2;
+ } else if (pri_split_tb)
+ data->recout.height /= 2;
+ else if (split_count) {
+ /* extra pixels in the division remainder need to go to the pipes whose
+ * split index exceeds the "extra pixel index minus one" (epimo),
+ * defined here as:
+ */
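+ /* (same remainder distribution as in calculate_viewport() above) */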
+ int epimo = split_count - data->recout.width % (split_count + 1);
+
+ /*no recout offset due to odm */
+ if (!pipe_ctx->next_odm_pipe && !pipe_ctx->prev_odm_pipe) {
+ data->recout.x += (data->recout.width / (split_count + 1)) * split_idx;
+ if (split_idx > epimo)
+ data->recout.x += split_idx - epimo - 1;
+ }
+ data->recout.width = data->recout.width / (split_count + 1) + (split_idx > epimo ? 1 : 0);
+ }
}
static void calculate_scaling_ratios(struct pipe_ctx *pipe_ctx)
@@ -832,12 +886,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
{
const struct dc_plane_state *plane_state = pipe_ctx->plane_state;
const struct dc_stream_state *stream = pipe_ctx->stream;
+ struct pipe_ctx *odm_pipe = pipe_ctx->prev_odm_pipe;
struct scaler_data *data = &pipe_ctx->plane_res.scl_data;
struct rect src = pipe_ctx->plane_state->src_rect;
int recout_skip_h, recout_skip_v, surf_size_h, surf_size_v;
int vpc_div = (data->format == PIXEL_FORMAT_420BPP8
|| data->format == PIXEL_FORMAT_420BPP10) ? 2 : 1;
bool orthogonal_rotation, flip_vert_scan_dir, flip_horz_scan_dir;
+ int odm_idx = 0;
/*
* Need to calculate the scan direction for viewport to make adjustments
@@ -869,6 +925,14 @@ static void calculate_inits_and_adj_vp(struct pipe_ctx *pipe_ctx)
* stream->dst.width / stream->src.width -
src.x * plane_state->dst_rect.width / src.width
* stream->dst.width / stream->src.width);
+ /* modified recout_skip_h calculation: ODM pipes carry no recout offset */
+ while (odm_pipe) {
+ odm_idx++;
+ odm_pipe = odm_pipe->prev_odm_pipe;
+ }
+ if (odm_idx)
+ recout_skip_h += odm_idx * data->recout.width;
+
recout_skip_v = data->recout.y - (stream->dst.y + (plane_state->dst_rect.y - stream->src.y)
* stream->dst.height / stream->src.height -
src.y * plane_state->dst_rect.height / src.height
@@ -1021,6 +1085,8 @@ bool resource_build_scaling_params(struct pipe_ctx *pipe_ctx)
store_h_border_left + timing->h_border_right;
pipe_ctx->plane_res.scl_data.v_active = timing->v_addressable +
timing->v_border_top + timing->v_border_bottom;
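+ /* with ODM combine, each pipe scans out only its horizontal slice */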
+ if (pipe_ctx->next_odm_pipe || pipe_ctx->prev_odm_pipe)
+ pipe_ctx->plane_res.scl_data.h_active /= get_num_odm_splits(pipe_ctx) + 1;
/* Taps calculations */
if (pipe_ctx->plane_res.xfm != NULL)
@@ -2034,7 +2100,7 @@ enum dc_status resource_map_pool_resources(
for (i = 0; i < context->stream_count; i++)
if (context->streams[i] == stream) {
context->stream_status[i].primary_otg_inst = pipe_ctx->stream_res.tg->inst;
- context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->id;
+ context->stream_status[i].stream_enc_inst = pipe_ctx->stream_res.stream_enc->stream_enc_inst;
context->stream_status[i].audio_inst =
pipe_ctx->stream_res.audio ? pipe_ctx->stream_res.audio->inst : -1;
@@ -2108,10 +2174,10 @@ enum dc_status dc_validate_global_state(
if (pipe_ctx->stream != stream)
continue;
- if (dc->res_pool->funcs->get_default_swizzle_mode &&
+ if (dc->res_pool->funcs->patch_unknown_plane_state &&
pipe_ctx->plane_state &&
pipe_ctx->plane_state->tiling_info.gfx9.swizzle == DC_SW_UNKNOWN) {
- result = dc->res_pool->funcs->get_default_swizzle_mode(pipe_ctx->plane_state);
+ result = dc->res_pool->funcs->patch_unknown_plane_state(pipe_ctx->plane_state);
if (result != DC_OK)
return result;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
index a96d8de9380e..64cf24a9ab08 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_vm_helper.c
@@ -47,6 +47,9 @@ int dc_setup_system_context(struct dc *dc, struct dc_phy_addr_space_config *pa_c
*/
memcpy(&dc->vm_pa_config, pa_config, sizeof(struct dc_phy_addr_space_config));
dc->vm_pa_config.valid = true;
+
+ if (pa_config->is_hvm_enabled == 0)
+ dc->debug.nv12_iflip_vm_wa = false;
}
return num_vmids;
@@ -62,7 +65,7 @@ int dc_get_vmid_use_vector(struct dc *dc)
int i;
int in_use = 0;
- for (i = 0; i < dc->vm_helper->num_vmid; i++)
+ for (i = 0; i < MAX_HUBP; i++)
in_use |= dc->vm_helper->hubp_vmid_usage[i].vmid_usage[0]
| dc->vm_helper->hubp_vmid_usage[i].vmid_usage[1];
return in_use;
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index 8ff25b5dd2f6..d3ceb39e428e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -39,7 +39,7 @@
#include "inc/hw/dmcu.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.2.69"
+#define DC_VER "3.2.76"
#define MAX_SURFACES 3
#define MAX_PLANES 6
@@ -126,6 +126,7 @@ struct dc_bug_wa {
bool no_connect_phy_config;
bool dedcn20_305_wa;
bool skip_clock_update;
+ bool lt_early_cr_pattern;
};
struct dc_dcc_surface_param {
@@ -229,6 +230,7 @@ struct dc_config {
bool forced_clocks;
bool disable_extended_timeout_support; // Used to disable extended timeout and lttpr feature as well
bool multi_mon_pp_mclk_switch;
+ bool psr_on_dmub;
};
enum visual_confirm {
@@ -388,6 +390,7 @@ struct dc_debug_options {
int always_scale;
bool disable_pplib_clock_request;
bool disable_clock_gate;
+ bool disable_mem_low_power;
bool disable_dmcu;
bool disable_psr;
bool force_abm_enable;
@@ -453,6 +456,7 @@ struct dc_phy_addr_space_config {
} gart_config;
bool valid;
+ bool is_hvm_enabled;
uint64_t page_table_default_page_addr;
};
@@ -518,6 +522,7 @@ struct dc {
/* Require to optimize clocks and bandwidth for added/removed planes */
bool optimized_required;
+ bool wm_optimized_required;
/* Require to maintain clocks and bandwidth for UEFI enabled HW */
int optimize_seamless_boot_streams;
@@ -526,6 +531,7 @@ struct dc {
struct compressor *fbc_compressor;
struct dc_debug_data debug_data;
+ struct dpcd_vendor_signature vendor_signature;
const char *build_id;
struct vm_helper *vm_helper;
@@ -565,12 +571,14 @@ struct dc_init_data {
struct dc_reg_helper_state *dmub_offload;
struct dc_config flags;
- uint32_t log_mask;
+ uint64_t log_mask;
+
/**
* gpu_info FW provided soc bounding box struct or 0 if not
* available in FW
*/
const struct gpu_info_soc_bounding_box_v1_0 *soc_bounding_box;
+ struct dpcd_vendor_signature vendor_signature;
};
struct dc_callback_init {
@@ -682,7 +690,6 @@ struct dc_3dlut {
struct kref refcount;
struct tetrahedral_params lut_3d;
struct fixed31_32 hdr_multiplier;
- bool initialized; /*remove after diag fix*/
union dc_3dlut_state state;
struct dc_context *ctx;
};
@@ -865,6 +872,7 @@ struct dc_flip_addrs {
unsigned int flip_timestamp_in_us;
bool flip_immediate;
/* TODO: add flip duration for FreeSync */
+ bool triplebuffer_flips;
};
bool dc_post_update_surfaces_to_stream(
@@ -979,6 +987,20 @@ struct dpcd_caps {
};
+union dpcd_sink_ext_caps {
+ struct {
+ /* 0 - Sink supports backlight adjust via PWM during SDR/HDR mode
+ * 1 - Sink supports backlight adjust via AUX during SDR/HDR mode.
+ */
+ uint8_t sdr_aux_backlight_control : 1;
+ uint8_t hdr_aux_backlight_control : 1;
+ uint8_t reserved_1 : 2;
+ uint8_t oled : 1;
+ uint8_t reserved : 3;
+ } bits;
+ uint8_t raw;
+};
+
#include "dc_link.h"
/*******************************************************************************
@@ -1004,6 +1026,11 @@ struct dc_sink_dsc_caps {
struct dsc_dec_dpcd_caps dsc_dec_caps;
};
+struct dc_sink_fec_caps {
+ bool is_rx_fec_supported;
+ bool is_topology_fec_supported;
+};
+
/*
* The sink structure contains EDID and other display device properties
*/
@@ -1017,7 +1044,10 @@ struct dc_sink {
struct stereo_3d_features features_3d[TIMING_3D_FORMAT_MAX];
bool converter_disable_audio;
- struct dc_sink_dsc_caps sink_dsc_caps;
+ struct dc_sink_dsc_caps dsc_caps;
+ struct dc_sink_fec_caps fec_caps;
+
+ bool is_vsc_sdp_colorimetry_supported;
/* private to DC core */
struct dc_link *link;
@@ -1075,7 +1105,6 @@ unsigned int dc_get_current_backlight_pwm(struct dc *dc);
unsigned int dc_get_target_backlight_pwm(struct dc *dc);
bool dc_is_dmcu_initialized(struct dc *dc);
-bool dc_is_hw_initialized(struct dc *dc);
enum dc_status dc_set_clock(struct dc *dc, enum dc_clock_type clock_type, uint32_t clk_khz, uint32_t stepping);
void dc_get_clock(struct dc *dc, enum dc_clock_type clock_type, struct dc_clock_config *clock_cfg);
diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
index dfe4472c9e40..bb2730e9521e 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
@@ -432,6 +432,54 @@ struct dp_sink_hw_fw_revision {
uint8_t ieee_fw_rev[2];
};
+struct dpcd_vendor_signature {
+ bool is_valid;
+
+ union dpcd_ieee_vendor_signature {
+ struct {
+ uint8_t ieee_oui[3];/*24-bit IEEE OUI*/
+ uint8_t ieee_device_id[6];/*usually 6-byte ASCII name*/
+ uint8_t ieee_hw_rev;
+ uint8_t ieee_fw_rev[2];
+ };
+ uint8_t raw[12];
+ } data;
+};
+
+struct dpcd_amd_signature {
+ uint8_t AMD_IEEE_TxSignature_byte1;
+ uint8_t AMD_IEEE_TxSignature_byte2;
+ uint8_t AMD_IEEE_TxSignature_byte3;
+ uint8_t device_id_byte1;
+ uint8_t device_id_byte2;
+ uint8_t zero[4];
+ uint8_t dce_version;
+ uint8_t dal_version_byte1;
+ uint8_t dal_version_byte2;
+};
+
+struct dpcd_source_backlight_set {
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ uint8_t byte2;
+ uint8_t byte3;
+ } backlight_level_millinits;
+
+ struct {
+ uint8_t byte0;
+ uint8_t byte1;
+ } backlight_transition_time_ms;
+};
+
+union dpcd_source_backlight_get {
+ struct {
+ uint32_t backlight_millinits_peak; /* 326h */
+ uint32_t backlight_millinits_avg; /* 32Ah */
+ } bytes;
+ uint8_t raw[8];
+};
+
/*DPCD register of DP receiver capability field bits-*/
union edp_configuration_cap {
struct {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index d25603128394..00ff5e98278c 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -26,6 +26,7 @@
#ifndef DC_LINK_H_
#define DC_LINK_H_
+#include "dc.h"
#include "dc_types.h"
#include "grph_object_defs.h"
@@ -128,6 +129,7 @@ struct dc_link {
enum edp_revision edp_revision;
bool psr_feature_enabled;
bool psr_allow_active;
+ union dpcd_sink_ext_caps dpcd_sink_ext_caps;
/* MST record stream using this link */
struct link_flags {
@@ -178,6 +180,21 @@ bool dc_link_set_backlight_level(const struct dc_link *dc_link,
uint32_t backlight_pwm_u16_16,
uint32_t frame_ramp);
+/* Set/get nits-based backlight level via AUX of an eDP panel or DP display. */
+bool dc_link_set_backlight_level_nits(struct dc_link *link,
+ bool isHDR,
+ uint32_t backlight_millinits,
+ uint32_t transition_time_in_ms);
+
+bool dc_link_get_backlight_level_nits(struct dc_link *link,
+ uint32_t *backlight_millinits,
+ uint32_t *backlight_millinits_peak);
+
+bool dc_link_backlight_enable_aux(struct dc_link *link, bool enable);
+
+bool dc_link_read_default_bl_aux(struct dc_link *link, uint32_t *backlight_millinits);
+bool dc_link_set_default_brightness_aux(struct dc_link *link);
+
int dc_link_get_backlight_level(const struct dc_link *dc_link);
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
@@ -316,4 +333,7 @@ bool dc_submit_i2c_oem(
uint32_t dc_bandwidth_in_kbps_from_timing(
const struct dc_crtc_timing *timing);
+
+bool dc_link_is_fec_supported(const struct dc_link *link);
+
#endif /* DC_LINK_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h
index 92096de79dec..a5c7ef47b8d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_stream.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h
@@ -118,6 +118,7 @@ union stream_update_flags {
uint32_t dpms_off:1;
uint32_t gamut_remap:1;
uint32_t wb_update:1;
+ uint32_t dsc_changed:1;
} bits;
uint32_t raw;
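The bits/raw union lets callers set individual update flags and then test the aggregate in a single compare. A small sketch of that pattern using the dsc_changed bit added here (the helper itself is illustrative):

static bool stream_needs_full_update(void)
{
        union stream_update_flags flags = { .raw = 0 };

        flags.bits.dsc_changed = 1;     /* bit introduced by this patch */
        flags.bits.dpms_off = 1;

        return flags.raw != 0;          /* any flag set => update needed */
}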
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index e59532d98cb4..0d210104ba0a 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -229,7 +229,9 @@ struct dc_panel_patch {
unsigned int extra_t12_ms;
unsigned int extra_delay_backlight_off;
unsigned int extra_t7_ms;
- unsigned int manage_secondary_link;
+ unsigned int skip_scdc_overwrite;
+ unsigned int delay_ignore_msa;
+ unsigned int disable_fec;
};
struct dc_edid_caps {
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index fdf3d8f87eee..fbfcff700971 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -29,7 +29,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o \
-dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o
+dce_i2c.o dce_i2c_hw.o dce_i2c_sw.o dmub_psr.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
index f1a5d2c6aa37..743042d5905a 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -400,7 +400,7 @@ static bool acquire(
{
enum gpio_result result;
- if (!is_engine_available(engine))
+ if ((engine == NULL) || !is_engine_available(engine))
return false;
result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
@@ -645,7 +645,7 @@ bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
case AUX_TRANSACTION_REPLY_AUX_DEFER:
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
retry_on_defer = true;
- /* fall through */
+ fallthrough;
case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES) {
goto fail;
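The /* fall through */ comments are being replaced with the fallthrough pseudo-keyword, which the kernel defines in <linux/compiler_attributes.h> roughly as below, so the intent is checked by the compiler rather than inferred from comments:

#if __has_attribute(__fallthrough__)
# define fallthrough    __attribute__((__fallthrough__))
#else
# define fallthrough    do {} while (0) /* fallthrough */
#endif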
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 30d953acd016..f0cebe721bcc 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -378,6 +378,11 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
const struct dc_config *config = &dmcu->ctx->dc->config;
bool status = false;
+ struct dc_context *ctx = dmcu->ctx;
+ unsigned int i;
+ // 5 4 3 2 1 0
+ // F E D C B A - bit 0 is A, bit 5 is F
+ unsigned int tx_interrupt_mask = 0;
PERF_TRACE();
/* Definition of DC_DMCU_SCRATCH
@@ -387,6 +392,15 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
*/
dmcu->dmcu_state = REG_READ(DC_DMCU_SCRATCH);
+ for (i = 0; i < ctx->dc->link_count; i++) {
+ if (ctx->dc->links[i]->link_enc->features.flags.bits.DP_IS_USB_C) {
+ if (ctx->dc->links[i]->link_enc->transmitter >= TRANSMITTER_UNIPHY_A &&
+ ctx->dc->links[i]->link_enc->transmitter <= TRANSMITTER_UNIPHY_F) {
+ tx_interrupt_mask |= 1 << ctx->dc->links[i]->link_enc->transmitter;
+ }
+ }
+ }
+
switch (dmcu->dmcu_state) {
case DMCU_UNLOADED:
status = false;
@@ -401,6 +415,8 @@ static bool dcn10_dmcu_init(struct dmcu *dmcu)
/* Set backlight ramping stepsize */
REG_WRITE(MASTER_COMM_DATA_REG2, abm_gain_stepsize);
+ REG_WRITE(MASTER_COMM_DATA_REG3, tx_interrupt_mask);
+
/* Set command to initialize microcontroller */
REG_UPDATE(MASTER_COMM_CMD_REG, MASTER_COMM_CMD_REG_BYTE0,
MCP_INIT_DMCU);
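The loop added above builds a per-PHY interrupt mask for USB-C links, one bit per UNIPHY transmitter (bit 0 = A ... bit 5 = F), and hands it to the DMCU via MASTER_COMM_DATA_REG3. The same computation as a standalone sketch, assuming the TRANSMITTER_UNIPHY_* enum values are contiguous starting at A:

static unsigned int build_usbc_tx_mask(const struct dc *dc)
{
        unsigned int i, mask = 0;

        for (i = 0; i < dc->link_count; i++) {
                const struct link_encoder *enc = dc->links[i]->link_enc;

                if (enc->features.flags.bits.DP_IS_USB_C &&
                    enc->transmitter >= TRANSMITTER_UNIPHY_A &&
                    enc->transmitter <= TRANSMITTER_UNIPHY_F)
                        mask |= 1u << enc->transmitter;
        }
        return mask;
}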
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
index 066188ba7949..24adec407972 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_i2c_hw.c
@@ -267,6 +267,9 @@ static void set_speed(
uint32_t xtal_ref_div = 0;
uint32_t prescale = 0;
+ if (speed == 0)
+ return;
+
REG_GET(MICROSECOND_TIME_BASE_DIV, XTAL_REF_DIV, &xtal_ref_div);
if (xtal_ref_div == 0)
@@ -274,17 +277,15 @@ static void set_speed(
prescale = ((dce_i2c_hw->reference_frequency * 2) / xtal_ref_div) / speed;
- if (speed) {
- if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
- REG_UPDATE_N(SPEED, 3,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2:1);
- else
- REG_UPDATE_N(SPEED, 2,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
- FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
- }
+ if (dce_i2c_hw->masks->DC_I2C_DDC1_START_STOP_TIMING_CNTL)
+ REG_UPDATE_N(SPEED, 3,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_START_STOP_TIMING_CNTL), speed > 50 ? 2 : 1);
+ else
+ REG_UPDATE_N(SPEED, 2,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_PRESCALE), prescale,
+ FN(DC_I2C_DDC1_SPEED, DC_I2C_DDC1_THRESHOLD), 2);
}
static bool setup_engine(
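The set_speed() rewrite replaces the old if (speed) wrapper with an early return before the prescale division, guarding the divide-by-zero and flattening the register-write branches. The control flow reduces to the sketch below (the helper name is illustrative; the xtal_ref_div fallback of 2 mirrors the surrounding code):

static unsigned int compute_prescale(unsigned int ref_freq,
                                     unsigned int xtal_ref_div,
                                     unsigned int speed)
{
        if (speed == 0)         /* early return: nothing to program */
                return 0;
        if (xtal_ref_div == 0)  /* fallback taken from the existing code */
                xtal_ref_div = 2;

        return ((ref_freq * 2) / xtal_ref_div) / speed;
}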
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
index 8aa937f496c4..51481e922eb9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_mem_input.c
@@ -479,7 +479,7 @@ static void program_grph_pixel_format(
case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F:
sign = 1;
floating = 1;
- /* fall through */
+ fallthrough;
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: /* shouldn't this get float too? */
case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616:
grph_depth = 3;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
index 48862bebf29e..7311f312369f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters.c
@@ -22,1004 +22,1330 @@
* Authors: AMD
*
*/
-
#include "transform.h"
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 16
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_16p[18] = {
- 4096, 0,
- 3840, 256,
- 3584, 512,
- 3328, 768,
- 3072, 1024,
- 2816, 1280,
- 2560, 1536,
- 2304, 1792,
- 2048, 2048
+ 0x1000, 0x0000,
+ 0x0FF0, 0x0010,
+ 0x0FB0, 0x0050,
+ 0x0F34, 0x00CC,
+ 0x0E68, 0x0198,
+ 0x0D44, 0x02BC,
+ 0x0BC4, 0x043C,
+ 0x09FC, 0x0604,
+ 0x0800, 0x0800
};
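/*
 * Editorial note on the rewritten tables: the coefficients are now listed
 * in hex as s1.12 two's-complement values (see the <CoefOut> header), so
 * 0x1000 is +1.0 and values above 0x2000 are small negative lobes.  A
 * decoding sketch, assuming a 14-bit signed representation:
 *
 *      static inline int scl_coef_to_int(uint16_t c)
 *      {
 *              // sign-extend the 14-bit s1.12 value
 *              return (c & 0x2000) ? (int)c - 0x4000 : (int)c;
 *      }
 *
 * Example: scl_coef_to_int(0x3FD4) == -44, i.e. -44/4096 ~ -0.0107, which
 * matches old-style decimal entries such as 16340 (== 16384 - 44).
 */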
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_upscale[27] = {
- 2048, 2048, 0,
- 1708, 2424, 16348,
- 1372, 2796, 16308,
- 1056, 3148, 16272,
- 768, 3464, 16244,
- 512, 3728, 16236,
- 296, 3928, 16252,
- 124, 4052, 16296,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_16p_117[27] = {
- 2048, 2048, 0,
- 1824, 2276, 16376,
- 1600, 2496, 16380,
- 1376, 2700, 16,
- 1156, 2880, 52,
- 948, 3032, 108,
- 756, 3144, 192,
- 580, 3212, 296,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_116[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_16p_150[27] = {
- 2048, 2048, 0,
- 1872, 2184, 36,
- 1692, 2308, 88,
- 1516, 2420, 156,
- 1340, 2516, 236,
- 1168, 2592, 328,
- 1004, 2648, 440,
- 844, 2684, 560,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_16p_149[27] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0660, 0x098C, 0x0014,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_16p_183[27] = {
- 2048, 2048, 0,
- 1892, 2104, 92,
- 1744, 2152, 196,
- 1592, 2196, 300,
- 1448, 2232, 412,
- 1304, 2256, 528,
- 1168, 2276, 648,
- 1032, 2288, 772,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x0754, 0x0880, 0x002C,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x036C, 0x0A40, 0x0254,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_upscale[36] = {
- 0, 4096, 0, 0,
- 16240, 4056, 180, 16380,
- 16136, 3952, 404, 16364,
- 16072, 3780, 664, 16344,
- 16040, 3556, 952, 16312,
- 16036, 3284, 1268, 16272,
- 16052, 2980, 1604, 16224,
- 16084, 2648, 1952, 16176,
- 16128, 2304, 2304, 16128
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
};
-static const uint16_t filter_4tap_16p_117[36] = {
- 428, 3236, 428, 0,
- 276, 3232, 604, 16364,
- 148, 3184, 800, 16340,
- 44, 3104, 1016, 16312,
- 16344, 2984, 1244, 16284,
- 16284, 2832, 1488, 16256,
- 16244, 2648, 1732, 16236,
- 16220, 2440, 1976, 16220,
- 16212, 2216, 2216, 16212
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_116[36] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
};
-static const uint16_t filter_4tap_16p_150[36] = {
- 696, 2700, 696, 0,
- 560, 2700, 848, 16364,
- 436, 2676, 1008, 16348,
- 328, 2628, 1180, 16336,
- 232, 2556, 1356, 16328,
- 152, 2460, 1536, 16328,
- 84, 2344, 1716, 16332,
- 28, 2208, 1888, 16348,
- 16376, 2052, 2052, 16376
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_16p_149[36] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 16
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_16p_183[36] = {
- 940, 2208, 940, 0,
- 832, 2200, 1052, 4,
- 728, 2180, 1164, 16,
- 628, 2148, 1280, 36,
- 536, 2100, 1392, 60,
- 448, 2044, 1504, 92,
- 368, 1976, 1612, 132,
- 296, 1900, 1716, 176,
- 232, 1812, 1812, 232
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
};
+//=========================================
+// <num_taps> = 2
+// <num_phases> = 64
+// <scale_ratio> = 0.833333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = s1.10
+// <CoefOut> = s1.12
+//=========================================
static const uint16_t filter_2tap_64p[66] = {
- 4096, 0,
- 4032, 64,
- 3968, 128,
- 3904, 192,
- 3840, 256,
- 3776, 320,
- 3712, 384,
- 3648, 448,
- 3584, 512,
- 3520, 576,
- 3456, 640,
- 3392, 704,
- 3328, 768,
- 3264, 832,
- 3200, 896,
- 3136, 960,
- 3072, 1024,
- 3008, 1088,
- 2944, 1152,
- 2880, 1216,
- 2816, 1280,
- 2752, 1344,
- 2688, 1408,
- 2624, 1472,
- 2560, 1536,
- 2496, 1600,
- 2432, 1664,
- 2368, 1728,
- 2304, 1792,
- 2240, 1856,
- 2176, 1920,
- 2112, 1984,
- 2048, 2048 };
+ 0x1000, 0x0000,
+ 0x1000, 0x0000,
+ 0x0FFC, 0x0004,
+ 0x0FF8, 0x0008,
+ 0x0FF0, 0x0010,
+ 0x0FE4, 0x001C,
+ 0x0FD8, 0x0028,
+ 0x0FC4, 0x003C,
+ 0x0FB0, 0x0050,
+ 0x0F98, 0x0068,
+ 0x0F7C, 0x0084,
+ 0x0F58, 0x00A8,
+ 0x0F34, 0x00CC,
+ 0x0F08, 0x00F8,
+ 0x0ED8, 0x0128,
+ 0x0EA4, 0x015C,
+ 0x0E68, 0x0198,
+ 0x0E28, 0x01D8,
+ 0x0DE4, 0x021C,
+ 0x0D98, 0x0268,
+ 0x0D44, 0x02BC,
+ 0x0CEC, 0x0314,
+ 0x0C90, 0x0370,
+ 0x0C2C, 0x03D4,
+ 0x0BC4, 0x043C,
+ 0x0B58, 0x04A8,
+ 0x0AE8, 0x0518,
+ 0x0A74, 0x058C,
+ 0x09FC, 0x0604,
+ 0x0980, 0x0680,
+ 0x0900, 0x0700,
+ 0x0880, 0x0780,
+ 0x0800, 0x0800
+};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_upscale[99] = {
- 2048, 2048, 0,
- 1960, 2140, 16376,
- 1876, 2236, 16364,
- 1792, 2328, 16356,
- 1708, 2424, 16348,
- 1620, 2516, 16336,
- 1540, 2612, 16328,
- 1456, 2704, 16316,
- 1372, 2796, 16308,
- 1292, 2884, 16296,
- 1212, 2976, 16288,
- 1136, 3060, 16280,
- 1056, 3148, 16272,
- 984, 3228, 16264,
- 908, 3312, 16256,
- 836, 3388, 16248,
- 768, 3464, 16244,
- 700, 3536, 16240,
- 636, 3604, 16236,
- 572, 3668, 16236,
- 512, 3728, 16236,
- 456, 3784, 16236,
- 400, 3836, 16240,
- 348, 3884, 16244,
- 296, 3928, 16252,
- 252, 3964, 16260,
- 204, 4000, 16268,
- 164, 4028, 16284,
- 124, 4052, 16296,
- 88, 4072, 16316,
- 56, 4084, 16336,
- 24, 4092, 16356,
- 0, 4096, 0
+ 0x0804, 0x07FC, 0x0000,
+ 0x07A8, 0x0860, 0x3FF8,
+ 0x0754, 0x08BC, 0x3FF0,
+ 0x0700, 0x0918, 0x3FE8,
+ 0x06AC, 0x0978, 0x3FDC,
+ 0x0654, 0x09D8, 0x3FD4,
+ 0x0604, 0x0A34, 0x3FC8,
+ 0x05B0, 0x0A90, 0x3FC0,
+ 0x055C, 0x0AF0, 0x3FB4,
+ 0x050C, 0x0B48, 0x3FAC,
+ 0x04BC, 0x0BA0, 0x3FA4,
+ 0x0470, 0x0BF4, 0x3F9C,
+ 0x0420, 0x0C50, 0x3F90,
+ 0x03D8, 0x0C9C, 0x3F8C,
+ 0x038C, 0x0CF0, 0x3F84,
+ 0x0344, 0x0D40, 0x3F7C,
+ 0x0300, 0x0D88, 0x3F78,
+ 0x02BC, 0x0DD0, 0x3F74,
+ 0x027C, 0x0E14, 0x3F70,
+ 0x023C, 0x0E54, 0x3F70,
+ 0x0200, 0x0E90, 0x3F70,
+ 0x01C8, 0x0EC8, 0x3F70,
+ 0x0190, 0x0EFC, 0x3F74,
+ 0x015C, 0x0F2C, 0x3F78,
+ 0x0128, 0x0F5C, 0x3F7C,
+ 0x00FC, 0x0F7C, 0x3F88,
+ 0x00CC, 0x0FA4, 0x3F90,
+ 0x00A4, 0x0FC0, 0x3F9C,
+ 0x007C, 0x0FD8, 0x3FAC,
+ 0x0058, 0x0FE8, 0x3FC0,
+ 0x0038, 0x0FF4, 0x3FD4,
+ 0x0018, 0x1000, 0x3FE8,
+ 0x0000, 0x1000, 0x0000
};
-static const uint16_t filter_3tap_64p_117[99] = {
- 2048, 2048, 0,
- 1992, 2104, 16380,
- 1936, 2160, 16380,
- 1880, 2220, 16376,
- 1824, 2276, 16376,
- 1768, 2332, 16376,
- 1712, 2388, 16376,
- 1656, 2444, 16376,
- 1600, 2496, 16380,
- 1544, 2548, 0,
- 1488, 2600, 4,
- 1432, 2652, 8,
- 1376, 2700, 16,
- 1320, 2748, 20,
- 1264, 2796, 32,
- 1212, 2840, 40,
- 1156, 2880, 52,
- 1104, 2920, 64,
- 1052, 2960, 80,
- 1000, 2996, 92,
- 948, 3032, 108,
- 900, 3060, 128,
- 852, 3092, 148,
- 804, 3120, 168,
- 756, 3144, 192,
- 712, 3164, 216,
- 668, 3184, 240,
- 624, 3200, 268,
- 580, 3212, 296,
- 540, 3220, 328,
- 500, 3228, 360,
- 464, 3232, 392,
- 428, 3236, 428
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_116[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07C0, 0x0844, 0x3FFC,
+ 0x0780, 0x0888, 0x3FF8,
+ 0x0740, 0x08D0, 0x3FF0,
+ 0x0700, 0x0914, 0x3FEC,
+ 0x06C0, 0x0958, 0x3FE8,
+ 0x0684, 0x0998, 0x3FE4,
+ 0x0644, 0x09DC, 0x3FE0,
+ 0x0604, 0x0A1C, 0x3FE0,
+ 0x05C4, 0x0A5C, 0x3FE0,
+ 0x0588, 0x0A9C, 0x3FDC,
+ 0x0548, 0x0ADC, 0x3FDC,
+ 0x050C, 0x0B14, 0x3FE0,
+ 0x04CC, 0x0B54, 0x3FE0,
+ 0x0490, 0x0B8C, 0x3FE4,
+ 0x0458, 0x0BC0, 0x3FE8,
+ 0x041C, 0x0BF4, 0x3FF0,
+ 0x03E0, 0x0C28, 0x3FF8,
+ 0x03A8, 0x0C58, 0x0000,
+ 0x0374, 0x0C88, 0x0004,
+ 0x0340, 0x0CB0, 0x0010,
+ 0x0308, 0x0CD8, 0x0020,
+ 0x02D8, 0x0CFC, 0x002C,
+ 0x02A0, 0x0D20, 0x0040,
+ 0x0274, 0x0D3C, 0x0050,
+ 0x0244, 0x0D58, 0x0064,
+ 0x0214, 0x0D70, 0x007C,
+ 0x01E8, 0x0D84, 0x0094,
+ 0x01C0, 0x0D94, 0x00AC,
+ 0x0198, 0x0DA0, 0x00C8,
+ 0x0170, 0x0DAC, 0x00E4,
+ 0x014C, 0x0DB0, 0x0104,
+ 0x0128, 0x0DB4, 0x0124
};
-static const uint16_t filter_3tap_64p_150[99] = {
- 2048, 2048, 0,
- 2004, 2080, 8,
- 1960, 2116, 16,
- 1916, 2148, 28,
- 1872, 2184, 36,
- 1824, 2216, 48,
- 1780, 2248, 60,
- 1736, 2280, 76,
- 1692, 2308, 88,
- 1648, 2336, 104,
- 1604, 2368, 120,
- 1560, 2392, 136,
- 1516, 2420, 156,
- 1472, 2444, 172,
- 1428, 2472, 192,
- 1384, 2492, 212,
- 1340, 2516, 236,
- 1296, 2536, 256,
- 1252, 2556, 280,
- 1212, 2576, 304,
- 1168, 2592, 328,
- 1124, 2608, 356,
- 1084, 2624, 384,
- 1044, 2636, 412,
- 1004, 2648, 440,
- 964, 2660, 468,
- 924, 2668, 500,
- 884, 2676, 528,
- 844, 2684, 560,
- 808, 2688, 596,
- 768, 2692, 628,
- 732, 2696, 664,
- 696, 2696, 696
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_3tap_64p_149[99] = {
+ 0x0804, 0x07FC, 0x0000,
+ 0x07CC, 0x0834, 0x0000,
+ 0x0798, 0x0868, 0x0000,
+ 0x0764, 0x089C, 0x0000,
+ 0x0730, 0x08CC, 0x0004,
+ 0x0700, 0x08FC, 0x0004,
+ 0x06CC, 0x092C, 0x0008,
+ 0x0698, 0x095C, 0x000C,
+ 0x0660, 0x098C, 0x0014,
+ 0x062C, 0x09B8, 0x001C,
+ 0x05FC, 0x09E4, 0x0020,
+ 0x05C4, 0x0A10, 0x002C,
+ 0x0590, 0x0A3C, 0x0034,
+ 0x055C, 0x0A64, 0x0040,
+ 0x0528, 0x0A8C, 0x004C,
+ 0x04F8, 0x0AB0, 0x0058,
+ 0x04C4, 0x0AD4, 0x0068,
+ 0x0490, 0x0AF8, 0x0078,
+ 0x0460, 0x0B18, 0x0088,
+ 0x0430, 0x0B38, 0x0098,
+ 0x0400, 0x0B54, 0x00AC,
+ 0x03D0, 0x0B6C, 0x00C4,
+ 0x03A0, 0x0B88, 0x00D8,
+ 0x0374, 0x0B9C, 0x00F0,
+ 0x0348, 0x0BB0, 0x0108,
+ 0x0318, 0x0BC4, 0x0124,
+ 0x02EC, 0x0BD4, 0x0140,
+ 0x02C4, 0x0BE0, 0x015C,
+ 0x029C, 0x0BEC, 0x0178,
+ 0x0274, 0x0BF4, 0x0198,
+ 0x024C, 0x0BFC, 0x01B8,
+ 0x0228, 0x0BFC, 0x01DC,
+ 0x0200, 0x0C00, 0x0200
};
+//=========================================
+// <num_taps> = 3
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_3tap_64p_183[99] = {
- 2048, 2048, 0,
- 2008, 2060, 20,
- 1968, 2076, 44,
- 1932, 2088, 68,
- 1892, 2104, 92,
- 1856, 2116, 120,
- 1816, 2128, 144,
- 1780, 2140, 168,
- 1744, 2152, 196,
- 1704, 2164, 220,
- 1668, 2176, 248,
- 1632, 2188, 272,
- 1592, 2196, 300,
- 1556, 2204, 328,
- 1520, 2216, 356,
- 1484, 2224, 384,
- 1448, 2232, 412,
- 1412, 2240, 440,
- 1376, 2244, 468,
- 1340, 2252, 496,
- 1304, 2256, 528,
- 1272, 2264, 556,
- 1236, 2268, 584,
- 1200, 2272, 616,
- 1168, 2276, 648,
- 1132, 2280, 676,
- 1100, 2284, 708,
- 1064, 2288, 740,
- 1032, 2288, 772,
- 996, 2292, 800,
- 964, 2292, 832,
- 932, 2292, 868,
- 900, 2292, 900
+ 0x0804, 0x07FC, 0x0000,
+ 0x07D4, 0x0824, 0x0008,
+ 0x07AC, 0x0840, 0x0014,
+ 0x0780, 0x0860, 0x0020,
+ 0x0754, 0x0880, 0x002C,
+ 0x0728, 0x089C, 0x003C,
+ 0x0700, 0x08B8, 0x0048,
+ 0x06D4, 0x08D4, 0x0058,
+ 0x06A8, 0x08F0, 0x0068,
+ 0x067C, 0x090C, 0x0078,
+ 0x0650, 0x0924, 0x008C,
+ 0x0628, 0x093C, 0x009C,
+ 0x05FC, 0x0954, 0x00B0,
+ 0x05D0, 0x096C, 0x00C4,
+ 0x05A8, 0x0980, 0x00D8,
+ 0x0578, 0x0998, 0x00F0,
+ 0x0550, 0x09AC, 0x0104,
+ 0x0528, 0x09BC, 0x011C,
+ 0x04FC, 0x09D0, 0x0134,
+ 0x04D4, 0x09E0, 0x014C,
+ 0x04A8, 0x09F0, 0x0168,
+ 0x0480, 0x09FC, 0x0184,
+ 0x045C, 0x0A08, 0x019C,
+ 0x0434, 0x0A14, 0x01B8,
+ 0x0408, 0x0A20, 0x01D8,
+ 0x03E0, 0x0A2C, 0x01F4,
+ 0x03B8, 0x0A34, 0x0214,
+ 0x0394, 0x0A38, 0x0234,
+ 0x036C, 0x0A40, 0x0254,
+ 0x0348, 0x0A44, 0x0274,
+ 0x0324, 0x0A48, 0x0294,
+ 0x0300, 0x0A48, 0x02B8,
+ 0x02DC, 0x0A48, 0x02DC
};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_upscale[132] = {
- 0, 4096, 0, 0,
- 16344, 4092, 40, 0,
- 16308, 4084, 84, 16380,
- 16272, 4072, 132, 16380,
- 16240, 4056, 180, 16380,
- 16212, 4036, 232, 16376,
- 16184, 4012, 288, 16372,
- 16160, 3984, 344, 16368,
- 16136, 3952, 404, 16364,
- 16116, 3916, 464, 16360,
- 16100, 3872, 528, 16356,
- 16084, 3828, 596, 16348,
- 16072, 3780, 664, 16344,
- 16060, 3728, 732, 16336,
- 16052, 3676, 804, 16328,
- 16044, 3616, 876, 16320,
- 16040, 3556, 952, 16312,
- 16036, 3492, 1028, 16300,
- 16032, 3424, 1108, 16292,
- 16032, 3356, 1188, 16280,
- 16036, 3284, 1268, 16272,
- 16036, 3212, 1352, 16260,
- 16040, 3136, 1436, 16248,
- 16044, 3056, 1520, 16236,
- 16052, 2980, 1604, 16224,
- 16060, 2896, 1688, 16212,
- 16064, 2816, 1776, 16200,
- 16076, 2732, 1864, 16188,
- 16084, 2648, 1952, 16176,
- 16092, 2564, 2040, 16164,
- 16104, 2476, 2128, 16152,
- 16116, 2388, 2216, 16140,
- 16128, 2304, 2304, 16128 };
+ 0x0000, 0x1000, 0x0000, 0x0000,
+ 0x3FDC, 0x0FFC, 0x0028, 0x0000,
+ 0x3FB4, 0x0FF8, 0x0054, 0x0000,
+ 0x3F94, 0x0FE8, 0x0084, 0x0000,
+ 0x3F74, 0x0FDC, 0x00B4, 0x3FFC,
+ 0x3F58, 0x0FC4, 0x00E8, 0x3FFC,
+ 0x3F3C, 0x0FAC, 0x0120, 0x3FF8,
+ 0x3F24, 0x0F90, 0x0158, 0x3FF4,
+ 0x3F0C, 0x0F70, 0x0194, 0x3FF0,
+ 0x3EF8, 0x0F4C, 0x01D0, 0x3FEC,
+ 0x3EE8, 0x0F20, 0x0210, 0x3FE8,
+ 0x3ED8, 0x0EF4, 0x0254, 0x3FE0,
+ 0x3ECC, 0x0EC4, 0x0298, 0x3FD8,
+ 0x3EC0, 0x0E90, 0x02DC, 0x3FD4,
+ 0x3EB8, 0x0E58, 0x0324, 0x3FCC,
+ 0x3EB0, 0x0E20, 0x036C, 0x3FC4,
+ 0x3EAC, 0x0DE4, 0x03B8, 0x3FB8,
+ 0x3EA8, 0x0DA4, 0x0404, 0x3FB0,
+ 0x3EA4, 0x0D60, 0x0454, 0x3FA8,
+ 0x3EA4, 0x0D1C, 0x04A4, 0x3F9C,
+ 0x3EA4, 0x0CD8, 0x04F4, 0x3F90,
+ 0x3EA8, 0x0C88, 0x0548, 0x3F88,
+ 0x3EAC, 0x0C3C, 0x059C, 0x3F7C,
+ 0x3EB0, 0x0BF0, 0x05F0, 0x3F70,
+ 0x3EB8, 0x0BA0, 0x0644, 0x3F64,
+ 0x3EBC, 0x0B54, 0x0698, 0x3F58,
+ 0x3EC4, 0x0B00, 0x06F0, 0x3F4C,
+ 0x3ECC, 0x0AAC, 0x0748, 0x3F40,
+ 0x3ED8, 0x0A54, 0x07A0, 0x3F34,
+ 0x3EE0, 0x0A04, 0x07F8, 0x3F24,
+ 0x3EEC, 0x09AC, 0x0850, 0x3F18,
+ 0x3EF8, 0x0954, 0x08A8, 0x3F0C,
+ 0x3F00, 0x08FC, 0x0900, 0x3F04
+};
-static const uint16_t filter_4tap_64p_117[132] = {
- 420, 3248, 420, 0,
- 380, 3248, 464, 16380,
- 344, 3248, 508, 16372,
- 308, 3248, 552, 16368,
- 272, 3240, 596, 16364,
- 236, 3236, 644, 16356,
- 204, 3224, 692, 16352,
- 172, 3212, 744, 16344,
- 144, 3196, 796, 16340,
- 116, 3180, 848, 16332,
- 88, 3160, 900, 16324,
- 60, 3136, 956, 16320,
- 36, 3112, 1012, 16312,
- 16, 3084, 1068, 16304,
- 16380, 3056, 1124, 16296,
- 16360, 3024, 1184, 16292,
- 16340, 2992, 1244, 16284,
- 16324, 2956, 1304, 16276,
- 16308, 2920, 1364, 16268,
- 16292, 2880, 1424, 16264,
- 16280, 2836, 1484, 16256,
- 16268, 2792, 1548, 16252,
- 16256, 2748, 1608, 16244,
- 16248, 2700, 1668, 16240,
- 16240, 2652, 1732, 16232,
- 16232, 2604, 1792, 16228,
- 16228, 2552, 1856, 16224,
- 16220, 2500, 1916, 16220,
- 16216, 2444, 1980, 16216,
- 16216, 2388, 2040, 16216,
- 16212, 2332, 2100, 16212,
- 16212, 2276, 2160, 16212,
- 16212, 2220, 2220, 16212 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_116[132] = {
+ 0x01A8, 0x0CB4, 0x01A4, 0x0000,
+ 0x017C, 0x0CB8, 0x01D0, 0x3FFC,
+ 0x0158, 0x0CB8, 0x01F8, 0x3FF8,
+ 0x0130, 0x0CB4, 0x0228, 0x3FF4,
+ 0x0110, 0x0CB0, 0x0254, 0x3FEC,
+ 0x00EC, 0x0CA8, 0x0284, 0x3FE8,
+ 0x00CC, 0x0C9C, 0x02B4, 0x3FE4,
+ 0x00AC, 0x0C90, 0x02E8, 0x3FDC,
+ 0x0090, 0x0C80, 0x031C, 0x3FD4,
+ 0x0070, 0x0C70, 0x0350, 0x3FD0,
+ 0x0058, 0x0C5C, 0x0384, 0x3FC8,
+ 0x003C, 0x0C48, 0x03BC, 0x3FC0,
+ 0x0024, 0x0C2C, 0x03F4, 0x3FBC,
+ 0x0010, 0x0C10, 0x042C, 0x3FB4,
+ 0x3FFC, 0x0BF4, 0x0464, 0x3FAC,
+ 0x3FE8, 0x0BD4, 0x04A0, 0x3FA4,
+ 0x3FD8, 0x0BAC, 0x04DC, 0x3FA0,
+ 0x3FC4, 0x0B8C, 0x0518, 0x3F98,
+ 0x3FB4, 0x0B68, 0x0554, 0x3F90,
+ 0x3FA8, 0x0B40, 0x0590, 0x3F88,
+ 0x3F9C, 0x0B14, 0x05CC, 0x3F84,
+ 0x3F90, 0x0AEC, 0x0608, 0x3F7C,
+ 0x3F84, 0x0ABC, 0x0648, 0x3F78,
+ 0x3F7C, 0x0A90, 0x0684, 0x3F70,
+ 0x3F70, 0x0A60, 0x06C4, 0x3F6C,
+ 0x3F6C, 0x0A2C, 0x0700, 0x3F68,
+ 0x3F64, 0x09F8, 0x0740, 0x3F64,
+ 0x3F60, 0x09C4, 0x077C, 0x3F60,
+ 0x3F5C, 0x098C, 0x07BC, 0x3F5C,
+ 0x3F58, 0x0958, 0x07F8, 0x3F58,
+ 0x3F58, 0x091C, 0x0834, 0x3F58,
+ 0x3F54, 0x08E4, 0x0870, 0x3F58,
+ 0x3F54, 0x08AC, 0x08AC, 0x3F54
+};
-static const uint16_t filter_4tap_64p_150[132] = {
- 696, 2700, 696, 0,
- 660, 2704, 732, 16380,
- 628, 2704, 768, 16376,
- 596, 2704, 804, 16372,
- 564, 2700, 844, 16364,
- 532, 2696, 884, 16360,
- 500, 2692, 924, 16356,
- 472, 2684, 964, 16352,
- 440, 2676, 1004, 16352,
- 412, 2668, 1044, 16348,
- 384, 2656, 1088, 16344,
- 360, 2644, 1128, 16340,
- 332, 2632, 1172, 16336,
- 308, 2616, 1216, 16336,
- 284, 2600, 1260, 16332,
- 260, 2580, 1304, 16332,
- 236, 2560, 1348, 16328,
- 216, 2540, 1392, 16328,
- 196, 2516, 1436, 16328,
- 176, 2492, 1480, 16324,
- 156, 2468, 1524, 16324,
- 136, 2440, 1568, 16328,
- 120, 2412, 1612, 16328,
- 104, 2384, 1656, 16328,
- 88, 2352, 1700, 16332,
- 72, 2324, 1744, 16332,
- 60, 2288, 1788, 16336,
- 48, 2256, 1828, 16340,
- 36, 2220, 1872, 16344,
- 24, 2184, 1912, 16352,
- 12, 2148, 1952, 16356,
- 4, 2112, 1996, 16364,
- 16380, 2072, 2036, 16372 };
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_4tap_64p_149[132] = {
+ 0x02B8, 0x0A90, 0x02B8, 0x0000,
+ 0x0294, 0x0A94, 0x02DC, 0x3FFC,
+ 0x0274, 0x0A94, 0x0300, 0x3FF8,
+ 0x0250, 0x0A94, 0x0328, 0x3FF4,
+ 0x0230, 0x0A90, 0x0350, 0x3FF0,
+ 0x0214, 0x0A8C, 0x0374, 0x3FEC,
+ 0x01F0, 0x0A88, 0x03A0, 0x3FE8,
+ 0x01D4, 0x0A80, 0x03C8, 0x3FE4,
+ 0x01B8, 0x0A78, 0x03F0, 0x3FE0,
+ 0x0198, 0x0A70, 0x041C, 0x3FDC,
+ 0x0180, 0x0A64, 0x0444, 0x3FD8,
+ 0x0164, 0x0A54, 0x0470, 0x3FD8,
+ 0x0148, 0x0A48, 0x049C, 0x3FD4,
+ 0x0130, 0x0A38, 0x04C8, 0x3FD0,
+ 0x0118, 0x0A24, 0x04F4, 0x3FD0,
+ 0x0100, 0x0A14, 0x0520, 0x3FCC,
+ 0x00E8, 0x0A00, 0x054C, 0x3FCC,
+ 0x00D4, 0x09E8, 0x057C, 0x3FC8,
+ 0x00C0, 0x09D0, 0x05A8, 0x3FC8,
+ 0x00AC, 0x09B8, 0x05D4, 0x3FC8,
+ 0x0098, 0x09A0, 0x0600, 0x3FC8,
+ 0x0084, 0x0984, 0x0630, 0x3FC8,
+ 0x0074, 0x0964, 0x065C, 0x3FCC,
+ 0x0064, 0x0948, 0x0688, 0x3FCC,
+ 0x0054, 0x0928, 0x06B4, 0x3FD0,
+ 0x0044, 0x0908, 0x06E0, 0x3FD4,
+ 0x0038, 0x08E8, 0x070C, 0x3FD4,
+ 0x002C, 0x08C4, 0x0738, 0x3FD8,
+ 0x001C, 0x08A4, 0x0760, 0x3FE0,
+ 0x0014, 0x087C, 0x078C, 0x3FE4,
+ 0x0008, 0x0858, 0x07B4, 0x3FEC,
+ 0x0000, 0x0830, 0x07DC, 0x3FF4,
+ 0x3FFC, 0x0804, 0x0804, 0x3FFC
+};
+//=========================================
+// <num_taps> = 4
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_4tap_64p_183[132] = {
- 944, 2204, 944, 0,
- 916, 2204, 972, 0,
- 888, 2200, 996, 0,
- 860, 2200, 1024, 4,
- 832, 2196, 1052, 4,
- 808, 2192, 1080, 8,
- 780, 2188, 1108, 12,
- 756, 2180, 1140, 12,
- 728, 2176, 1168, 16,
- 704, 2168, 1196, 20,
- 680, 2160, 1224, 24,
- 656, 2152, 1252, 28,
- 632, 2144, 1280, 36,
- 608, 2132, 1308, 40,
- 584, 2120, 1336, 48,
- 560, 2112, 1364, 52,
- 536, 2096, 1392, 60,
- 516, 2084, 1420, 68,
- 492, 2072, 1448, 76,
- 472, 2056, 1476, 84,
- 452, 2040, 1504, 92,
- 428, 2024, 1532, 100,
- 408, 2008, 1560, 112,
- 392, 1992, 1584, 120,
- 372, 1972, 1612, 132,
- 352, 1956, 1636, 144,
- 336, 1936, 1664, 156,
- 316, 1916, 1688, 168,
- 300, 1896, 1712, 180,
- 284, 1876, 1736, 192,
- 268, 1852, 1760, 208,
- 252, 1832, 1784, 220,
- 236, 1808, 1808, 236 };
+ 0x03B0, 0x08A0, 0x03B0, 0x0000,
+ 0x0394, 0x08A0, 0x03CC, 0x0000,
+ 0x037C, 0x089C, 0x03E8, 0x0000,
+ 0x0360, 0x089C, 0x0400, 0x0004,
+ 0x0348, 0x0898, 0x041C, 0x0004,
+ 0x032C, 0x0894, 0x0438, 0x0008,
+ 0x0310, 0x0890, 0x0454, 0x000C,
+ 0x02F8, 0x0888, 0x0474, 0x000C,
+ 0x02DC, 0x0884, 0x0490, 0x0010,
+ 0x02C4, 0x087C, 0x04AC, 0x0014,
+ 0x02AC, 0x0874, 0x04C8, 0x0018,
+ 0x0290, 0x086C, 0x04E4, 0x0020,
+ 0x0278, 0x0864, 0x0500, 0x0024,
+ 0x0264, 0x0858, 0x051C, 0x0028,
+ 0x024C, 0x084C, 0x0538, 0x0030,
+ 0x0234, 0x0844, 0x0554, 0x0034,
+ 0x021C, 0x0838, 0x0570, 0x003C,
+ 0x0208, 0x0828, 0x058C, 0x0044,
+ 0x01F0, 0x081C, 0x05A8, 0x004C,
+ 0x01DC, 0x080C, 0x05C4, 0x0054,
+ 0x01C8, 0x07FC, 0x05E0, 0x005C,
+ 0x01B4, 0x07EC, 0x05FC, 0x0064,
+ 0x019C, 0x07DC, 0x0618, 0x0070,
+ 0x018C, 0x07CC, 0x0630, 0x0078,
+ 0x0178, 0x07B8, 0x064C, 0x0084,
+ 0x0164, 0x07A8, 0x0664, 0x0090,
+ 0x0150, 0x0794, 0x0680, 0x009C,
+ 0x0140, 0x0780, 0x0698, 0x00A8,
+ 0x0130, 0x076C, 0x06B0, 0x00B4,
+ 0x0120, 0x0758, 0x06C8, 0x00C0,
+ 0x0110, 0x0740, 0x06E0, 0x00D0,
+ 0x0100, 0x072C, 0x06F8, 0x00DC,
+ 0x00F0, 0x0714, 0x0710, 0x00EC
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_upscale[165] = {
- 15936, 2496, 2496, 15936, 0,
- 15948, 2404, 2580, 15924, 0,
- 15960, 2312, 2664, 15912, 4,
- 15976, 2220, 2748, 15904, 8,
- 15992, 2128, 2832, 15896, 12,
- 16004, 2036, 2912, 15888, 16,
- 16020, 1944, 2992, 15880, 20,
- 16036, 1852, 3068, 15876, 20,
- 16056, 1760, 3140, 15876, 24,
- 16072, 1668, 3216, 15872, 28,
- 16088, 1580, 3284, 15872, 32,
- 16104, 1492, 3352, 15876, 32,
- 16120, 1404, 3420, 15876, 36,
- 16140, 1316, 3480, 15884, 40,
- 16156, 1228, 3540, 15892, 40,
- 16172, 1144, 3600, 15900, 40,
- 16188, 1060, 3652, 15908, 44,
- 16204, 980, 3704, 15924, 44,
- 16220, 900, 3756, 15936, 44,
- 16236, 824, 3800, 15956, 44,
- 16248, 744, 3844, 15972, 44,
- 16264, 672, 3884, 15996, 44,
- 16276, 600, 3920, 16020, 44,
- 16292, 528, 3952, 16044, 40,
- 16304, 460, 3980, 16072, 40,
- 16316, 396, 4008, 16104, 36,
- 16328, 332, 4032, 16136, 32,
- 16336, 272, 4048, 16172, 28,
- 16348, 212, 4064, 16208, 24,
- 16356, 156, 4080, 16248, 16,
- 16368, 100, 4088, 16292, 12,
- 16376, 48, 4092, 16336, 4,
- 0, 0, 4096, 0, 0 };
+ 0x3E40, 0x09C0, 0x09C0, 0x3E40, 0x0000,
+ 0x3E50, 0x0964, 0x0A18, 0x3E34, 0x0000,
+ 0x3E5C, 0x0908, 0x0A6C, 0x3E2C, 0x0004,
+ 0x3E6C, 0x08AC, 0x0AC0, 0x3E20, 0x0008,
+ 0x3E78, 0x0850, 0x0B14, 0x3E18, 0x000C,
+ 0x3E88, 0x07F4, 0x0B60, 0x3E14, 0x0010,
+ 0x3E98, 0x0798, 0x0BB0, 0x3E0C, 0x0014,
+ 0x3EA8, 0x073C, 0x0C00, 0x3E08, 0x0014,
+ 0x3EB8, 0x06E4, 0x0C48, 0x3E04, 0x0018,
+ 0x3ECC, 0x0684, 0x0C90, 0x3E04, 0x001C,
+ 0x3EDC, 0x062C, 0x0CD4, 0x3E04, 0x0020,
+ 0x3EEC, 0x05D4, 0x0D1C, 0x3E04, 0x0020,
+ 0x3EFC, 0x057C, 0x0D5C, 0x3E08, 0x0024,
+ 0x3F0C, 0x0524, 0x0D98, 0x3E10, 0x0028,
+ 0x3F20, 0x04CC, 0x0DD8, 0x3E14, 0x0028,
+ 0x3F30, 0x0478, 0x0E14, 0x3E1C, 0x0028,
+ 0x3F40, 0x0424, 0x0E48, 0x3E28, 0x002C,
+ 0x3F50, 0x03D4, 0x0E7C, 0x3E34, 0x002C,
+ 0x3F60, 0x0384, 0x0EAC, 0x3E44, 0x002C,
+ 0x3F6C, 0x0338, 0x0EDC, 0x3E54, 0x002C,
+ 0x3F7C, 0x02E8, 0x0F08, 0x3E68, 0x002C,
+ 0x3F8C, 0x02A0, 0x0F2C, 0x3E7C, 0x002C,
+ 0x3F98, 0x0258, 0x0F50, 0x3E94, 0x002C,
+ 0x3FA4, 0x0210, 0x0F74, 0x3EB0, 0x0028,
+ 0x3FB0, 0x01CC, 0x0F90, 0x3ECC, 0x0028,
+ 0x3FC0, 0x018C, 0x0FA8, 0x3EE8, 0x0024,
+ 0x3FC8, 0x014C, 0x0FC0, 0x3F0C, 0x0020,
+ 0x3FD4, 0x0110, 0x0FD4, 0x3F2C, 0x001C,
+ 0x3FE0, 0x00D4, 0x0FE0, 0x3F54, 0x0018,
+ 0x3FE8, 0x009C, 0x0FF0, 0x3F7C, 0x0010,
+ 0x3FF0, 0x0064, 0x0FFC, 0x3FA4, 0x000C,
+ 0x3FFC, 0x0030, 0x0FFC, 0x3FD4, 0x0004,
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000
+};
-static const uint16_t filter_5tap_64p_117[165] = {
- 16056, 2372, 2372, 16056, 0,
- 16052, 2312, 2432, 16060, 0,
- 16052, 2252, 2488, 16064, 0,
- 16052, 2188, 2548, 16072, 0,
- 16052, 2124, 2600, 16076, 0,
- 16052, 2064, 2656, 16088, 0,
- 16052, 2000, 2708, 16096, 0,
- 16056, 1932, 2760, 16108, 0,
- 16060, 1868, 2808, 16120, 0,
- 16064, 1804, 2856, 16132, 0,
- 16068, 1740, 2904, 16148, 16380,
- 16076, 1676, 2948, 16164, 16380,
- 16080, 1612, 2992, 16180, 16376,
- 16088, 1544, 3032, 16200, 16372,
- 16096, 1480, 3072, 16220, 16372,
- 16104, 1420, 3108, 16244, 16368,
- 16112, 1356, 3144, 16268, 16364,
- 16120, 1292, 3180, 16292, 16360,
- 16128, 1232, 3212, 16320, 16356,
- 16136, 1168, 3240, 16344, 16352,
- 16144, 1108, 3268, 16376, 16344,
- 16156, 1048, 3292, 20, 16340,
- 16164, 988, 3316, 52, 16332,
- 16172, 932, 3336, 88, 16328,
- 16184, 872, 3356, 124, 16320,
- 16192, 816, 3372, 160, 16316,
- 16204, 760, 3388, 196, 16308,
- 16212, 708, 3400, 236, 16300,
- 16220, 656, 3412, 276, 16292,
- 16232, 604, 3420, 320, 16284,
- 16240, 552, 3424, 364, 16276,
- 16248, 504, 3428, 408, 16268,
- 16256, 456, 3428, 456, 16256 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_116[165] = {
+ 0x3EDC, 0x0924, 0x0924, 0x3EDC, 0x0000,
+ 0x3ED8, 0x08EC, 0x095C, 0x3EE0, 0x0000,
+ 0x3ED4, 0x08B0, 0x0994, 0x3EE8, 0x0000,
+ 0x3ED0, 0x0878, 0x09C8, 0x3EF0, 0x0000,
+ 0x3ED0, 0x083C, 0x09FC, 0x3EF8, 0x0000,
+ 0x3ED0, 0x0800, 0x0A2C, 0x3F04, 0x0000,
+ 0x3ED0, 0x07C4, 0x0A5C, 0x3F10, 0x0000,
+ 0x3ED0, 0x0788, 0x0A8C, 0x3F1C, 0x0000,
+ 0x3ED0, 0x074C, 0x0AC0, 0x3F28, 0x3FFC,
+ 0x3ED4, 0x0710, 0x0AE8, 0x3F38, 0x3FFC,
+ 0x3ED8, 0x06D0, 0x0B18, 0x3F48, 0x3FF8,
+ 0x3EDC, 0x0694, 0x0B3C, 0x3F5C, 0x3FF8,
+ 0x3EE0, 0x0658, 0x0B68, 0x3F6C, 0x3FF4,
+ 0x3EE4, 0x061C, 0x0B90, 0x3F80, 0x3FF0,
+ 0x3EEC, 0x05DC, 0x0BB4, 0x3F98, 0x3FEC,
+ 0x3EF0, 0x05A0, 0x0BD8, 0x3FB0, 0x3FE8,
+ 0x3EF8, 0x0564, 0x0BF8, 0x3FC8, 0x3FE4,
+ 0x3EFC, 0x0528, 0x0C1C, 0x3FE0, 0x3FE0,
+ 0x3F04, 0x04EC, 0x0C38, 0x3FFC, 0x3FDC,
+ 0x3F0C, 0x04B4, 0x0C54, 0x0014, 0x3FD8,
+ 0x3F14, 0x047C, 0x0C70, 0x0030, 0x3FD0,
+ 0x3F1C, 0x0440, 0x0C88, 0x0050, 0x3FCC,
+ 0x3F24, 0x0408, 0x0CA0, 0x0070, 0x3FC4,
+ 0x3F2C, 0x03D0, 0x0CB0, 0x0094, 0x3FC0,
+ 0x3F34, 0x0398, 0x0CC4, 0x00B8, 0x3FB8,
+ 0x3F3C, 0x0364, 0x0CD4, 0x00DC, 0x3FB0,
+ 0x3F48, 0x032C, 0x0CE0, 0x0100, 0x3FAC,
+ 0x3F50, 0x02F8, 0x0CEC, 0x0128, 0x3FA4,
+ 0x3F58, 0x02C4, 0x0CF8, 0x0150, 0x3F9C,
+ 0x3F60, 0x0290, 0x0D00, 0x017C, 0x3F94,
+ 0x3F68, 0x0260, 0x0D04, 0x01A8, 0x3F8C,
+ 0x3F74, 0x0230, 0x0D04, 0x01D4, 0x3F84,
+ 0x3F7C, 0x0200, 0x0D08, 0x0200, 0x3F7C
+};
-static const uint16_t filter_5tap_64p_150[165] = {
- 16368, 2064, 2064, 16368, 0,
- 16352, 2028, 2100, 16380, 16380,
- 16340, 1996, 2132, 12, 16376,
- 16328, 1960, 2168, 24, 16376,
- 16316, 1924, 2204, 44, 16372,
- 16308, 1888, 2236, 60, 16368,
- 16296, 1848, 2268, 76, 16364,
- 16288, 1812, 2300, 96, 16360,
- 16280, 1772, 2328, 116, 16356,
- 16272, 1736, 2360, 136, 16352,
- 16268, 1696, 2388, 160, 16348,
- 16260, 1656, 2416, 180, 16344,
- 16256, 1616, 2440, 204, 16340,
- 16248, 1576, 2464, 228, 16336,
- 16244, 1536, 2492, 252, 16332,
- 16240, 1496, 2512, 276, 16324,
- 16240, 1456, 2536, 304, 16320,
- 16236, 1416, 2556, 332, 16316,
- 16232, 1376, 2576, 360, 16312,
- 16232, 1336, 2592, 388, 16308,
- 16232, 1296, 2612, 416, 16300,
- 16232, 1256, 2628, 448, 16296,
- 16232, 1216, 2640, 480, 16292,
- 16232, 1172, 2652, 512, 16288,
- 16232, 1132, 2664, 544, 16284,
- 16232, 1092, 2676, 576, 16280,
- 16236, 1056, 2684, 608, 16272,
- 16236, 1016, 2692, 644, 16268,
- 16240, 976, 2700, 680, 16264,
- 16240, 936, 2704, 712, 16260,
- 16244, 900, 2708, 748, 16256,
- 16248, 860, 2708, 788, 16252,
- 16248, 824, 2708, 824, 16248 };
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_5tap_64p_149[165] = {
+ 0x3FF4, 0x080C, 0x080C, 0x3FF4, 0x0000,
+ 0x3FE8, 0x07E8, 0x0830, 0x0000, 0x0000,
+ 0x3FDC, 0x07C8, 0x0850, 0x0010, 0x3FFC,
+ 0x3FD0, 0x07A4, 0x0878, 0x001C, 0x3FF8,
+ 0x3FC4, 0x0780, 0x0898, 0x0030, 0x3FF4,
+ 0x3FB8, 0x075C, 0x08B8, 0x0040, 0x3FF4,
+ 0x3FB0, 0x0738, 0x08D8, 0x0050, 0x3FF0,
+ 0x3FA8, 0x0710, 0x08F8, 0x0064, 0x3FEC,
+ 0x3FA0, 0x06EC, 0x0914, 0x0078, 0x3FE8,
+ 0x3F98, 0x06C4, 0x0934, 0x008C, 0x3FE4,
+ 0x3F90, 0x06A0, 0x094C, 0x00A4, 0x3FE0,
+ 0x3F8C, 0x0678, 0x0968, 0x00B8, 0x3FDC,
+ 0x3F84, 0x0650, 0x0984, 0x00D0, 0x3FD8,
+ 0x3F80, 0x0628, 0x099C, 0x00E8, 0x3FD4,
+ 0x3F7C, 0x0600, 0x09B8, 0x0100, 0x3FCC,
+ 0x3F78, 0x05D8, 0x09D0, 0x0118, 0x3FC8,
+ 0x3F74, 0x05B0, 0x09E4, 0x0134, 0x3FC4,
+ 0x3F70, 0x0588, 0x09F8, 0x0150, 0x3FC0,
+ 0x3F70, 0x0560, 0x0A08, 0x016C, 0x3FBC,
+ 0x3F6C, 0x0538, 0x0A20, 0x0188, 0x3FB4,
+ 0x3F6C, 0x0510, 0x0A30, 0x01A4, 0x3FB0,
+ 0x3F6C, 0x04E8, 0x0A3C, 0x01C4, 0x3FAC,
+ 0x3F6C, 0x04C0, 0x0A48, 0x01E4, 0x3FA8,
+ 0x3F6C, 0x0498, 0x0A58, 0x0200, 0x3FA4,
+ 0x3F6C, 0x0470, 0x0A60, 0x0224, 0x3FA0,
+ 0x3F6C, 0x0448, 0x0A70, 0x0244, 0x3F98,
+ 0x3F70, 0x0420, 0x0A78, 0x0264, 0x3F94,
+ 0x3F70, 0x03F8, 0x0A80, 0x0288, 0x3F90,
+ 0x3F74, 0x03D4, 0x0A84, 0x02A8, 0x3F8C,
+ 0x3F74, 0x03AC, 0x0A8C, 0x02CC, 0x3F88,
+ 0x3F78, 0x0384, 0x0A90, 0x02F0, 0x3F84,
+ 0x3F7C, 0x0360, 0x0A90, 0x0314, 0x3F80,
+ 0x3F7C, 0x033C, 0x0A90, 0x033C, 0x3F7C
+};
+//=========================================
+// <num_taps> = 5
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_5tap_64p_183[165] = {
- 228, 1816, 1816, 228, 0,
- 216, 1792, 1836, 248, 16380,
- 200, 1772, 1860, 264, 16376,
- 184, 1748, 1884, 280, 16376,
- 168, 1728, 1904, 300, 16372,
- 156, 1704, 1928, 316, 16368,
- 144, 1680, 1948, 336, 16364,
- 128, 1656, 1968, 356, 16364,
- 116, 1632, 1988, 376, 16360,
- 104, 1604, 2008, 396, 16356,
- 96, 1580, 2024, 416, 16356,
- 84, 1556, 2044, 440, 16352,
- 72, 1528, 2060, 460, 16348,
- 64, 1504, 2076, 484, 16348,
- 52, 1476, 2092, 504, 16344,
- 44, 1448, 2104, 528, 16344,
- 36, 1424, 2120, 552, 16340,
- 28, 1396, 2132, 576, 16340,
- 20, 1368, 2144, 600, 16340,
- 12, 1340, 2156, 624, 16336,
- 4, 1312, 2168, 652, 16336,
- 0, 1284, 2180, 676, 16336,
- 16376, 1256, 2188, 700, 16332,
- 16372, 1228, 2196, 728, 16332,
- 16368, 1200, 2204, 752, 16332,
- 16364, 1172, 2212, 780, 16332,
- 16356, 1144, 2216, 808, 16332,
- 16352, 1116, 2220, 836, 16332,
- 16352, 1084, 2224, 860, 16332,
- 16348, 1056, 2228, 888, 16336,
- 16344, 1028, 2232, 916, 16336,
- 16340, 1000, 2232, 944, 16336,
- 16340, 972, 2232, 972, 16340 };
+ 0x0168, 0x069C, 0x0698, 0x0164, 0x0000,
+ 0x0154, 0x068C, 0x06AC, 0x0174, 0x0000,
+ 0x0144, 0x0674, 0x06C0, 0x0188, 0x0000,
+ 0x0138, 0x0664, 0x06D0, 0x0198, 0x3FFC,
+ 0x0128, 0x0654, 0x06E0, 0x01A8, 0x3FFC,
+ 0x0118, 0x0640, 0x06F0, 0x01BC, 0x3FFC,
+ 0x010C, 0x0630, 0x0700, 0x01CC, 0x3FF8,
+ 0x00FC, 0x061C, 0x0710, 0x01E0, 0x3FF8,
+ 0x00F0, 0x060C, 0x071C, 0x01F0, 0x3FF8,
+ 0x00E4, 0x05F4, 0x072C, 0x0204, 0x3FF8,
+ 0x00D8, 0x05E4, 0x0738, 0x0218, 0x3FF4,
+ 0x00CC, 0x05D0, 0x0744, 0x022C, 0x3FF4,
+ 0x00C0, 0x05B8, 0x0754, 0x0240, 0x3FF4,
+ 0x00B4, 0x05A4, 0x0760, 0x0254, 0x3FF4,
+ 0x00A8, 0x0590, 0x076C, 0x0268, 0x3FF4,
+ 0x009C, 0x057C, 0x0778, 0x027C, 0x3FF4,
+ 0x0094, 0x0564, 0x0780, 0x0294, 0x3FF4,
+ 0x0088, 0x0550, 0x0788, 0x02A8, 0x3FF8,
+ 0x0080, 0x0538, 0x0794, 0x02BC, 0x3FF8,
+ 0x0074, 0x0524, 0x079C, 0x02D4, 0x3FF8,
+ 0x006C, 0x0510, 0x07A4, 0x02E8, 0x3FF8,
+ 0x0064, 0x04F4, 0x07AC, 0x0300, 0x3FFC,
+ 0x005C, 0x04E4, 0x07B0, 0x0314, 0x3FFC,
+ 0x0054, 0x04C8, 0x07B8, 0x032C, 0x0000,
+ 0x004C, 0x04B4, 0x07C0, 0x0340, 0x0000,
+ 0x0044, 0x04A0, 0x07C4, 0x0358, 0x0000,
+ 0x003C, 0x0488, 0x07C8, 0x0370, 0x0004,
+ 0x0038, 0x0470, 0x07CC, 0x0384, 0x0008,
+ 0x0030, 0x045C, 0x07D0, 0x039C, 0x0008,
+ 0x002C, 0x0444, 0x07D0, 0x03B4, 0x000C,
+ 0x0024, 0x042C, 0x07D4, 0x03CC, 0x0010,
+ 0x0020, 0x0414, 0x07D4, 0x03E0, 0x0018,
+ 0x001C, 0x03FC, 0x07D4, 0x03F8, 0x001C
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_upscale[198] = {
- 0, 0, 4092, 0, 0, 0,
- 12, 16332, 4092, 52, 16368, 0,
- 24, 16280, 4088, 108, 16356, 0,
- 36, 16236, 4080, 168, 16340, 0,
- 44, 16188, 4064, 228, 16324, 0,
- 56, 16148, 4052, 292, 16308, 0,
- 64, 16108, 4032, 356, 16292, 4,
- 72, 16072, 4008, 424, 16276, 4,
- 80, 16036, 3980, 492, 16256, 4,
- 88, 16004, 3952, 564, 16240, 8,
- 96, 15972, 3920, 636, 16220, 8,
- 100, 15944, 3884, 712, 16204, 12,
- 108, 15916, 3844, 788, 16184, 16,
- 112, 15896, 3800, 864, 16164, 20,
- 116, 15872, 3756, 944, 16144, 20,
- 120, 15852, 3708, 1024, 16124, 24,
- 120, 15836, 3656, 1108, 16104, 28,
- 124, 15824, 3600, 1192, 16084, 32,
- 124, 15808, 3544, 1276, 16064, 36,
- 124, 15800, 3484, 1360, 16044, 40,
- 128, 15792, 3420, 1448, 16024, 44,
- 128, 15784, 3352, 1536, 16004, 48,
- 124, 15780, 3288, 1624, 15988, 52,
- 124, 15776, 3216, 1712, 15968, 56,
- 124, 15776, 3144, 1800, 15948, 64,
- 120, 15776, 3068, 1888, 15932, 68,
- 120, 15780, 2992, 1976, 15912, 72,
- 116, 15784, 2916, 2064, 15896, 76,
- 112, 15792, 2836, 2152, 15880, 80,
- 108, 15796, 2752, 2244, 15868, 84,
- 104, 15804, 2672, 2328, 15852, 88,
- 104, 15816, 2588, 2416, 15840, 92,
- 100, 15828, 2504, 2504, 15828, 100 };
+ 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000,
+ 0x000C, 0x3FD0, 0x0FFC, 0x0034, 0x3FF4, 0x0000,
+ 0x0018, 0x3F9C, 0x0FF8, 0x006C, 0x3FE8, 0x0000,
+ 0x0024, 0x3F6C, 0x0FF0, 0x00A8, 0x3FD8, 0x0000,
+ 0x002C, 0x3F44, 0x0FE4, 0x00E4, 0x3FC8, 0x0000,
+ 0x0038, 0x3F18, 0x0FD4, 0x0124, 0x3FB8, 0x0000,
+ 0x0040, 0x3EF0, 0x0FC0, 0x0164, 0x3FA8, 0x0004,
+ 0x0048, 0x3EC8, 0x0FAC, 0x01A8, 0x3F98, 0x0004,
+ 0x0050, 0x3EA8, 0x0F94, 0x01EC, 0x3F84, 0x0004,
+ 0x0058, 0x3E84, 0x0F74, 0x0234, 0x3F74, 0x0008,
+ 0x0060, 0x3E68, 0x0F54, 0x027C, 0x3F60, 0x0008,
+ 0x0064, 0x3E4C, 0x0F30, 0x02C8, 0x3F4C, 0x000C,
+ 0x006C, 0x3E30, 0x0F04, 0x0314, 0x3F3C, 0x0010,
+ 0x0070, 0x3E18, 0x0EDC, 0x0360, 0x3F28, 0x0014,
+ 0x0074, 0x3E04, 0x0EB0, 0x03B0, 0x3F14, 0x0014,
+ 0x0078, 0x3DF0, 0x0E80, 0x0400, 0x3F00, 0x0018,
+ 0x0078, 0x3DE0, 0x0E4C, 0x0454, 0x3EEC, 0x001C,
+ 0x007C, 0x3DD0, 0x0E14, 0x04A8, 0x3ED8, 0x0020,
+ 0x007C, 0x3DC4, 0x0DDC, 0x04FC, 0x3EC4, 0x0024,
+ 0x007C, 0x3DBC, 0x0DA0, 0x0550, 0x3EB0, 0x0028,
+ 0x0080, 0x3DB4, 0x0D5C, 0x05A8, 0x3E9C, 0x002C,
+ 0x0080, 0x3DAC, 0x0D1C, 0x0600, 0x3E88, 0x0030,
+ 0x007C, 0x3DA8, 0x0CDC, 0x0658, 0x3E74, 0x0034,
+ 0x007C, 0x3DA4, 0x0C94, 0x06B0, 0x3E64, 0x0038,
+ 0x007C, 0x3DA4, 0x0C48, 0x0708, 0x3E50, 0x0040,
+ 0x0078, 0x3DA4, 0x0C00, 0x0760, 0x3E40, 0x0044,
+ 0x0078, 0x3DA8, 0x0BB4, 0x07B8, 0x3E2C, 0x0048,
+ 0x0074, 0x3DAC, 0x0B68, 0x0810, 0x3E1C, 0x004C,
+ 0x0070, 0x3DB4, 0x0B18, 0x0868, 0x3E0C, 0x0050,
+ 0x006C, 0x3DBC, 0x0AC4, 0x08C4, 0x3DFC, 0x0054,
+ 0x0068, 0x3DC4, 0x0A74, 0x0918, 0x3DF0, 0x0058,
+ 0x0068, 0x3DCC, 0x0A20, 0x0970, 0x3DE0, 0x005C,
+ 0x0064, 0x3DD4, 0x09C8, 0x09C8, 0x3DD4, 0x0064
+};
-static const uint16_t filter_6tap_64p_117[198] = {
- 16168, 476, 3568, 476, 16168, 0,
- 16180, 428, 3564, 528, 16156, 0,
- 16192, 376, 3556, 584, 16144, 4,
- 16204, 328, 3548, 636, 16128, 4,
- 16216, 280, 3540, 692, 16116, 8,
- 16228, 232, 3524, 748, 16104, 12,
- 16240, 188, 3512, 808, 16092, 12,
- 16252, 148, 3492, 864, 16080, 16,
- 16264, 104, 3472, 924, 16068, 16,
- 16276, 64, 3452, 984, 16056, 20,
- 16284, 28, 3428, 1044, 16048, 24,
- 16296, 16376, 3400, 1108, 16036, 24,
- 16304, 16340, 3372, 1168, 16024, 28,
- 16316, 16304, 3340, 1232, 16016, 32,
- 16324, 16272, 3308, 1296, 16004, 32,
- 16332, 16244, 3272, 1360, 15996, 36,
- 16344, 16212, 3236, 1424, 15988, 36,
- 16352, 16188, 3200, 1488, 15980, 40,
- 16360, 16160, 3160, 1552, 15972, 40,
- 16368, 16136, 3116, 1616, 15964, 40,
- 16372, 16112, 3072, 1680, 15956, 44,
- 16380, 16092, 3028, 1744, 15952, 44,
- 0, 16072, 2980, 1808, 15948, 44,
- 8, 16052, 2932, 1872, 15944, 48,
- 12, 16036, 2880, 1936, 15940, 48,
- 16, 16020, 2828, 2000, 15936, 48,
- 20, 16008, 2776, 2064, 15936, 48,
- 24, 15996, 2724, 2128, 15936, 48,
- 28, 15984, 2668, 2192, 15936, 48,
- 32, 15972, 2612, 2252, 15940, 44,
- 36, 15964, 2552, 2316, 15940, 44,
- 40, 15956, 2496, 2376, 15944, 44,
- 40, 15952, 2436, 2436, 15952, 40 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_116[198] = {
+ 0x3F0C, 0x0240, 0x0D68, 0x0240, 0x3F0C, 0x0000,
+ 0x3F18, 0x0210, 0x0D64, 0x0274, 0x3F00, 0x0000,
+ 0x3F24, 0x01E0, 0x0D58, 0x02A8, 0x3EF8, 0x0004,
+ 0x3F2C, 0x01B0, 0x0D58, 0x02DC, 0x3EEC, 0x0004,
+ 0x3F38, 0x0180, 0x0D50, 0x0310, 0x3EE0, 0x0008,
+ 0x3F44, 0x0154, 0x0D40, 0x0348, 0x3ED8, 0x0008,
+ 0x3F50, 0x0128, 0x0D34, 0x037C, 0x3ECC, 0x000C,
+ 0x3F5C, 0x00FC, 0x0D20, 0x03B4, 0x3EC4, 0x0010,
+ 0x3F64, 0x00D4, 0x0D14, 0x03EC, 0x3EB8, 0x0010,
+ 0x3F70, 0x00AC, 0x0CFC, 0x0424, 0x3EB0, 0x0014,
+ 0x3F78, 0x0084, 0x0CE8, 0x0460, 0x3EA8, 0x0014,
+ 0x3F84, 0x0060, 0x0CCC, 0x0498, 0x3EA0, 0x0018,
+ 0x3F90, 0x003C, 0x0CB4, 0x04D0, 0x3E98, 0x0018,
+ 0x3F98, 0x0018, 0x0C9C, 0x050C, 0x3E90, 0x0018,
+ 0x3FA0, 0x3FFC, 0x0C78, 0x0548, 0x3E88, 0x001C,
+ 0x3FAC, 0x3FDC, 0x0C54, 0x0584, 0x3E84, 0x001C,
+ 0x3FB4, 0x3FBC, 0x0C3C, 0x05BC, 0x3E7C, 0x001C,
+ 0x3FBC, 0x3FA0, 0x0C14, 0x05F8, 0x3E78, 0x0020,
+ 0x3FC4, 0x3F84, 0x0BF0, 0x0634, 0x3E74, 0x0020,
+ 0x3FCC, 0x3F68, 0x0BCC, 0x0670, 0x3E70, 0x0020,
+ 0x3FD4, 0x3F50, 0x0BA4, 0x06AC, 0x3E6C, 0x0020,
+ 0x3FDC, 0x3F38, 0x0B78, 0x06E8, 0x3E6C, 0x0020,
+ 0x3FE0, 0x3F24, 0x0B50, 0x0724, 0x3E68, 0x0020,
+ 0x3FE8, 0x3F0C, 0x0B24, 0x0760, 0x3E68, 0x0020,
+ 0x3FF0, 0x3EFC, 0x0AF4, 0x0798, 0x3E68, 0x0020,
+ 0x3FF4, 0x3EE8, 0x0AC8, 0x07D4, 0x3E68, 0x0020,
+ 0x3FFC, 0x3ED8, 0x0A94, 0x0810, 0x3E6C, 0x001C,
+ 0x0000, 0x3EC8, 0x0A64, 0x0848, 0x3E70, 0x001C,
+ 0x0000, 0x3EB8, 0x0A38, 0x0880, 0x3E74, 0x001C,
+ 0x0004, 0x3EAC, 0x0A04, 0x08BC, 0x3E78, 0x0018,
+ 0x0008, 0x3EA4, 0x09D0, 0x08F4, 0x3E7C, 0x0014,
+ 0x000C, 0x3E98, 0x0998, 0x092C, 0x3E84, 0x0014,
+ 0x0010, 0x3E90, 0x0964, 0x0960, 0x3E8C, 0x0010
+};
-static const uint16_t filter_6tap_64p_150[198] = {
- 16148, 920, 2724, 920, 16148, 0,
- 16152, 880, 2724, 956, 16148, 0,
- 16152, 844, 2720, 996, 16144, 0,
- 16156, 804, 2716, 1032, 16144, 0,
- 16156, 768, 2712, 1072, 16144, 0,
- 16160, 732, 2708, 1112, 16144, 16380,
- 16164, 696, 2700, 1152, 16144, 16380,
- 16168, 660, 2692, 1192, 16148, 16380,
- 16172, 628, 2684, 1232, 16148, 16380,
- 16176, 592, 2672, 1272, 16152, 16376,
- 16180, 560, 2660, 1312, 16152, 16376,
- 16184, 524, 2648, 1348, 16156, 16376,
- 16192, 492, 2632, 1388, 16160, 16372,
- 16196, 460, 2616, 1428, 16164, 16372,
- 16200, 432, 2600, 1468, 16168, 16368,
- 16204, 400, 2584, 1508, 16176, 16364,
- 16212, 368, 2564, 1548, 16180, 16364,
- 16216, 340, 2544, 1588, 16188, 16360,
- 16220, 312, 2524, 1628, 16196, 16356,
- 16228, 284, 2504, 1668, 16204, 16356,
- 16232, 256, 2480, 1704, 16212, 16352,
- 16240, 232, 2456, 1744, 16224, 16348,
- 16244, 204, 2432, 1780, 16232, 16344,
- 16248, 180, 2408, 1820, 16244, 16340,
- 16256, 156, 2380, 1856, 16256, 16336,
- 16260, 132, 2352, 1896, 16268, 16332,
- 16268, 108, 2324, 1932, 16280, 16328,
- 16272, 88, 2296, 1968, 16292, 16324,
- 16276, 64, 2268, 2004, 16308, 16320,
- 16284, 44, 2236, 2036, 16324, 16312,
- 16288, 24, 2204, 2072, 16340, 16308,
- 16292, 8, 2172, 2108, 16356, 16304,
- 16300, 16372, 2140, 2140, 16372, 16300 };
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_6tap_64p_149[198] = {
+ 0x3F14, 0x0394, 0x0AB0, 0x0394, 0x3F14, 0x0000,
+ 0x3F18, 0x036C, 0x0AB0, 0x03B8, 0x3F14, 0x0000,
+ 0x3F18, 0x0348, 0x0AAC, 0x03E0, 0x3F14, 0x0000,
+ 0x3F1C, 0x0320, 0x0AAC, 0x0408, 0x3F10, 0x0000,
+ 0x3F20, 0x02FC, 0x0AA8, 0x042C, 0x3F10, 0x0000,
+ 0x3F24, 0x02D8, 0x0AA0, 0x0454, 0x3F10, 0x0000,
+ 0x3F28, 0x02B4, 0x0A98, 0x047C, 0x3F10, 0x0000,
+ 0x3F28, 0x0290, 0x0A90, 0x04A4, 0x3F14, 0x0000,
+ 0x3F30, 0x026C, 0x0A84, 0x04CC, 0x3F14, 0x0000,
+ 0x3F34, 0x024C, 0x0A7C, 0x04F4, 0x3F14, 0x3FFC,
+ 0x3F38, 0x0228, 0x0A70, 0x051C, 0x3F18, 0x3FFC,
+ 0x3F3C, 0x0208, 0x0A64, 0x0544, 0x3F1C, 0x3FF8,
+ 0x3F40, 0x01E8, 0x0A54, 0x056C, 0x3F20, 0x3FF8,
+ 0x3F44, 0x01C8, 0x0A48, 0x0594, 0x3F24, 0x3FF4,
+ 0x3F4C, 0x01A8, 0x0A34, 0x05BC, 0x3F28, 0x3FF4,
+ 0x3F50, 0x0188, 0x0A28, 0x05E4, 0x3F2C, 0x3FF0,
+ 0x3F54, 0x016C, 0x0A10, 0x060C, 0x3F34, 0x3FF0,
+ 0x3F5C, 0x014C, 0x09FC, 0x0634, 0x3F3C, 0x3FEC,
+ 0x3F60, 0x0130, 0x09EC, 0x065C, 0x3F40, 0x3FE8,
+ 0x3F68, 0x0114, 0x09D0, 0x0684, 0x3F48, 0x3FE8,
+ 0x3F6C, 0x00F8, 0x09B8, 0x06AC, 0x3F54, 0x3FE4,
+ 0x3F74, 0x00E0, 0x09A0, 0x06D0, 0x3F5C, 0x3FE0,
+ 0x3F78, 0x00C4, 0x098C, 0x06F8, 0x3F64, 0x3FDC,
+ 0x3F7C, 0x00AC, 0x0970, 0x0720, 0x3F70, 0x3FD8,
+ 0x3F84, 0x0094, 0x0954, 0x0744, 0x3F7C, 0x3FD4,
+ 0x3F88, 0x007C, 0x093C, 0x0768, 0x3F88, 0x3FD0,
+ 0x3F90, 0x0064, 0x091C, 0x0790, 0x3F94, 0x3FCC,
+ 0x3F94, 0x0050, 0x08FC, 0x07B4, 0x3FA4, 0x3FC8,
+ 0x3F98, 0x003C, 0x08E0, 0x07D8, 0x3FB0, 0x3FC4,
+ 0x3FA0, 0x0024, 0x08C0, 0x07FC, 0x3FC0, 0x3FC0,
+ 0x3FA4, 0x0014, 0x08A4, 0x081C, 0x3FD0, 0x3FB8,
+ 0x3FAC, 0x0000, 0x0880, 0x0840, 0x3FE0, 0x3FB4,
+ 0x3FB0, 0x3FF0, 0x0860, 0x0860, 0x3FF0, 0x3FB0
+};
+//=========================================
+// <num_taps> = 6
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_6tap_64p_183[198] = {
- 16296, 1032, 2196, 1032, 16296, 0,
- 16292, 1004, 2200, 1060, 16304, 16380,
- 16288, 976, 2200, 1088, 16308, 16380,
- 16284, 952, 2196, 1116, 16312, 16376,
- 16284, 924, 2196, 1144, 16320, 16376,
- 16280, 900, 2192, 1172, 16324, 16372,
- 16276, 872, 2192, 1200, 16332, 16368,
- 16276, 848, 2188, 1228, 16340, 16368,
- 16272, 820, 2180, 1256, 16348, 16364,
- 16272, 796, 2176, 1280, 16356, 16360,
- 16268, 768, 2168, 1308, 16364, 16360,
- 16268, 744, 2164, 1336, 16372, 16356,
- 16268, 716, 2156, 1364, 16380, 16352,
- 16264, 692, 2148, 1392, 4, 16352,
- 16264, 668, 2136, 1420, 16, 16348,
- 16264, 644, 2128, 1448, 28, 16344,
- 16264, 620, 2116, 1472, 36, 16340,
- 16264, 596, 2108, 1500, 48, 16340,
- 16268, 572, 2096, 1524, 60, 16336,
- 16268, 548, 2080, 1552, 72, 16332,
- 16268, 524, 2068, 1576, 88, 16328,
- 16268, 504, 2056, 1604, 100, 16324,
- 16272, 480, 2040, 1628, 112, 16324,
- 16272, 456, 2024, 1652, 128, 16320,
- 16272, 436, 2008, 1680, 144, 16316,
- 16276, 416, 1992, 1704, 156, 16312,
- 16276, 392, 1976, 1724, 172, 16308,
- 16280, 372, 1956, 1748, 188, 16308,
- 16280, 352, 1940, 1772, 204, 16304,
- 16284, 332, 1920, 1796, 224, 16300,
- 16288, 312, 1900, 1816, 240, 16296,
- 16288, 296, 1880, 1840, 256, 16296,
- 16292, 276, 1860, 1860, 276, 16292 };
+ 0x002C, 0x0420, 0x076C, 0x041C, 0x002C, 0x0000,
+ 0x0028, 0x040C, 0x0768, 0x0430, 0x0034, 0x0000,
+ 0x0020, 0x03F8, 0x0768, 0x0448, 0x003C, 0x3FFC,
+ 0x0018, 0x03E4, 0x0768, 0x045C, 0x0044, 0x3FFC,
+ 0x0014, 0x03D0, 0x0768, 0x0470, 0x004C, 0x3FF8,
+ 0x000C, 0x03BC, 0x0764, 0x0484, 0x0058, 0x3FF8,
+ 0x0008, 0x03A4, 0x0764, 0x049C, 0x0060, 0x3FF4,
+ 0x0004, 0x0390, 0x0760, 0x04B0, 0x0068, 0x3FF4,
+ 0x0000, 0x037C, 0x0760, 0x04C4, 0x0070, 0x3FF0,
+ 0x3FFC, 0x0364, 0x075C, 0x04D8, 0x007C, 0x3FF0,
+ 0x3FF8, 0x0350, 0x0758, 0x04F0, 0x0084, 0x3FEC,
+ 0x3FF4, 0x033C, 0x0750, 0x0504, 0x0090, 0x3FEC,
+ 0x3FF0, 0x0328, 0x074C, 0x0518, 0x009C, 0x3FE8,
+ 0x3FEC, 0x0314, 0x0744, 0x052C, 0x00A8, 0x3FE8,
+ 0x3FE8, 0x0304, 0x0740, 0x0540, 0x00B0, 0x3FE4,
+ 0x3FE4, 0x02EC, 0x073C, 0x0554, 0x00BC, 0x3FE4,
+ 0x3FE0, 0x02DC, 0x0734, 0x0568, 0x00C8, 0x3FE0,
+ 0x3FE0, 0x02C4, 0x072C, 0x057C, 0x00D4, 0x3FE0,
+ 0x3FDC, 0x02B4, 0x0724, 0x058C, 0x00E4, 0x3FDC,
+ 0x3FDC, 0x02A0, 0x0718, 0x05A0, 0x00F0, 0x3FDC,
+ 0x3FD8, 0x028C, 0x0714, 0x05B4, 0x00FC, 0x3FD8,
+ 0x3FD8, 0x0278, 0x0704, 0x05C8, 0x010C, 0x3FD8,
+ 0x3FD4, 0x0264, 0x0700, 0x05D8, 0x0118, 0x3FD8,
+ 0x3FD4, 0x0254, 0x06F0, 0x05EC, 0x0128, 0x3FD4,
+ 0x3FD0, 0x0244, 0x06E8, 0x05FC, 0x0134, 0x3FD4,
+ 0x3FD0, 0x0230, 0x06DC, 0x060C, 0x0144, 0x3FD4,
+ 0x3FD0, 0x021C, 0x06D0, 0x0620, 0x0154, 0x3FD0,
+ 0x3FD0, 0x0208, 0x06C4, 0x0630, 0x0164, 0x3FD0,
+ 0x3FD0, 0x01F8, 0x06B8, 0x0640, 0x0170, 0x3FD0,
+ 0x3FCC, 0x01E8, 0x06AC, 0x0650, 0x0180, 0x3FD0,
+ 0x3FCC, 0x01D8, 0x069C, 0x0660, 0x0190, 0x3FD0,
+ 0x3FCC, 0x01C4, 0x068C, 0x0670, 0x01A4, 0x3FD0,
+ 0x3FCC, 0x01B8, 0x0680, 0x067C, 0x01B4, 0x3FCC
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_upscale[231] = {
- 176, 15760, 2488, 2488, 15760, 176, 0,
- 172, 15772, 2404, 2572, 15752, 180, 16380,
- 168, 15784, 2324, 2656, 15740, 184, 16380,
- 164, 15800, 2240, 2736, 15732, 188, 16376,
- 160, 15812, 2152, 2816, 15728, 192, 16376,
- 152, 15828, 2068, 2896, 15724, 192, 16376,
- 148, 15848, 1984, 2972, 15720, 196, 16372,
- 140, 15864, 1896, 3048, 15720, 196, 16372,
- 136, 15884, 1812, 3124, 15720, 196, 16368,
- 128, 15900, 1724, 3196, 15720, 196, 16368,
- 120, 15920, 1640, 3268, 15724, 196, 16368,
- 116, 15940, 1552, 3336, 15732, 196, 16364,
- 108, 15964, 1468, 3400, 15740, 196, 16364,
- 104, 15984, 1384, 3464, 15748, 192, 16364,
- 96, 16004, 1300, 3524, 15760, 188, 16364,
- 88, 16028, 1216, 3584, 15776, 184, 16364,
- 84, 16048, 1132, 3640, 15792, 180, 16360,
- 76, 16072, 1048, 3692, 15812, 176, 16360,
- 68, 16092, 968, 3744, 15832, 168, 16360,
- 64, 16116, 888, 3788, 15856, 160, 16360,
- 56, 16140, 812, 3832, 15884, 152, 16360,
- 52, 16160, 732, 3876, 15912, 144, 16360,
- 44, 16184, 656, 3912, 15944, 136, 16364,
- 40, 16204, 584, 3944, 15976, 124, 16364,
- 32, 16228, 512, 3976, 16012, 116, 16364,
- 28, 16248, 440, 4004, 16048, 104, 16364,
- 24, 16268, 372, 4028, 16092, 88, 16368,
- 20, 16288, 304, 4048, 16132, 76, 16368,
- 12, 16308, 240, 4064, 16180, 60, 16372,
- 8, 16328, 176, 4076, 16228, 48, 16372,
- 4, 16348, 112, 4088, 16276, 32, 16376,
- 0, 16364, 56, 4092, 16328, 16, 16380,
- 0, 0, 0, 4096, 0, 0, 0 };
+ 0x00B0, 0x3D98, 0x09BC, 0x09B8, 0x3D94, 0x00B0, 0x0000,
+ 0x00AC, 0x3DA0, 0x0968, 0x0A10, 0x3D88, 0x00B4, 0x0000,
+ 0x00A8, 0x3DAC, 0x0914, 0x0A60, 0x3D80, 0x00B8, 0x0000,
+ 0x00A4, 0x3DB8, 0x08C0, 0x0AB4, 0x3D78, 0x00BC, 0x3FFC,
+ 0x00A0, 0x3DC8, 0x0868, 0x0B00, 0x3D74, 0x00C0, 0x3FFC,
+ 0x0098, 0x3DD8, 0x0818, 0x0B54, 0x3D6C, 0x00C0, 0x3FF8,
+ 0x0094, 0x3DE8, 0x07C0, 0x0B9C, 0x3D6C, 0x00C4, 0x3FF8,
+ 0x008C, 0x3DFC, 0x0768, 0x0BEC, 0x3D68, 0x00C4, 0x3FF8,
+ 0x0088, 0x3E0C, 0x0714, 0x0C38, 0x3D68, 0x00C4, 0x3FF4,
+ 0x0080, 0x3E20, 0x06BC, 0x0C80, 0x3D6C, 0x00C4, 0x3FF4,
+ 0x0078, 0x3E34, 0x0668, 0x0CC4, 0x3D70, 0x00C4, 0x3FF4,
+ 0x0074, 0x3E48, 0x0610, 0x0D08, 0x3D78, 0x00C4, 0x3FF0,
+ 0x006C, 0x3E5C, 0x05BC, 0x0D48, 0x3D80, 0x00C4, 0x3FF0,
+ 0x0068, 0x3E74, 0x0568, 0x0D84, 0x3D88, 0x00C0, 0x3FF0,
+ 0x0060, 0x3E88, 0x0514, 0x0DC8, 0x3D94, 0x00BC, 0x3FEC,
+ 0x0058, 0x3E9C, 0x04C0, 0x0E04, 0x3DA4, 0x00B8, 0x3FEC,
+ 0x0054, 0x3EB4, 0x046C, 0x0E38, 0x3DB4, 0x00B4, 0x3FEC,
+ 0x004C, 0x3ECC, 0x0418, 0x0E6C, 0x3DC8, 0x00B0, 0x3FEC,
+ 0x0044, 0x3EE0, 0x03C8, 0x0EA4, 0x3DDC, 0x00A8, 0x3FEC,
+ 0x0040, 0x3EF8, 0x0378, 0x0ED0, 0x3DF4, 0x00A0, 0x3FEC,
+ 0x0038, 0x3F0C, 0x032C, 0x0EFC, 0x3E10, 0x0098, 0x3FEC,
+ 0x0034, 0x3F24, 0x02DC, 0x0F24, 0x3E2C, 0x0090, 0x3FEC,
+ 0x002C, 0x3F38, 0x0294, 0x0F4C, 0x3E48, 0x0088, 0x3FEC,
+ 0x0028, 0x3F50, 0x0248, 0x0F68, 0x3E6C, 0x007C, 0x3FF0,
+ 0x0020, 0x3F64, 0x0200, 0x0F88, 0x3E90, 0x0074, 0x3FF0,
+ 0x001C, 0x3F7C, 0x01B8, 0x0FA4, 0x3EB4, 0x0068, 0x3FF0,
+ 0x0018, 0x3F90, 0x0174, 0x0FBC, 0x3EDC, 0x0058, 0x3FF4,
+ 0x0014, 0x3FA4, 0x0130, 0x0FD0, 0x3F08, 0x004C, 0x3FF4,
+ 0x000C, 0x3FB8, 0x00F0, 0x0FE4, 0x3F34, 0x003C, 0x3FF8,
+ 0x0008, 0x3FCC, 0x00B0, 0x0FF0, 0x3F64, 0x0030, 0x3FF8,
+ 0x0004, 0x3FDC, 0x0070, 0x0FFC, 0x3F98, 0x0020, 0x3FFC,
+ 0x0000, 0x3FF0, 0x0038, 0x0FFC, 0x3FCC, 0x0010, 0x0000,
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000
+};
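A note on the new tables: they are regenerated (see the ModifiedLanczos headers) and written in hex, but they keep the same signed 1.12 coefficient encoding the old decimal tables used. 0x1000 is unity gain, and values at or above 0x2000 are negative in the 14-bit field; 0x3FF8 is -8/4096, which the old tables wrote as decimal 16376. A minimal decode sketch, assuming that encoding:

#include <stdint.h>
#include <stdio.h>

/* Decode one scaler coefficient: 14-bit two's complement, 12 fractional bits. */
static double scl_coef_to_double(uint16_t raw)
{
	int32_t v = raw & 0x3FFF;	/* coefficients occupy 14 bits */

	if (v & 0x2000)			/* sign bit of the 14-bit field */
		v -= 0x4000;		/* 0x3FF8 -> -8, 0x3FFC -> -4 */
	return v / 4096.0;		/* 12 fractional bits (1.12) */
}

int main(void)
{
	printf("%f\n", scl_coef_to_double(0x1000));	/* 1.000000, unity */
	printf("%f\n", scl_coef_to_double(0x3FF8));	/* -0.001953 (old 16376) */
	return 0;
}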
-static const uint16_t filter_7tap_64p_117[231] = {
- 92, 15868, 2464, 2464, 15868, 92, 0,
- 96, 15864, 2404, 2528, 15876, 88, 0,
- 100, 15860, 2344, 2584, 15884, 84, 0,
- 104, 15856, 2280, 2644, 15892, 76, 0,
- 108, 15852, 2216, 2700, 15904, 72, 0,
- 108, 15852, 2152, 2756, 15916, 64, 0,
- 112, 15852, 2088, 2812, 15932, 60, 0,
- 112, 15852, 2024, 2864, 15948, 52, 0,
- 112, 15856, 1960, 2916, 15964, 44, 0,
- 116, 15860, 1892, 2964, 15984, 36, 0,
- 116, 15864, 1828, 3016, 16004, 24, 4,
- 116, 15868, 1760, 3060, 16024, 16, 4,
- 116, 15876, 1696, 3108, 16048, 8, 8,
- 116, 15884, 1628, 3152, 16072, 16380, 8,
- 112, 15892, 1564, 3192, 16100, 16372, 8,
- 112, 15900, 1496, 3232, 16124, 16360, 12,
- 112, 15908, 1428, 3268, 16156, 16348, 12,
- 108, 15920, 1364, 3304, 16188, 16336, 16,
- 108, 15928, 1300, 3340, 16220, 16324, 20,
- 104, 15940, 1232, 3372, 16252, 16312, 20,
- 104, 15952, 1168, 3400, 16288, 16300, 24,
- 100, 15964, 1104, 3428, 16328, 16284, 28,
- 96, 15980, 1040, 3452, 16364, 16272, 28,
- 96, 15992, 976, 3476, 20, 16256, 32,
- 92, 16004, 916, 3496, 64, 16244, 36,
- 88, 16020, 856, 3516, 108, 16228, 40,
- 84, 16032, 792, 3532, 152, 16216, 44,
- 80, 16048, 732, 3544, 200, 16200, 48,
- 80, 16064, 676, 3556, 248, 16184, 48,
- 76, 16080, 616, 3564, 296, 16168, 52,
- 72, 16092, 560, 3568, 344, 16156, 56,
- 68, 16108, 504, 3572, 396, 16140, 60,
- 64, 16124, 452, 3576, 452, 16124, 64 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_116[231] = {
+ 0x0020, 0x3E58, 0x0988, 0x0988, 0x3E58, 0x0020, 0x0000,
+ 0x0024, 0x3E4C, 0x0954, 0x09C0, 0x3E64, 0x0018, 0x0000,
+ 0x002C, 0x3E44, 0x091C, 0x09F4, 0x3E70, 0x0010, 0x0000,
+ 0x0030, 0x3E3C, 0x08E8, 0x0A24, 0x3E80, 0x0008, 0x0000,
+ 0x0034, 0x3E34, 0x08AC, 0x0A5C, 0x3E90, 0x0000, 0x0000,
+ 0x003C, 0x3E30, 0x0870, 0x0A84, 0x3EA0, 0x3FFC, 0x0004,
+ 0x0040, 0x3E28, 0x0838, 0x0AB4, 0x3EB4, 0x3FF4, 0x0004,
+ 0x0044, 0x3E24, 0x07FC, 0x0AE4, 0x3EC8, 0x3FEC, 0x0004,
+ 0x0048, 0x3E24, 0x07C4, 0x0B08, 0x3EDC, 0x3FE4, 0x0008,
+ 0x0048, 0x3E20, 0x0788, 0x0B3C, 0x3EF4, 0x3FD8, 0x0008,
+ 0x004C, 0x3E20, 0x074C, 0x0B60, 0x3F0C, 0x3FD0, 0x000C,
+ 0x0050, 0x3E20, 0x0710, 0x0B8C, 0x3F24, 0x3FC4, 0x000C,
+ 0x0050, 0x3E20, 0x06D4, 0x0BB0, 0x3F40, 0x3FBC, 0x0010,
+ 0x0054, 0x3E24, 0x0698, 0x0BD4, 0x3F5C, 0x3FB0, 0x0010,
+ 0x0054, 0x3E24, 0x065C, 0x0BFC, 0x3F78, 0x3FA4, 0x0014,
+ 0x0054, 0x3E28, 0x0624, 0x0C1C, 0x3F98, 0x3F98, 0x0014,
+ 0x0058, 0x3E2C, 0x05E4, 0x0C3C, 0x3FB8, 0x3F8C, 0x0018,
+ 0x0058, 0x3E34, 0x05A8, 0x0C58, 0x3FD8, 0x3F80, 0x001C,
+ 0x0058, 0x3E38, 0x0570, 0x0C78, 0x3FF8, 0x3F74, 0x001C,
+ 0x0058, 0x3E40, 0x0534, 0x0C94, 0x0018, 0x3F68, 0x0020,
+ 0x0058, 0x3E48, 0x04F4, 0x0CAC, 0x0040, 0x3F5C, 0x0024,
+ 0x0058, 0x3E50, 0x04BC, 0x0CC4, 0x0064, 0x3F50, 0x0024,
+ 0x0054, 0x3E58, 0x0484, 0x0CD8, 0x008C, 0x3F44, 0x0028,
+ 0x0054, 0x3E60, 0x0448, 0x0CEC, 0x00B4, 0x3F38, 0x002C,
+ 0x0054, 0x3E68, 0x0410, 0x0CFC, 0x00E0, 0x3F28, 0x0030,
+ 0x0054, 0x3E74, 0x03D4, 0x0D0C, 0x010C, 0x3F1C, 0x0030,
+ 0x0050, 0x3E7C, 0x03A0, 0x0D18, 0x0138, 0x3F10, 0x0034,
+ 0x0050, 0x3E88, 0x0364, 0x0D24, 0x0164, 0x3F04, 0x0038,
+ 0x004C, 0x3E94, 0x0330, 0x0D30, 0x0194, 0x3EF4, 0x0038,
+ 0x004C, 0x3EA0, 0x02F8, 0x0D34, 0x01C4, 0x3EE8, 0x003C,
+ 0x0048, 0x3EAC, 0x02C0, 0x0D3C, 0x01F4, 0x3EDC, 0x0040,
+ 0x0048, 0x3EB8, 0x0290, 0x0D3C, 0x0224, 0x3ED0, 0x0040,
+ 0x0044, 0x3EC4, 0x0258, 0x0D40, 0x0258, 0x3EC4, 0x0044
+};
-static const uint16_t filter_7tap_64p_150[231] = {
- 16224, 16380, 2208, 2208, 16380, 16224, 0,
- 16232, 16360, 2172, 2236, 16, 16216, 0,
- 16236, 16340, 2140, 2268, 40, 16212, 0,
- 16244, 16324, 2104, 2296, 60, 16204, 4,
- 16252, 16304, 2072, 2324, 84, 16196, 4,
- 16256, 16288, 2036, 2352, 108, 16192, 4,
- 16264, 16268, 2000, 2380, 132, 16184, 8,
- 16272, 16252, 1960, 2408, 160, 16176, 8,
- 16276, 16240, 1924, 2432, 184, 16172, 8,
- 16284, 16224, 1888, 2456, 212, 16164, 8,
- 16288, 16212, 1848, 2480, 240, 16160, 12,
- 16296, 16196, 1812, 2500, 268, 16152, 12,
- 16300, 16184, 1772, 2524, 296, 16144, 12,
- 16308, 16172, 1736, 2544, 324, 16140, 12,
- 16312, 16164, 1696, 2564, 356, 16136, 12,
- 16320, 16152, 1656, 2584, 388, 16128, 12,
- 16324, 16144, 1616, 2600, 416, 16124, 12,
- 16328, 16136, 1576, 2616, 448, 16116, 12,
- 16332, 16128, 1536, 2632, 480, 16112, 12,
- 16340, 16120, 1496, 2648, 516, 16108, 12,
- 16344, 16112, 1456, 2660, 548, 16104, 12,
- 16348, 16104, 1416, 2672, 580, 16100, 12,
- 16352, 16100, 1376, 2684, 616, 16096, 12,
- 16356, 16096, 1336, 2696, 652, 16092, 12,
- 16360, 16092, 1296, 2704, 688, 16088, 12,
- 16364, 16088, 1256, 2712, 720, 16084, 12,
- 16368, 16084, 1220, 2720, 760, 16084, 8,
- 16368, 16080, 1180, 2724, 796, 16080, 8,
- 16372, 16080, 1140, 2732, 832, 16080, 8,
- 16376, 16076, 1100, 2732, 868, 16076, 4,
- 16380, 16076, 1060, 2736, 908, 16076, 4,
- 16380, 16076, 1020, 2740, 944, 16076, 0,
- 0, 16076, 984, 2740, 984, 16076, 0 };
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_7tap_64p_149[231] = {
+ 0x3F68, 0x3FEC, 0x08A8, 0x08AC, 0x3FF0, 0x3F68, 0x0000,
+ 0x3F70, 0x3FDC, 0x0888, 0x08CC, 0x0000, 0x3F60, 0x0000,
+ 0x3F74, 0x3FC8, 0x0868, 0x08F0, 0x0014, 0x3F58, 0x0000,
+ 0x3F7C, 0x3FB4, 0x0844, 0x0908, 0x002C, 0x3F54, 0x0004,
+ 0x3F84, 0x3FA4, 0x0820, 0x0924, 0x0044, 0x3F4C, 0x0004,
+ 0x3F88, 0x3F90, 0x0800, 0x0944, 0x005C, 0x3F44, 0x0004,
+ 0x3F90, 0x3F80, 0x07D8, 0x095C, 0x0074, 0x3F40, 0x0008,
+ 0x3F98, 0x3F70, 0x07B0, 0x097C, 0x008C, 0x3F38, 0x0008,
+ 0x3F9C, 0x3F60, 0x0790, 0x0994, 0x00A8, 0x3F30, 0x0008,
+ 0x3FA4, 0x3F54, 0x0764, 0x09B0, 0x00C4, 0x3F28, 0x0008,
+ 0x3FA8, 0x3F48, 0x0740, 0x09C4, 0x00DC, 0x3F24, 0x000C,
+ 0x3FB0, 0x3F38, 0x0718, 0x09DC, 0x00FC, 0x3F1C, 0x000C,
+ 0x3FB4, 0x3F2C, 0x06F0, 0x09F4, 0x0118, 0x3F18, 0x000C,
+ 0x3FBC, 0x3F24, 0x06C8, 0x0A08, 0x0134, 0x3F10, 0x000C,
+ 0x3FC0, 0x3F18, 0x06A0, 0x0A1C, 0x0154, 0x3F08, 0x0010,
+ 0x3FC8, 0x3F10, 0x0678, 0x0A2C, 0x0170, 0x3F04, 0x0010,
+ 0x3FCC, 0x3F04, 0x0650, 0x0A40, 0x0190, 0x3F00, 0x0010,
+ 0x3FD0, 0x3EFC, 0x0628, 0x0A54, 0x01B0, 0x3EF8, 0x0010,
+ 0x3FD4, 0x3EF4, 0x0600, 0x0A64, 0x01D0, 0x3EF4, 0x0010,
+ 0x3FDC, 0x3EEC, 0x05D8, 0x0A6C, 0x01F4, 0x3EF0, 0x0010,
+ 0x3FE0, 0x3EE8, 0x05B0, 0x0A7C, 0x0214, 0x3EE8, 0x0010,
+ 0x3FE4, 0x3EE0, 0x0588, 0x0A88, 0x0238, 0x3EE4, 0x0010,
+ 0x3FE8, 0x3EDC, 0x055C, 0x0A98, 0x0258, 0x3EE0, 0x0010,
+ 0x3FEC, 0x3ED8, 0x0534, 0x0AA0, 0x027C, 0x3EDC, 0x0010,
+ 0x3FF0, 0x3ED4, 0x050C, 0x0AAC, 0x02A0, 0x3ED8, 0x000C,
+ 0x3FF4, 0x3ED0, 0x04E4, 0x0AB4, 0x02C4, 0x3ED4, 0x000C,
+ 0x3FF4, 0x3ECC, 0x04C0, 0x0ABC, 0x02E8, 0x3ED0, 0x000C,
+ 0x3FF8, 0x3ECC, 0x0494, 0x0AC0, 0x030C, 0x3ED0, 0x000C,
+ 0x3FFC, 0x3EC8, 0x046C, 0x0AC8, 0x0334, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x0444, 0x0AC8, 0x0358, 0x3ECC, 0x0008,
+ 0x0000, 0x3EC8, 0x041C, 0x0ACC, 0x0380, 0x3EC8, 0x0008,
+ 0x0000, 0x3EC8, 0x03F4, 0x0AD0, 0x03A8, 0x3EC8, 0x0004,
+ 0x0004, 0x3EC8, 0x03CC, 0x0AD0, 0x03CC, 0x3EC8, 0x0004
+};
+//=========================================
+// <num_taps> = 7
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_7tap_64p_183[231] = {
- 16216, 324, 1884, 1884, 324, 16216, 0,
- 16220, 304, 1864, 1904, 344, 16216, 0,
- 16224, 284, 1844, 1924, 364, 16216, 0,
- 16224, 264, 1824, 1944, 384, 16212, 16380,
- 16228, 248, 1804, 1960, 408, 16212, 16380,
- 16228, 228, 1784, 1976, 428, 16208, 16380,
- 16232, 212, 1760, 1996, 452, 16208, 16380,
- 16236, 192, 1740, 2012, 472, 16208, 16376,
- 16240, 176, 1716, 2028, 496, 16208, 16376,
- 16240, 160, 1696, 2040, 516, 16208, 16376,
- 16244, 144, 1672, 2056, 540, 16208, 16376,
- 16248, 128, 1648, 2068, 564, 16208, 16372,
- 16252, 112, 1624, 2084, 588, 16208, 16372,
- 16256, 96, 1600, 2096, 612, 16208, 16368,
- 16256, 84, 1576, 2108, 636, 16208, 16368,
- 16260, 68, 1552, 2120, 660, 16208, 16368,
- 16264, 56, 1524, 2132, 684, 16212, 16364,
- 16268, 40, 1500, 2140, 712, 16212, 16364,
- 16272, 28, 1476, 2152, 736, 16216, 16360,
- 16276, 16, 1448, 2160, 760, 16216, 16356,
- 16280, 4, 1424, 2168, 788, 16220, 16356,
- 16284, 16376, 1396, 2176, 812, 16224, 16352,
- 16288, 16368, 1372, 2184, 840, 16224, 16352,
- 16292, 16356, 1344, 2188, 864, 16228, 16348,
- 16292, 16344, 1320, 2196, 892, 16232, 16344,
- 16296, 16336, 1292, 2200, 916, 16236, 16344,
- 16300, 16324, 1264, 2204, 944, 16240, 16340,
- 16304, 16316, 1240, 2208, 972, 16248, 16336,
- 16308, 16308, 1212, 2212, 996, 16252, 16332,
- 16312, 16300, 1184, 2216, 1024, 16256, 16332,
- 16316, 16292, 1160, 2216, 1052, 16264, 16328,
- 16316, 16284, 1132, 2216, 1076, 16268, 16324,
- 16320, 16276, 1104, 2216, 1104, 16276, 16320 };
+ 0x3FA4, 0x01E8, 0x0674, 0x0674, 0x01E8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01D4, 0x0668, 0x0684, 0x01F8, 0x3FA4, 0x0000,
+ 0x3FA4, 0x01C4, 0x0658, 0x0690, 0x0208, 0x3FA8, 0x0000,
+ 0x3FA0, 0x01B4, 0x064C, 0x06A0, 0x021C, 0x3FA8, 0x3FFC,
+ 0x3FA0, 0x01A4, 0x063C, 0x06AC, 0x022C, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0194, 0x0630, 0x06B4, 0x0240, 0x3FAC, 0x3FFC,
+ 0x3FA0, 0x0184, 0x0620, 0x06C4, 0x0250, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0174, 0x0614, 0x06CC, 0x0264, 0x3FB0, 0x3FF8,
+ 0x3FA0, 0x0164, 0x0604, 0x06D8, 0x0278, 0x3FB4, 0x3FF4,
+ 0x3FA0, 0x0154, 0x05F4, 0x06E4, 0x0288, 0x3FB8, 0x3FF4,
+ 0x3FA0, 0x0148, 0x05E4, 0x06EC, 0x029C, 0x3FBC, 0x3FF0,
+ 0x3FA0, 0x0138, 0x05D4, 0x06F4, 0x02B0, 0x3FC0, 0x3FF0,
+ 0x3FA0, 0x0128, 0x05C4, 0x0704, 0x02C4, 0x3FC0, 0x3FEC,
+ 0x3FA0, 0x011C, 0x05B4, 0x0708, 0x02D8, 0x3FC4, 0x3FEC,
+ 0x3FA4, 0x010C, 0x05A4, 0x0714, 0x02E8, 0x3FC8, 0x3FE8,
+ 0x3FA4, 0x0100, 0x0590, 0x0718, 0x02FC, 0x3FD0, 0x3FE8,
+ 0x3FA4, 0x00F0, 0x0580, 0x0724, 0x0310, 0x3FD4, 0x3FE4,
+ 0x3FA4, 0x00E4, 0x056C, 0x072C, 0x0324, 0x3FD8, 0x3FE4,
+ 0x3FA8, 0x00D8, 0x055C, 0x0730, 0x0338, 0x3FDC, 0x3FE0,
+ 0x3FA8, 0x00CC, 0x0548, 0x0738, 0x034C, 0x3FE4, 0x3FDC,
+ 0x3FA8, 0x00BC, 0x0538, 0x0740, 0x0360, 0x3FE8, 0x3FDC,
+ 0x3FAC, 0x00B0, 0x0528, 0x0744, 0x0374, 0x3FEC, 0x3FD8,
+ 0x3FAC, 0x00A4, 0x0514, 0x0748, 0x0388, 0x3FF4, 0x3FD8,
+ 0x3FB0, 0x0098, 0x0500, 0x074C, 0x039C, 0x3FFC, 0x3FD4,
+ 0x3FB0, 0x0090, 0x04EC, 0x0750, 0x03B0, 0x0000, 0x3FD4,
+ 0x3FB0, 0x0084, 0x04DC, 0x0758, 0x03C4, 0x0004, 0x3FD0,
+ 0x3FB4, 0x0078, 0x04CC, 0x0758, 0x03D8, 0x000C, 0x3FCC,
+ 0x3FB4, 0x006C, 0x04B8, 0x075C, 0x03EC, 0x0014, 0x3FCC,
+ 0x3FB8, 0x0064, 0x04A0, 0x0760, 0x0400, 0x001C, 0x3FC8,
+ 0x3FB8, 0x0058, 0x0490, 0x0760, 0x0414, 0x0024, 0x3FC8,
+ 0x3FBC, 0x0050, 0x047C, 0x0760, 0x0428, 0x002C, 0x3FC4,
+ 0x3FBC, 0x0048, 0x0464, 0x0764, 0x043C, 0x0034, 0x3FC4,
+ 0x3FC0, 0x003C, 0x0454, 0x0764, 0x0450, 0x003C, 0x3FC0
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 0.83333 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_upscale[264] = {
- 0, 0, 0, 4096, 0, 0, 0, 0,
- 16376, 20, 16328, 4092, 56, 16364, 4, 0,
- 16372, 36, 16272, 4088, 116, 16340, 12, 0,
- 16364, 56, 16220, 4080, 180, 16320, 20, 0,
- 16360, 76, 16172, 4064, 244, 16296, 24, 16380,
- 16356, 92, 16124, 4048, 312, 16276, 32, 16380,
- 16352, 108, 16080, 4032, 380, 16252, 40, 16380,
- 16344, 124, 16036, 4008, 452, 16228, 48, 16380,
- 16340, 136, 15996, 3980, 524, 16204, 56, 16380,
- 16340, 152, 15956, 3952, 600, 16180, 64, 16376,
- 16336, 164, 15920, 3920, 672, 16156, 76, 16376,
- 16332, 176, 15888, 3884, 752, 16132, 84, 16376,
- 16328, 188, 15860, 3844, 828, 16104, 92, 16372,
- 16328, 200, 15828, 3800, 908, 16080, 100, 16372,
- 16324, 208, 15804, 3756, 992, 16056, 108, 16372,
- 16324, 216, 15780, 3708, 1072, 16032, 120, 16368,
- 16320, 224, 15760, 3656, 1156, 16008, 128, 16368,
- 16320, 232, 15740, 3604, 1240, 15984, 136, 16364,
- 16320, 240, 15724, 3548, 1324, 15960, 144, 16364,
- 16320, 244, 15708, 3488, 1412, 15936, 152, 16360,
- 16320, 248, 15696, 3428, 1496, 15912, 160, 16360,
- 16320, 252, 15688, 3364, 1584, 15892, 172, 16356,
- 16320, 256, 15680, 3296, 1672, 15868, 180, 16352,
- 16320, 256, 15672, 3228, 1756, 15848, 188, 16352,
- 16320, 256, 15668, 3156, 1844, 15828, 192, 16348,
- 16320, 260, 15668, 3084, 1932, 15808, 200, 16348,
- 16320, 256, 15668, 3012, 2020, 15792, 208, 16344,
- 16324, 256, 15668, 2936, 2108, 15772, 216, 16344,
- 16324, 256, 15672, 2856, 2192, 15756, 220, 16340,
- 16324, 252, 15676, 2776, 2280, 15740, 228, 16336,
- 16328, 252, 15684, 2696, 2364, 15728, 232, 16336,
- 16328, 248, 15692, 2616, 2448, 15716, 240, 16332,
- 16332, 244, 15704, 2532, 2532, 15704, 244, 16332 };
+ 0x0000, 0x0000, 0x0000, 0x1000, 0x0000, 0x0000, 0x0000, 0x0000,
+ 0x3FFC, 0x0014, 0x3FC8, 0x1000, 0x0038, 0x3FEC, 0x0004, 0x0000,
+ 0x3FF4, 0x0024, 0x3F94, 0x0FFC, 0x0074, 0x3FD8, 0x000C, 0x0000,
+ 0x3FF0, 0x0038, 0x3F60, 0x0FEC, 0x00B4, 0x3FC4, 0x0014, 0x0000,
+ 0x3FEC, 0x004C, 0x3F2C, 0x0FE4, 0x00F4, 0x3FAC, 0x0018, 0x0000,
+ 0x3FE4, 0x005C, 0x3F00, 0x0FD4, 0x0138, 0x3F94, 0x0020, 0x0000,
+ 0x3FE0, 0x006C, 0x3ED0, 0x0FC4, 0x017C, 0x3F7C, 0x0028, 0x0000,
+ 0x3FDC, 0x007C, 0x3EA8, 0x0FA4, 0x01C4, 0x3F68, 0x0030, 0x0000,
+ 0x3FD8, 0x0088, 0x3E80, 0x0F90, 0x020C, 0x3F50, 0x0038, 0x3FFC,
+ 0x3FD4, 0x0098, 0x3E58, 0x0F70, 0x0258, 0x3F38, 0x0040, 0x3FFC,
+ 0x3FD0, 0x00A4, 0x3E34, 0x0F54, 0x02A0, 0x3F1C, 0x004C, 0x3FFC,
+ 0x3FD0, 0x00B0, 0x3E14, 0x0F28, 0x02F0, 0x3F04, 0x0054, 0x3FFC,
+ 0x3FCC, 0x00BC, 0x3DF4, 0x0F08, 0x033C, 0x3EEC, 0x005C, 0x3FF8,
+ 0x3FC8, 0x00C8, 0x3DD8, 0x0EDC, 0x038C, 0x3ED4, 0x0064, 0x3FF8,
+ 0x3FC8, 0x00D0, 0x3DC0, 0x0EAC, 0x03E0, 0x3EBC, 0x006C, 0x3FF4,
+ 0x3FC4, 0x00D8, 0x3DA8, 0x0E7C, 0x0430, 0x3EA4, 0x0078, 0x3FF4,
+ 0x3FC4, 0x00E0, 0x3D94, 0x0E48, 0x0484, 0x3E8C, 0x0080, 0x3FF0,
+ 0x3FC4, 0x00E8, 0x3D80, 0x0E10, 0x04D8, 0x3E74, 0x0088, 0x3FF0,
+ 0x3FC4, 0x00F0, 0x3D70, 0x0DD8, 0x052C, 0x3E5C, 0x0090, 0x3FEC,
+ 0x3FC0, 0x00F4, 0x3D60, 0x0DA0, 0x0584, 0x3E44, 0x0098, 0x3FEC,
+ 0x3FC0, 0x00F8, 0x3D54, 0x0D68, 0x05D8, 0x3E2C, 0x00A0, 0x3FE8,
+ 0x3FC0, 0x00FC, 0x3D48, 0x0D20, 0x0630, 0x3E18, 0x00AC, 0x3FE8,
+ 0x3FC0, 0x0100, 0x3D40, 0x0CE0, 0x0688, 0x3E00, 0x00B4, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D3C, 0x0C98, 0x06DC, 0x3DEC, 0x00BC, 0x3FE4,
+ 0x3FC4, 0x0100, 0x3D38, 0x0C58, 0x0734, 0x3DD8, 0x00C0, 0x3FE0,
+ 0x3FC4, 0x0104, 0x3D38, 0x0C0C, 0x078C, 0x3DC4, 0x00C8, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0BC4, 0x07E4, 0x3DB0, 0x00D0, 0x3FDC,
+ 0x3FC4, 0x0100, 0x3D38, 0x0B78, 0x083C, 0x3DA0, 0x00D8, 0x3FD8,
+ 0x3FC8, 0x0100, 0x3D3C, 0x0B28, 0x0890, 0x3D90, 0x00DC, 0x3FD8,
+ 0x3FC8, 0x00FC, 0x3D40, 0x0ADC, 0x08E8, 0x3D80, 0x00E4, 0x3FD4,
+ 0x3FCC, 0x00FC, 0x3D48, 0x0A84, 0x093C, 0x3D74, 0x00E8, 0x3FD4,
+ 0x3FCC, 0x00F8, 0x3D50, 0x0A38, 0x0990, 0x3D64, 0x00F0, 0x3FD0,
+ 0x3FD0, 0x00F4, 0x3D58, 0x09E0, 0x09E4, 0x3D5C, 0x00F4, 0x3FD0
+};
-static const uint16_t filter_8tap_64p_117[264] = {
- 116, 16100, 428, 3564, 428, 16100, 116, 0,
- 112, 16116, 376, 3564, 484, 16084, 120, 16380,
- 104, 16136, 324, 3560, 540, 16064, 124, 16380,
- 100, 16152, 272, 3556, 600, 16048, 128, 16380,
- 96, 16168, 220, 3548, 656, 16032, 136, 16376,
- 88, 16188, 172, 3540, 716, 16016, 140, 16376,
- 84, 16204, 124, 3528, 780, 16000, 144, 16376,
- 80, 16220, 76, 3512, 840, 15984, 148, 16372,
- 76, 16236, 32, 3496, 904, 15968, 152, 16372,
- 68, 16252, 16376, 3480, 968, 15952, 156, 16372,
- 64, 16268, 16332, 3456, 1032, 15936, 160, 16372,
- 60, 16284, 16292, 3432, 1096, 15920, 164, 16368,
- 56, 16300, 16252, 3408, 1164, 15908, 164, 16368,
- 48, 16316, 16216, 3380, 1228, 15892, 168, 16368,
- 44, 16332, 16180, 3348, 1296, 15880, 168, 16368,
- 40, 16348, 16148, 3316, 1364, 15868, 172, 16364,
- 36, 16360, 16116, 3284, 1428, 15856, 172, 16364,
- 32, 16376, 16084, 3248, 1496, 15848, 176, 16364,
- 28, 4, 16052, 3208, 1564, 15836, 176, 16364,
- 24, 16, 16028, 3168, 1632, 15828, 176, 16364,
- 20, 28, 16000, 3124, 1700, 15820, 176, 16364,
- 16, 40, 15976, 3080, 1768, 15812, 176, 16364,
- 12, 52, 15952, 3036, 1836, 15808, 176, 16364,
- 8, 64, 15932, 2988, 1904, 15800, 176, 16364,
- 4, 76, 15912, 2940, 1972, 15800, 172, 16364,
- 4, 84, 15892, 2888, 2040, 15796, 172, 16364,
- 0, 96, 15876, 2836, 2104, 15792, 168, 16364,
- 16380, 104, 15864, 2780, 2172, 15792, 164, 16364,
- 16380, 112, 15848, 2724, 2236, 15792, 160, 16364,
- 16376, 120, 15836, 2668, 2300, 15796, 156, 16368,
- 16376, 128, 15828, 2608, 2364, 15800, 152, 16368,
- 16372, 136, 15816, 2548, 2428, 15804, 148, 16368,
- 16372, 140, 15812, 2488, 2488, 15812, 140, 16372 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.16666 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_116[264] = {
+ 0x0080, 0x3E90, 0x0268, 0x0D14, 0x0264, 0x3E90, 0x0080, 0x0000,
+ 0x007C, 0x3E9C, 0x0238, 0x0D14, 0x0298, 0x3E84, 0x0080, 0x0000,
+ 0x0078, 0x3EAC, 0x0200, 0x0D10, 0x02D0, 0x3E78, 0x0084, 0x0000,
+ 0x0078, 0x3EB8, 0x01D0, 0x0D0C, 0x0304, 0x3E6C, 0x0084, 0x0000,
+ 0x0074, 0x3EC8, 0x01A0, 0x0D00, 0x033C, 0x3E60, 0x0088, 0x0000,
+ 0x0070, 0x3ED4, 0x0170, 0x0D00, 0x0374, 0x3E54, 0x0088, 0x3FFC,
+ 0x006C, 0x3EE4, 0x0140, 0x0CF8, 0x03AC, 0x3E48, 0x0088, 0x3FFC,
+ 0x006C, 0x3EF0, 0x0114, 0x0CE8, 0x03E4, 0x3E3C, 0x008C, 0x3FFC,
+ 0x0068, 0x3F00, 0x00E8, 0x0CD8, 0x041C, 0x3E34, 0x008C, 0x3FFC,
+ 0x0064, 0x3F10, 0x00BC, 0x0CCC, 0x0454, 0x3E28, 0x008C, 0x3FFC,
+ 0x0060, 0x3F1C, 0x0090, 0x0CBC, 0x0490, 0x3E20, 0x008C, 0x3FFC,
+ 0x005C, 0x3F2C, 0x0068, 0x0CA4, 0x04CC, 0x3E18, 0x008C, 0x3FFC,
+ 0x0058, 0x3F38, 0x0040, 0x0C94, 0x0504, 0x3E10, 0x008C, 0x3FFC,
+ 0x0054, 0x3F48, 0x001C, 0x0C7C, 0x0540, 0x3E08, 0x0088, 0x3FFC,
+ 0x0050, 0x3F54, 0x3FF8, 0x0C60, 0x057C, 0x3E04, 0x0088, 0x3FFC,
+ 0x004C, 0x3F64, 0x3FD4, 0x0C44, 0x05B8, 0x3DFC, 0x0088, 0x3FFC,
+ 0x0048, 0x3F70, 0x3FB4, 0x0C28, 0x05F4, 0x3DF8, 0x0084, 0x3FFC,
+ 0x0044, 0x3F80, 0x3F90, 0x0C0C, 0x0630, 0x3DF4, 0x0080, 0x3FFC,
+ 0x0040, 0x3F8C, 0x3F70, 0x0BE8, 0x066C, 0x3DF4, 0x0080, 0x3FFC,
+ 0x003C, 0x3F9C, 0x3F50, 0x0BC8, 0x06A8, 0x3DF0, 0x007C, 0x3FFC,
+ 0x0038, 0x3FA8, 0x3F34, 0x0BA0, 0x06E4, 0x3DF0, 0x0078, 0x0000,
+ 0x0034, 0x3FB4, 0x3F18, 0x0B80, 0x071C, 0x3DF0, 0x0074, 0x0000,
+ 0x0030, 0x3FC0, 0x3EFC, 0x0B5C, 0x0758, 0x3DF0, 0x0070, 0x0000,
+ 0x002C, 0x3FCC, 0x3EE4, 0x0B34, 0x0794, 0x3DF4, 0x0068, 0x0000,
+ 0x002C, 0x3FDC, 0x3ECC, 0x0B08, 0x07CC, 0x3DF4, 0x0064, 0x0000,
+ 0x0028, 0x3FE4, 0x3EB4, 0x0AE0, 0x0808, 0x3DF8, 0x0060, 0x0000,
+ 0x0024, 0x3FF0, 0x3EA0, 0x0AB0, 0x0840, 0x3E00, 0x0058, 0x0004,
+ 0x0020, 0x3FFC, 0x3E90, 0x0A84, 0x0878, 0x3E04, 0x0050, 0x0004,
+ 0x001C, 0x0004, 0x3E7C, 0x0A54, 0x08B0, 0x3E0C, 0x004C, 0x0008,
+ 0x0018, 0x000C, 0x3E68, 0x0A28, 0x08E8, 0x3E18, 0x0044, 0x0008,
+ 0x0018, 0x0018, 0x3E54, 0x09F4, 0x0920, 0x3E20, 0x003C, 0x000C,
+ 0x0014, 0x0020, 0x3E48, 0x09C0, 0x0954, 0x3E2C, 0x0034, 0x0010,
+ 0x0010, 0x002C, 0x3E3C, 0x098C, 0x0988, 0x3E38, 0x002C, 0x0010
+};
-static const uint16_t filter_8tap_64p_150[264] = {
- 16380, 16020, 1032, 2756, 1032, 16020, 16380, 0,
- 0, 16020, 992, 2756, 1068, 16024, 16376, 0,
- 4, 16020, 952, 2752, 1108, 16024, 16372, 0,
- 8, 16020, 916, 2748, 1148, 16028, 16368, 0,
- 12, 16020, 876, 2744, 1184, 16032, 16364, 4,
- 16, 16020, 840, 2740, 1224, 16036, 16356, 4,
- 20, 16024, 800, 2732, 1264, 16040, 16352, 4,
- 20, 16024, 764, 2724, 1304, 16044, 16348, 8,
- 24, 16028, 728, 2716, 1344, 16052, 16340, 8,
- 28, 16028, 692, 2704, 1380, 16056, 16336, 12,
- 28, 16032, 656, 2696, 1420, 16064, 16328, 12,
- 32, 16036, 620, 2684, 1460, 16072, 16324, 12,
- 36, 16040, 584, 2668, 1500, 16080, 16316, 16,
- 36, 16044, 548, 2656, 1536, 16088, 16308, 16,
- 36, 16048, 516, 2640, 1576, 16096, 16304, 20,
- 40, 16052, 480, 2624, 1612, 16108, 16296, 20,
- 40, 16060, 448, 2608, 1652, 16120, 16288, 20,
- 44, 16064, 416, 2588, 1692, 16132, 16280, 24,
- 44, 16068, 384, 2568, 1728, 16144, 16276, 24,
- 44, 16076, 352, 2548, 1764, 16156, 16268, 28,
- 44, 16080, 320, 2528, 1804, 16168, 16260, 28,
- 44, 16088, 292, 2508, 1840, 16184, 16252, 28,
- 44, 16096, 264, 2484, 1876, 16200, 16244, 32,
- 48, 16100, 232, 2460, 1912, 16216, 16236, 32,
- 48, 16108, 204, 2436, 1948, 16232, 16228, 32,
- 48, 16116, 176, 2412, 1980, 16248, 16220, 36,
- 48, 16124, 152, 2384, 2016, 16264, 16216, 36,
- 44, 16128, 124, 2356, 2052, 16284, 16208, 36,
- 44, 16136, 100, 2328, 2084, 16304, 16200, 40,
- 44, 16144, 72, 2300, 2116, 16324, 16192, 40,
- 44, 16152, 48, 2272, 2148, 16344, 16184, 40,
- 44, 16160, 24, 2244, 2180, 16364, 16176, 40,
- 44, 16168, 4, 2212, 2212, 4, 16168, 44 };
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.49999 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
+static const uint16_t filter_8tap_64p_149[264] = {
+ 0x0008, 0x3E8C, 0x03F8, 0x0AE8, 0x03F8, 0x3E8C, 0x0008, 0x0000,
+ 0x000C, 0x3E8C, 0x03D0, 0x0AE8, 0x0420, 0x3E90, 0x0000, 0x0000,
+ 0x000C, 0x3E8C, 0x03AC, 0x0AE8, 0x0444, 0x3E90, 0x0000, 0x0000,
+ 0x0010, 0x3E90, 0x0384, 0x0AE0, 0x046C, 0x3E94, 0x3FFC, 0x0000,
+ 0x0014, 0x3E90, 0x035C, 0x0ADC, 0x0494, 0x3E94, 0x3FF8, 0x0004,
+ 0x0018, 0x3E90, 0x0334, 0x0AD8, 0x04BC, 0x3E98, 0x3FF4, 0x0004,
+ 0x001C, 0x3E94, 0x0310, 0x0AD0, 0x04E4, 0x3E9C, 0x3FEC, 0x0004,
+ 0x0020, 0x3E98, 0x02E8, 0x0AC4, 0x050C, 0x3EA0, 0x3FE8, 0x0008,
+ 0x0020, 0x3E98, 0x02C4, 0x0AC0, 0x0534, 0x3EA4, 0x3FE4, 0x0008,
+ 0x0024, 0x3E9C, 0x02A0, 0x0AB4, 0x055C, 0x3EAC, 0x3FDC, 0x0008,
+ 0x0024, 0x3EA0, 0x027C, 0x0AA8, 0x0584, 0x3EB0, 0x3FD8, 0x000C,
+ 0x0028, 0x3EA4, 0x0258, 0x0A9C, 0x05AC, 0x3EB8, 0x3FD0, 0x000C,
+ 0x0028, 0x3EA8, 0x0234, 0x0A90, 0x05D4, 0x3EC0, 0x3FC8, 0x0010,
+ 0x002C, 0x3EAC, 0x0210, 0x0A80, 0x05FC, 0x3EC8, 0x3FC4, 0x0010,
+ 0x002C, 0x3EB4, 0x01F0, 0x0A70, 0x0624, 0x3ED0, 0x3FBC, 0x0010,
+ 0x002C, 0x3EB8, 0x01CC, 0x0A60, 0x064C, 0x3EDC, 0x3FB4, 0x0014,
+ 0x0030, 0x3EBC, 0x01A8, 0x0A50, 0x0674, 0x3EE4, 0x3FB0, 0x0014,
+ 0x0030, 0x3EC4, 0x0188, 0x0A38, 0x069C, 0x3EF0, 0x3FA8, 0x0018,
+ 0x0030, 0x3ECC, 0x0168, 0x0A28, 0x06C0, 0x3EFC, 0x3FA0, 0x0018,
+ 0x0030, 0x3ED0, 0x0148, 0x0A14, 0x06E8, 0x3F08, 0x3F98, 0x001C,
+ 0x0030, 0x3ED8, 0x012C, 0x0A00, 0x070C, 0x3F14, 0x3F90, 0x001C,
+ 0x0034, 0x3EE0, 0x0108, 0x09E4, 0x0734, 0x3F24, 0x3F8C, 0x001C,
+ 0x0034, 0x3EE4, 0x00EC, 0x09CC, 0x0758, 0x3F34, 0x3F84, 0x0020,
+ 0x0034, 0x3EEC, 0x00D0, 0x09B8, 0x077C, 0x3F40, 0x3F7C, 0x0020,
+ 0x0034, 0x3EF4, 0x00B4, 0x0998, 0x07A4, 0x3F50, 0x3F74, 0x0024,
+ 0x0030, 0x3EFC, 0x0098, 0x0980, 0x07C8, 0x3F64, 0x3F6C, 0x0024,
+ 0x0030, 0x3F04, 0x0080, 0x0968, 0x07E8, 0x3F74, 0x3F64, 0x0024,
+ 0x0030, 0x3F0C, 0x0060, 0x094C, 0x080C, 0x3F88, 0x3F5C, 0x0028,
+ 0x0030, 0x3F14, 0x0048, 0x0930, 0x0830, 0x3F98, 0x3F54, 0x0028,
+ 0x0030, 0x3F1C, 0x0030, 0x0914, 0x0850, 0x3FAC, 0x3F4C, 0x0028,
+ 0x0030, 0x3F24, 0x0018, 0x08F0, 0x0874, 0x3FC0, 0x3F44, 0x002C,
+ 0x002C, 0x3F2C, 0x0000, 0x08D4, 0x0894, 0x3FD8, 0x3F3C, 0x002C,
+ 0x002C, 0x3F34, 0x3FEC, 0x08B4, 0x08B4, 0x3FEC, 0x3F34, 0x002C
+};
+//=========================================
+// <num_taps> = 8
+// <num_phases> = 64
+// <scale_ratio> = 1.83332 (input/output)
+// <sharpness> = 0
+// <CoefType> = ModifiedLanczos
+// <CoefQuant> = 1.10
+// <CoefOut> = 1.12
+//=========================================
static const uint16_t filter_8tap_64p_183[264] = {
- 16264, 16264, 1164, 2244, 1164, 16264, 16264, 0,
- 16268, 16256, 1136, 2240, 1188, 16272, 16260, 0,
- 16272, 16248, 1108, 2240, 1216, 16280, 16256, 0,
- 16276, 16240, 1080, 2236, 1240, 16292, 16252, 0,
- 16280, 16232, 1056, 2236, 1268, 16300, 16248, 0,
- 16284, 16224, 1028, 2232, 1292, 16312, 16244, 0,
- 16288, 16216, 1000, 2228, 1320, 16324, 16240, 0,
- 16292, 16212, 976, 2224, 1344, 16336, 16236, 0,
- 16296, 16204, 948, 2220, 1372, 16348, 16232, 0,
- 16300, 16200, 920, 2212, 1396, 16360, 16228, 4,
- 16304, 16196, 896, 2204, 1424, 16372, 16224, 4,
- 16308, 16188, 868, 2200, 1448, 0, 16220, 4,
- 16312, 16184, 844, 2192, 1472, 12, 16216, 4,
- 16316, 16180, 816, 2184, 1500, 28, 16212, 4,
- 16320, 16176, 792, 2172, 1524, 40, 16208, 4,
- 16324, 16172, 764, 2164, 1548, 56, 16204, 0,
- 16328, 16172, 740, 2156, 1572, 72, 16200, 0,
- 16328, 16168, 712, 2144, 1596, 88, 16196, 0,
- 16332, 16164, 688, 2132, 1620, 100, 16192, 0,
- 16336, 16164, 664, 2120, 1644, 120, 16192, 0,
- 16340, 16160, 640, 2108, 1668, 136, 16188, 0,
- 16344, 16160, 616, 2096, 1688, 152, 16184, 0,
- 16344, 16160, 592, 2080, 1712, 168, 16180, 0,
- 16348, 16156, 568, 2068, 1736, 188, 16176, 16380,
- 16352, 16156, 544, 2052, 1756, 204, 16176, 16380,
- 16352, 16156, 520, 2036, 1780, 224, 16172, 16380,
- 16356, 16156, 496, 2024, 1800, 244, 16172, 16380,
- 16360, 16156, 472, 2008, 1820, 260, 16168, 16376,
- 16360, 16156, 452, 1988, 1840, 280, 16164, 16376,
- 16364, 16156, 428, 1972, 1860, 300, 16164, 16376,
- 16364, 16156, 408, 1956, 1880, 320, 16164, 16372,
- 16368, 16160, 384, 1936, 1900, 344, 16160, 16372,
- 16368, 16160, 364, 1920, 1920, 364, 16160, 16368 };
+ 0x3F88, 0x0048, 0x047C, 0x0768, 0x047C, 0x0048, 0x3F88, 0x0000,
+ 0x3F88, 0x003C, 0x0468, 0x076C, 0x0490, 0x0054, 0x3F84, 0x0000,
+ 0x3F8C, 0x0034, 0x0454, 0x0768, 0x04A4, 0x005C, 0x3F84, 0x0000,
+ 0x3F8C, 0x0028, 0x0444, 0x076C, 0x04B4, 0x0068, 0x3F80, 0x0000,
+ 0x3F90, 0x0020, 0x042C, 0x0768, 0x04C8, 0x0074, 0x3F80, 0x0000,
+ 0x3F90, 0x0018, 0x041C, 0x0764, 0x04DC, 0x0080, 0x3F7C, 0x0000,
+ 0x3F94, 0x0010, 0x0408, 0x075C, 0x04F0, 0x008C, 0x3F7C, 0x0000,
+ 0x3F94, 0x0004, 0x03F8, 0x0760, 0x0500, 0x0098, 0x3F7C, 0x3FFC,
+ 0x3F98, 0x0000, 0x03E0, 0x075C, 0x0514, 0x00A4, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF8, 0x03CC, 0x0754, 0x0528, 0x00B0, 0x3F78, 0x3FFC,
+ 0x3F9C, 0x3FF0, 0x03B8, 0x0754, 0x0538, 0x00BC, 0x3F78, 0x3FFC,
+ 0x3FA0, 0x3FE8, 0x03A4, 0x0750, 0x054C, 0x00CC, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FE0, 0x0390, 0x074C, 0x055C, 0x00D8, 0x3F74, 0x3FF8,
+ 0x3FA4, 0x3FDC, 0x037C, 0x0744, 0x0570, 0x00E4, 0x3F74, 0x3FF8,
+ 0x3FA8, 0x3FD4, 0x0368, 0x0740, 0x0580, 0x00F4, 0x3F74, 0x3FF4,
+ 0x3FA8, 0x3FCC, 0x0354, 0x073C, 0x0590, 0x0104, 0x3F74, 0x3FF4,
+ 0x3FAC, 0x3FC8, 0x0340, 0x0730, 0x05A4, 0x0110, 0x3F74, 0x3FF4,
+ 0x3FB0, 0x3FC0, 0x0330, 0x0728, 0x05B4, 0x0120, 0x3F74, 0x3FF0,
+ 0x3FB0, 0x3FBC, 0x031C, 0x0724, 0x05C4, 0x0130, 0x3F70, 0x3FF0,
+ 0x3FB4, 0x3FB4, 0x0308, 0x0720, 0x05D4, 0x013C, 0x3F70, 0x3FF0,
+ 0x3FB8, 0x3FB0, 0x02F4, 0x0714, 0x05E4, 0x014C, 0x3F74, 0x3FEC,
+ 0x3FB8, 0x3FAC, 0x02E0, 0x0708, 0x05F8, 0x015C, 0x3F74, 0x3FEC,
+ 0x3FBC, 0x3FA8, 0x02CC, 0x0704, 0x0604, 0x016C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3FA0, 0x02BC, 0x06F8, 0x0614, 0x017C, 0x3F74, 0x3FE8,
+ 0x3FC0, 0x3F9C, 0x02A8, 0x06F4, 0x0624, 0x018C, 0x3F74, 0x3FE4,
+ 0x3FC4, 0x3F98, 0x0294, 0x06E8, 0x0634, 0x019C, 0x3F74, 0x3FE4,
+ 0x3FC8, 0x3F94, 0x0284, 0x06D8, 0x0644, 0x01AC, 0x3F78, 0x3FE0,
+ 0x3FC8, 0x3F90, 0x0270, 0x06D4, 0x0650, 0x01BC, 0x3F78, 0x3FE0,
+ 0x3FCC, 0x3F8C, 0x025C, 0x06C8, 0x0660, 0x01D0, 0x3F78, 0x3FDC,
+ 0x3FCC, 0x3F8C, 0x024C, 0x06B8, 0x066C, 0x01E0, 0x3F7C, 0x3FDC,
+ 0x3FD0, 0x3F88, 0x0238, 0x06B0, 0x067C, 0x01F0, 0x3F7C, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0228, 0x069C, 0x0688, 0x0204, 0x3F80, 0x3FD8,
+ 0x3FD4, 0x3F84, 0x0214, 0x0694, 0x0694, 0x0214, 0x3F84, 0x3FD4
+};
const uint16_t *get_filter_3tap_16p(struct fixed31_32 ratio)
{
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_16p_117;
+ return filter_3tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_16p_150;
+ return filter_3tap_16p_149;
else
return filter_3tap_16p_183;
}
@@ -1029,9 +1355,9 @@ const uint16_t *get_filter_3tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_3tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_3tap_64p_117;
+ return filter_3tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_3tap_64p_150;
+ return filter_3tap_64p_149;
else
return filter_3tap_64p_183;
}
@@ -1041,9 +1367,9 @@ const uint16_t *get_filter_4tap_16p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_16p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_16p_117;
+ return filter_4tap_16p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_16p_150;
+ return filter_4tap_16p_149;
else
return filter_4tap_16p_183;
}
@@ -1053,9 +1379,9 @@ const uint16_t *get_filter_4tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_4tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_4tap_64p_117;
+ return filter_4tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_4tap_64p_150;
+ return filter_4tap_64p_149;
else
return filter_4tap_64p_183;
}
@@ -1065,9 +1391,9 @@ const uint16_t *get_filter_5tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_5tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_5tap_64p_117;
+ return filter_5tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_5tap_64p_150;
+ return filter_5tap_64p_149;
else
return filter_5tap_64p_183;
}
@@ -1077,9 +1403,9 @@ const uint16_t *get_filter_6tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_6tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_6tap_64p_117;
+ return filter_6tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_6tap_64p_150;
+ return filter_6tap_64p_149;
else
return filter_6tap_64p_183;
}
@@ -1089,9 +1415,9 @@ const uint16_t *get_filter_7tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_7tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_7tap_64p_117;
+ return filter_7tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_7tap_64p_150;
+ return filter_7tap_64p_149;
else
return filter_7tap_64p_183;
}
@@ -1101,9 +1427,9 @@ const uint16_t *get_filter_8tap_64p(struct fixed31_32 ratio)
if (ratio.value < dc_fixpt_one.value)
return filter_8tap_64p_upscale;
else if (ratio.value < dc_fixpt_from_fraction(4, 3).value)
- return filter_8tap_64p_117;
+ return filter_8tap_64p_116;
else if (ratio.value < dc_fixpt_from_fraction(5, 3).value)
- return filter_8tap_64p_150;
+ return filter_8tap_64p_149;
else
return filter_8tap_64p_183;
}
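The renames in the selectors above follow the regenerated coefficient headers (scale ratios 1.16666, 1.49999, 1.83332 rather than the old rounded 117/150/183 names); the selection breakpoints themselves are unchanged. Ratios below 1.0 are upscaling, below 4/3 use the ~1.167 tables, below 5/3 the ~1.5 tables, and everything stronger the ~1.833 tables. A float-based illustration of the same thresholds (the driver compares fixed31_32 values, so this is a sketch only):

/* Sketch only: the driver uses dc_fixpt_* comparisons, not doubles. */
static const uint16_t *pick_7tap_64p(double ratio)
{
	if (ratio < 1.0)		/* upscale */
		return filter_7tap_64p_upscale;
	else if (ratio < 4.0 / 3.0)	/* mild downscale, ~1.167 table */
		return filter_7tap_64p_116;
	else if (ratio < 5.0 / 3.0)	/* ~1.5 table */
		return filter_7tap_64p_149;
	else				/* ~1.833 table */
		return filter_7tap_64p_183;
}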
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
new file mode 100644
index 000000000000..bb0e1b80ec3c
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_scl_filters_old.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright 2012-16 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
index 225955ec6d39..bc109d4fc6e6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c
@@ -27,25 +27,55 @@
#include "dc.h"
#include "dc_dmub_srv.h"
#include "../../dmub/inc/dmub_srv.h"
-#include "dmub_fw_state.h"
+#include "../../dmub/inc/dmub_gpint_cmd.h"
#include "core_types.h"
-#include "ipp.h"
#define MAX_PIPES 6
/**
* Get PSR state from firmware.
*/
-static void dmub_get_psr_state(uint32_t *psr_state)
+static void dmub_psr_get_state(struct dmub_psr *dmub, uint32_t *psr_state)
{
- // Not yet implemented
- // Trigger GPINT interrupt from firmware
+ struct dmub_srv *srv = dmub->ctx->dmub_srv->dmub;
+
+ // Send gpint command and wait for ack
+ dmub_srv_send_gpint_command(srv, DMUB_GPINT__GET_PSR_STATE, 0, 30);
+
+ dmub_srv_get_gpint_response(srv, psr_state);
+}
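The stub is replaced by a GPINT round trip: send DMUB_GPINT__GET_PSR_STATE with a wait budget (the trailing 30 is presumably a microsecond timeout for the ack), then read the firmware's response register. Through the renamed funcs table, a call site reduces to something like this sketch ('psr' assumed valid, error handling omitted):

/* Illustrative caller of the new vtable entry. */
static uint32_t read_psr_state(struct dmub_psr *psr)
{
	uint32_t state = 0;

	psr->funcs->psr_get_state(psr, &state);
	return state;
}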
+
+/**
+ * Set PSR version.
+ */
+static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state *stream)
+{
+ union dmub_rb_cmd cmd;
+ struct dc_context *dc = dmub->ctx;
+
+ cmd.psr_set_version.header.type = DMUB_CMD__PSR;
+ cmd.psr_set_version.header.sub_type = DMUB_CMD__PSR_SET_VERSION;
+
+ if (stream->psr_version == 0x0) // Unsupported
+ return false;
+ else if (stream->psr_version == 0x1)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_1;
+ else if (stream->psr_version == 0x2)
+ cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_2;
+
+ cmd.psr_enable.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data);
+
+ dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_enable.header);
+ dc_dmub_srv_cmd_execute(dc->dmub_srv);
+ dc_dmub_srv_wait_idle(dc->dmub_srv);
+
+ return true;
}
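dmub_psr_set_version uses the same three-step DMUB submission seen throughout this file: fill a union dmub_rb_cmd, queue it, kick the firmware, and wait for idle. Factored out, the pattern looks like the sketch below (the helper name is hypothetical, not from the patch):

/* Hypothetical helper capturing the queue/execute/wait pattern. */
static void submit_dmub_cmd(struct dc_context *dc, union dmub_rb_cmd *cmd)
{
	dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd->psr_enable.header);
	dc_dmub_srv_cmd_execute(dc->dmub_srv);
	dc_dmub_srv_wait_idle(dc->dmub_srv);
}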
/**
* Enable/Disable PSR.
*/
-static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
+static void dmub_psr_enable(struct dmub_psr *dmub, bool enable)
{
union dmub_rb_cmd cmd;
struct dc_context *dc = dmub->ctx;
@@ -67,13 +97,13 @@ static void dmub_set_psr_enable(struct dmub_psr *dmub, bool enable)
/**
* Set PSR level.
*/
-static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
+static void dmub_psr_set_level(struct dmub_psr *dmub, uint16_t psr_level)
{
union dmub_rb_cmd cmd;
uint32_t psr_state = 0;
struct dc_context *dc = dmub->ctx;
- dmub_get_psr_state(&psr_state);
+ dmub_psr_get_state(dmub, &psr_state);
if (psr_state == 0)
return;
@@ -91,7 +121,7 @@ static void dmub_set_psr_level(struct dmub_psr *dmub, uint16_t psr_level)
/**
* Setup PSR by programming phy registers and sending psr hw context values to firmware.
*/
-static bool dmub_setup_psr(struct dmub_psr *dmub,
+static bool dmub_psr_copy_settings(struct dmub_psr *dmub,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -101,21 +131,22 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
= &cmd.psr_copy_settings.psr_copy_settings_data;
struct pipe_ctx *pipe_ctx = NULL;
struct resource_context *res_ctx = &link->ctx->dc->current_state->res_ctx;
+ int i = 0;
- for (int i = 0; i < MAX_PIPES; i++) {
- if (res_ctx &&
- res_ctx->pipe_ctx[i].stream &&
- res_ctx->pipe_ctx[i].stream->link &&
- res_ctx->pipe_ctx[i].stream->link == link &&
- res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
+ for (i = 0; i < MAX_PIPES; i++) {
+ if (res_ctx->pipe_ctx[i].stream &&
+ res_ctx->pipe_ctx[i].stream->link == link &&
+ res_ctx->pipe_ctx[i].stream->link->connector_signal == SIGNAL_TYPE_EDP) {
pipe_ctx = &res_ctx->pipe_ctx[i];
break;
}
}
- if (!pipe_ctx ||
- !&pipe_ctx->plane_res ||
- !&pipe_ctx->stream_res)
+ if (!pipe_ctx)
+ return false;
+
+ // First, set the psr version
+ if (!dmub_psr_set_version(dmub, pipe_ctx->stream))
return false;
// Program DP DPHY fast training registers
@@ -138,10 +169,6 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
copy_settings_data->mpcc_inst = pipe_ctx->plane_res.mpcc_inst;
- if (pipe_ctx->plane_res.hubp)
- copy_settings_data->hubp_inst = pipe_ctx->plane_res.hubp->inst;
- else
- copy_settings_data->hubp_inst = 0;
if (pipe_ctx->plane_res.dpp)
copy_settings_data->dpp_inst = pipe_ctx->plane_res.dpp->inst;
else
@@ -157,18 +184,9 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
// Misc
copy_settings_data->psr_level = psr_context->psr_level.u32all;
- copy_settings_data->hyst_frames = psr_context->timehyst_frames;
- copy_settings_data->hyst_lines = psr_context->hyst_lines;
- copy_settings_data->phy_type = psr_context->phyType;
- copy_settings_data->aux_repeat = psr_context->aux_repeats;
- copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
- copy_settings_data->skip_wait_for_pll_lock = psr_context->skipPsrWaitForPllLock;
+ copy_settings_data->smu_optimizations_en = psr_context->allow_smu_optimizations;
copy_settings_data->frame_delay = psr_context->frame_delay;
- copy_settings_data->smu_phy_id = psr_context->smuPhyId;
- copy_settings_data->num_of_controllers = psr_context->numberOfControllers;
copy_settings_data->frame_cap_ind = psr_context->psrFrameCaptureIndicationReq;
- copy_settings_data->phy_num = psr_context->frame_delay & 0x7;
- copy_settings_data->link_rate = psr_context->frame_delay & 0xF;
dc_dmub_srv_cmd_queue(dc->dmub_srv, &cmd.psr_copy_settings.header);
dc_dmub_srv_cmd_execute(dc->dmub_srv);
@@ -178,10 +196,10 @@ static bool dmub_setup_psr(struct dmub_psr *dmub,
}
static const struct dmub_psr_funcs psr_funcs = {
- .set_psr_enable = dmub_set_psr_enable,
- .setup_psr = dmub_setup_psr,
- .get_psr_state = dmub_get_psr_state,
- .set_psr_level = dmub_set_psr_level,
+ .psr_copy_settings = dmub_psr_copy_settings,
+ .psr_enable = dmub_psr_enable,
+ .psr_get_state = dmub_psr_get_state,
+ .psr_set_level = dmub_psr_set_level,
};
/**
@@ -215,6 +233,6 @@ struct dmub_psr *dmub_psr_create(struct dc_context *ctx)
*/
void dmub_psr_destroy(struct dmub_psr **dmub)
{
- kfree(dmub);
+ kfree(*dmub);
*dmub = NULL;
}
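The destroy fix is worth noting: dmub_psr_destroy() receives a struct dmub_psr **, so the old kfree(dmub) passed the address of the caller's pointer variable to kfree instead of the allocation, leaking the object. The corrected destroy-by-reference shape, generically ('struct foo' is a stand-in):

/* Free the object itself, then clear the caller's pointer so stale
 * uses are caught early.
 */
static void foo_destroy(struct foo **pp)
{
	kfree(*pp);
	*pp = NULL;
}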
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
index 229958de3035..f404fecd6410 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.h
@@ -27,6 +27,7 @@
#define _DMUB_PSR_H_
#include "os_types.h"
+#include "dc_link.h"
struct dmub_psr {
struct dc_context *ctx;
@@ -34,14 +35,14 @@ struct dmub_psr {
};
struct dmub_psr_funcs {
- void (*set_psr_enable)(struct dmub_psr *dmub, bool enable);
- bool (*setup_psr)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
- void (*get_psr_state)(uint32_t *psr_state);
- void (*set_psr_level)(struct dmub_psr *dmub, uint16_t psr_level);
+ bool (*psr_copy_settings)(struct dmub_psr *dmub, struct dc_link *link, struct psr_context *psr_context);
+ void (*psr_enable)(struct dmub_psr *dmub, bool enable);
+ void (*psr_get_state)(struct dmub_psr *dmub, uint32_t *psr_state);
+ void (*psr_set_level)(struct dmub_psr *dmub, uint16_t psr_level);
};
struct dmub_psr *dmub_psr_create(struct dc_context *ctx);
void dmub_psr_destroy(struct dmub_psr **dmub);
-#endif /* _DCE_DMUB_H_ */
+#endif /* _DMUB_PSR_H_ */
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 5b689273ff44..0976e378659f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -71,6 +71,8 @@
#define PANEL_POWER_UP_TIMEOUT 300
#define PANEL_POWER_DOWN_TIMEOUT 500
#define HPD_CHECK_INTERVAL 10
+#define OLED_POST_T7_DELAY 100
+#define OLED_PRE_T11_DELAY 150
#define CTX \
hws->ctx
@@ -696,8 +698,10 @@ void dce110_enable_stream(struct pipe_ctx *pipe_ctx)
}
/*todo: cloned in stream enc, fix*/
-static bool is_panel_backlight_on(struct dce_hwseq *hws)
+bool dce110_is_panel_backlight_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t value;
REG_GET(LVTMA_PWRSEQ_CNTL, LVTMA_BLON, &value);
@@ -705,11 +709,12 @@ static bool is_panel_backlight_on(struct dce_hwseq *hws)
return value;
}
-static bool is_panel_powered_on(struct dce_hwseq *hws)
+bool dce110_is_panel_powered_on(struct dc_link *link)
{
+ struct dc_context *ctx = link->ctx;
+ struct dce_hwseq *hws = ctx->dc->hwseq;
uint32_t pwr_seq_state, dig_on, dig_on_ovrd;
-
REG_GET(LVTMA_PWRSEQ_STATE, LVTMA_PWRSEQ_TARGET_STATE_R, &pwr_seq_state);
REG_GET_2(LVTMA_PWRSEQ_CNTL, LVTMA_DIGON, &dig_on, LVTMA_DIGON_OVRD, &dig_on_ovrd);
@@ -816,7 +821,7 @@ void dce110_edp_power_control(
return;
}
- if (power_up != is_panel_powered_on(hwseq)) {
+ if (power_up != hwseq->funcs.is_panel_powered_on(link)) {
/* Send VBIOS command to prompt eDP panel power */
if (power_up) {
unsigned long long current_ts = dm_get_timestamp(ctx);
@@ -896,7 +901,7 @@ void dce110_edp_backlight_control(
return;
}
- if (enable && is_panel_backlight_on(hws)) {
+ if (enable && hws->funcs.is_panel_backlight_on(link)) {
DC_LOG_HW_RESUME_S3(
"%s: panel already powered up. Do nothing.\n",
__func__);
@@ -936,9 +941,21 @@ void dce110_edp_backlight_control(
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_ON)
edp_receiver_ready_T7(link);
link_transmitter_control(ctx->dc_bios, &cntl);
+
+ if (enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_POST_T7_DELAY);
+
+ if (link->dpcd_sink_ext_caps.bits.oled ||
+ link->dpcd_sink_ext_caps.bits.hdr_aux_backlight_control == 1 ||
+ link->dpcd_sink_ext_caps.bits.sdr_aux_backlight_control == 1)
+ dc_link_backlight_enable_aux(link, enable);
+
/*edp 1.2*/
if (cntl.action == TRANSMITTER_CONTROL_BACKLIGHT_OFF)
edp_receiver_ready_T9(link);
+
+ if (!enable && link->dpcd_sink_ext_caps.bits.oled)
+ msleep(OLED_PRE_T11_DELAY);
}
void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
@@ -2576,17 +2593,6 @@ static void dce110_apply_ctx_for_surface(
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if (stream == pipe_ctx->stream) {
- if (!pipe_ctx->top_pipe &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- }
- }
-
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream != stream)
continue;
@@ -2607,20 +2613,16 @@ static void dce110_apply_ctx_for_surface(
}
- for (i = 0; i < dc->res_pool->pipe_count; i++) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
- struct pipe_ctx *old_pipe_ctx = &dc->current_state->res_ctx.pipe_ctx[i];
-
- if ((stream == pipe_ctx->stream) &&
- (!pipe_ctx->top_pipe) &&
- (pipe_ctx->plane_state || old_pipe_ctx->plane_state))
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- }
-
if (dc->fbc_compressor)
enable_fbc(dc, context);
}
+static void dce110_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+}
+
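The locking rework removes the pipe_control_lock bracketing from dce110_apply_ctx_for_surface; DCE needs no interdependent-update lock (hence .interdependent_update_lock = NULL below), and the empty post-unlock hook keeps the shared sequencer's call order intact. Under the new contract, a caller-side sequence might look like this sketch (signatures inferred from context, not guaranteed):

/* Hypothetical caller ordering; the lock hook may be NULL on DCE. */
static void program_front_end(struct dc *dc, struct dc_state *context,
			      struct dc_stream_state *stream)
{
	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, true);

	dc->hwss.apply_ctx_for_surface(dc, stream, 1, context);

	if (dc->hwss.interdependent_update_lock)
		dc->hwss.interdependent_update_lock(dc, context, false);

	dc->hwss.post_unlock_program_front_end(dc, context);
}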
static void dce110_power_down_fe(struct dc *dc, struct pipe_ctx *pipe_ctx)
{
struct dce_hwseq *hws = dc->hwseq;
@@ -2722,6 +2724,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.init_hw = init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dce110_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dce110_post_unlock_program_front_end,
.update_plane_addr = update_plane_addr,
.update_pending_status = dce110_update_pending_status,
.enable_accelerated_mode = dce110_enable_accelerated_mode,
@@ -2736,6 +2739,7 @@ static const struct hw_sequencer_funcs dce110_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dce110_power_down_fe,
.pipe_control_lock = dce_pipe_control_lock,
+ .interdependent_update_lock = NULL,
.prepare_bandwidth = dce110_prepare_bandwidth,
.optimize_bandwidth = dce110_optimize_bandwidth,
.set_drr = set_drr,
@@ -2763,6 +2767,8 @@ static const struct hwseq_private_funcs dce110_private_funcs = {
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
};
void dce110_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index 26a9c14a58b1..34be166e8ff0 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -85,5 +85,9 @@ void dce110_edp_wait_for_hpd_ready(
struct dc_link *link,
bool power_up);
+bool dce110_is_panel_backlight_on(struct dc_link *link);
+
+bool dce110_is_panel_powered_on(struct dc_link *link);
+
#endif /* __DC_HWSS_DCE110_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
index bbd6e01b3eca..47a39eb9400b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_cm_common.c
@@ -316,6 +316,7 @@ bool cm_helper_translate_curve_to_hw_format(
struct pwl_result_data *rgb_resulted;
struct pwl_result_data *rgb;
struct pwl_result_data *rgb_plus_1;
+ struct pwl_result_data *rgb_minus_1;
int32_t region_start, region_end;
int32_t i;
@@ -465,9 +466,20 @@ bool cm_helper_translate_curve_to_hw_format(
rgb = rgb_resulted;
rgb_plus_1 = rgb_resulted + 1;
+ rgb_minus_1 = rgb;
i = 1;
while (i != hw_points + 1) {
+
+ if (i >= hw_points - 1) {
+ if (dc_fixpt_lt(rgb_plus_1->red, rgb->red))
+ rgb_plus_1->red = dc_fixpt_add(rgb->red, rgb_minus_1->delta_red);
+ if (dc_fixpt_lt(rgb_plus_1->green, rgb->green))
+ rgb_plus_1->green = dc_fixpt_add(rgb->green, rgb_minus_1->delta_green);
+ if (dc_fixpt_lt(rgb_plus_1->blue, rgb->blue))
+ rgb_plus_1->blue = dc_fixpt_add(rgb->blue, rgb_minus_1->delta_blue);
+ }
+
rgb->delta_red = dc_fixpt_sub(rgb_plus_1->red, rgb->red);
rgb->delta_green = dc_fixpt_sub(rgb_plus_1->green, rgb->green);
rgb->delta_blue = dc_fixpt_sub(rgb_plus_1->blue, rgb->blue);
@@ -482,6 +494,7 @@ bool cm_helper_translate_curve_to_hw_format(
}
++rgb_plus_1;
+ rgb_minus_1 = rgb;
++rgb;
++i;
}
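The gamma hunk guards the tail of the curve: for the last couple of hardware points (i >= hw_points - 1), if the next point would dip below the current one, it is re-extrapolated from the previous segment's delta so the resulting deltas stay non-negative. The same idea on plain doubles, as a sketch (the driver works in fixed31_32):

/* Keep the final points non-decreasing by extrapolating with the
 * previous segment's slope; doubles stand in for fixed31_32.
 */
static void clamp_curve_tail(double *pts, int n)
{
	double prev_delta;

	if (n < 3)
		return;
	prev_delta = pts[n - 2] - pts[n - 3];
	if (pts[n - 1] < pts[n - 2])
		pts[n - 1] = pts[n - 2] + prev_delta;
}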
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
index f36a0d8cedfe..deccab0228d2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.c
@@ -128,8 +128,8 @@ bool hubbub1_verify_allow_pstate_change_high(
* pstate takes around ~100us on linux. Unknown currently as to
* why it takes that long on linux
*/
- static unsigned int pstate_wait_timeout_us = 200;
- static unsigned int pstate_wait_expected_timeout_us = 40;
+ const unsigned int pstate_wait_timeout_us = 200;
+ const unsigned int pstate_wait_expected_timeout_us = 40;
static unsigned int max_sampled_pstate_wait_us; /* data collection */
static bool forced_pstate_allow; /* help with revert wa */
@@ -147,8 +147,9 @@ bool hubbub1_verify_allow_pstate_change_high(
forced_pstate_allow = false;
}
- /* RV2:
- * dchubbubdebugind, at: 0xB
+	/* The following table applies only to DCN1 and DCN2; for newer
+	 * DCNs, consult the HW IP folks to read the RTL.
+ * HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
* description
* 0: Pipe0 Plane0 Allow Pstate Change
* 1: Pipe0 Plane1 Allow Pstate Change
@@ -181,64 +182,6 @@ bool hubbub1_verify_allow_pstate_change_high(
* 28: WB0 Allow Pstate Change
* 29: WB1 Allow Pstate Change
* 30: Arbiter's allow_pstate_change
- * 31: SOC pstate change request"
- */
- /*DCN2.x:
- HUBBUB:DCHUBBUB_TEST_ARB_DEBUG10 DCHUBBUBDEBUGIND:0xB
- 0: Pipe0 Plane0 Allow P-state Change
- 1: Pipe0 Plane1 Allow P-state Change
- 2: Pipe0 Cursor0 Allow P-state Change
- 3: Pipe0 Cursor1 Allow P-state Change
- 4: Pipe1 Plane0 Allow P-state Change
- 5: Pipe1 Plane1 Allow P-state Change
- 6: Pipe1 Cursor0 Allow P-state Change
- 7: Pipe1 Cursor1 Allow P-state Change
- 8: Pipe2 Plane0 Allow P-state Change
- 9: Pipe2 Plane1 Allow P-state Change
- 10: Pipe2 Cursor0 Allow P-state Change
- 11: Pipe2 Cursor1 Allow P-state Change
- 12: Pipe3 Plane0 Allow P-state Change
- 13: Pipe3 Plane1 Allow P-state Change
- 14: Pipe3 Cursor0 Allow P-state Change
- 15: Pipe3 Cursor1 Allow P-state Change
- 16: Pipe4 Plane0 Allow P-state Change
- 17: Pipe4 Plane1 Allow P-state Change
- 18: Pipe4 Cursor0 Allow P-state Change
- 19: Pipe4 Cursor1 Allow P-state Change
- 20: Pipe5 Plane0 Allow P-state Change
- 21: Pipe5 Plane1 Allow P-state Change
- 22: Pipe5 Cursor0 Allow P-state Change
- 23: Pipe5 Cursor1 Allow P-state Change
- 24: Pipe6 Plane0 Allow P-state Change
- 25: Pipe6 Plane1 Allow P-state Change
- 26: Pipe6 Cursor0 Allow P-state Change
- 27: Pipe6 Cursor1 Allow P-state Change
- 28: WB0 Allow P-state Change
- 29: WB1 Allow P-state Change
- 30: Arbiter`s Allow P-state Change
- 31: SOC P-state Change request
- */
- /* RV1:
- * dchubbubdebugind, at: 0x7
- * description "3-0: Pipe0 cursor0 QOS
- * 7-4: Pipe1 cursor0 QOS
- * 11-8: Pipe2 cursor0 QOS
- * 15-12: Pipe3 cursor0 QOS
- * 16: Pipe0 Plane0 Allow Pstate Change
- * 17: Pipe1 Plane0 Allow Pstate Change
- * 18: Pipe2 Plane0 Allow Pstate Change
- * 19: Pipe3 Plane0 Allow Pstate Change
- * 20: Pipe0 Plane1 Allow Pstate Change
- * 21: Pipe1 Plane1 Allow Pstate Change
- * 22: Pipe2 Plane1 Allow Pstate Change
- * 23: Pipe3 Plane1 Allow Pstate Change
- * 24: Pipe0 cursor0 Allow Pstate Change
- * 25: Pipe1 cursor0 Allow Pstate Change
- * 26: Pipe2 cursor0 Allow Pstate Change
- * 27: Pipe3 cursor0 Allow Pstate Change
- * 28: WB0 Allow Pstate Change
- * 29: WB1 Allow Pstate Change
- * 30: Arbiter's allow_pstate_change
* 31: SOC pstate change request
*/
@@ -300,7 +243,7 @@ void hubbub1_wm_change_req_wa(struct hubbub *hubbub)
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
}
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -308,6 +251,7 @@ void hubbub1_program_urgent_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -321,7 +265,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.pte_meta_urgent_ns > hubbub1->watermarks.a.pte_meta_urgent_ns) {
hubbub1->watermarks.a.pte_meta_urgent_ns = watermarks->a.pte_meta_urgent_ns;
@@ -331,7 +276,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.pte_meta_urgent_ns < hubbub1->watermarks.a.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -344,7 +290,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.pte_meta_urgent_ns > hubbub1->watermarks.b.pte_meta_urgent_ns) {
hubbub1->watermarks.b.pte_meta_urgent_ns = watermarks->b.pte_meta_urgent_ns;
@@ -354,7 +301,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.pte_meta_urgent_ns < hubbub1->watermarks.b.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -367,7 +315,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.pte_meta_urgent_ns > hubbub1->watermarks.c.pte_meta_urgent_ns) {
hubbub1->watermarks.c.pte_meta_urgent_ns = watermarks->c.pte_meta_urgent_ns;
@@ -377,7 +326,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.pte_meta_urgent_ns < hubbub1->watermarks.c.pte_meta_urgent_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -390,7 +340,8 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.pte_meta_urgent_ns > hubbub1->watermarks.d.pte_meta_urgent_ns) {
hubbub1->watermarks.d.pte_meta_urgent_ns = watermarks->d.pte_meta_urgent_ns;
@@ -400,10 +351,13 @@ void hubbub1_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("PTE_META_URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.pte_meta_urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.pte_meta_urgent_ns < hubbub1->watermarks.d.pte_meta_urgent_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
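The watermark functions now return whether any programming was deferred: a value is written when it raises the watermark or when lowering is known to be safe, while an unsafe lowering is only recorded as pending, presumably so the caller can apply it once the transition completes. The per-register pattern, generically:

/* Generic safe_to_lower bookkeeping, one register's worth. */
static bool program_one_wm(uint32_t *cached_ns, uint32_t requested_ns,
			   bool safe_to_lower)
{
	if (safe_to_lower || requested_ns > *cached_ns) {
		*cached_ns = requested_ns;
		/* convert to refclk ticks and write the register here */
		return false;
	}
	return requested_ns < *cached_ns;	/* lowering deferred */
}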
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -411,6 +365,7 @@ void hubbub1_program_stutter_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -425,7 +380,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -439,7 +396,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -454,7 +413,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -468,7 +429,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -483,7 +446,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -497,7 +462,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -512,7 +479,9 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -526,11 +495,14 @@ void hubbub1_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+ return wm_pending;
}
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -538,6 +510,7 @@ void hubbub1_program_pstate_watermarks(
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
@@ -552,7 +525,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -567,7 +542,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -582,7 +559,9 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -597,23 +576,33 @@ void hubbub1_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn10_hubbub *hubbub1 = TO_DCN10_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
REG_UPDATE(DCHUBBUB_ARB_SAT_LEVEL,
DCHUBBUB_ARB_SAT_LEVEL, 60 * refclk_mhz);
@@ -627,6 +616,7 @@ void hubbub1_program_watermarks(
DCHUBBUB_ARB_WATERMARK_CHANGE_DONE_INTERRUPT_DISABLE, 1,
DCHUBBUB_ARB_WATERMARK_CHANGE_REQUEST, 1);
#endif
+ return wm_pending;
}
void hubbub1_update_dchub(
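Every clock state in the three programmers above repeats one idiom: write the watermark register when the value rises or when lowering is currently safe; otherwise record that a lower value is still pending. hubbub1_program_watermarks then ORs the three results together. A condensed sketch of the per-watermark idiom, with illustrative names rather than the driver's exact helpers:

static bool program_one_wm(uint32_t *cur_ns, uint32_t new_ns, bool safe_to_lower)
{
	if (safe_to_lower || new_ns > *cur_ns) {
		*cur_ns = new_ns;	/* convert to refclk cycles and write the register */
		return false;		/* nothing deferred */
	}
	/* the value dropped but lowering is not safe yet: defer it */
	return new_ns < *cur_ns;
}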
@@ -840,8 +830,8 @@ static void hubbub1_det_request_size(
hubbub1_get_blk256_size(&blk256_width, &blk256_height, bpe);
- swath_bytes_horz_wc = height * blk256_height * bpe;
- swath_bytes_vert_wc = width * blk256_width * bpe;
+ swath_bytes_horz_wc = width * blk256_height * bpe;
+ swath_bytes_vert_wc = height * blk256_width * bpe;
*req128_horz_wc = (2 * swath_bytes_horz_wc <= detile_buf_size) ?
false : /* full 256B request */
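The det_request_size change above fixes swapped operands: a horizontally write-combined swath spans the full surface width at the height of one 256B block, and a vertical swath spans the full height at one block's width. A worked sketch with purely illustrative numbers:

/* illustrative values, not taken from any specific ASIC */
unsigned int width = 3840, bpe = 4;		/* pixels, bytes per element */
unsigned int blk256_height = 8;			/* height of a 256B block */
unsigned int detile_buf_size = 164 * 1024;	/* assumed detile buffer size */

unsigned int swath_bytes_horz_wc = width * blk256_height * bpe;	/* 120 KiB */
bool req128_horz_wc = !(2 * swath_bytes_horz_wc <= detile_buf_size);
/* two swaths (240 KiB) no longer fit, so 128B requests are selected */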
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
index af57751253de..343a537172c7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubbub.h
@@ -308,7 +308,7 @@ bool hubbub1_verify_allow_pstate_change_high(
void hubbub1_wm_change_req_wa(struct hubbub *hubbub);
-void hubbub1_program_watermarks(
+bool hubbub1_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -331,17 +331,17 @@ void hubbub1_construct(struct hubbub *hubbub,
const struct dcn_hubbub_shift *hubbub_shift,
const struct dcn_hubbub_mask *hubbub_mask);
-void hubbub1_program_urgent_watermarks(
+bool hubbub1_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_stutter_watermarks(
+bool hubbub1_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub1_program_pstate_watermarks(
+bool hubbub1_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index 1008ac8a0f2a..9cc3314966bd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -48,8 +48,8 @@
#include "dc_link_dp.h"
#include "dccg.h"
#include "clk_mgr.h"
-
-
+#include "link_hwss.h"
+#include "dpcd_defs.h"
#include "dsc.h"
#define DC_LOGGER_INIT(logger)
@@ -82,7 +82,7 @@ void print_microsec(struct dc_context *dc_ctx,
us_x10 % frac);
}
-static void dcn10_lock_all_pipes(struct dc *dc,
+void dcn10_lock_all_pipes(struct dc *dc,
struct dc_state *context,
bool lock)
{
@@ -93,6 +93,7 @@ static void dcn10_lock_all_pipes(struct dc *dc,
for (i = 0; i < dc->res_pool->pipe_count; i++) {
pipe_ctx = &context->res_ctx.pipe_ctx[i];
tg = pipe_ctx->stream_res.tg;
+
/*
* Only lock the top pipe's tg to prevent redundant
* (un)locking. Also skip if pipe is disabled.
@@ -103,9 +104,9 @@ static void dcn10_lock_all_pipes(struct dc *dc,
continue;
if (lock)
- tg->funcs->lock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
else
- tg->funcs->unlock(tg);
+ dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
}
}
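dcn10_lock_all_pipes loses its static linkage because the init tables later in this diff wire it up as .interdependent_update_lock for both DCN1 and DCN2, and it now routes through dc->hwss.pipe_control_lock so each ASIC's own locking scheme (TG master update lock, GSL, etc.) is applied. A minimal sketch of the resulting shape, with the skip condition abbreviated:

void lock_all_top_pipes(struct dc *dc, struct dc_state *context, bool lock)
{
	int i;

	for (i = 0; i < dc->res_pool->pipe_count; i++) {
		struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];

		/* only top pipes with an active stream; disabled TGs skipped */
		if (pipe->top_pipe || !pipe->stream)
			continue;
		dc->hwss.pipe_control_lock(dc, pipe, lock);
	}
}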
@@ -900,6 +901,10 @@ static void dcn10_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
@@ -1263,7 +1268,8 @@ void dcn10_init_hw(struct dc *dc)
}
//Enable ability to power gate / don't force power on permanently
- hws->funcs.enable_power_gating_plane(hws, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(hws, true);
return;
}
@@ -1317,6 +1323,31 @@ void dcn10_init_hw(struct dc *dc)
if (hws->funcs.dsc_pg_control != NULL)
hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false);
+ /* we want to turn off all dp displays before doing detection */
+ if (dc->config.power_down_display_on_boot) {
+ uint8_t dpcd_power_state = '\0';
+ enum dc_status status = DC_ERROR_UNEXPECTED;
+
+ for (i = 0; i < dc->link_count; i++) {
+ if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
+ continue;
+
+ /*
+ * core_link_read_dpcd() will invoke dm_helpers_dp_read_dpcd(),
+ * which needs to read dpcd info with the help of aconnector.
+ * If aconnector (dc->links[i]->priv) is NULL, then dpcd status
+ * cannot be read.
+ */
+ if (dc->links[i]->priv) {
+ /* if any of the displays are lit up turn them off */
+ status = core_link_read_dpcd(dc->links[i], DP_SET_POWER,
+ &dpcd_power_state, sizeof(dpcd_power_state));
+ if (status == DC_OK && dpcd_power_state == DP_POWER_STATE_D0)
+ dp_receiver_power_ctrl(dc->links[i], false);
+ }
+ }
+ }
+
/* If taking control over from VBIOS, we may want to optimize our first
* mode set, so we need to skip powering down pipes until we know which
* pipes we want to use.
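The boot-time loop added above only touches DP links whose aconnector is populated, because core_link_read_dpcd needs it to reach the AUX channel. Restated as a hypothetical standalone helper (the diff keeps this logic inline in dcn10_init_hw):

static void power_down_lit_dp_sinks(struct dc *dc)
{
	int i;

	for (i = 0; i < dc->link_count; i++) {
		uint8_t dpcd_power_state = 0;

		if (dc->links[i]->connector_signal != SIGNAL_TYPE_DISPLAY_PORT)
			continue;
		if (!dc->links[i]->priv)	/* no aconnector, DPCD unreadable */
			continue;
		if (core_link_read_dpcd(dc->links[i], DP_SET_POWER,
					&dpcd_power_state,
					sizeof(dpcd_power_state)) == DC_OK &&
		    dpcd_power_state == DP_POWER_STATE_D0)
			dp_receiver_power_ctrl(dc->links[i], false);
	}
}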
@@ -1325,6 +1356,9 @@ void dcn10_init_hw(struct dc *dc)
*/
if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) {
hws->funcs.init_pipes(dc, dc->current_state);
+ if (dc->res_pool->hubbub->funcs->allow_self_refresh_control)
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub,
+ !dc->res_pool->hubbub->ctx->dc->debug.disable_stutter);
}
for (i = 0; i < res_pool->audio_count; i++) {
@@ -1355,8 +1389,8 @@ void dcn10_init_hw(struct dc *dc)
REG_UPDATE(DCFCLK_CNTL, DCFCLK_GATE_DIS, 0);
}
-
- hws->funcs.enable_power_gating_plane(dc->hwseq, true);
+ if (hws->funcs.enable_power_gating_plane)
+ hws->funcs.enable_power_gating_plane(dc->hwseq, true);
if (dc->clk_mgr->funcs->notify_wm_ranges)
dc->clk_mgr->funcs->notify_wm_ranges(dc->clk_mgr);
@@ -1576,7 +1610,7 @@ void dcn10_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (dc->debug.sanity_checks)
@@ -2090,6 +2124,10 @@ void dcn10_get_hdr_visual_confirm_color(
if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_PQ) {
/* HDR10, ARGB2101010 - set border color to red */
color->color_r_cr = color_value;
+ } else if (top_pipe_ctx->stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22) {
+ /* FreeSync 2 ARGB2101010 - set border color to pink */
+ color->color_r_cr = color_value;
+ color->color_b_cb = color_value;
}
break;
case PIXEL_FORMAT_FP16:
@@ -2512,12 +2550,17 @@ void dcn10_apply_ctx_for_surface(
int i;
struct timing_generator *tg;
uint32_t underflow_check_delay_us;
- bool removed_pipe[4] = { false };
bool interdependent_update = false;
struct pipe_ctx *top_pipe_to_program =
dcn10_find_top_pipe_for_stream(dc, context, stream);
DC_LOGGER_INIT(dc->ctx->logger);
+ // Clear pipe_ctx flag
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+ pipe_ctx->update_flags.raw = 0;
+ }
+
if (!top_pipe_to_program)
return;
@@ -2531,11 +2574,6 @@ void dcn10_apply_ctx_for_surface(
if (underflow_check_delay_us != 0xFFFFFFFF && hws->funcs.did_underflow_occur)
ASSERT(hws->funcs.did_underflow_occur(dc, top_pipe_to_program));
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, true);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, true);
-
if (underflow_check_delay_us != 0xFFFFFFFF)
udelay(underflow_check_delay_us);
@@ -2552,18 +2590,6 @@ void dcn10_apply_ctx_for_surface(
struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
struct pipe_ctx *old_pipe_ctx =
&dc->current_state->res_ctx.pipe_ctx[i];
- /*
- * Powergate reused pipes that are not powergated
- * fairly hacky right now, using opp_id as indicator
- * TODO: After move dc_post to dc_update, this will
- * be removed.
- */
- if (pipe_ctx->plane_state && !old_pipe_ctx->plane_state) {
- if (old_pipe_ctx->stream_res.tg == tg &&
- old_pipe_ctx->plane_res.hubp &&
- old_pipe_ctx->plane_res.hubp->opp_id != OPP_ID_INVALID)
- dc->hwss.disable_plane(dc, old_pipe_ctx);
- }
if ((!pipe_ctx->plane_state ||
pipe_ctx->stream_res.tg != old_pipe_ctx->stream_res.tg) &&
@@ -2571,7 +2597,7 @@ void dcn10_apply_ctx_for_surface(
old_pipe_ctx->stream_res.tg == tg) {
hws->funcs.plane_atomic_disconnect(dc, old_pipe_ctx);
- removed_pipe[i] = true;
+ pipe_ctx->update_flags.bits.disable = 1;
DC_LOG_DC("Reset mpcc for pipe %d\n",
old_pipe_ctx->pipe_idx);
@@ -2597,21 +2623,35 @@ void dcn10_apply_ctx_for_surface(
&pipe_ctx->dlg_regs,
&pipe_ctx->ttu_regs);
}
+}
- if (interdependent_update)
- dcn10_lock_all_pipes(dc, context, false);
- else
- dcn10_pipe_control_lock(dc, top_pipe_to_program, false);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
- if (num_planes == 0)
- false_optc_underflow_wa(dc, stream, tg);
+ DC_LOGGER_INIT(dc->ctx->logger);
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe &&
+ !pipe_ctx->prev_odm_pipe &&
+ pipe_ctx->stream) {
+ struct timing_generator *tg = pipe_ctx->stream_res.tg;
+
+ if (context->stream_status[i].plane_count == 0)
+ false_optc_underflow_wa(dc, pipe_ctx->stream, tg);
+ }
+ }
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i])
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
dc->hwss.disable_plane(dc, &dc->current_state->res_ctx.pipe_ctx[i]);
for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (removed_pipe[i]) {
+ if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable) {
dc->hwss.optimize_bandwidth(dc, context);
break;
}
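The stack-local removed_pipe[] array can go away because each pipe_ctx now carries persistent update flags; disable is one bit among them, so the information survives the split into dcn10_post_unlock_program_front_end. The flags are roughly this shape (abbreviated sketch; the authoritative definition lives in the resource headers):

union pipe_update_flags {
	struct {
		uint32_t enable  : 1;	/* pipe newly brought up */
		uint32_t disable : 1;	/* pipe being torn down */
		/* ... further per-feature dirty bits ... */
	} bits;
	uint32_t raw;			/* cleared wholesale, as above */
};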
@@ -2656,7 +2696,7 @@ void dcn10_prepare_bandwidth(
false);
}
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
@@ -2693,6 +2733,7 @@ void dcn10_optimize_bandwidth(
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
true);
+
dcn10_stereo_hw_frame_pack_wa(dc, context);
if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE)
@@ -2884,6 +2925,7 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
struct dc_plane_state *plane_state = pipe_ctx->plane_state;
struct timing_generator *tg = pipe_ctx->stream_res.tg;
bool flip_pending;
+ struct dc *dc;
if (plane_state == NULL)
return;
+
+ dc = plane_state->ctx->dc;
@@ -2901,6 +2943,19 @@ void dcn10_update_pending_status(struct pipe_ctx *pipe_ctx)
plane_state->status.is_right_eye =
!tg->funcs->is_stereo_left_eye(pipe_ctx->stream_res.tg);
}
+
+ if (dc->hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied) {
+ struct dce_hwseq *hwseq = dc->hwseq;
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+ unsigned int cur_frame = tg->funcs->get_frame_count(tg);
+
+ if (cur_frame != hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame) {
+ struct hubbub *hubbub = dc->res_pool->hubbub;
+
+ hubbub->funcs->allow_self_refresh_control(hubbub, !dc->debug.disable_stutter);
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = false;
+ }
+ }
}
void dcn10_update_dchub(struct dce_hwseq *hws, struct dchub_init_data *dh_data)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
index 4d20f6586bb5..16a50e05ffbf 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.h
@@ -70,11 +70,18 @@ void dcn10_reset_hw_ctx_wrap(
struct dc *dc,
struct dc_state *context);
void dcn10_disable_plane(struct dc *dc, struct pipe_ctx *pipe_ctx);
+void dcn10_lock_all_pipes(
+ struct dc *dc,
+ struct dc_state *context,
+ bool lock);
void dcn10_apply_ctx_for_surface(
struct dc *dc,
const struct dc_stream_state *stream,
int num_planes,
struct dc_state *context);
+void dcn10_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn10_hubp_pg_control(
struct dce_hwseq *hws,
unsigned int hubp_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
index e7e5352ec424..dd02d3983695 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_init.c
@@ -32,6 +32,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.init_hw = dcn10_init_hw,
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = dcn10_apply_ctx_for_surface,
+ .post_unlock_program_front_end = dcn10_post_unlock_program_front_end,
.update_plane_addr = dcn10_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -49,6 +50,7 @@ static const struct hw_sequencer_funcs dcn10_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn10_disable_plane,
.pipe_control_lock = dcn10_pipe_control_lock,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn10_prepare_bandwidth,
.optimize_bandwidth = dcn10_optimize_bandwidth,
.set_drr = dcn10_set_drr,
@@ -85,6 +87,8 @@ static const struct hwseq_private_funcs dcn10_private_funcs = {
.reset_hw_ctx_wrap = dcn10_reset_hw_ctx_wrap,
.enable_stream_timing = dcn10_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = NULL,
.enable_stream_gating = NULL,
.setup_vupdate_interrupt = dcn10_setup_vupdate_interrupt,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
index 1a37c90e9d43..d3617d6785a7 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.c
@@ -782,6 +782,11 @@ bool dcn10_link_encoder_validate_output_with_stream(
struct dcn10_link_encoder *enc10 = TO_DCN10_LINK_ENC(enc);
bool is_valid;
+ // If SCDC (needed for TMDS rates of 340-600 MHz) is disabled, cap to the HDMI 1.4 timing limit
+ if (stream->sink->edid_caps.panel_patch.skip_scdc_overwrite &&
+ enc10->base.features.max_hdmi_pixel_clock > 300000)
+ enc10->base.features.max_hdmi_pixel_clock = 300000;
+
switch (stream->signal) {
case SIGNAL_TYPE_DVI_SINGLE_LINK:
case SIGNAL_TYPE_DVI_DUAL_LINK:
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
index eb13589b9a81..762109174fb8 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_link_encoder.h
@@ -62,11 +62,11 @@
SRI(DP_DPHY_FAST_TRAINING, DP, id), \
SRI(DP_SEC_CNTL1, DP, id), \
SRI(DP_DPHY_BS_SR_SWAP_CNTL, DP, id), \
- SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
SRI(DP_DPHY_HBR2_PATTERN_CONTROL, DP, id)
#define LE_DCN10_REG_LIST(id)\
+ SRI(DP_DPHY_INTERNAL_CTRL, DP, id), \
LE_DCN_COMMON_REG_LIST(id)
struct dcn10_link_enc_aux_registers {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
index a9a43b397db9..63acb8ff7462 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_optc.c
@@ -299,7 +299,6 @@ void optc1_set_vtg_params(struct timing_generator *optc,
uint32_t asic_blank_end;
uint32_t v_init;
uint32_t v_fp2 = 0;
- int32_t vertical_line_start;
struct optc *optc1 = DCN10TG_FROM_TG(optc);
@@ -316,9 +315,8 @@ void optc1_set_vtg_params(struct timing_generator *optc,
patched_crtc_timing.v_border_top;
/* if VSTARTUP is before VSYNC, FP2 is the offset, otherwise 0 */
- vertical_line_start = asic_blank_end - optc1->vstartup_start + 1;
- if (vertical_line_start < 0)
- v_fp2 = -vertical_line_start;
+ if (optc1->vstartup_start > asic_blank_end)
+ v_fp2 = optc1->vstartup_start - asic_blank_end;
/* Interlace */
if (REG(OTG_INTERLACE_CONTROL)) {
@@ -1195,7 +1193,7 @@ static void optc1_enable_stereo(struct timing_generator *optc,
REG_UPDATE_3(OTG_STEREO_CONTROL,
OTG_STEREO_EN, stereo_en,
OTG_STEREO_SYNC_OUTPUT_LINE_NUM, 0,
- OTG_STEREO_SYNC_OUTPUT_POLARITY, 0);
+ OTG_STEREO_SYNC_OUTPUT_POLARITY, flags->RIGHT_EYE_POLARITY == 0 ? 0 : 1);
if (flags->PROGRAM_POLARITY)
REG_UPDATE(OTG_STEREO_CONTROL,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 3b71898e859e..261bdc3a8218 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -570,7 +570,7 @@ static const struct dc_plane_cap plane_cap = {
static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -598,7 +598,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1233,7 +1233,7 @@ static enum dc_status dcn10_validate_global(struct dc *dc, struct dc_state *cont
return DC_OK;
}
-static enum dc_status dcn10_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+static enum dc_status dcn10_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -1295,7 +1295,7 @@ static const struct resource_funcs dcn10_res_pool_funcs = {
.validate_plane = dcn10_validate_plane,
.validate_global = dcn10_validate_global,
.add_stream_to_ctx = dcn10_add_stream_to_ctx,
- .get_default_swizzle_mode = dcn10_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn10_patch_unknown_plane_state,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
index 376c4264d295..7eba9333c328 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_stream_encoder.c
@@ -1667,5 +1667,6 @@ void dcn10_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
index 50bffbfdd394..62cc2651e00c 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.c
@@ -70,6 +70,8 @@ void dccg2_update_dpp_dto(struct dccg *dccg, int dpp_inst, int req_dppclk)
REG_UPDATE(DPPCLK_DTO_CTRL,
DPPCLK_DTO_ENABLE[dpp_inst], 0);
}
+
+ dccg->pipe_dppclk_khz[dpp_inst] = req_dppclk;
}
void dccg2_get_dccg_ref_freq(struct dccg *dccg,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
index 13e057d7ee93..42bba7c9548b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dpp.c
@@ -369,84 +369,6 @@ void dpp2_set_cursor_attributes(
}
}
-#define IDENTITY_RATIO(ratio) (dc_fixpt_u3d19(ratio) == (1 << 19))
-
-bool dpp2_get_optimal_number_of_taps(
- struct dpp *dpp,
- struct scaler_data *scl_data,
- const struct scaling_taps *in_taps)
-{
- /* Some ASICs does not support FP16 scaling, so we reject modes require this*/
- if (scl_data->viewport.width != scl_data->h_active &&
- scl_data->viewport.height != scl_data->v_active &&
- dpp->caps->dscl_data_proc_format == DSCL_DATA_PRCESSING_FIXED_FORMAT &&
- scl_data->format == PIXEL_FORMAT_FP16)
- return false;
-
- if (scl_data->viewport.width > scl_data->h_active &&
- dpp->ctx->dc->debug.max_downscale_src_width != 0 &&
- scl_data->viewport.width > dpp->ctx->dc->debug.max_downscale_src_width)
- return false;
-
- /* TODO: add lb check */
-
- /* No support for programming ratio of 8, drop to 7.99999.. */
- if (scl_data->ratios.horz.value == (8ll << 32))
- scl_data->ratios.horz.value--;
- if (scl_data->ratios.vert.value == (8ll << 32))
- scl_data->ratios.vert.value--;
- if (scl_data->ratios.horz_c.value == (8ll << 32))
- scl_data->ratios.horz_c.value--;
- if (scl_data->ratios.vert_c.value == (8ll << 32))
- scl_data->ratios.vert_c.value--;
-
- /* Set default taps if none are provided */
- if (in_taps->h_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz) > 4)
- scl_data->taps.h_taps = 8;
- else
- scl_data->taps.h_taps = 4;
- } else
- scl_data->taps.h_taps = in_taps->h_taps;
- if (in_taps->v_taps == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert) > 4)
- scl_data->taps.v_taps = 8;
- else
- scl_data->taps.v_taps = 4;
- } else
- scl_data->taps.v_taps = in_taps->v_taps;
- if (in_taps->v_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.vert_c) > 4)
- scl_data->taps.v_taps_c = 4;
- else
- scl_data->taps.v_taps_c = 2;
- } else
- scl_data->taps.v_taps_c = in_taps->v_taps_c;
- if (in_taps->h_taps_c == 0) {
- if (dc_fixpt_ceil(scl_data->ratios.horz_c) > 4)
- scl_data->taps.h_taps_c = 4;
- else
- scl_data->taps.h_taps_c = 2;
- } else if ((in_taps->h_taps_c % 2) != 0 && in_taps->h_taps_c != 1)
- /* Only 1 and even h_taps_c are supported by hw */
- scl_data->taps.h_taps_c = in_taps->h_taps_c - 1;
- else
- scl_data->taps.h_taps_c = in_taps->h_taps_c;
-
- if (!dpp->ctx->dc->debug.always_scale) {
- if (IDENTITY_RATIO(scl_data->ratios.horz))
- scl_data->taps.h_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert))
- scl_data->taps.v_taps = 1;
- if (IDENTITY_RATIO(scl_data->ratios.horz_c))
- scl_data->taps.h_taps_c = 1;
- if (IDENTITY_RATIO(scl_data->ratios.vert_c))
- scl_data->taps.v_taps_c = 1;
- }
-
- return true;
-}
-
void oppn20_dummy_program_regamma_pwl(
struct dpp *dpp,
const struct pwl_params *params,
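dpp2_get_optimal_number_of_taps is removed as a duplicate; the shared DPP implementation keeps the same rules, including the IDENTITY_RATIO shortcut (in U3.19 fixed point, 1.0 is 1 << 19) and the tap defaults. The defaulting rule, sketched with the driver's own fixed-point ceiling:

static int default_taps(struct fixed31_32 ratio)
{
	/* 8 taps past 4x downscale, otherwise 4; chroma uses 4/2 analogously */
	return dc_fixpt_ceil(ratio) > 4 ? 8 : 4;
}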
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
index 6bdfee20b6a7..1b1ae9ce2799 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dsc.c
@@ -369,6 +369,7 @@ static bool dsc_prepare_config(const struct dsc_config *dsc_cfg, struct dsc_reg_
dsc_reg_vals->pps.block_pred_enable = dsc_cfg->dc_dsc_cfg.block_pred_enable;
dsc_reg_vals->pps.line_buf_depth = dsc_cfg->dc_dsc_cfg.linebuf_depth;
dsc_reg_vals->alternate_ich_encoding_en = dsc_reg_vals->pps.dsc_version_minor == 1 ? 0 : 1;
+ dsc_reg_vals->ich_reset_at_eol = (dsc_cfg->is_odm || dsc_reg_vals->num_slices_h > 1) ? 0xF : 0;
// TODO: in addition to validating slice height (pic height must be divisible by slice height),
// see what happens when the same condition doesn't apply for slice_width/pic_width.
@@ -531,7 +532,6 @@ static void dsc_update_from_dsc_parameters(struct dsc_reg_values *reg_vals, cons
reg_vals->pps.rc_buf_thresh[i] = reg_vals->pps.rc_buf_thresh[i] >> 6;
reg_vals->rc_buffer_model_size = dsc_params->rc_buffer_model_size;
- reg_vals->ich_reset_at_eol = reg_vals->num_slices_h == 1 ? 0 : 0xf;
}
static void dsc_write_to_registers(struct display_stream_compressor *dsc, const struct dsc_reg_values *reg_vals)
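Moving ich_reset_at_eol into dsc_prepare_config also fixes its condition: the index color history must be flushed at end-of-line whenever a scan line is carved up, whether by multiple DSC slices per line or by ODM segment splitting, which the old num_slices_h-only test missed. The corrected rule as a sketch:

static uint32_t ich_reset_at_eol_mask(bool is_odm, int num_slices_h)
{
	/* 0xF = reset in all four slice positions, 0 = never reset */
	return (is_odm || num_slices_h > 1) ? 0xF : 0x0;
}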
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
index 9235f7d29454..c0b21d7450d4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hubbub.c
@@ -562,19 +562,23 @@ void hubbub2_get_dchub_ref_freq(struct hubbub *hubbub,
}
}
-static void hubbub2_program_watermarks(
+static bool hubbub2_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
/*
* Need to clamp to max of the register values (i.e. no wrap)
* for dcn1, all wm registers are 21-bit wide
*/
- hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub1_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub1_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* There's a special case when going from p-state support to p-state unsupported
@@ -592,6 +596,7 @@ static void hubbub2_program_watermarks(
REG_UPDATE(DCHUBBUB_ARB_DF_REQ_OUTSTAND, DCHUBBUB_ARB_MIN_REQ_OUTSTAND, 180);
hubbub->funcs->allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+ return wm_pending;
}
static const struct hubbub_funcs hubbub2_funcs = {
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
index cfbbaffa8654..233318260da4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
@@ -307,7 +307,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
if (num_opps == 2) {
bottom_opp->funcs->opp_set_disp_pattern_generator(
@@ -317,7 +318,8 @@ void dcn20_init_blank(
COLOR_DEPTH_UNDEFINED,
&black_color,
otg_active_width,
- otg_active_height);
+ otg_active_height,
+ 0);
}
hws->funcs.wait_for_blank_complete(opp);
@@ -572,7 +574,6 @@ void dcn20_plane_atomic_disable(struct dc *dc, struct pipe_ctx *pipe_ctx)
dpp->funcs->dpp_dppclk_control(dpp, false, false);
hubp->power_gated = true;
- dc->optimized_required = false; /* We're powering off, no need to optimize */
hws->funcs.plane_atomic_power_down(dc,
pipe_ctx->plane_res.dpp,
@@ -646,6 +647,9 @@ enum dc_status dcn20_enable_stream_timing(
return DC_ERROR_UNEXPECTED;
}
+ if (dc->hwseq->funcs.PLAT_58856_wa && (!dc_is_dp_signal(stream->signal)))
+ dc->hwseq->funcs.PLAT_58856_wa(context, pipe_ctx);
+
pipe_ctx->stream_res.tg->funcs->program_timing(
pipe_ctx->stream_res.tg,
&stream->timing,
@@ -975,7 +979,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
odm_pipe->stream_res.opp->funcs->opp_set_disp_pattern_generator(
@@ -986,7 +991,8 @@ void dcn20_blank_pixel_data(
stream->timing.display_color_depth,
&black_color,
width,
- height);
+ height,
+ 0);
}
if (!blank)
@@ -1089,29 +1095,6 @@ void dcn20_enable_plane(
// }
}
-
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock)
-{
- if (lock) {
- pipe->stream_res.tg->funcs->lock_doublebuffer_enable(
- pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->lock(pipe->stream_res.tg);
- } else {
- pipe->stream_res.tg->funcs->unlock(pipe->stream_res.tg);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VBLANK);
- pipe->stream_res.tg->funcs->wait_for_state(pipe->stream_res.tg,
- CRTC_STATE_VACTIVE);
- pipe->stream_res.tg->funcs->lock_doublebuffer_disable(
- pipe->stream_res.tg);
- }
-}
-
void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
@@ -1122,7 +1105,7 @@ void dcn20_pipe_control_lock(
/* use TG master update lock to lock everything on the TG
* therefore only top pipe need to lock
*/
- if (pipe->top_pipe)
+ if (!pipe || pipe->top_pipe)
return;
if (pipe->plane_state != NULL)
@@ -1537,48 +1520,32 @@ static void dcn20_program_pipe(
}
}
-static bool does_pipe_need_lock(struct pipe_ctx *pipe)
-{
- if ((pipe->plane_state && pipe->plane_state->update_flags.raw)
- || pipe->update_flags.raw)
- return true;
- if (pipe->bottom_pipe)
- return does_pipe_need_lock(pipe->bottom_pipe);
-
- return false;
-}
-
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context)
{
- const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
int i;
struct dce_hwseq *hws = dc->hwseq;
- bool pipe_locked[MAX_PIPES] = {false};
DC_LOGGER_INIT(dc->ctx->logger);
- /* Carry over GSL groups in case the context is changing. */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (context->res_ctx.pipe_ctx[i].stream == dc->current_state->res_ctx.pipe_ctx[i].stream)
- context->res_ctx.pipe_ctx[i].stream_res.gsl_group =
- dc->current_state->res_ctx.pipe_ctx[i].stream_res.gsl_group;
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+
+ if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
+ ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+ if (dc->hwss.program_triplebuffer != NULL &&
+ !dc->debug.disable_tri_buf) {
+ /* turn off triple buffer for full update */
+ dc->hwss.program_triplebuffer(
+ dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+ }
+ }
+ }
/* Set pipe update flags and lock pipes */
for (i = 0; i < dc->res_pool->pipe_count; i++)
dcn20_detect_pipe_changes(&dc->current_state->res_ctx.pipe_ctx[i],
&context->res_ctx.pipe_ctx[i]);
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (!context->res_ctx.pipe_ctx[i].top_pipe &&
- does_pipe_need_lock(&context->res_ctx.pipe_ctx[i])) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], true);
- pipe_locked[i] = true;
- }
/* OTG blank before disabling all front ends */
for (i = 0; i < dc->res_pool->pipe_count; i++)
@@ -1616,17 +1583,17 @@ void dcn20_program_front_end_for_ctx(
hws->funcs.program_all_writeback_pipes_in_tree(dc, pipe->stream, context);
}
}
+}
- /* Unlock all locked pipes */
- for (i = 0; i < dc->res_pool->pipe_count; i++)
- if (pipe_locked[i]) {
- struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context)
+{
+ int i;
+ const unsigned int TIMEOUT_FOR_PIPE_ENABLE_MS = 100;
+ struct dce_hwseq *hwseq = dc->hwseq;
- if (pipe_ctx->update_flags.bits.tg_changed || pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
- if (!pipe_ctx->update_flags.bits.enable)
- dc->hwss.pipe_control_lock(dc, &dc->current_state->res_ctx.pipe_ctx[i], false);
- }
+ DC_LOGGER_INIT(dc->ctx->logger);
for (i = 0; i < dc->res_pool->pipe_count; i++)
if (context->res_ctx.pipe_ctx[i].update_flags.bits.disable)
@@ -1652,11 +1619,26 @@ void dcn20_program_front_end_for_ctx(
}
/* WA to apply WM setting*/
- if (dc->hwseq->wa.DEGVIDCN21)
+ if (hwseq->wa.DEGVIDCN21)
dc->res_pool->hubbub->funcs->apply_DEDCN21_147_wa(dc->res_pool->hubbub);
-}
+ /* WA for stutter underflow during MPO transitions when adding 2nd plane */
+ if (hwseq->wa.disallow_self_refresh_during_multi_plane_transition) {
+
+ if (dc->current_state->stream_status[0].plane_count == 1 &&
+ context->stream_status[0].plane_count > 1) {
+
+ struct timing_generator *tg = dc->res_pool->timing_generators[0];
+
+ dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, false);
+
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied = true;
+ hwseq->wa_state.disallow_self_refresh_during_multi_plane_transition_applied_on_frame = tg->funcs->get_frame_count(tg);
+ }
+ }
+}
+
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context)
@@ -1669,7 +1651,7 @@ void dcn20_prepare_bandwidth(
false);
/* program dchubbub watermarks */
- hubbub->funcs->program_watermarks(hubbub,
+ dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
&context->bw_ctx.bw.dcn.watermarks,
dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000,
false);
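Capturing the program_watermarks return in dc->wm_optimized_required completes the wm_pending plumbing: prepare runs with safe_to_lower == false, and a true result flags that a later optimize pass still has watermarks to drop. Sketched end to end (illustrative wrapper, not a function in this diff):

static void prepare_then_optimize(struct dc *dc, struct dc_state *context,
				  unsigned int refclk_mhz)
{
	struct hubbub *hubbub = dc->res_pool->hubbub;

	dc->wm_optimized_required = hubbub->funcs->program_watermarks(hubbub,
			&context->bw_ctx.bw.dcn.watermarks, refclk_mhz,
			false);	/* raise only; lowering deferred */

	/* ... mode and front-end programming happens here ... */

	if (dc->wm_optimized_required)
		hubbub->funcs->program_watermarks(hubbub,
				&context->bw_ctx.bw.dcn.watermarks, refclk_mhz,
				true);	/* now safe to lower */
}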
@@ -2053,6 +2035,10 @@ static void dcn20_reset_back_end_for_pipe(
* parent pipe.
*/
if (pipe_ctx->top_pipe == NULL) {
+
+ if (pipe_ctx->stream_res.abm)
+ pipe_ctx->stream_res.abm->funcs->set_abm_immediate_disable(pipe_ctx->stream_res.abm);
+
pipe_ctx->stream_res.tg->funcs->disable_crtc(pipe_ctx->stream_res.tg);
pipe_ctx->stream_res.tg->funcs->enable_optc_clock(pipe_ctx->stream_res.tg, false);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
index 02c9be5ebd47..63ce763f148e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.h
@@ -35,6 +35,9 @@ bool dcn20_set_shaper_3dlut(
void dcn20_program_front_end_for_ctx(
struct dc *dc,
struct dc_state *context);
+void dcn20_post_unlock_program_front_end(
+ struct dc *dc,
+ struct dc_state *context);
void dcn20_update_plane_addr(const struct dc *dc, struct pipe_ctx *pipe_ctx);
void dcn20_update_mpcc(struct dc *dc, struct pipe_ctx *pipe_ctx);
bool dcn20_set_input_transfer_func(struct dc *dc, struct pipe_ctx *pipe_ctx,
@@ -58,10 +61,6 @@ void dcn20_pipe_control_lock(
struct dc *dc,
struct pipe_ctx *pipe,
bool lock);
-void dcn20_pipe_control_lock_global(
- struct dc *dc,
- struct pipe_ctx *pipe,
- bool lock);
void dcn20_prepare_bandwidth(
struct dc *dc,
struct dc_state *context);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
index d51e02fdab4d..1e73357eda34 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_init.c
@@ -33,6 +33,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -50,7 +51,7 @@ static const struct hw_sequencer_funcs dcn20_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -96,6 +97,8 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -108,7 +111,6 @@ static const struct hwseq_private_funcs dcn20_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
- .dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
index 3fccd5eeecbb..7bcee5894d2e 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_mmhubbub.h
@@ -36,26 +36,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SR(reg_name)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SRI2(reg_name, block, id)\
- .reg_name = BASE(mm ## reg_name ## _BASE_IDX) + \
- mm ## reg_name
-
-#define SRII(reg_name, block, id)\
- .reg_name[id] = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
-
#define MCIF_WB_COMMON_REG_LIST_DCN2_0(inst) \
SRI(MCIF_WB_BUFMGR_SW_CONTROL, MCIF_WB, inst),\
SRI(MCIF_WB_BUFMGR_CUR_LINE_R, MCIF_WB, inst),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 023cc71fad0f..138321e151eb 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -45,7 +45,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height)
+ int height,
+ int offset)
{
struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
enum test_pattern_color_format bit_depth;
@@ -92,6 +93,11 @@ void opp2_set_disp_pattern_generator(
DPG_ACTIVE_WIDTH, width,
DPG_ACTIVE_HEIGHT, height);
+ /* set DPG offset */
+ REG_SET_2(DPG_OFFSET_SEGMENT, 0,
+ DPG_X_OFFSET, offset,
+ DPG_SEGMENT_WIDTH, 0);
+
switch (test_pattern) {
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES:
case CONTROLLER_DP_TEST_PATTERN_COLORSQUARES_CEA:
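The new DPG_OFFSET_SEGMENT register exists so that under ODM combine each OPP can blank only its own horizontal slice of the timing. A hedged usage sketch (pipe names and widths are illustrative; the call sites in this diff still pass 0):

/* left segment starts at x = 0, right segment at x = half_width */
opp_left->funcs->opp_set_disp_pattern_generator(opp_left, pattern,
		color_space, depth, &black_color, half_width, height, 0);
opp_right->funcs->opp_set_disp_pattern_generator(opp_right, pattern,
		color_space, depth, &black_color, half_width, height, half_width);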
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 4093bec172c1..64c5b429c79a 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -36,6 +36,7 @@
#define OPP_DPG_REG_LIST(id) \
SRI(DPG_CONTROL, DPG, id), \
SRI(DPG_DIMENSIONS, DPG, id), \
+ SRI(DPG_OFFSET_SEGMENT, DPG, id), \
SRI(DPG_COLOUR_B_CB, DPG, id), \
SRI(DPG_COLOUR_G_Y, DPG, id), \
SRI(DPG_COLOUR_R_CR, DPG, id), \
@@ -53,6 +54,7 @@
uint32_t FMT_422_CONTROL; \
uint32_t DPG_CONTROL; \
uint32_t DPG_DIMENSIONS; \
+ uint32_t DPG_OFFSET_SEGMENT; \
uint32_t DPG_COLOUR_B_CB; \
uint32_t DPG_COLOUR_G_Y; \
uint32_t DPG_COLOUR_R_CR; \
@@ -68,6 +70,8 @@
OPP_SF(DPG0_DPG_CONTROL, DPG_HRES, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_DIMENSIONS, DPG_ACTIVE_HEIGHT, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_X_OFFSET, mask_sh), \
+ OPP_SF(DPG0_DPG_OFFSET_SEGMENT, DPG_SEGMENT_WIDTH, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR0_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_R_CR, DPG_COLOUR1_R_CR, mask_sh), \
OPP_SF(DPG0_DPG_COLOUR_B_CB, DPG_COLOUR0_B_CB, mask_sh), \
@@ -97,6 +101,8 @@
type DPG_HRES; \
type DPG_ACTIVE_WIDTH; \
type DPG_ACTIVE_HEIGHT; \
+ type DPG_X_OFFSET; \
+ type DPG_SEGMENT_WIDTH; \
type DPG_COLOUR0_R_CR; \
type DPG_COLOUR1_R_CR; \
type DPG_COLOUR0_B_CB; \
@@ -144,7 +150,8 @@ void opp2_set_disp_pattern_generator(
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
index 85f90f3e24cb..a67395208991 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c
@@ -153,6 +153,7 @@ struct _vcs_dpi_ip_params_st dcn2_0_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
@@ -220,7 +221,8 @@ struct _vcs_dpi_ip_params_st dcn2_0_nv14_ip = {
.xfc_supported = true,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
@@ -335,6 +337,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
.use_urgent_burst_bw = 0
};
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+ .clock_limits = {
+ {
+ .state = 0,
+ .dcfclk_mhz = 560.0,
+ .fabricclk_mhz = 560.0,
+ .dispclk_mhz = 513.0,
+ .dppclk_mhz = 513.0,
+ .phyclk_mhz = 540.0,
+ .socclk_mhz = 560.0,
+ .dscclk_mhz = 171.0,
+ .dram_speed_mts = 8960.0,
+ },
+ {
+ .state = 1,
+ .dcfclk_mhz = 694.0,
+ .fabricclk_mhz = 694.0,
+ .dispclk_mhz = 642.0,
+ .dppclk_mhz = 642.0,
+ .phyclk_mhz = 600.0,
+ .socclk_mhz = 694.0,
+ .dscclk_mhz = 214.0,
+ .dram_speed_mts = 11104.0,
+ },
+ {
+ .state = 2,
+ .dcfclk_mhz = 875.0,
+ .fabricclk_mhz = 875.0,
+ .dispclk_mhz = 734.0,
+ .dppclk_mhz = 734.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 875.0,
+ .dscclk_mhz = 245.0,
+ .dram_speed_mts = 14000.0,
+ },
+ {
+ .state = 3,
+ .dcfclk_mhz = 1000.0,
+ .fabricclk_mhz = 1000.0,
+ .dispclk_mhz = 1100.0,
+ .dppclk_mhz = 1100.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1000.0,
+ .dscclk_mhz = 367.0,
+ .dram_speed_mts = 16000.0,
+ },
+ {
+ .state = 4,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ /*Extra state, no dispclk ramping*/
+ {
+ .state = 5,
+ .dcfclk_mhz = 1200.0,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 1284.0,
+ .dppclk_mhz = 1284.0,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 1200.0,
+ .dscclk_mhz = 428.0,
+ .dram_speed_mts = 16000.0,
+ },
+ },
+ .num_states = 5,
+ .sr_exit_time_us = 8.6,
+ .sr_enter_plus_exit_time_us = 10.9,
+ .urgent_latency_us = 4.0,
+ .urgent_latency_pixel_data_only_us = 4.0,
+ .urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+ .urgent_latency_vm_data_only_us = 4.0,
+ .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+ .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+ .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+ .max_avg_sdp_bw_use_normal_percent = 40.0,
+ .max_avg_dram_bw_use_normal_percent = 40.0,
+ .writeback_latency_us = 12.0,
+ .ideal_dram_bw_after_urgent_percent = 40.0,
+ .max_request_size_bytes = 256,
+ .dram_channel_width_bytes = 2,
+ .fabric_datapath_to_dcn_data_return_bytes = 64,
+ .dcn_downspread_percent = 0.5,
+ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+ .round_trip_ping_latency_dcfclk_cycles = 131,
+ .urgent_out_of_order_return_per_channel_bytes = 256,
+ .channel_interleave_bytes = 256,
+ .num_banks = 8,
+ .num_chans = 8,
+ .vmm_page_size_bytes = 4096,
+ .dram_clock_change_latency_us = 404.0,
+ .dummy_pstate_latency_us = 5.0,
+ .writeback_dram_clock_change_latency_us = 23.0,
+ .return_bus_width_bytes = 64,
+ .dispclk_dppclk_vco_speed_mhz = 3850,
+ .xfc_bus_transport_time_us = 20,
+ .xfc_xbuf_latency_tolerance_us = 4,
+ .use_urgent_burst_bw = 0
+};
+
struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
#ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -928,7 +1041,7 @@ static const struct resource_caps res_cap_nv14 = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
@@ -947,7 +1060,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -1143,6 +1256,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1557,7 +1671,7 @@ static void acquire_dsc(struct resource_context *res_ctx,
}
}
-static void release_dsc(struct resource_context *res_ctx,
+void dcn20_release_dsc(struct resource_context *res_ctx,
const struct resource_pool *pool,
struct display_stream_compressor **dsc)
{
@@ -1617,7 +1731,7 @@ static enum dc_status remove_dsc_from_stream_resource(struct dc *dc,
pipe_ctx = &new_ctx->res_ctx.pipe_ctx[i];
if (pipe_ctx->stream_res.dsc)
- release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
+ dcn20_release_dsc(&new_ctx->res_ctx, dc->res_pool, &pipe_ctx->stream_res.dsc);
}
}
@@ -1861,22 +1975,6 @@ void dcn20_populate_dml_writeback_from_context(
}
-static int get_num_odm_heads(struct pipe_ctx *pipe)
-{
- int odm_head_count = 0;
- struct pipe_ctx *next_pipe = pipe->next_odm_pipe;
- while (next_pipe) {
- odm_head_count++;
- next_pipe = next_pipe->next_odm_pipe;
- }
- pipe = pipe->prev_odm_pipe;
- while (pipe) {
- odm_head_count++;
- pipe = pipe->prev_odm_pipe;
- }
- return odm_head_count ? odm_head_count + 1 : 0;
-}
-
int dcn20_populate_dml_pipes_from_context(
struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes)
{
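get_num_odm_heads gives way to the shared get_num_odm_splits, which counts splits rather than heads (splits = heads - 1); that is why the case 2: below becomes case 1:. Its presumed shape, mirroring the removed walker:

int get_num_odm_splits(struct pipe_ctx *pipe)
{
	int count = 0;
	struct pipe_ctx *p;

	for (p = pipe->next_odm_pipe; p; p = p->next_odm_pipe)
		count++;
	for (p = pipe->prev_odm_pipe; p; p = p->prev_odm_pipe)
		count++;
	return count;	/* 0 = no combine, 1 = 2-to-1 combine */
}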
@@ -1956,8 +2054,8 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].dout.dp_lanes = 4;
pipes[pipe_cnt].pipe.dest.vtotal_min = res_ctx->pipe_ctx[i].stream->adjust.v_total_min;
pipes[pipe_cnt].pipe.dest.vtotal_max = res_ctx->pipe_ctx[i].stream->adjust.v_total_max;
- switch (get_num_odm_heads(&res_ctx->pipe_ctx[i])) {
- case 2:
+ switch (get_num_odm_splits(&res_ctx->pipe_ctx[i])) {
+ case 1:
pipes[pipe_cnt].pipe.dest.odm_combine = dm_odm_combine_mode_2to1;
break;
default:
@@ -1965,9 +2063,14 @@ int dcn20_populate_dml_pipes_from_context(
}
pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].pipe_idx;
if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state
- == res_ctx->pipe_ctx[i].plane_state)
- pipes[pipe_cnt].pipe.src.hsplit_grp = res_ctx->pipe_ctx[i].top_pipe->pipe_idx;
- else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
+ == res_ctx->pipe_ctx[i].plane_state) {
+ struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].top_pipe;
+
+ while (first_pipe->top_pipe && first_pipe->top_pipe->plane_state
+ == res_ctx->pipe_ctx[i].plane_state)
+ first_pipe = first_pipe->top_pipe;
+ pipes[pipe_cnt].pipe.src.hsplit_grp = first_pipe->pipe_idx;
+ } else if (res_ctx->pipe_ctx[i].prev_odm_pipe) {
struct pipe_ctx *first_pipe = res_ctx->pipe_ctx[i].prev_odm_pipe;
while (first_pipe->prev_odm_pipe)
@@ -2052,16 +2155,20 @@ int dcn20_populate_dml_pipes_from_context(
/* todo: default max for now, until there is logic reflecting this in dc*/
pipes[pipe_cnt].dout.output_bpc = 12;
/*
- * Use max cursor settings for calculations to minimize
+ * For graphic planes the cursor number is 1, for nv12 it is 0, avoiding inflated
* bw calculations due to cursor on/off
*/
- pipes[pipe_cnt].pipe.src.num_cursors = 2;
+ if (res_ctx->pipe_ctx[i].plane_state &&
+ res_ctx->pipe_ctx[i].plane_state->address.type == PLN_ADDR_TYPE_VIDEO_PROGRESSIVE)
+ pipes[pipe_cnt].pipe.src.num_cursors = 0;
+ else
+ pipes[pipe_cnt].pipe.src.num_cursors = dc->dml.ip.number_of_cursors;
+
pipes[pipe_cnt].pipe.src.cur0_src_width = 256;
pipes[pipe_cnt].pipe.src.cur0_bpp = dm_cur_32bit;
- pipes[pipe_cnt].pipe.src.cur1_src_width = 256;
- pipes[pipe_cnt].pipe.src.cur1_bpp = dm_cur_32bit;
if (!res_ctx->pipe_ctx[i].plane_state) {
+ pipes[pipe_cnt].pipe.src.is_hsplit = pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = dm_horz;
pipes[pipe_cnt].pipe.src.sw_mode = dm_sw_linear;
pipes[pipe_cnt].pipe.src.macro_tile_size = dm_64k_tile;
@@ -2087,19 +2194,21 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.scale_ratio_depth.scl_enable = 0; /*Lb only or Full scl*/
pipes[pipe_cnt].pipe.scale_taps.htaps = 1;
pipes[pipe_cnt].pipe.scale_taps.vtaps = 1;
- pipes[pipe_cnt].pipe.src.is_hsplit = 0;
- pipes[pipe_cnt].pipe.dest.odm_combine = 0;
pipes[pipe_cnt].pipe.dest.vtotal_min = v_total;
pipes[pipe_cnt].pipe.dest.vtotal_max = v_total;
+
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1) {
+ pipes[pipe_cnt].pipe.src.viewport_width /= 2;
+ pipes[pipe_cnt].pipe.dest.recout_width /= 2;
+ }
} else {
struct dc_plane_state *pln = res_ctx->pipe_ctx[i].plane_state;
struct scaler_data *scl = &res_ctx->pipe_ctx[i].plane_res.scl_data;
pipes[pipe_cnt].pipe.src.immediate_flip = pln->flip_immediate;
- pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe
- && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
- || (res_ctx->pipe_ctx[i].top_pipe
- && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln);
+ pipes[pipe_cnt].pipe.src.is_hsplit = (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln)
+ || (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln)
+ || pipes[pipe_cnt].pipe.dest.odm_combine != dm_odm_combine_mode_disabled;
pipes[pipe_cnt].pipe.src.source_scan = pln->rotation == ROTATION_ANGLE_90
|| pln->rotation == ROTATION_ANGLE_270 ? dm_vert : dm_horz;
pipes[pipe_cnt].pipe.src.viewport_y_y = scl->viewport.y;
@@ -2124,18 +2233,22 @@ int dcn20_populate_dml_pipes_from_context(
pipes[pipe_cnt].pipe.src.dcc = pln->dcc.enable;
pipes[pipe_cnt].pipe.dest.recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.recout_height = scl->recout.height;
- pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
pipes[pipe_cnt].pipe.dest.full_recout_height = scl->recout.height;
- if (res_ctx->pipe_ctx[i].bottom_pipe && res_ctx->pipe_ctx[i].bottom_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].bottom_pipe->plane_res.scl_data.recout.height;
- } else if (res_ctx->pipe_ctx[i].top_pipe && res_ctx->pipe_ctx[i].top_pipe->plane_state == pln) {
- pipes[pipe_cnt].pipe.dest.full_recout_width +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.width;
- pipes[pipe_cnt].pipe.dest.full_recout_height +=
- res_ctx->pipe_ctx[i].top_pipe->plane_res.scl_data.recout.height;
+ pipes[pipe_cnt].pipe.dest.full_recout_width = scl->recout.width;
+ if (pipes[pipe_cnt].pipe.dest.odm_combine == dm_odm_combine_mode_2to1)
+ pipes[pipe_cnt].pipe.dest.full_recout_width *= 2;
+ else {
+ struct pipe_ctx *split_pipe = res_ctx->pipe_ctx[i].bottom_pipe;
+
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->bottom_pipe;
+ }
+ split_pipe = res_ctx->pipe_ctx[i].top_pipe;
+ while (split_pipe && split_pipe->plane_state == pln) {
+ pipes[pipe_cnt].pipe.dest.full_recout_width += split_pipe->plane_res.scl_data.recout.width;
+ split_pipe = split_pipe->top_pipe;
+ }
}
pipes[pipe_cnt].pipe.scale_ratio_depth.lb_depth = dm_lb_16;
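The block above replaces the old two-pipe special case with a walk over the whole split chain: every pipe sharing the plane, both down the bottom_pipe chain and up the top_pipe chain, contributes its recout width. A standalone sketch of that traversal, with a simplified stand-in for struct pipe_ctx:

/* Standalone sketch of the split-chain walk above (chain_pipe is a
 * simplified stand-in for struct pipe_ctx). Sum the recout widths of every
 * pipe sharing the same plane, in both directions from the current pipe. */
struct chain_pipe {
	const void *plane_state;
	int recout_width;
	struct chain_pipe *top_pipe;
	struct chain_pipe *bottom_pipe;
};

static int full_recout_width(const struct chain_pipe *pipe)
{
	const void *pln = pipe->plane_state;
	const struct chain_pipe *p;
	int width = pipe->recout_width;

	for (p = pipe->bottom_pipe; p && p->plane_state == pln; p = p->bottom_pipe)
		width += p->recout_width;
	for (p = pipe->top_pipe; p && p->plane_state == pln; p = p->top_pipe)
		width += p->recout_width;

	return width;
}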
@@ -2302,6 +2415,7 @@ bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx)
+ stream->timing.v_border_bottom;
dsc_cfg.pixel_encoding = stream->timing.pixel_encoding;
dsc_cfg.color_depth = stream->timing.display_color_depth;
+ dsc_cfg.is_odm = pipe_ctx->next_odm_pipe ? true : false;
dsc_cfg.dc_dsc_cfg = stream->timing.dsc_cfg;
dsc_cfg.dc_dsc_cfg.num_slices_h /= opp_cnt;
@@ -2388,7 +2502,7 @@ struct pipe_ctx *dcn20_find_secondary_pipe(struct dc *dc,
return secondary_pipe;
}
-void dcn20_merge_pipes_for_validate(
+static void dcn20_merge_pipes_for_validate(
struct dc *dc,
struct dc_state *context)
{
@@ -2413,7 +2527,7 @@ void dcn20_merge_pipes_for_validate(
odm_pipe->prev_odm_pipe = NULL;
odm_pipe->next_odm_pipe = NULL;
if (odm_pipe->stream_res.dsc)
- release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
+ dcn20_release_dsc(&context->res_ctx, dc->res_pool, &odm_pipe->stream_res.dsc);
/* Clear plane_res and stream_res */
memset(&odm_pipe->plane_res, 0, sizeof(odm_pipe->plane_res));
memset(&odm_pipe->stream_res, 0, sizeof(odm_pipe->stream_res));
@@ -2451,41 +2565,29 @@ int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split)
+ bool *split,
+ bool *merge)
{
int i, pipe_idx, vlevel_split;
+ int plane_count = 0;
bool force_split = false;
- bool avoid_split = dc->debug.pipe_split_policy != MPC_SPLIT_DYNAMIC;
+ bool avoid_split = dc->debug.pipe_split_policy == MPC_SPLIT_AVOID;
- /* Single display loop, exits if there is more than one display */
+ if (context->stream_count > 1) {
+ if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP)
+ avoid_split = true;
+ } else if (dc->debug.force_single_disp_pipe_split)
+ force_split = true;
+
+ /* TODO: fix dc bugs and remove this split threshold thing */
for (i = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
- bool exit_loop = false;
-
- if (!pipe->stream || pipe->top_pipe)
- continue;
- if (dc->debug.force_single_disp_pipe_split) {
- if (!force_split)
- force_split = true;
- else {
- force_split = false;
- exit_loop = true;
- }
- }
- if (dc->debug.pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP) {
- if (avoid_split)
- avoid_split = false;
- else {
- avoid_split = true;
- exit_loop = true;
- }
- }
- if (exit_loop)
- break;
+ if (pipe->stream && !pipe->prev_odm_pipe &&
+ (!pipe->top_pipe || pipe->top_pipe->plane_state != pipe->plane_state))
+ ++plane_count;
}
- /* TODO: fix dc bugs and remove this split threshold thing */
- if (context->stream_count > dc->res_pool->pipe_count / 2)
+ if (plane_count > dc->res_pool->pipe_count / 2)
avoid_split = true;
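The threshold above now counts planes rather than streams, so MPC-combined and ODM-combined pipes are not double-counted. A self-contained sketch of the counting rule, with simplified types:

/* Self-contained sketch of the plane-count rule above (pc_pipe is a
 * simplified stand-in): a pipe contributes only when it heads its plane's
 * tree, i.e. it has a stream, is not an ODM continuation, and its top pipe
 * (if any) carries a different plane. */
struct pc_pipe {
	const void *stream;
	const void *plane_state;
	const struct pc_pipe *top_pipe;
	const struct pc_pipe *prev_odm_pipe;
};

static int count_planes(const struct pc_pipe *pipes, int pipe_count)
{
	int i, plane_count = 0;

	for (i = 0; i < pipe_count; i++) {
		const struct pc_pipe *pipe = &pipes[i];

		if (pipe->stream && !pipe->prev_odm_pipe &&
		    (!pipe->top_pipe ||
		     pipe->top_pipe->plane_state != pipe->plane_state))
			++plane_count;
	}
	return plane_count;
}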
/* Avoid split loop looks for lowest voltage level that allows most unsplit pipes possible */
@@ -2508,11 +2610,12 @@ int dcn20_validate_apply_pipe_split_flags(
/* Split loop sets which pipe should be split based on dml outputs and dc flags */
for (i = 0, pipe_idx = 0; i < dc->res_pool->pipe_count; i++) {
struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+ int pipe_plane = context->bw_ctx.dml.vba.pipe_plane[pipe_idx];
if (!context->res_ctx.pipe_ctx[i].stream)
continue;
- if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] > 1)
+ if (force_split || context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_plane] > 1)
split[i] = true;
if ((pipe->stream->view_format ==
VIEW_3D_FORMAT_SIDE_BY_SIDE ||
@@ -2525,10 +2628,44 @@ int dcn20_validate_apply_pipe_split_flags(
split[i] = true;
if (dc->debug.force_odm_combine & (1 << pipe->stream_res.tg->inst)) {
split[i] = true;
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx] = dm_odm_combine_mode_2to1;
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane] = dm_odm_combine_mode_2to1;
+ }
+ context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] =
+ context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_plane];
+
+ if (pipe->prev_odm_pipe && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] != dm_odm_combine_mode_disabled) {
+ /*Already split odm pipe tree, don't try to split again*/
+ split[i] = false;
+ split[pipe->prev_odm_pipe->pipe_idx] = false;
+ } else if (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state
+ && context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_plane] == dm_odm_combine_mode_disabled) {
+ /*Already split mpc tree, don't try to split again, assumes only 2x mpc combine*/
+ split[i] = false;
+ split[pipe->top_pipe->pipe_idx] = false;
+ } else if (pipe->prev_odm_pipe || (pipe->top_pipe && pipe->plane_state == pipe->top_pipe->plane_state)) {
+ if (split[i] == false) {
+ /*Exiting mpc/odm combine*/
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ ASSERT(0); /*should not actually happen yet*/
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else
+ merge[pipe->top_pipe->pipe_idx] = true;
+ } else {
+ /*Transition from mpc combine to odm combine or vice versa*/
+ ASSERT(0); /*should not actually happen yet*/
+ split[i] = true;
+ merge[i] = true;
+ if (pipe->prev_odm_pipe) {
+ split[pipe->prev_odm_pipe->pipe_idx] = true;
+ merge[pipe->prev_odm_pipe->pipe_idx] = true;
+ } else {
+ split[pipe->top_pipe->pipe_idx] = true;
+ merge[pipe->top_pipe->pipe_idx] = true;
+ }
+ }
}
- context->bw_ctx.dml.vba.ODMCombineEnabled[pipe_idx] =
- context->bw_ctx.dml.vba.ODMCombineEnablePerState[vlevel][pipe_idx];
+
/* Adjust dppclk when split is forced, do not bother with dispclk */
if (split[i] && context->bw_ctx.dml.vba.NoOfDPP[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] == 1)
context->bw_ctx.dml.vba.RequiredDPPCLK[vlevel][context->bw_ctx.dml.vba.maxMpcComb][pipe_idx] /= 2;
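When a split is forced on a pipe that DML sized for a single DPP, each of the two DPPs ends up processing half the pixels per line, so the per-pipe clock requirement halves while DISPCLK is left alone. A hedged one-function sketch of that adjustment:

/* Hedged sketch of the adjustment above: DPPCLK was sized for one DPP, and
 * a forced 2-way split gives each DPP half the work, so the per-pipe
 * requirement halves. E.g. 800000 kHz for one pipe -> 400000 kHz per pipe. */
static int dppclk_khz_after_forced_split(int required_dppclk_khz, int planned_dpp)
{
	return planned_dpp == 1 ? required_dppclk_khz / 2 : required_dppclk_khz;
}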
@@ -2570,7 +2707,7 @@ bool dcn20_fast_validate_bw(
if (vlevel > context->bw_ctx.dml.soc.num_states)
goto validate_fail;
- vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split);
+ vlevel = dcn20_validate_apply_pipe_split_flags(dc, context, vlevel, split, NULL);
/*initialize pipe_just_split_from to invalid idx*/
for (i = 0; i < MAX_PIPES; i++)
@@ -2790,6 +2927,9 @@ void dcn20_calculate_dlg_params(
!= dm_dram_clock_change_unsupported;
context->bw_ctx.bw.dcn.clk.dppclk_khz = 0;
+ if (context->bw_ctx.bw.dcn.clk.dispclk_khz < dc->debug.min_disp_clk_khz)
+ context->bw_ctx.bw.dcn.clk.dispclk_khz = dc->debug.min_disp_clk_khz;
+
/*
* An artifact of dml pipe split/odm is that pipes get merged back together for
* calculation. Therefore we need to only extract for first pipe in ascending index order
@@ -3027,7 +3167,7 @@ static struct dc_cap_funcs cap_funcs = {
};
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state)
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state)
{
enum dc_status result = DC_OK;
@@ -3053,7 +3193,7 @@ static struct resource_funcs dcn20_res_pool_funcs = {
.add_stream_to_ctx = dcn20_add_stream_to_ctx,
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn20_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.populate_dml_pipes = dcn20_populate_dml_pipes_from_context,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link
@@ -3291,6 +3431,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
uint32_t hw_internal_rev)
{
+ if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+ return &dcn2_0_nv14_soc;
+
if (ASICREV_IS_NAVI12_P(hw_internal_rev))
return &dcn2_0_nv12_soc;
@@ -3772,6 +3915,15 @@ static bool dcn20_resource_construct(
dcn20_hw_sequencer_construct(dc);
+ // If NV12, set the PG function pointer to NULL. It's not that
+ // PG isn't supported for NV12; it's that we don't want to
+ // program the registers because that will cause more power
+ // to be consumed. We could have created dcn20_init_hw to get
+ // the same effect by checking the ASIC rev, but there was a
+ // request at some point not to check the ASIC rev in the hw sequencer.
+ if (ASICREV_IS_NAVI12_P(dc->ctx->asic_id.hw_internal_rev))
+ dc->hwseq->funcs.enable_power_gating_plane = NULL;
+
dc->caps.max_planes = pool->base.pipe_count;
for (i = 0; i < dc->caps.max_planes; ++i)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
index f5893840b79b..9d5bff9455fd 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.h
@@ -119,14 +119,15 @@ void dcn20_set_mcif_arb_params(
display_e2e_pipe_params_st *pipes,
int pipe_cnt);
bool dcn20_validate_bandwidth(struct dc *dc, struct dc_state *context, bool fast_validate);
-void dcn20_merge_pipes_for_validate(
- struct dc *dc,
- struct dc_state *context);
int dcn20_validate_apply_pipe_split_flags(
struct dc *dc,
struct dc_state *context,
int vlevel,
- bool *split);
+ bool *split,
+ bool *merge);
+void dcn20_release_dsc(struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct display_stream_compressor **dsc);
bool dcn20_validate_dsc(struct dc *dc, struct dc_state *new_ctx);
void dcn20_split_stream_for_mpc(
struct resource_context *res_ctx,
@@ -159,7 +160,7 @@ enum dc_status dcn20_build_mapped_resource(const struct dc *dc, struct dc_state
enum dc_status dcn20_add_stream_to_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_add_dsc_to_stream_resource(struct dc *dc, struct dc_state *dc_ctx, struct dc_stream_state *dc_stream);
enum dc_status dcn20_remove_stream_from_ctx(struct dc *dc, struct dc_state *new_ctx, struct dc_stream_state *dc_stream);
-enum dc_status dcn20_get_default_swizzle_mode(struct dc_plane_state *plane_state);
+enum dc_status dcn20_patch_unknown_plane_state(struct dc_plane_state *plane_state);
void dcn20_patch_bounding_box(
struct dc *dc,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
index 9b70a1e7b962..99a7ef6ab878 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_stream_encoder.c
@@ -616,5 +616,6 @@ void dcn20_stream_encoder_construct(
enc1->regs = regs;
enc1->se_shift = se_shift;
enc1->se_mask = se_mask;
+ enc1->base.stream_enc_inst = eng_id - ENGINE_ID_DIGA;
}
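The new stream_enc_inst field is derived by offsetting the engine id against ENGINE_ID_DIGA, so consecutive DIG engines map to consecutive encoder instances. A tiny illustration with a hypothetical enum mirroring that layout (the real enum lives elsewhere in DC):

/* Hypothetical enum mirroring the layout assumed above: DIG engines are
 * consecutive, so the encoder instance is eng_id - ENGINE_ID_DIGA. */
enum eng_id { ENGINE_ID_DIGA, ENGINE_ID_DIGB, ENGINE_ID_DIGC };

static int stream_enc_inst(enum eng_id id)
{
	return (int)id - (int)ENGINE_ID_DIGA;	/* DIGA->0, DIGB->1, DIGC->2 */
}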
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
index 02fafb013fc6..f1ef46e8da5b 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_vmid.h
@@ -34,13 +34,6 @@
#define BASE(seg) \
BASE_INNER(seg)
-#define SRI(reg_name, block, id)\
- .reg_name = BASE(mm ## block ## id ## _ ## reg_name ## _BASE_IDX) + \
- mm ## block ## id ## _ ## reg_name
-
-#define SF(reg_name, field_name, post_fix)\
- .field_name = reg_name ## __ ## field_name ## post_fix
-
#define DCN20_VMID_REG_LIST(id)\
SRI(CNTL, DCN_VM_CONTEXT, id),\
SRI(PAGE_TABLE_BASE_ADDR_HI32, DCN_VM_CONTEXT, id),\
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
index f546260c15b7..5e2d14b897af 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.c
@@ -141,7 +141,7 @@ int hubbub21_init_dchub(struct hubbub *hubbub,
return NUM_VMID;
}
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -149,6 +149,7 @@ void hubbub21_program_urgent_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* Repeat for water mark set A, B, C and D. */
/* clock state A */
@@ -163,7 +164,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_ns < hubbub1->watermarks.a.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -172,7 +174,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_A, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -180,14 +184,18 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_A, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
+
if (safe_to_lower || watermarks->a.urgent_latency_ns > hubbub1->watermarks.a.urgent_latency_ns) {
hubbub1->watermarks.a.urgent_latency_ns = watermarks->a.urgent_latency_ns;
prog_wm_value = convert_and_clamp(watermarks->a.urgent_latency_ns,
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_A, prog_wm_value);
- }
+ } else if (watermarks->a.urgent_latency_ns < hubbub1->watermarks.a.urgent_latency_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.urgent_ns > hubbub1->watermarks.b.urgent_ns) {
@@ -201,7 +209,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_ns < hubbub1->watermarks.b.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -210,7 +219,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_B, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -218,7 +229,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_B, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.urgent_latency_ns > hubbub1->watermarks.b.urgent_latency_ns) {
hubbub1->watermarks.b.urgent_latency_ns = watermarks->b.urgent_latency_ns;
@@ -226,7 +239,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_B, prog_wm_value);
- }
+ } else if (watermarks->b.urgent_latency_ns < hubbub1->watermarks.b.urgent_latency_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.urgent_ns > hubbub1->watermarks.c.urgent_ns) {
@@ -240,7 +254,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_ns < hubbub1->watermarks.c.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -249,7 +264,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_C, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -257,7 +274,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_C, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.urgent_latency_ns > hubbub1->watermarks.c.urgent_latency_ns) {
hubbub1->watermarks.c.urgent_latency_ns = watermarks->c.urgent_latency_ns;
@@ -265,7 +284,8 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_C, prog_wm_value);
- }
+ } else if (watermarks->c.urgent_latency_ns < hubbub1->watermarks.c.urgent_latency_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.urgent_ns > hubbub1->watermarks.d.urgent_ns) {
@@ -279,7 +299,8 @@ void hubbub21_program_urgent_watermarks(
DC_LOG_BANDWIDTH_CALCS("URGENCY_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.urgent_ns, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_ns < hubbub1->watermarks.d.urgent_ns)
+ wm_pending = true;
/* determine the transfer time for a quantity of data for a particular requestor.*/
if (safe_to_lower || watermarks->a.frac_urg_bw_flip
@@ -288,7 +309,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_FLIP_D, watermarks->a.frac_urg_bw_flip);
- }
+ } else if (watermarks->a.frac_urg_bw_flip
+ < hubbub1->watermarks.a.frac_urg_bw_flip)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.frac_urg_bw_nom
> hubbub1->watermarks.a.frac_urg_bw_nom) {
@@ -296,7 +319,9 @@ void hubbub21_program_urgent_watermarks(
REG_SET(DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, 0,
DCHUBBUB_ARB_FRAC_URG_BW_NOM_D, watermarks->a.frac_urg_bw_nom);
- }
+ } else if (watermarks->a.frac_urg_bw_nom
+ < hubbub1->watermarks.a.frac_urg_bw_nom)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.urgent_latency_ns > hubbub1->watermarks.d.urgent_latency_ns) {
hubbub1->watermarks.d.urgent_latency_ns = watermarks->d.urgent_latency_ns;
@@ -304,10 +329,13 @@ void hubbub21_program_urgent_watermarks(
refclk_mhz, 0x1fffff);
REG_SET(DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, 0,
DCHUBBUB_ARB_REFCYC_PER_TRIP_TO_MEMORY_D, prog_wm_value);
- }
+ } else if (watermarks->d.urgent_latency_ns < hubbub1->watermarks.d.urgent_latency_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
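Every field in the function above (and in the stutter and pstate variants that follow) uses the same rule: raising a watermark is programmed immediately, lowering is deferred until safe_to_lower, and a still-lower requested value is reported back as pending. A condensed sketch of the per-field pattern, with REG_SET reduced to a plain store:

#include <stdbool.h>

/* Condensed sketch of the per-field rule above: raise immediately, lower
 * only when safe_to_lower, and report a still-lower requested value as
 * pending so the caller can come back later. */
static bool program_one_watermark(unsigned int *programmed,
				  unsigned int requested, bool safe_to_lower)
{
	if (safe_to_lower || requested > *programmed) {
		*programmed = requested;	/* would be a REG_SET here */
		return false;			/* nothing left pending */
	}
	return requested < *programmed;		/* lowering deferred */
}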
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -315,6 +343,7 @@ void hubbub21_program_stutter_watermarks(
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
@@ -330,7 +359,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->a.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns) {
@@ -345,7 +376,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->a.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.a.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
@@ -361,7 +394,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->b.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns) {
@@ -376,7 +411,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->b.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.b.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
@@ -392,7 +429,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->c.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns) {
@@ -407,7 +446,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->c.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.c.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
@@ -423,7 +464,9 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_ENTER_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_enter_plus_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns)
+ wm_pending = true;
if (safe_to_lower || watermarks->d.cstate_pstate.cstate_exit_ns
> hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns) {
@@ -438,10 +481,14 @@ void hubbub21_program_stutter_watermarks(
DC_LOG_BANDWIDTH_CALCS("SR_EXIT_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n",
watermarks->d.cstate_pstate.cstate_exit_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.cstate_exit_ns
+ < hubbub1->watermarks.d.cstate_pstate.cstate_exit_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
@@ -450,6 +497,8 @@ void hubbub21_program_pstate_watermarks(
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
uint32_t prog_wm_value;
+ bool wm_pending = false;
+
/* clock state A */
if (safe_to_lower || watermarks->a.cstate_pstate.pstate_change_ns
> hubbub1->watermarks.a.cstate_pstate.pstate_change_ns) {
@@ -464,7 +513,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_A calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->a.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->a.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.a.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state B */
if (safe_to_lower || watermarks->b.cstate_pstate.pstate_change_ns
@@ -480,7 +531,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_B calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->b.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->b.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.b.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state C */
if (safe_to_lower || watermarks->c.cstate_pstate.pstate_change_ns
@@ -496,7 +549,9 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_C calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->c.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->c.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.c.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
/* clock state D */
if (safe_to_lower || watermarks->d.cstate_pstate.pstate_change_ns
@@ -512,20 +567,30 @@ void hubbub21_program_pstate_watermarks(
DC_LOG_BANDWIDTH_CALCS("DRAM_CLK_CHANGE_WATERMARK_D calculated =%d\n"
"HW register value = 0x%x\n\n",
watermarks->d.cstate_pstate.pstate_change_ns, prog_wm_value);
- }
+ } else if (watermarks->d.cstate_pstate.pstate_change_ns
+ < hubbub1->watermarks.d.cstate_pstate.pstate_change_ns)
+ wm_pending = true;
+
+ return wm_pending;
}
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower)
{
struct dcn20_hubbub *hubbub1 = TO_DCN20_HUBBUB(hubbub);
+ bool wm_pending = false;
+
+ if (hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
- hubbub21_program_urgent_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
- hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower);
+ if (hubbub21_program_stutter_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
+
+ if (hubbub21_program_pstate_watermarks(hubbub, watermarks, refclk_mhz, safe_to_lower))
+ wm_pending = true;
/*
* The DCHub arbiter has a mechanism to dynamically rate limit the DCHub request stream to the fabric.
@@ -549,6 +614,8 @@ void hubbub21_program_watermarks(
DCHUBBUB_ARB_MAX_QOS_COMMIT_THRESHOLD, 0xF);
hubbub1_allow_self_refresh_control(hubbub, !hubbub->ctx->dc->debug.disable_stutter);
+
+ return wm_pending;
}
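The aggregated return value tells the caller that at least one watermark is still waiting for a safe-to-lower pass. A hedged sketch of how a caller might consume it; only the program_watermarks signature comes from the code above, the retry policy is illustrative:

#include <stdbool.h>

/* Illustrative caller (the retry policy is an assumption): program with
 * safe_to_lower = false around the state change, then repeat with
 * safe_to_lower = true once lowering is safe if anything was pending.
 * program stands in for hubbub->funcs->program_watermarks. */
typedef bool (*program_wm_fn)(void *hubbub, void *watermarks,
			      unsigned int refclk_mhz, bool safe_to_lower);

static void apply_watermarks(program_wm_fn program, void *hubbub,
			     void *watermarks, unsigned int refclk_mhz,
			     bool safe_to_lower_now)
{
	bool wm_pending = program(hubbub, watermarks, refclk_mhz, false);

	if (wm_pending && safe_to_lower_now)
		program(hubbub, watermarks, refclk_mhz, true);
}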
void hubbub21_wm_read_state(struct hubbub *hubbub,
@@ -635,6 +702,7 @@ static const struct hubbub_funcs hubbub21_funcs = {
.wm_read_state = hubbub21_wm_read_state,
.get_dchub_ref_freq = hubbub2_get_dchub_ref_freq,
.program_watermarks = hubbub21_program_watermarks,
+ .allow_self_refresh_control = hubbub1_allow_self_refresh_control,
.apply_DEDCN21_147_wa = hubbub21_apply_DEDCN21_147_wa,
};
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
index c4840dfb1fa5..ef3ef28509ed 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubbub.h
@@ -113,22 +113,22 @@
void dcn21_dchvm_init(struct hubbub *hubbub);
int hubbub21_init_dchub(struct hubbub *hubbub,
struct dcn_hubbub_phys_addr_config *pa_config);
-void hubbub21_program_watermarks(
+bool hubbub21_program_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_urgent_watermarks(
+bool hubbub21_program_urgent_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_stutter_watermarks(
+bool hubbub21_program_stutter_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
bool safe_to_lower);
-void hubbub21_program_pstate_watermarks(
+bool hubbub21_program_pstate_watermarks(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
index cf09b9335728..d285ba622d61 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hubp.c
@@ -79,32 +79,47 @@ void apply_DEDCN21_142_wa_for_hostvm_deadline(
struct _vcs_dpi_display_dlg_regs_st *dlg_attr)
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- uint32_t cur_value;
+ uint32_t refcyc_per_vm_group_vblank;
+ uint32_t refcyc_per_vm_req_vblank;
+ uint32_t refcyc_per_vm_group_flip;
+ uint32_t refcyc_per_vm_req_flip;
+ const uint32_t uninitialized_hw_default = 0;
- REG_GET(VBLANK_PARAMETERS_5, REFCYC_PER_VM_GROUP_VBLANK, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_vblank)
+ REG_GET(VBLANK_PARAMETERS_5,
+ REFCYC_PER_VM_GROUP_VBLANK, &refcyc_per_vm_group_vblank);
+
+ if (refcyc_per_vm_group_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_group_vblank > dlg_attr->refcyc_per_vm_group_vblank)
REG_SET(VBLANK_PARAMETERS_5, 0,
REFCYC_PER_VM_GROUP_VBLANK, dlg_attr->refcyc_per_vm_group_vblank);
REG_GET(VBLANK_PARAMETERS_6,
- REFCYC_PER_VM_REQ_VBLANK,
- &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_vblank)
+ REFCYC_PER_VM_REQ_VBLANK, &refcyc_per_vm_req_vblank);
+
+ if (refcyc_per_vm_req_vblank == uninitialized_hw_default ||
+ refcyc_per_vm_req_vblank > dlg_attr->refcyc_per_vm_req_vblank)
REG_SET(VBLANK_PARAMETERS_6, 0,
REFCYC_PER_VM_REQ_VBLANK, dlg_attr->refcyc_per_vm_req_vblank);
- REG_GET(FLIP_PARAMETERS_3, REFCYC_PER_VM_GROUP_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_group_flip)
+ REG_GET(FLIP_PARAMETERS_3,
+ REFCYC_PER_VM_GROUP_FLIP, &refcyc_per_vm_group_flip);
+
+ if (refcyc_per_vm_group_flip == uninitialized_hw_default ||
+ refcyc_per_vm_group_flip > dlg_attr->refcyc_per_vm_group_flip)
REG_SET(FLIP_PARAMETERS_3, 0,
REFCYC_PER_VM_GROUP_FLIP, dlg_attr->refcyc_per_vm_group_flip);
- REG_GET(FLIP_PARAMETERS_4, REFCYC_PER_VM_REQ_FLIP, &cur_value);
- if (cur_value > dlg_attr->refcyc_per_vm_req_flip)
+ REG_GET(FLIP_PARAMETERS_4,
+ REFCYC_PER_VM_REQ_FLIP, &refcyc_per_vm_req_flip);
+
+ if (refcyc_per_vm_req_flip == uninitialized_hw_default ||
+ refcyc_per_vm_req_flip > dlg_attr->refcyc_per_vm_req_flip)
REG_SET(FLIP_PARAMETERS_4, 0,
REFCYC_PER_VM_REQ_FLIP, dlg_attr->refcyc_per_vm_req_flip);
REG_SET(FLIP_PARAMETERS_5, 0,
REFCYC_PER_PTE_GROUP_FLIP_C, dlg_attr->refcyc_per_pte_group_flip_c);
+
REG_SET(FLIP_PARAMETERS_6, 0,
REFCYC_PER_META_CHUNK_FLIP_C, dlg_attr->refcyc_per_meta_chunk_flip_c);
}
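The rework above makes the workaround treat a zero register value as the power-on default: a deadline register is rewritten when it is still uninitialized or strictly larger than the newly computed value, so an already-tighter deadline is never relaxed. The rule in isolation:

/* The update rule above, in isolation: rewrite a deadline register only if
 * it still holds the power-on default (0) or is strictly larger than the
 * newly computed value. */
static int should_update_deadline(unsigned int cur_value, unsigned int new_value)
{
	const unsigned int uninitialized_hw_default = 0;

	return cur_value == uninitialized_hw_default || cur_value > new_value;
}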
@@ -325,13 +340,9 @@ void hubp21_set_vm_system_aperture_settings(struct hubp *hubp,
{
struct dcn21_hubp *hubp21 = TO_DCN21_HUBP(hubp);
- PHYSICAL_ADDRESS_LOC mc_vm_apt_default;
PHYSICAL_ADDRESS_LOC mc_vm_apt_low;
PHYSICAL_ADDRESS_LOC mc_vm_apt_high;
- // The format of default addr is 48:12 of the 48 bit addr
- mc_vm_apt_default.quad_part = apt->sys_default.quad_part >> 12;
-
// The format of high/low are 48:18 of the 48 bit addr
mc_vm_apt_low.quad_part = apt->sys_low.quad_part >> 18;
mc_vm_apt_high.quad_part = apt->sys_high.quad_part >> 18;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
index 081ad8e43d58..ada65b1a7eb1 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.c
@@ -112,3 +112,25 @@ void dcn21_optimize_pwr_state(
true);
}
+/* If the user hotplugs an HDMI monitor while the display is off,
+ * the OS will do a mode set (with output timing) but keep the output off.
+ * In this case DAL will ask vbios to power up the PLL in the PHY.
+ * If the user unplugs the monitor (while we are in monitor off), or the
+ * system attempts to enter modern standby (in which we will disable the PLL),
+ * the PHY will hang on the next mode set attempt.
+ * If a PLL enable is followed by a PLL disable (without executing lane
+ * enable/disable), RDPCS_PHY_DP_MPLLB_STATE remains 1,
+ * which indicates that the PLL disable attempt didn't actually go through.
+ * As a workaround, insert a PHY lane enable/disable before the PLL disable.
+ */
+void dcn21_PLAT_58856_wa(struct dc_state *context, struct pipe_ctx *pipe_ctx)
+{
+ if (!pipe_ctx->stream->dpms_off)
+ return;
+
+ pipe_ctx->stream->dpms_off = false;
+ core_link_enable_stream(context, pipe_ctx);
+ core_link_disable_stream(pipe_ctx);
+ pipe_ctx->stream->dpms_off = true;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
index 182736096123..26bf24d3b59f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_hwseq.h
@@ -44,4 +44,7 @@ void dcn21_optimize_pwr_state(
const struct dc *dc,
struct dc_state *context);
+void dcn21_PLAT_58856_wa(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
+
#endif /* __DC_HWSS_DCN21_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
index 4861aa5c59ae..b9ff9767e08f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_init.c
@@ -34,6 +34,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.apply_ctx_to_hw = dce110_apply_ctx_to_hw,
.apply_ctx_for_surface = NULL,
.program_front_end_for_ctx = dcn20_program_front_end_for_ctx,
+ .post_unlock_program_front_end = dcn20_post_unlock_program_front_end,
.update_plane_addr = dcn20_update_plane_addr,
.update_dchub = dcn10_update_dchub,
.update_pending_status = dcn10_update_pending_status,
@@ -51,7 +52,7 @@ static const struct hw_sequencer_funcs dcn21_funcs = {
.disable_audio_stream = dce110_disable_audio_stream,
.disable_plane = dcn20_disable_plane,
.pipe_control_lock = dcn20_pipe_control_lock,
- .pipe_control_lock_global = dcn20_pipe_control_lock_global,
+ .interdependent_update_lock = dcn10_lock_all_pipes,
.prepare_bandwidth = dcn20_prepare_bandwidth,
.optimize_bandwidth = dcn20_optimize_bandwidth,
.update_bandwidth = dcn20_update_bandwidth,
@@ -104,6 +105,8 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.reset_hw_ctx_wrap = dcn20_reset_hw_ctx_wrap,
.enable_stream_timing = dcn20_enable_stream_timing,
.edp_backlight_control = dce110_edp_backlight_control,
+ .is_panel_backlight_on = dce110_is_panel_backlight_on,
+ .is_panel_powered_on = dce110_is_panel_powered_on,
.disable_stream_gating = dcn20_disable_stream_gating,
.enable_stream_gating = dcn20_enable_stream_gating,
.setup_vupdate_interrupt = dcn20_setup_vupdate_interrupt,
@@ -116,7 +119,6 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.enable_power_gating_plane = dcn20_enable_power_gating_plane,
.dpp_pg_control = dcn20_dpp_pg_control,
.hubp_pg_control = dcn20_hubp_pg_control,
- .dsc_pg_control = NULL,
.update_odm = dcn20_update_odm,
.dsc_pg_control = dcn20_dsc_pg_control,
.get_surface_visual_confirm_color = dcn10_get_surface_visual_confirm_color,
@@ -128,6 +130,7 @@ static const struct hwseq_private_funcs dcn21_private_funcs = {
.dccg_init = dcn20_dccg_init,
.set_blend_lut = dcn20_set_blend_lut,
.set_shaper_3dlut = dcn20_set_shaper_3dlut,
+ .PLAT_58856_wa = dcn21_PLAT_58856_wa,
};
void dcn21_hw_sequencer_construct(struct dc *dc)
diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
index 0d506d30d6b6..51b5910cd05f 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
@@ -60,6 +60,7 @@
#include "dcn20/dcn20_dccg.h"
#include "dcn21_hubbub.h"
#include "dcn10/dcn10_resource.h"
+#include "dce110/dce110_resource.h"
#include "dcn20/dcn20_dwb.h"
#include "dcn20/dcn20_mmhubbub.h"
@@ -83,7 +84,7 @@
#include "dcn21_resource.h"
#include "vm_helper.h"
#include "dcn20/dcn20_vmid.h"
-#include "../dce/dmub_psr.h"
+#include "dce/dmub_psr.h"
#define SOC_BOUNDING_BOX_VALID false
#define DC_LOGGER_INIT(logger)
@@ -155,17 +156,18 @@ struct _vcs_dpi_ip_params_st dcn2_1_ip = {
.xfc_supported = false,
.xfc_fill_bw_overhead_percent = 10.0,
.xfc_fill_constant_bytes = 0,
- .ptoi_supported = 0
+ .ptoi_supported = 0,
+ .number_of_cursors = 1,
};
struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.clock_limits = {
{
.state = 0,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 440.0,
+ .dcfclk_mhz = 400.0,
+ .fabricclk_mhz = 400.0,
+ .dispclk_mhz = 600.0,
+ .dppclk_mhz = 400.00,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -173,10 +175,10 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 1,
- .dcfclk_mhz = 304.0,
- .fabricclk_mhz = 600.0,
- .dispclk_mhz = 618.0,
- .dppclk_mhz = 618.0,
+ .dcfclk_mhz = 464.52,
+ .fabricclk_mhz = 800.0,
+ .dispclk_mhz = 654.55,
+ .dppclk_mhz = 626.09,
.phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 205.67,
@@ -184,32 +186,65 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
{
.state = 2,
- .dcfclk_mhz = 608.0,
- .fabricclk_mhz = 1066.0,
- .dispclk_mhz = 888.0,
- .dppclk_mhz = 888.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 514.29,
+ .fabricclk_mhz = 933.0,
+ .dispclk_mhz = 757.89,
+ .dppclk_mhz = 685.71,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 278.0,
.dscclk_mhz = 287.67,
- .dram_speed_mts = 2133.0,
+ .dram_speed_mts = 1866.0,
},
{
.state = 3,
- .dcfclk_mhz = 676.0,
- .fabricclk_mhz = 1600.0,
- .dispclk_mhz = 1015.0,
- .dppclk_mhz = 1015.0,
- .phyclk_mhz = 810.0,
+ .dcfclk_mhz = 576.00,
+ .fabricclk_mhz = 1067.0,
+ .dispclk_mhz = 847.06,
+ .dppclk_mhz = 757.89,
+ .phyclk_mhz = 600.0,
.socclk_mhz = 715.0,
.dscclk_mhz = 318.334,
- .dram_speed_mts = 4266.0,
+ .dram_speed_mts = 2134.0,
},
{
.state = 4,
- .dcfclk_mhz = 810.0,
+ .dcfclk_mhz = 626.09,
+ .fabricclk_mhz = 1200.0,
+ .dispclk_mhz = 900.00,
+ .dppclk_mhz = 847.06,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 953.0,
+ .dscclk_mhz = 489.0,
+ .dram_speed_mts = 2400.0,
+ },
+ {
+ .state = 5,
+ .dcfclk_mhz = 685.71,
+ .fabricclk_mhz = 1333.0,
+ .dispclk_mhz = 1028.57,
+ .dppclk_mhz = 960.00,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 278.0,
+ .dscclk_mhz = 287.67,
+ .dram_speed_mts = 2666.0,
+ },
+ {
+ .state = 6,
+ .dcfclk_mhz = 757.89,
+ .fabricclk_mhz = 1467.0,
+ .dispclk_mhz = 1107.69,
+ .dppclk_mhz = 1028.57,
+ .phyclk_mhz = 810.0,
+ .socclk_mhz = 715.0,
+ .dscclk_mhz = 318.334,
+ .dram_speed_mts = 3200.0,
+ },
+ {
+ .state = 7,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
- .dppclk_mhz = 1285.0,
+ .dppclk_mhz = 1285.00,
.phyclk_mhz = 1325.0,
.socclk_mhz = 953.0,
.dscclk_mhz = 489.0,
@@ -217,8 +252,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
},
/*Extra state, no dispclk ramping*/
{
- .state = 5,
- .dcfclk_mhz = 810.0,
+ .state = 8,
+ .dcfclk_mhz = 847.06,
.fabricclk_mhz = 1600.0,
.dispclk_mhz = 1395.0,
.dppclk_mhz = 1285.0,
@@ -265,7 +300,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
.xfc_bus_transport_time_us = 4,
.xfc_xbuf_latency_tolerance_us = 4,
.use_urgent_burst_bw = 1,
- .num_states = 5
+ .num_states = 9
};
#ifndef MAX
@@ -820,11 +855,12 @@ static const struct dc_plane_cap plane_cap = {
};
static const struct dc_debug_options debug_defaults_drv = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = false,
.clock_trace = true,
.disable_pplib_clock_request = true,
+ .min_disp_clk_khz = 100000,
.pipe_split_policy = MPC_SPLIT_AVOID_MULT_DISP,
.force_single_disp_pipe_split = false,
.disable_dcc = DCC_ENABLE,
@@ -840,7 +876,7 @@ static const struct dc_debug_options debug_defaults_drv = {
};
static const struct dc_debug_options debug_defaults_diags = {
- .disable_dmcu = true,
+ .disable_dmcu = false,
.force_abm_enable = false,
.timing_trace = true,
.clock_trace = true,
@@ -856,6 +892,7 @@ static const struct dc_debug_options debug_defaults_diags = {
enum dcn20_clk_src_array_id {
DCN20_CLK_SRC_PLL0,
DCN20_CLK_SRC_PLL1,
+ DCN20_CLK_SRC_PLL2,
DCN20_CLK_SRC_TOTAL_DCN21
};
@@ -960,6 +997,9 @@ static void dcn21_resource_destruct(struct dcn21_resource_pool *pool)
if (pool->base.dmcu != NULL)
dce_dmcu_destroy(&pool->base.dmcu);
+ if (pool->base.psr != NULL)
+ dmub_psr_destroy(&pool->base.psr);
+
if (pool->base.dccg != NULL)
dcn_dccg_destroy(&pool->base.dccg);
@@ -1333,26 +1373,78 @@ static void update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_param
{
struct dcn21_resource_pool *pool = TO_DCN21_RES_POOL(dc->res_pool);
struct clk_limit_table *clk_table = &bw_params->clk_table;
- int i;
+ unsigned int i, j, k;
+ int closest_clk_lvl;
+
+ // diags does not retrieve proper values from the SMU;
+ // cap the states to 5 and make state 5 the max state
+ if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) || IS_DIAG_DC(dc->ctx->dce_environment)) {
+ dcn2_1_soc.num_states = 5;
+
+ dcn2_1_soc.clock_limits[5].state = 5;
+ dcn2_1_soc.clock_limits[5].dcfclk_mhz = 810.0;
+ dcn2_1_soc.clock_limits[5].fabricclk_mhz = 1600.0;
+ dcn2_1_soc.clock_limits[5].dispclk_mhz = 1395.0;
+ dcn2_1_soc.clock_limits[5].dppclk_mhz = 1285.0;
+ dcn2_1_soc.clock_limits[5].phyclk_mhz = 1325.0;
+ dcn2_1_soc.clock_limits[5].socclk_mhz = 953.0;
+ dcn2_1_soc.clock_limits[5].dscclk_mhz = 489.0;
+ dcn2_1_soc.clock_limits[5].dram_speed_mts = 4266.0;
+ } else {
+ dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
+ dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
+ dcn2_1_soc.num_chans = bw_params->num_channels;
+
+ /* Vmin: leave lowest DCN clocks, override with dcfclk, fclk, memclk from fuse */
+ dcn2_1_soc.clock_limits[0].state = 0;
+ dcn2_1_soc.clock_limits[0].dcfclk_mhz = clk_table->entries[0].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[0].fabricclk_mhz = clk_table->entries[0].fclk_mhz;
+ dcn2_1_soc.clock_limits[0].socclk_mhz = clk_table->entries[0].socclk_mhz;
+ dcn2_1_soc.clock_limits[0].dram_speed_mts = clk_table->entries[0].memclk_mhz * 2;
+
+ /*
+ * Other levels: find the closest DCN clocks that fit the given clock limit,
+ * using dcfclk as the indicator
+ */
- dcn2_1_ip.max_num_otg = pool->base.res_cap->num_timing_generator;
- dcn2_1_ip.max_num_dpp = pool->base.pipe_count;
- dcn2_1_soc.num_chans = bw_params->num_channels;
+ closest_clk_lvl = -1;
+ /* index currently being filled */
+ k = 1;
+ for (i = 1; i < clk_table->num_entries; i++) {
+ /* loop backwards, skipping duplicate states; +1 because the SMU has a precision issue */
+ for (j = dcn2_1_soc.num_states - 2; j >= k; j--) {
+ if ((unsigned int) dcn2_1_soc.clock_limits[j].dcfclk_mhz <= clk_table->entries[i].dcfclk_mhz) {
+ closest_clk_lvl = j;
+ break;
+ }
+ }
- for (i = 0; i < clk_table->num_entries; i++) {
+ /* if we found a level that fits, use its DCN clocks; if not, go to the next clock limit */
+ if (closest_clk_lvl != -1) {
+ dcn2_1_soc.clock_limits[k].state = i;
+ dcn2_1_soc.clock_limits[k].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
+ dcn2_1_soc.clock_limits[k].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
+ dcn2_1_soc.clock_limits[k].socclk_mhz = clk_table->entries[i].socclk_mhz;
+ dcn2_1_soc.clock_limits[k].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+
+ dcn2_1_soc.clock_limits[k].dispclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dispclk_mhz;
+ dcn2_1_soc.clock_limits[k].dppclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dppclk_mhz;
+ dcn2_1_soc.clock_limits[k].dram_bw_per_chan_gbps = dcn2_1_soc.clock_limits[closest_clk_lvl].dram_bw_per_chan_gbps;
+ dcn2_1_soc.clock_limits[k].dscclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dscclk_mhz;
+ dcn2_1_soc.clock_limits[k].dtbclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].dtbclk_mhz;
+ dcn2_1_soc.clock_limits[k].phyclk_d18_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_d18_mhz;
+ dcn2_1_soc.clock_limits[k].phyclk_mhz = dcn2_1_soc.clock_limits[closest_clk_lvl].phyclk_mhz;
+ k++;
+ }
+ }
- dcn2_1_soc.clock_limits[i].state = i;
- dcn2_1_soc.clock_limits[i].dcfclk_mhz = clk_table->entries[i].dcfclk_mhz;
- dcn2_1_soc.clock_limits[i].fabricclk_mhz = clk_table->entries[i].fclk_mhz;
- dcn2_1_soc.clock_limits[i].socclk_mhz = clk_table->entries[i].socclk_mhz;
- dcn2_1_soc.clock_limits[i].dram_speed_mts = clk_table->entries[i].memclk_mhz * 2;
+ /* duplicate last level */
+ dcn2_1_soc.clock_limits[k] = dcn2_1_soc.clock_limits[k - 1];
+ dcn2_1_soc.clock_limits[k].state = k;
+ dcn2_1_soc.num_states = k + 1;
}
- dcn2_1_soc.clock_limits[i] = dcn2_1_soc.clock_limits[i - 1];
- dcn2_1_soc.num_states = i;
- // diags does not retrieve proper values from SMU, do not update DML instance for diags
- if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !IS_DIAG_DC(dc->ctx->dce_environment))
- dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
+ dml_init_instance(&dc->dml, &dcn2_1_soc, &dcn2_1_ip, DML_PROJECT_DCN21);
}
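The rewritten update_bw_bounding_box takes dcfclk, fclk, socclk, and memclk per state from the SMU-provided table and borrows the display-derived clocks (dispclk, dppclk, dscclk, phyclk, and so on) from the nearest hardcoded base state, chosen as the highest one whose dcfclk does not exceed the SMU's. A standalone sketch of that lookup over simplified tables:

/* Standalone sketch of the lookup above: return the highest base state
 * whose dcfclk does not exceed the SMU-reported dcfclk, scanning backwards
 * and stopping at first_candidate (the driver's k). The top base state is
 * excluded (num_base_states - 2) per the SMU precision note. */
static int closest_base_level(const unsigned int *base_dcfclk_mhz,
			      int num_base_states, unsigned int smu_dcfclk_mhz,
			      int first_candidate)
{
	int j;

	for (j = num_base_states - 2; j >= first_candidate; j--)
		if (base_dcfclk_mhz[j] <= smu_dcfclk_mhz)
			return j;
	return -1;	/* nothing fits at or below this dcfclk */
}

The duplicate-last-level step at the end of the function gives DML an extra state without dispclk ramping, matching the "Extra state" entry in the hardcoded table.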
/* Temporary Place holder until we can get them from fuse */
@@ -1474,6 +1566,7 @@ static struct dce_hwseq *dcn21_hwseq_create(
hws->shifts = &hwseq_shift;
hws->masks = &hwseq_mask;
hws->wa.DEGVIDCN21 = true;
+ hws->wa.disallow_self_refresh_during_multi_plane_transition = true;
}
return hws;
}
@@ -1497,6 +1590,7 @@ static const struct encoder_feature_support link_enc_feature = {
.max_hdmi_pixel_clock = 600000,
.hdmi_ycbcr420_supported = true,
.dp_ycbcr420_supported = true,
+ .fec_supported = true,
.flags.bits.IS_HBR2_CAPABLE = true,
.flags.bits.IS_HBR3_CAPABLE = true,
.flags.bits.IS_TPS3_CAPABLE = true,
@@ -1637,6 +1731,19 @@ static int dcn21_populate_dml_pipes_from_context(
return pipe_cnt;
}
+enum dc_status dcn21_patch_unknown_plane_state(struct dc_plane_state *plane_state)
+{
+ enum dc_status result = DC_OK;
+
+ if (plane_state->ctx->dc->debug.disable_dcc == DCC_ENABLE) {
+ plane_state->dcc.enable = 1;
+ /* align to our worst case block width */
+ plane_state->dcc.meta_pitch = ((plane_state->src_rect.width + 1023) / 1024) * 1024;
+ }
+ result = dcn20_patch_unknown_plane_state(plane_state);
+ return result;
+}
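The meta_pitch rounding above is the standard integer align-up, ((x + a - 1) / a) * a with a = 1024; for example a 1920-wide surface gets a 2048 meta pitch. Generalized:

/* The meta_pitch rounding above is the usual integer align-up.
 * With a = 1024: 1920 -> 2048, 1024 -> 1024. */
static unsigned int align_up(unsigned int x, unsigned int a)
{
	return ((x + a - 1) / a) * a;
}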
+
static struct resource_funcs dcn21_res_pool_funcs = {
.destroy = dcn21_destroy_resource_pool,
.link_enc_create = dcn21_link_encoder_create,
@@ -1646,7 +1753,7 @@ static struct resource_funcs dcn21_res_pool_funcs = {
.remove_stream_from_ctx = dcn20_remove_stream_from_ctx,
.acquire_idle_pipe_for_layer = dcn20_acquire_idle_pipe_for_layer,
.populate_dml_writeback_from_context = dcn20_populate_dml_writeback_from_context,
- .get_default_swizzle_mode = dcn20_get_default_swizzle_mode,
+ .patch_unknown_plane_state = dcn21_patch_unknown_plane_state,
.set_mcif_arb_params = dcn20_set_mcif_arb_params,
.find_first_free_match_stream_enc_for_link = dcn10_find_first_free_match_stream_enc_for_link,
.update_bw_bounding_box = update_bw_bounding_box
@@ -1693,6 +1800,7 @@ static bool dcn21_resource_construct(
dc->caps.force_dp_tps4_for_cp2520 = true;
dc->caps.extended_aux_timeout_support = true;
dc->caps.dmcub_support = true;
+ dc->caps.is_apu = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1718,6 +1826,10 @@ static bool dcn21_resource_construct(
dcn21_clock_source_create(ctx, ctx->dc_bios,
CLOCK_SOURCE_COMBO_PHY_PLL1,
&clk_src_regs[1], false);
+ pool->base.clock_sources[DCN20_CLK_SRC_PLL2] =
+ dcn21_clock_source_create(ctx, ctx->dc_bios,
+ CLOCK_SOURCE_COMBO_PHY_PLL2,
+ &clk_src_regs[2], false);
pool->base.clk_src_count = DCN20_CLK_SRC_TOTAL_DCN21;
@@ -1752,9 +1864,15 @@ static bool dcn21_resource_construct(
goto create_fail;
}
- // Leave as NULL to not affect current dmcu psr programming sequence
- // Will be uncommented when functionality is confirmed to be working
- pool->base.psr = NULL;
+ if (dc->debug.disable_dmcu) {
+ pool->base.psr = dmub_psr_create(ctx);
+
+ if (pool->base.psr == NULL) {
+ dm_error("DC: failed to create psr obj!\n");
+ BREAK_TO_DEBUGGER();
+ goto create_fail;
+ }
+ }
pool->base.abm = dce_abm_create(ctx,
&abm_regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
index 626d22d437f4..968c46dfb506 100644
--- a/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
+++ b/drivers/gpu/drm/amd/display/dc/dm_cp_psp.h
@@ -32,6 +32,7 @@ struct cp_psp_stream_config {
uint8_t otg_inst;
uint8_t link_enc_inst;
uint8_t stream_enc_inst;
+ uint8_t mst_supported;
void *dm_stream_ctx;
bool dpms_off;
};
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
index 485a9c62ec58..5bbbafacc720 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_mode_vba_20v2.c
@@ -2614,6 +2614,14 @@ static void dml20v2_DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndP
if (mode_lib->vba.DRAMClockChangeSupportsVActive &&
mode_lib->vba.MinActiveDRAMClockChangeMargin > 60) {
+
+ for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; ++k) {
+ if (mode_lib->vba.PrefetchMode[mode_lib->vba.VoltageLevel][mode_lib->vba.maxMpcComb] == 0) {
+ if (mode_lib->vba.DRAMClockChangeWatermark >
+ dml_max(mode_lib->vba.StutterEnterPlusExitWatermark, mode_lib->vba.UrgentWatermark))
+ mode_lib->vba.MinTTUVBlank[k] += 25;
+ }
+ }
mode_lib->vba.DRAMClockChangeWatermark += 25;
mode_lib->vba.DRAMClockChangeSupport[0][0] = dm_dram_clock_change_vactive;
} else if (mode_lib->vba.DummyPStateCheck &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
index 658f81e757e9..dfd3be452766 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h
@@ -25,7 +25,7 @@
#ifndef __DISPLAY_MODE_STRUCTS_H__
#define __DISPLAY_MODE_STRUCTS_H__
-#define MAX_CLOCK_LIMIT_STATES 8
+#define MAX_CLOCK_LIMIT_STATES 9
typedef struct _vcs_dpi_voltage_scaling_st voltage_scaling_st;
typedef struct _vcs_dpi_soc_bounding_box_st soc_bounding_box_st;
@@ -61,12 +61,15 @@ struct _vcs_dpi_voltage_scaling_st {
double dram_speed_mts;
double fabricclk_mhz;
double dispclk_mhz;
+ double dram_bw_per_chan_gbps;
double phyclk_mhz;
double dppclk_mhz;
double dtbclk_mhz;
};
struct _vcs_dpi_soc_bounding_box_st {
+ struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ unsigned int num_states;
double sr_exit_time_us;
double sr_enter_plus_exit_time_us;
double urgent_latency_us;
@@ -109,8 +112,7 @@ struct _vcs_dpi_soc_bounding_box_st {
double xfc_bus_transport_time_us;
double xfc_xbuf_latency_tolerance_us;
int use_urgent_burst_bw;
- unsigned int num_states;
- struct _vcs_dpi_voltage_scaling_st clock_limits[MAX_CLOCK_LIMIT_STATES];
+ double min_dcfclk;
bool do_urgent_latency_adjustment;
double urgent_latency_adjustment_fabric_clock_component_us;
double urgent_latency_adjustment_fabric_clock_reference_mhz;
@@ -189,7 +191,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int min_vblank_lines;
unsigned int dppclk_delay_subtotal;
unsigned int dispclk_delay_subtotal;
- unsigned int dcfclk_cstate_latency;
+ double dcfclk_cstate_latency;
unsigned int dppclk_delay_scl;
unsigned int dppclk_delay_scl_lb_only;
unsigned int dppclk_delay_cnvc_formatter;
@@ -202,6 +204,7 @@ struct _vcs_dpi_ip_params_st {
unsigned int LineBufferFixedBpp;
unsigned int can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one;
unsigned int bug_forcing_LC_req_same_size_fixed;
+ unsigned int number_of_cursors;
};
struct _vcs_dpi_display_xfc_params_st {
@@ -325,7 +328,6 @@ struct _vcs_dpi_display_pipe_dest_params_st {
unsigned int vupdate_width;
unsigned int vready_offset;
unsigned char interlaced;
- unsigned char embedded;
double pixel_rate_mhz;
unsigned char synchronized_vblank_all_planes;
unsigned char otg_inst;
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
index b3c96d9b472f..6b525c52124c 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c
@@ -266,8 +266,6 @@ static void fetch_socbb_params(struct display_mode_lib *mode_lib)
mode_lib->vba.MaxDispclk[i] = soc->clock_limits[i].dispclk_mhz;
mode_lib->vba.DTBCLKPerState[i] = soc->clock_limits[i].dtbclk_mhz;
}
- mode_lib->vba.MinVoltageLevel = 0;
- mode_lib->vba.MaxVoltageLevel = mode_lib->vba.soc.num_states;
mode_lib->vba.DoUrgentLatencyAdjustment =
soc->do_urgent_latency_adjustment;
@@ -379,7 +377,6 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.pipe_plane[j] = mode_lib->vba.NumberOfActivePlanes;
- mode_lib->vba.EmbeddedPanel[mode_lib->vba.NumberOfActivePlanes] = dst->embedded;
mode_lib->vba.DPPPerPlane[mode_lib->vba.NumberOfActivePlanes] = 1;
mode_lib->vba.SourceScan[mode_lib->vba.NumberOfActivePlanes] =
(enum scan_direction_class) (src->source_scan);
@@ -396,11 +393,11 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib)
mode_lib->vba.ViewportYStartC[mode_lib->vba.NumberOfActivePlanes] =
src->viewport_y_c;
mode_lib->vba.PitchY[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch;
- mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height;
- mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width;
+ mode_lib->vba.SurfaceWidthY[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_y;
+ mode_lib->vba.SurfaceHeightY[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_y;
mode_lib->vba.PitchC[mode_lib->vba.NumberOfActivePlanes] = src->data_pitch_c;
- mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_height_c;
- mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->viewport_width_c;
+ mode_lib->vba.SurfaceHeightC[mode_lib->vba.NumberOfActivePlanes] = src->surface_height_c;
+ mode_lib->vba.SurfaceWidthC[mode_lib->vba.NumberOfActivePlanes] = src->surface_width_c;
mode_lib->vba.DCCMetaPitchY[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch;
mode_lib->vba.DCCMetaPitchC[mode_lib->vba.NumberOfActivePlanes] = src->meta_pitch_c;
mode_lib->vba.HRatio[mode_lib->vba.NumberOfActivePlanes] = scl->hscl_ratio;
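The SurfaceWidthY/SurfaceHeightY change above feeds DML the dimensions of the full allocation instead of the visible viewport; the viewport is only a sub-rectangle of the surface, so sizing the surface fields from it can understate the real footprint. A hypothetical illustration of the distinction:

/* Hypothetical illustration of the distinction behind this fix: the
 * viewport is the visible sub-rectangle, the surface is the full
 * allocation; DML's SurfaceWidthY/SurfaceHeightY must describe the latter. */
struct plane_dims {
	unsigned int surface_width, surface_height;	/* full allocation */
	unsigned int viewport_width, viewport_height;	/* visible subset  */
};

static int viewport_fits_surface(const struct plane_dims *d)
{
	return d->viewport_width <= d->surface_width &&
	       d->viewport_height <= d->surface_height;
}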
diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
index 2875efd85467..5d82fc5a7ed7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
+++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h
@@ -389,7 +389,6 @@ struct vba_vars_st {
/* vba mode support */
/*inputs*/
- bool EmbeddedPanel[DC__NUM_DPP__MAX];
bool SupportGFX7CompatibleTilingIn32bppAnd64bpp;
double MaxHSCLRatio;
double MaxVSCLRatio;
@@ -842,8 +841,6 @@ struct vba_vars_st {
double DCCRateChroma[DC__NUM_DPP__MAX];
double PHYCLKD18PerState[DC__VOLTAGE_STATES + 1];
- int MinVoltageLevel;
- int MaxVoltageLevel;
bool WritebackSupportInterleaveAndUsingWholeBufferForASingleStream;
bool NumberOfHDMIFRLSupport;
@@ -880,7 +877,6 @@ struct vba_vars_st {
double TotalMetaRowBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActiveCursorBandwidth[DC__VOLTAGE_STATES + 1][2];
double TotalVActivePixelBandwidth[DC__VOLTAGE_STATES + 1][2];
- bool UseMinimumRequiredDCFCLK;
double WritebackDelayTime[DC__NUM_DPP__MAX];
unsigned int DCCYIndependentBlock[DC__NUM_DPP__MAX];
unsigned int DCCCIndependentBlock[DC__NUM_DPP__MAX];
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index d2d36d48caaa..f252af1947c3 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -47,9 +47,9 @@
#include "dce120/hw_factory_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_factory_dcn10.h"
-#endif
#include "dcn20/hw_factory_dcn20.h"
#include "dcn21/hw_factory_dcn21.h"
+#endif
#include "diagnostics/hw_factory_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index 5d396657a1ee..04e2c0f74cb0 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -45,9 +45,9 @@
#include "dce120/hw_translate_dce120.h"
#if defined(CONFIG_DRM_AMD_DC_DCN)
#include "dcn10/hw_translate_dcn10.h"
-#endif
#include "dcn20/hw_translate_dcn20.h"
#include "dcn21/hw_translate_dcn21.h"
+#endif
#include "diagnostics/hw_translate_diag.h"
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index f285b76888fb..d523fc9547e7 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -124,7 +124,7 @@ struct resource_funcs {
struct dc *dc,
struct dc_state *new_ctx,
struct dc_stream_state *stream);
- enum dc_status (*get_default_swizzle_mode)(
+ enum dc_status (*patch_unknown_plane_state)(
struct dc_plane_state *plane_state);
struct stream_encoder *(*find_first_free_match_stream_enc_for_link)(
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 8b1f0ce6c2a7..e94e5fbf2aa2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -78,6 +78,8 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode);
bool dp_overwrite_extended_receiver_cap(struct dc_link *link);
+void dpcd_set_source_specific_data(struct dc_link *link);
+
void dp_set_fec_ready(struct dc_link *link, bool ready);
void dp_set_fec_enable(struct dc_link *link, bool enable);
bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
index ac530c057ddd..ce65678c03b2 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr.h
@@ -27,6 +27,7 @@
#define __DAL_CLK_MGR_H__
#include "dc.h"
+#include "dm_pp_smu.h"
#define DCN_MINIMUM_DISPCLK_Khz 100000
#define DCN_MINIMUM_DPPCLK_Khz 100000
@@ -193,6 +194,7 @@ struct clk_mgr {
int dentist_vco_freq_khz;
struct clk_state_registers_and_bypass boot_snapshot;
struct clk_bw_params *bw_params;
+ struct pp_smu_wm_range_sets ranges;
};
/* forward declarations */
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
index 862952c0286a..9311d0de377f 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/clk_mgr_internal.h
@@ -296,6 +296,10 @@ int clk_mgr_helper_get_active_display_cnt(
struct dc *dc,
struct dc_state *context);
+int clk_mgr_helper_get_active_plane_cnt(
+ struct dc *dc,
+ struct dc_state *context);
+
#endif //__DAL_CLK_MGR_INTERNAL_H__
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
index 05ee5295d2c1..336c80a18175 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h
@@ -27,11 +27,12 @@
#define __DAL_DCCG_H__
#include "dc_types.h"
+#include "hw_shared.h"
struct dccg {
struct dc_context *ctx;
const struct dccg_funcs *funcs;
-
+ int pipe_dppclk_khz[MAX_PIPES];
int ref_dppclk;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
index c0dc1d0f5cae..f5dd0cc73c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dchubbub.h
@@ -134,7 +134,7 @@ struct hubbub_funcs {
unsigned int dccg_ref_freq_inKhz,
unsigned int *dchub_ref_freq_inKhz);
- void (*program_watermarks)(
+ bool (*program_watermarks)(
struct hubbub *hubbub,
struct dcn_watermark_set *watermarks,
unsigned int refclk_mhz,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
index c59740084ebc..7c2a3328b208 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dsc.h
@@ -39,6 +39,7 @@ struct dsc_config {
uint32_t pic_height;
enum dc_pixel_encoding pixel_encoding;
enum dc_color_depth color_depth; /* Bits per component */
+ bool is_odm;
struct dc_dsc_config dc_dsc_cfg;
};
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
index 459f95f52486..f30ab4916242 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dwb.h
@@ -25,16 +25,15 @@
#ifndef __DC_DWBC_H__
#define __DC_DWBC_H__
+#include "dal_types.h"
#include "dc_hw_types.h"
-
#define DWB_SW_V2 1
#define DWB_MCIF_BUF_COUNT 4
/* forward declaration of mcif_wb struct */
struct mcif_wb;
-enum dce_version;
enum dwb_sw_version {
dwb_ver_1_0 = 1,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
index fb748f082c56..c2b392a533b1 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h
@@ -68,6 +68,7 @@ struct encoder_feature_support {
unsigned int max_hdmi_pixel_clock;
bool hdmi_ycbcr420_supported;
bool dp_ycbcr420_supported;
+ bool fec_supported;
};
union dpcd_psr_configuration {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index 7575564b2265..2717352eb697 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -310,7 +310,8 @@ struct opp_funcs {
enum dc_color_depth color_depth,
const struct tg_color *solid_color,
int width,
- int height);
+ int height,
+ int offset);
bool (*dpg_is_blanked)(
struct output_pixel_processor *opp);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
index 351b387ad606..ac6523c0828e 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h
@@ -103,6 +103,7 @@ struct stream_encoder {
struct dc_context *ctx;
struct dc_bios *bp;
enum engine_id id;
+ uint32_t stream_enc_inst;
};
struct enc_state {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
index 209118f9f193..d4c1fb242c63 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer.h
@@ -66,6 +66,8 @@ struct hw_sequencer_funcs {
int num_planes, struct dc_state *context);
void (*program_front_end_for_ctx)(struct dc *dc,
struct dc_state *context);
+ void (*post_unlock_program_front_end)(struct dc *dc,
+ struct dc_state *context);
void (*update_plane_addr)(const struct dc *dc,
struct pipe_ctx *pipe_ctx);
void (*update_dchub)(struct dce_hwseq *hws,
@@ -78,10 +80,10 @@ struct hw_sequencer_funcs {
void (*update_pending_status)(struct pipe_ctx *pipe_ctx);
/* Pipe Lock Related */
- void (*pipe_control_lock_global)(struct dc *dc,
- struct pipe_ctx *pipe, bool lock);
void (*pipe_control_lock)(struct dc *dc,
struct pipe_ctx *pipe, bool lock);
+ void (*interdependent_update_lock)(struct dc *dc,
+ struct dc_state *context, bool lock);
void (*set_flip_control_gsl)(struct pipe_ctx *pipe_ctx,
bool flip_immediate);
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
index ecf566378ccd..52a26e6be066 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h
@@ -40,10 +40,13 @@ struct dce_hwseq_wa {
bool false_optc_underflow;
bool DEGVIDCN10_254;
bool DEGVIDCN21;
+ bool disallow_self_refresh_during_multi_plane_transition;
};
struct hwseq_wa_state {
bool DEGVIDCN10_253_applied;
+ bool disallow_self_refresh_during_multi_plane_transition_applied;
+ unsigned int disallow_self_refresh_during_multi_plane_transition_applied_on_frame;
};
struct pipe_ctx;
@@ -97,6 +100,8 @@ struct hwseq_private_funcs {
struct dc *dc);
void (*edp_backlight_control)(struct dc_link *link,
bool enable);
+ bool (*is_panel_backlight_on)(struct dc_link *link);
+ bool (*is_panel_powered_on)(struct dc_link *link);
void (*setup_vupdate_interrupt)(struct dc *dc,
struct pipe_ctx *pipe_ctx);
bool (*did_underflow_occur)(struct dc *dc, struct pipe_ctx *pipe_ctx);
@@ -140,6 +145,8 @@ struct hwseq_private_funcs {
const struct dc_plane_state *plane_state);
bool (*set_shaper_3dlut)(struct pipe_ctx *pipe_ctx,
const struct dc_plane_state *plane_state);
+ void (*PLAT_58856_wa)(struct dc_state *context,
+ struct pipe_ctx *pipe_ctx);
};
struct dce_hwseq {
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index 5ae8ada154ef..ca4c36c0c9bc 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -179,4 +179,7 @@ unsigned int resource_pixel_format_to_bpp(enum surface_pixel_format format);
void get_audio_check(struct audio_info *aud_modes,
struct audio_check *aud_chk);
+
+int get_num_odm_splits(struct pipe_ctx *pipe);
+
#endif /* DRIVERS_GPU_DRM_AMD_DC_DEV_DC_INC_RESOURCE_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
index cd9532b4f14d..10b5fa9d2588 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h
@@ -50,6 +50,7 @@ enum dmub_cmd_type {
DMUB_CMD__REG_REG_WAIT = 4,
DMUB_CMD__PLAT_54186_WA = 5,
DMUB_CMD__PSR = 64,
+ DMUB_CMD__ABM = 66,
DMUB_CMD__VBIOS = 128,
};
@@ -216,7 +217,6 @@ struct dmub_rb_cmd_dpphy_init {
struct dmub_cmd_psr_copy_settings_data {
uint16_t psr_level;
- uint8_t hubp_inst;
uint8_t dpp_inst;
uint8_t mpcc_inst;
uint8_t opp_inst;
@@ -225,17 +225,8 @@ struct dmub_cmd_psr_copy_settings_data {
uint8_t digbe_inst;
uint8_t dpphy_inst;
uint8_t aux_inst;
- uint8_t hyst_frames;
- uint8_t hyst_lines;
- uint8_t phy_num;
- uint8_t phy_type;
- uint8_t aux_repeat;
uint8_t smu_optimizations_en;
- uint8_t skip_wait_for_pll_lock;
uint8_t frame_delay;
- uint8_t smu_phy_id;
- uint8_t num_of_controllers;
- uint8_t link_rate;
uint8_t frame_cap_ind;
};
@@ -257,13 +248,59 @@ struct dmub_rb_cmd_psr_enable {
struct dmub_cmd_header header;
};
-struct dmub_cmd_psr_setup_data {
+struct dmub_cmd_psr_set_version_data {
enum psr_version version; // PSR version 1 or 2
};
-struct dmub_rb_cmd_psr_setup {
+struct dmub_rb_cmd_psr_set_version {
struct dmub_cmd_header header;
- struct dmub_cmd_psr_setup_data psr_setup_data;
+ struct dmub_cmd_psr_set_version_data psr_set_version_data;
+};
+
+struct dmub_cmd_abm_set_pipe_data {
+ uint32_t ramping_boundary;
+ uint32_t otg_inst;
+};
+
+struct dmub_rb_cmd_abm_set_pipe {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pipe_data abm_set_pipe_data;
+};
+
+struct dmub_cmd_abm_set_backlight_data {
+ uint32_t frame_ramp;
+};
+
+struct dmub_rb_cmd_abm_set_backlight {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_backlight_data abm_set_backlight_data;
+};
+
+struct dmub_cmd_abm_set_level_data {
+ uint32_t level;
+};
+
+struct dmub_rb_cmd_abm_set_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_level_data abm_set_level_data;
+};
+
+struct dmub_cmd_abm_set_ambient_level_data {
+ uint32_t ambient_lux;
+};
+
+struct dmub_rb_cmd_abm_set_ambient_level {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_ambient_level_data abm_set_ambient_level_data;
+};
+
+struct dmub_cmd_abm_set_pwm_frac_data {
+ uint32_t fractional_pwm;
+};
+
+struct dmub_rb_cmd_abm_set_pwm_frac {
+ struct dmub_cmd_header header;
+ struct dmub_cmd_abm_set_pwm_frac_data abm_set_pwm_frac_data;
};
union dmub_rb_cmd {
@@ -277,11 +314,16 @@ union dmub_rb_cmd {
struct dmub_rb_cmd_enable_disp_power_gating enable_disp_power_gating;
struct dmub_rb_cmd_dpphy_init dpphy_init;
struct dmub_rb_cmd_dig1_transmitter_control dig1_transmitter_control;
- struct dmub_rb_cmd_psr_enable psr_enable;
+ struct dmub_rb_cmd_psr_set_version psr_set_version;
struct dmub_rb_cmd_psr_copy_settings psr_copy_settings;
+ struct dmub_rb_cmd_psr_enable psr_enable;
struct dmub_rb_cmd_psr_set_level psr_set_level;
struct dmub_rb_cmd_PLAT_54186_wa PLAT_54186_wa;
- struct dmub_rb_cmd_psr_setup psr_setup;
+ struct dmub_rb_cmd_abm_set_pipe abm_set_pipe;
+ struct dmub_rb_cmd_abm_set_backlight abm_set_backlight;
+ struct dmub_rb_cmd_abm_set_level abm_set_level;
+ struct dmub_rb_cmd_abm_set_ambient_level abm_set_ambient_level;
+ struct dmub_rb_cmd_abm_set_pwm_frac abm_set_pwm_frac;
};
#pragma pack(pop)
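
For reference, a minimal sketch (not part of this patch) of how one of the new ABM commands could be populated before submission. Only union dmub_rb_cmd and the ABM payload structs come from this change; the dmub_cmd_header field names used here (type, sub_type) and the queueing path are assumptions.

/* Hedged sketch: build a DMUB_CMD__ABM / DMUB_CMD__ABM_SET_LEVEL command. */
static void build_abm_set_level(union dmub_rb_cmd *cmd, uint32_t level)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->abm_set_level.header.type = DMUB_CMD__ABM;               /* assumed header field */
	cmd->abm_set_level.header.sub_type = DMUB_CMD__ABM_SET_LEVEL; /* assumed header field */
	cmd->abm_set_level.abm_set_level_data.level = level;
	/* Queueing onto the DMUB inbox ring buffer is handled elsewhere. */
}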
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
index 7b69eb37f762..d37535d21928 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd_dal.h
@@ -32,7 +32,7 @@
*/
enum dmub_cmd_psr_type {
- DMUB_CMD__PSR_SETUP = 0,
+ DMUB_CMD__PSR_SET_VERSION = 0,
DMUB_CMD__PSR_COPY_SETTINGS = 1,
DMUB_CMD__PSR_ENABLE = 2,
DMUB_CMD__PSR_DISABLE = 3,
@@ -42,7 +42,16 @@ enum dmub_cmd_psr_type {
enum psr_version {
PSR_VERSION_1 = 0x10, // PSR Version 1
PSR_VERSION_2 = 0x20, // PSR Version 2, includes selective update
- PSR_VERSION_2_Y_COORD = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+ PSR_VERSION_2_1 = 0x21, // PSR Version 2, includes Y-coordinate support for SU
+};
+
+enum dmub_cmd_abm_type {
+ DMUB_CMD__ABM_INIT_CONFIG = 0,
+ DMUB_CMD__ABM_SET_PIPE = 1,
+ DMUB_CMD__ABM_SET_BACKLIGHT = 2,
+ DMUB_CMD__ABM_SET_LEVEL = 3,
+ DMUB_CMD__ABM_SET_AMBIENT_LEVEL = 4,
+ DMUB_CMD__ABM_SET_PWM_FRAC = 5,
};
#endif /* _DMUB_CMD_DAL_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
new file mode 100644
index 000000000000..652d6fc061b6
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_gpint_cmd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef _DMUB_GPINT_CMD_H_
+#define _DMUB_GPINT_CMD_H_
+
+#include "dmub_types.h"
+
+/**
+ * The register format for sending a command via the GPINT.
+ */
+union dmub_gpint_data_register {
+ struct {
+ uint32_t param : 16;
+ uint32_t command_code : 12;
+ uint32_t status : 4;
+ } bits;
+ uint32_t all;
+};
+
+/**
+ * The shifts and masks below may alternatively be used to format and read
+ * the command register bits.
+ */
+
+#define DMUB_GPINT_DATA_PARAM_MASK 0xFFFF
+#define DMUB_GPINT_DATA_PARAM_SHIFT 0
+
+#define DMUB_GPINT_DATA_COMMAND_CODE_MASK 0xFFF
+#define DMUB_GPINT_DATA_COMMAND_CODE_SHIFT 16
+
+#define DMUB_GPINT_DATA_STATUS_MASK 0xF
+#define DMUB_GPINT_DATA_STATUS_SHIFT 28
+
+/*
+ * Command IDs should be treated as stable ABI.
+ * Do not reuse or modify IDs.
+ */
+
+enum dmub_gpint_command {
+ DMUB_GPINT__INVALID_COMMAND = 0,
+ DMUB_GPINT__GET_FW_VERSION = 1,
+ DMUB_GPINT__STOP_FW = 2,
+ DMUB_GPINT__GET_PSR_STATE = 7,
+};
+
+/**
+ * Command responses.
+ */
+
+#define DMUB_GPINT__STOP_FW_RESPONSE 0xDEADDEAD
+
+#endif /* _DMUB_GPINT_CMD_H_ */
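
A self-contained, host-side sketch of the two equivalent ways the new header offers to format the GPINT register: the bitfield view and the shift/mask constants. The bitfield layout assumed here (param in bits 0-15, command_code in 16-27, status in 28-31) matches the shift values above but is strictly implementation-defined in C, which is presumably why the masks are provided as the portable alternative.

#include <stdint.h>
#include <stdio.h>

/* Local mirror of union dmub_gpint_data_register for a user-space demo. */
union gpint_reg {
	struct {
		uint32_t param : 16;
		uint32_t command_code : 12;
		uint32_t status : 4;
	} bits;
	uint32_t all;
};

int main(void)
{
	union gpint_reg reg = { .all = 0 };
	uint32_t packed;

	/* Format GET_FW_VERSION (code 1) with the status nibble set. */
	reg.bits.param = 0;
	reg.bits.command_code = 1;
	reg.bits.status = 1;

	/* The same value built with the shift/mask constants. */
	packed = ((0u & 0xFFFF) << 0) |
		 ((1u & 0xFFF) << 16) |
		 ((1u & 0xF) << 28);

	printf("bitfield=0x%08x shifted=0x%08x\n",
	       (unsigned int)reg.all, (unsigned int)packed);
	return 0;
}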
diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
index f8917594036a..c2671f2616c8 100644
--- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
+++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_srv.h
@@ -66,6 +66,7 @@
#include "dmub_types.h"
#include "dmub_cmd.h"
+#include "dmub_gpint_cmd.h"
#include "dmub_rb.h"
#if defined(__cplusplus)
@@ -103,7 +104,7 @@ enum dmub_window_id {
DMUB_WINDOW_4_MAILBOX,
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
- DMUB_WINDOW_7_RESERVED,
+ DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_TOTAL,
};
@@ -262,6 +263,14 @@ struct dmub_srv_hw_funcs {
bool (*is_phy_init)(struct dmub_srv *dmub);
bool (*is_auto_load_done)(struct dmub_srv *dmub);
+
+ void (*set_gpint)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ bool (*is_gpint_acked)(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+ uint32_t (*get_gpint_response)(struct dmub_srv *dmub);
};
/**
@@ -307,6 +316,7 @@ struct dmub_srv {
enum dmub_asic asic;
void *user_ctx;
bool is_virtual;
+ struct dmub_fb scratch_mem_fb;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
@@ -516,6 +526,45 @@ enum dmub_status dmub_srv_wait_for_phy_init(struct dmub_srv *dmub,
enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
uint32_t timeout_us);
+/**
+ * dmub_srv_send_gpint_command() - Sends a GPINT based command.
+ * @dmub: the dmub service
+ * @command_code: the command code to send
+ * @param: the command parameter to send
+ * @timeout_us: the maximum number of microseconds to wait
+ *
+ * Sends a command via the general purpose interrupt (GPINT).
+ * Waits for the number of microseconds specified by timeout_us
+ * for the command ACK before returning.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_TIMEOUT - wait for ACK timed out
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us);
+
+/**
+ * dmub_srv_get_gpint_response() - Queries the GPINT response.
+ * @dmub: the dmub service
+ * @response: the response for the last GPINT
+ *
+ * Returns the response code for the last GPINT interrupt.
+ *
+ * Can be called after software initialization.
+ *
+ * Return:
+ * DMUB_STATUS_OK - success
+ * DMUB_STATUS_INVALID - unspecified error
+ */
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response);
+
#if defined(__cplusplus)
}
#endif
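
A hedged driver-side sketch of the intended call sequence for the two new service entry points: send a GPINT command, then read back the response once it has been acknowledged. Both declarations come from this patch; the 30 microsecond timeout is an arbitrary example value.

/* Sketch: query the DMUB firmware version over GPINT. */
static enum dmub_status query_dmub_fw_version(struct dmub_srv *dmub,
					      uint32_t *version)
{
	enum dmub_status status;

	status = dmub_srv_send_gpint_command(dmub, DMUB_GPINT__GET_FW_VERSION,
					     0, 30);
	if (status != DMUB_STATUS_OK)
		return status;

	/* The firmware leaves the result in a scratch register. */
	return dmub_srv_get_gpint_response(dmub, version);
}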
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
index b2ca8e0dbac9..63bb9e2c81de 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.c
@@ -60,6 +60,12 @@ static void dmub_dcn20_get_fb_base_offset(struct dmub_srv *dmub,
{
uint32_t tmp;
+ if (dmub->fb_base || dmub->fb_offset) {
+ *fb_base = dmub->fb_base;
+ *fb_offset = dmub->fb_offset;
+ return;
+ }
+
REG_GET(DCN_VM_FB_LOCATION_BASE, FB_BASE, &tmp);
*fb_base = (uint64_t)tmp << 24;
@@ -77,11 +83,52 @@ static inline void dmub_dcn20_translate_addr(const union dmub_addr *addr_in,
void dmub_dcn20_reset(struct dmub_srv *dmub)
{
+ union dmub_gpint_data_register cmd;
+ const uint32_t timeout = 30;
+ uint32_t in_reset, scratch, i;
+
+ REG_GET(DMCUB_CNTL, DMCUB_SOFT_RESET, &in_reset);
+
+ if (in_reset == 0) {
+ cmd.bits.status = 1;
+ cmd.bits.command_code = DMUB_GPINT__STOP_FW;
+ cmd.bits.param = 0;
+
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /**
+ * Timeout covers both the ACK and the wait
+ * for remaining work to finish.
+ *
+ * This is mostly bound by the PHY disable sequence.
+ * Each register check will be greater than 1us, so
+ * don't bother using udelay.
+ */
+
+ for (i = 0; i < timeout; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, cmd))
+ break;
+ }
+
+ for (i = 0; i < timeout; ++i) {
+ scratch = dmub->hw_funcs.get_gpint_response(dmub);
+ if (scratch == DMUB_GPINT__STOP_FW_RESPONSE)
+ break;
+ }
+
+ /* Clear the GPINT command manually so we don't reset again. */
+ cmd.all = 0;
+ dmub->hw_funcs.set_gpint(dmub, cmd);
+
+ /* Force reset in case we timed out; DMCUB is likely hung. */
+ }
+
REG_UPDATE(DMCUB_CNTL, DMCUB_SOFT_RESET, 1);
REG_UPDATE(DMCUB_CNTL, DMCUB_ENABLE, 0);
REG_UPDATE(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET, 1);
REG_WRITE(DMCUB_INBOX1_RPTR, 0);
REG_WRITE(DMCUB_INBOX1_WPTR, 0);
+ REG_WRITE(DMCUB_SCRATCH0, 0);
}
void dmub_dcn20_reset_release(struct dmub_srv *dmub)
@@ -217,3 +264,25 @@ bool dmub_dcn20_is_supported(struct dmub_srv *dmub)
return supported;
}
+
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ REG_WRITE(DMCUB_GPINT_DATAIN1, reg.all);
+}
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg)
+{
+ union dmub_gpint_data_register test;
+
+ reg.bits.status = 0;
+ test.all = REG_READ(DMCUB_GPINT_DATAIN1);
+
+ return test.all == reg.all;
+}
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub)
+{
+ return REG_READ(DMCUB_SCRATCH7);
+}
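
The acknowledgement convention implied by dmub_dcn20_is_gpint_acked() above: the driver submits the register with the status nibble set, and the firmware acknowledges by clearing that nibble while leaving the command code and parameter intact. A small host-side sketch of the comparison:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* STOP_FW (code 2), param 0, status nibble set by the driver. */
	uint32_t submitted = (1u << 28) | (2u << 16);
	/* Firmware ack: the same word with the status nibble cleared. */
	uint32_t readback = submitted & ~(0xFu << 28);

	/* is_gpint_acked() zeroes status on its copy before comparing. */
	uint32_t expected = submitted & ~(0xFu << 28);

	printf("acked=%d\n", readback == expected);
	return 0;
}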
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
index 04b0fa13153d..7f046c73927e 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn20.h
@@ -91,6 +91,7 @@ struct dmub_srv;
DMUB_SR(DMCUB_SCRATCH13) \
DMUB_SR(DMCUB_SCRATCH14) \
DMUB_SR(DMCUB_SCRATCH15) \
+ DMUB_SR(DMCUB_GPINT_DATAIN1) \
DMUB_SR(CC_DC_PIPE_DIS) \
DMUB_SR(MMHUBBUB_SOFT_RESET) \
DMUB_SR(DCN_VM_FB_LOCATION_BASE) \
@@ -183,4 +184,12 @@ bool dmub_dcn20_is_hw_init(struct dmub_srv *dmub);
bool dmub_dcn20_is_supported(struct dmub_srv *dmub);
+void dmub_dcn20_set_gpint(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+bool dmub_dcn20_is_gpint_acked(struct dmub_srv *dmub,
+ union dmub_gpint_data_register reg);
+
+uint32_t dmub_dcn20_get_gpint_response(struct dmub_srv *dmub);
+
#endif /* _DMUB_DCN20_H_ */
diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
index 85a518bf8a76..ce32cc7933c4 100644
--- a/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
+++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_srv.c
@@ -52,8 +52,11 @@
/* Default tracebuffer size if meta is absent. */
#define DMUB_TRACE_BUFFER_SIZE (1024)
+/* Default scratch mem size. */
+#define DMUB_SCRATCH_MEM_SIZE (256)
+
/* Number of windows in use. */
-#define DMUB_NUM_WINDOWS (DMUB_WINDOW_6_FW_STATE + 1)
+#define DMUB_NUM_WINDOWS (DMUB_WINDOW_TOTAL)
/* Base addresses. */
#define DMUB_CW0_BASE (0x60000000)
@@ -126,6 +129,9 @@ static bool dmub_srv_hw_setup(struct dmub_srv *dmub, enum dmub_asic asic)
funcs->set_inbox1_wptr = dmub_dcn20_set_inbox1_wptr;
funcs->is_supported = dmub_dcn20_is_supported;
funcs->is_hw_init = dmub_dcn20_is_hw_init;
+ funcs->set_gpint = dmub_dcn20_set_gpint;
+ funcs->is_gpint_acked = dmub_dcn20_is_gpint_acked;
+ funcs->get_gpint_response = dmub_dcn20_get_gpint_response;
if (asic == DMUB_ASIC_DCN21) {
dmub->regs = &dmub_srv_dcn21_regs;
@@ -208,9 +214,11 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
struct dmub_region *mail = &out->regions[DMUB_WINDOW_4_MAILBOX];
struct dmub_region *trace_buff = &out->regions[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_region *fw_state = &out->regions[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_region *scratch_mem = &out->regions[DMUB_WINDOW_7_SCRATCH_MEM];
const struct dmub_fw_meta_info *fw_info;
uint32_t fw_state_size = DMUB_FW_STATE_SIZE;
uint32_t trace_buffer_size = DMUB_TRACE_BUFFER_SIZE;
+ uint32_t scratch_mem_size = DMUB_SCRATCH_MEM_SIZE;
if (!dmub->sw_init)
return DMUB_STATUS_INVALID;
@@ -253,7 +261,10 @@ dmub_srv_calc_region_info(struct dmub_srv *dmub,
fw_state->base = dmub_align(trace_buff->top, 256);
fw_state->top = fw_state->base + dmub_align(fw_state_size, 64);
- out->fb_size = dmub_align(fw_state->top, 4096);
+ scratch_mem->base = dmub_align(fw_state->top, 256);
+ scratch_mem->top = scratch_mem->base + dmub_align(scratch_mem_size, 64);
+
+ out->fb_size = dmub_align(scratch_mem->top, 4096);
return DMUB_STATUS_OK;
}
@@ -331,6 +342,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *mail_fb = params->fb[DMUB_WINDOW_4_MAILBOX];
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
+ struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
struct dmub_rb_init_params rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
@@ -367,7 +379,7 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->hw_funcs.reset(dmub);
if (inst_fb && data_fb && bios_fb && mail_fb && tracebuff_fb &&
- fw_state_fb) {
+ fw_state_fb && scratch_mem_fb) {
cw2.offset.quad_part = data_fb->gpu_addr;
cw2.region.base = DMUB_CW0_BASE + inst_fb->size;
cw2.region.top = cw2.region.base + data_fb->size;
@@ -393,6 +405,8 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
+ dmub->scratch_mem_fb = *scratch_mem_fb;
+
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4,
&cw5, &cw6);
@@ -522,3 +536,50 @@ enum dmub_status dmub_srv_wait_for_idle(struct dmub_srv *dmub,
return DMUB_STATUS_TIMEOUT;
}
+
+enum dmub_status
+dmub_srv_send_gpint_command(struct dmub_srv *dmub,
+ enum dmub_gpint_command command_code,
+ uint16_t param, uint32_t timeout_us)
+{
+ union dmub_gpint_data_register reg;
+ uint32_t i;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.set_gpint)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.is_gpint_acked)
+ return DMUB_STATUS_INVALID;
+
+ reg.bits.status = 1;
+ reg.bits.command_code = command_code;
+ reg.bits.param = param;
+
+ dmub->hw_funcs.set_gpint(dmub, reg);
+
+ for (i = 0; i < timeout_us; ++i) {
+ if (dmub->hw_funcs.is_gpint_acked(dmub, reg))
+ return DMUB_STATUS_OK;
+ }
+
+ return DMUB_STATUS_TIMEOUT;
+}
+
+enum dmub_status dmub_srv_get_gpint_response(struct dmub_srv *dmub,
+ uint32_t *response)
+{
+ *response = 0;
+
+ if (!dmub->sw_init)
+ return DMUB_STATUS_INVALID;
+
+ if (!dmub->hw_funcs.get_gpint_response)
+ return DMUB_STATUS_INVALID;
+
+ *response = dmub->hw_funcs.get_gpint_response(dmub);
+
+ return DMUB_STATUS_OK;
+}
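
A self-contained sketch of the region math that places the new scratch window, assuming dmub_align() rounds its first argument up to a multiple of the second (consistent with its use above): the scratch base is the firmware-state top aligned to 256 bytes, the window spans the 256-byte default size aligned to 64, and fb_size grows to cover it.

#include <stdint.h>
#include <stdio.h>

/* Assumed behaviour of the service's dmub_align() helper. */
static uint32_t align_up(uint32_t val, uint32_t factor)
{
	return (val + factor - 1) / factor * factor;
}

int main(void)
{
	uint32_t fw_state_top = 0x12345;  /* example value only */
	uint32_t scratch_base = align_up(fw_state_top, 256);
	uint32_t scratch_top = scratch_base + align_up(256, 64);

	printf("scratch 0x%x..0x%x, fb_size 0x%x\n",
	       (unsigned int)scratch_base, (unsigned int)scratch_top,
	       (unsigned int)align_up(scratch_top, 4096));
	return 0;
}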
diff --git a/drivers/gpu/drm/amd/display/include/dal_asic_id.h b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
index a2903985b9e8..8a87d0ed90ae 100644
--- a/drivers/gpu/drm/amd/display/include/dal_asic_id.h
+++ b/drivers/gpu/drm/amd/display/include/dal_asic_id.h
@@ -134,31 +134,27 @@
#define PICASSO_A0 0x41
/* DCN1_01 */
#define RAVEN2_A0 0x81
-#define RAVEN2_15D8_REV_94 0x94
-#define RAVEN2_15D8_REV_95 0x95
-#define RAVEN2_15D8_REV_E3 0xE3
-#define RAVEN2_15D8_REV_E4 0xE4
-#define RAVEN2_15D8_REV_E9 0xE9
-#define RAVEN2_15D8_REV_EA 0xEA
-#define RAVEN2_15D8_REV_EB 0xEB
#define RAVEN1_F0 0xF0
#define RAVEN_UNKNOWN 0xFF
#ifndef ASICREV_IS_RAVEN
#define ASICREV_IS_RAVEN(eChipRev) ((eChipRev >= RAVEN_A0) && eChipRev < RAVEN_UNKNOWN)
#endif
+#define PRID_DALI_DE 0xDE
+#define PRID_DALI_DF 0xDF
+#define PRID_DALI_E3 0xE3
+#define PRID_DALI_E4 0xE4
+
+#define PRID_POLLOCK_94 0x94
+#define PRID_POLLOCK_95 0x95
+#define PRID_POLLOCK_E9 0xE9
+#define PRID_POLLOCK_EA 0xEA
+#define PRID_POLLOCK_EB 0xEB
#define ASICREV_IS_PICASSO(eChipRev) ((eChipRev >= PICASSO_A0) && (eChipRev < RAVEN2_A0))
#ifndef ASICREV_IS_RAVEN2
-#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RAVEN1_F0))
+#define ASICREV_IS_RAVEN2(eChipRev) ((eChipRev >= RAVEN2_A0) && (eChipRev < RENOIR_A0))
#endif
#define ASICREV_IS_RV1_F0(eChipRev) ((eChipRev >= RAVEN1_F0) && (eChipRev < RAVEN_UNKNOWN))
-#define ASICREV_IS_DALI(eChipRev) ((eChipRev == RAVEN2_15D8_REV_E3) \
- || (eChipRev == RAVEN2_15D8_REV_E4))
-#define ASICREV_IS_POLLOCK(eChipRev) (eChipRev == RAVEN2_15D8_REV_94 \
- || eChipRev == RAVEN2_15D8_REV_95 \
- || eChipRev == RAVEN2_15D8_REV_E9 \
- || eChipRev == RAVEN2_15D8_REV_EA \
- || eChipRev == RAVEN2_15D8_REV_EB)
#define FAMILY_RV 142 /* DCN 1*/
@@ -177,7 +173,7 @@ enum {
#define ASICREV_IS_NAVI14_M(eChipRev) ((eChipRev >= NV_NAVI14_M_A0) && (eChipRev < NV_UNKNOWN))
#define RENOIR_A0 0x91
#define DEVICE_ID_RENOIR_1636 0x1636 // Renoir
-#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < 0xFF))
+#define ASICREV_IS_RENOIR(eChipRev) ((eChipRev >= RENOIR_A0) && (eChipRev < RAVEN1_F0))
/*
* ASIC chip ID
diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
index 2c90d1b46c8b..3d29646c7cb4 100644
--- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h
+++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h
@@ -149,4 +149,12 @@ enum dpcd_psr_sink_states {
PSR_SINK_STATE_SINK_INTERNAL_ERROR = 7,
};
+#define DP_SOURCE_TABLE_REVISION 0x310
+#define DP_SOURCE_PAYLOAD_SIZE 0x311
+#define DP_SOURCE_SINK_CAP 0x317
+#define DP_SOURCE_BACKLIGHT_LEVEL 0x320
+#define DP_SOURCE_BACKLIGHT_CURRENT_PEAK 0x326
+#define DP_SOURCE_BACKLIGHT_CONTROL 0x32E
+#define DP_SOURCE_BACKLIGHT_ENABLE 0x32F
+
#endif /* __DAL_DPCD_DEFS_H__ */
diff --git a/drivers/gpu/drm/amd/display/include/logger_types.h b/drivers/gpu/drm/amd/display/include/logger_types.h
index 89a709267019..d66f9d8eefb4 100644
--- a/drivers/gpu/drm/amd/display/include/logger_types.h
+++ b/drivers/gpu/drm/amd/display/include/logger_types.h
@@ -124,36 +124,37 @@ enum dc_log_type {
#define DC_MIN_LOG_MASK ((1 << LOG_ERROR) | \
(1 << LOG_DETECTION_EDID_PARSER))
-#define DC_DEFAULT_LOG_MASK ((1 << LOG_ERROR) | \
- (1 << LOG_WARNING) | \
- (1 << LOG_EVENT_MODE_SET) | \
- (1 << LOG_EVENT_DETECTION) | \
- (1 << LOG_EVENT_LINK_TRAINING) | \
- (1 << LOG_EVENT_LINK_LOSS) | \
- (1 << LOG_EVENT_UNDERFLOW) | \
- (1 << LOG_RESOURCE) | \
- (1 << LOG_FEATURE_OVERRIDE) | \
- (1 << LOG_DETECTION_EDID_PARSER) | \
- (1 << LOG_DC) | \
- (1 << LOG_HW_HOTPLUG) | \
- (1 << LOG_HW_SET_MODE) | \
- (1 << LOG_HW_RESUME_S3) | \
- (1 << LOG_HW_HPD_IRQ) | \
- (1 << LOG_SYNC) | \
- (1 << LOG_BANDWIDTH_VALIDATION) | \
- (1 << LOG_MST) | \
- (1 << LOG_DETECTION_DP_CAPS) | \
- (1 << LOG_BACKLIGHT)) | \
- (1 << LOG_I2C_AUX) | \
- (1 << LOG_IF_TRACE) | \
- (1 << LOG_DTN) /* | \
- (1 << LOG_DEBUG) | \
- (1 << LOG_BIOS) | \
- (1 << LOG_SURFACE) | \
- (1 << LOG_SCALER) | \
- (1 << LOG_DML) | \
- (1 << LOG_HW_LINK_TRAINING) | \
- (1 << LOG_HW_AUDIO)| \
- (1 << LOG_BANDWIDTH_CALCS)*/
+#define DC_DEFAULT_LOG_MASK ((1ULL << LOG_ERROR) | \
+ (1ULL << LOG_WARNING) | \
+ (1ULL << LOG_EVENT_MODE_SET) | \
+ (1ULL << LOG_EVENT_DETECTION) | \
+ (1ULL << LOG_EVENT_LINK_TRAINING) | \
+ (1ULL << LOG_EVENT_LINK_LOSS) | \
+ (1ULL << LOG_EVENT_UNDERFLOW) | \
+ (1ULL << LOG_RESOURCE) | \
+ (1ULL << LOG_FEATURE_OVERRIDE) | \
+ (1ULL << LOG_DETECTION_EDID_PARSER) | \
+ (1ULL << LOG_DC) | \
+ (1ULL << LOG_HW_HOTPLUG) | \
+ (1ULL << LOG_HW_SET_MODE) | \
+ (1ULL << LOG_HW_RESUME_S3) | \
+ (1ULL << LOG_HW_HPD_IRQ) | \
+ (1ULL << LOG_SYNC) | \
+ (1ULL << LOG_BANDWIDTH_VALIDATION) | \
+ (1ULL << LOG_MST) | \
+ (1ULL << LOG_DETECTION_DP_CAPS) | \
+ (1ULL << LOG_BACKLIGHT) | \
+ (1ULL << LOG_I2C_AUX) | \
+ (1ULL << LOG_IF_TRACE) | \
+ (1ULL << LOG_HDMI_FRL) | \
+ (1ULL << LOG_DTN)) /* | \
+ (1ULL << LOG_DEBUG) | \
+ (1ULL << LOG_BIOS) | \
+ (1ULL << LOG_SURFACE) | \
+ (1ULL << LOG_SCALER) | \
+ (1ULL << LOG_DML) | \
+ (1ULL << LOG_HW_LINK_TRAINING) | \
+ (1ULL << LOG_HW_AUDIO)| \
+ (1ULL << LOG_BANDWIDTH_CALCS)*/
#endif /* __DAL_LOGGER_TYPES_H__ */
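
The 1 -> 1ULL change matters because shifting a 32-bit int by 32 or more positions is undefined behaviour in C; once the log-type enum can reach bit positions of 32 and above, every mask bit has to be built from a 64-bit constant. A minimal illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int n = 35;

	/* (1 << n) would be undefined here: the shift count meets or
	 * exceeds the width of int. Widening first is well-defined. */
	uint64_t mask = 1ULL << n;

	printf("bit %d -> 0x%llx\n", n, (unsigned long long)mask);
	return 0;
}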
diff --git a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
index b9992ebf77a6..4e542826cd26 100644
--- a/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
+++ b/drivers/gpu/drm/amd/display/modules/freesync/freesync.c
@@ -524,12 +524,12 @@ static void build_vrr_infopacket_data(const struct mod_vrr_params *vrr,
infopacket->sb[6] |= 0x04;
/* PB7 = FreeSync Minimum refresh rate (Hz) */
- infopacket->sb[7] = (unsigned char)(vrr->min_refresh_in_uhz / 1000000);
+ infopacket->sb[7] = (unsigned char)((vrr->min_refresh_in_uhz + 500000) / 1000000);
/* PB8 = FreeSync Maximum refresh rate (Hz)
* Note: We should never go above the field rate of the mode timing set.
*/
- infopacket->sb[8] = (unsigned char)(vrr->max_refresh_in_uhz / 1000000);
+ infopacket->sb[8] = (unsigned char)((vrr->max_refresh_in_uhz + 500000) / 1000000);
//FreeSync HDR
@@ -747,10 +747,6 @@ void mod_freesync_build_vrr_params(struct mod_freesync *mod_freesync,
nominal_field_rate_in_uhz =
mod_freesync_calc_nominal_field_rate(stream);
- /* Rounded to the nearest Hz */
- nominal_field_rate_in_uhz = 1000000ULL *
- div_u64(nominal_field_rate_in_uhz + 500000, 1000000);
-
min_refresh_in_uhz = in_config->min_refresh_in_uhz;
max_refresh_in_uhz = in_config->max_refresh_in_uhz;
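
The infopacket change above switches from truncating to round-to-nearest division: adding half the divisor before an integer divide rounds to the nearest Hz, which is also why the explicit pre-rounding of the nominal field rate becomes redundant and is dropped. A worked example:

#include <stdio.h>

int main(void)
{
	unsigned int min_refresh_in_uhz = 47950000; /* 47.95 Hz, example */

	unsigned int truncated = min_refresh_in_uhz / 1000000;          /* 47 */
	unsigned int rounded = (min_refresh_in_uhz + 500000) / 1000000; /* 48 */

	printf("truncated=%u rounded=%u\n", truncated, rounded);
	return 0;
}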
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
index 8aa528e874c4..e9fbd94f8635 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.c
@@ -52,8 +52,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -61,7 +61,8 @@ static uint8_t is_cp_desired_hdcp1(struct mod_hdcp *hdcp)
return (hdcp->connection.hdcp1_retry_count < MAX_NUM_OF_ATTEMPTS) &&
is_auth_needed &&
- !hdcp->connection.link.adjust.hdcp1.disable;
+ !hdcp->connection.link.adjust.hdcp1.disable &&
+ !hdcp->connection.is_hdcp1_revoked;
}
static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
@@ -72,8 +73,8 @@ static uint8_t is_cp_desired_hdcp2(struct mod_hdcp *hdcp)
* hdcp is not desired
*/
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
- !hdcp->connection.displays[i].adjust.disable) {
+ if (hdcp->displays[i].state != MOD_HDCP_DISPLAY_INACTIVE &&
+ !hdcp->displays[i].adjust.disable) {
is_auth_needed = 1;
break;
}
@@ -103,8 +104,6 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- /* update topology event if hdcp is not desired */
- status = mod_hdcp_add_display_topology(hdcp);
} else if (is_in_hdcp1_states(hdcp)) {
status = mod_hdcp_hdcp1_execution(hdcp, event_ctx, &input->hdcp1);
} else if (is_in_hdcp1_dp_states(hdcp)) {
@@ -115,6 +114,9 @@ static enum mod_hdcp_status execution(struct mod_hdcp *hdcp,
} else if (is_in_hdcp2_dp_states(hdcp)) {
status = mod_hdcp_hdcp2_dp_execution(hdcp,
event_ctx, &input->hdcp2);
+ } else {
+ event_ctx->unexpected_event = 1;
+ goto out;
}
out:
return status;
@@ -191,14 +193,7 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
mod_hdcp_hdcp1_destroy_session(hdcp);
}
- if (hdcp->auth.trans_input.hdcp1.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -212,25 +207,12 @@ static enum mod_hdcp_status reset_authentication(struct mod_hdcp *hdcp,
goto out;
}
}
- if (hdcp->auth.trans_input.hdcp2.add_topology == PASS) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
- }
+
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
set_state_id(hdcp, output, HDCP_INITIALIZED);
} else if (is_in_cp_not_desired_state(hdcp)) {
- status = mod_hdcp_remove_display_topology(hdcp);
- if (status != MOD_HDCP_STATUS_SUCCESS) {
- output->callback_needed = 0;
- output->watchdog_timer_needed = 0;
- goto out;
- }
HDCP_TOP_RESET_AUTH_TRACE(hdcp);
memset(&hdcp->auth, 0, sizeof(struct mod_hdcp_authentication));
memset(&hdcp->state, 0, sizeof(struct mod_hdcp_state));
@@ -337,16 +319,20 @@ enum mod_hdcp_status mod_hdcp_add_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* add display to connection */
- hdcp->connection.link = *link;
- *display_container = *display;
-
/* reset retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
+ /* add display to connection */
+ hdcp->connection.link = *link;
+ *display_container = *display;
+ status = mod_hdcp_add_display_to_topology(hdcp, display_container);
+
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+
/* request authentication */
if (current_state(hdcp) != HDCP_INITIALIZED)
set_state_id(hdcp, output, HDCP_INITIALIZED);
@@ -379,17 +365,20 @@ enum mod_hdcp_status mod_hdcp_remove_display(struct mod_hdcp *hdcp,
if (status != MOD_HDCP_STATUS_SUCCESS)
goto out;
- /* remove display */
- display->state = MOD_HDCP_DISPLAY_INACTIVE;
-
/* clear retry counters */
reset_retry_counts(hdcp);
/* reset error trace */
memset(&hdcp->connection.trace, 0, sizeof(hdcp->connection.trace));
- /* request authentication for remaining displays*/
- if (get_active_display_count(hdcp) > 0)
+ /* remove display */
+ status = mod_hdcp_remove_display_from_topology(hdcp, index);
+ if (status != MOD_HDCP_STATUS_SUCCESS)
+ goto out;
+ memset(display, 0, sizeof(struct mod_hdcp_display));
+
+ /* request authentication when connection is not reset */
+ if (current_state(hdcp) != HDCP_UNINITIALIZED)
callback_in_ms(hdcp->connection.link.adjust.auth_delay * 1000,
output);
out:
@@ -496,10 +485,8 @@ enum mod_hdcp_operation_mode mod_hdcp_signal_type_to_operation_mode(
break;
case SIGNAL_TYPE_EDP:
case SIGNAL_TYPE_DISPLAY_PORT:
- mode = MOD_HDCP_MODE_DP;
- break;
case SIGNAL_TYPE_DISPLAY_PORT_MST:
- mode = MOD_HDCP_MODE_DP_MST;
+ mode = MOD_HDCP_MODE_DP;
break;
default:
break;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
index af78e4f1be68..60ff1a0028ac 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp.h
@@ -41,7 +41,6 @@ enum mod_hdcp_trans_input_result {
struct mod_hdcp_transition_input_hdcp1 {
uint8_t bksv_read;
uint8_t bksv_validation;
- uint8_t add_topology;
uint8_t create_session;
uint8_t an_write;
uint8_t aksv_write;
@@ -71,7 +70,6 @@ struct mod_hdcp_transition_input_hdcp1 {
struct mod_hdcp_transition_input_hdcp2 {
uint8_t hdcp2version_read;
uint8_t hdcp2_capable_check;
- uint8_t add_topology;
uint8_t create_session;
uint8_t ake_init_prepare;
uint8_t ake_init_write;
@@ -167,9 +165,9 @@ struct mod_hdcp_auth_counters {
/* contains values per connection */
struct mod_hdcp_connection {
struct mod_hdcp_link link;
- struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
uint8_t is_repeater;
uint8_t is_km_stored;
+ uint8_t is_hdcp1_revoked;
uint8_t is_hdcp2_revoked;
struct mod_hdcp_trace trace;
uint8_t hdcp1_retry_count;
@@ -202,6 +200,8 @@ struct mod_hdcp {
struct mod_hdcp_config config;
/* per connection */
struct mod_hdcp_connection connection;
+ /* per displays */
+ struct mod_hdcp_display displays[MAX_NUM_OF_DISPLAYS];
/* per authentication attempt */
struct mod_hdcp_authentication auth;
/* per state in an authentication */
@@ -327,10 +327,10 @@ void mod_hdcp_dump_binary_message(uint8_t *msg, uint32_t msg_size,
/* TODO: add adjustment log */
/* psp functions */
-enum mod_hdcp_status mod_hdcp_add_display_topology(
- struct mod_hdcp *hdcp);
-enum mod_hdcp_status mod_hdcp_remove_display_topology(
- struct mod_hdcp *hdcp);
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display);
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index);
enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp);
enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp);
@@ -392,13 +392,13 @@ enum mod_hdcp_status mod_hdcp_write_content_type(struct mod_hdcp *hdcp);
/* hdcp version helpers */
static inline uint8_t is_dp_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP ||
- hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP);
}
static inline uint8_t is_dp_mst_hdcp(struct mod_hdcp *hdcp)
{
- return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP_MST);
+ return (hdcp->connection.link.mode == MOD_HDCP_MODE_DP &&
+ hdcp->connection.link.dp.mst_supported);
}
static inline uint8_t is_hdmi_dvi_sl_hdcp(struct mod_hdcp *hdcp)
@@ -503,11 +503,6 @@ static inline uint8_t is_display_active(struct mod_hdcp_display *display)
return display->state >= MOD_HDCP_DISPLAY_ACTIVE;
}
-static inline uint8_t is_display_added(struct mod_hdcp_display *display)
-{
- return display->state >= MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
-}
-
static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *display)
{
return display->state >= MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
@@ -515,35 +510,24 @@ static inline uint8_t is_display_encryption_enabled(struct mod_hdcp_display *dis
static inline uint8_t get_active_display_count(struct mod_hdcp *hdcp)
{
- uint8_t added_count = 0;
- uint8_t i;
-
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_active(&hdcp->connection.displays[i]))
- added_count++;
- return added_count;
-}
-
-static inline uint8_t get_added_display_count(struct mod_hdcp *hdcp)
-{
- uint8_t added_count = 0;
+ uint8_t active_count = 0;
uint8_t i;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i]))
- added_count++;
- return added_count;
+ if (is_display_active(&hdcp->displays[i]))
+ active_count++;
+ return active_count;
}
-static inline struct mod_hdcp_display *get_first_added_display(
+static inline struct mod_hdcp_display *get_first_active_display(
struct mod_hdcp *hdcp)
{
uint8_t i;
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (is_display_added(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -556,9 +540,9 @@ static inline struct mod_hdcp_display *get_active_display_at_index(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (hdcp->connection.displays[i].index == index &&
- is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (hdcp->displays[i].index == index &&
+ is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
@@ -571,8 +555,8 @@ static inline struct mod_hdcp_display *get_empty_display_container(
struct mod_hdcp_display *display = NULL;
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
- if (!is_display_active(&hdcp->connection.displays[i])) {
- display = &hdcp->connection.displays[i];
+ if (!is_display_active(&hdcp->displays[i])) {
+ display = &hdcp->displays[i];
break;
}
return display;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
index 37670db64855..f244b72e74e0 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_execution.c
@@ -129,7 +129,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static inline enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP1_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -168,10 +168,6 @@ static enum mod_hdcp_status exchange_ksvs(struct mod_hdcp *hdcp,
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp1_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
index 76edcbe51f71..f3711914364e 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp1_transition.c
@@ -46,8 +46,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, H1_A1_EXCHANGE_KSVS);
break;
case H1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -173,8 +172,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
set_state_id(hdcp, output, D1_A1_EXCHANGE_KSVS);
break;
case D1_A1_EXCHANGE_KSVS:
- if (input->add_topology != PASS ||
- input->create_session != PASS) {
+ if (input->create_session != PASS) {
/* out of sync with psp state */
adjust->hdcp1.disable = 1;
fail_and_restart_in_ms(0, &status, output);
@@ -210,7 +208,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->rx_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
@@ -231,6 +230,9 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
(!conn->is_repeater && is_dp_mst_hdcp(hdcp) && input->stream_encryption_dp != PASS)) {
fail_and_restart_in_ms(0, &status, output);
break;
+ } else if (conn->hdcp1_retry_count < conn->link.adjust.hdcp1.min_auth_retries_wa) {
+ fail_and_restart_in_ms(0, &status, output);
+ break;
}
if (conn->is_repeater) {
set_watchdog_in_ms(hdcp, 5000, output);
@@ -290,7 +292,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_dp_transition(struct mod_hdcp *hdcp,
fail_and_restart_in_ms(0, &status, output);
break;
} else if (input->ksvlist_vp_validation != PASS) {
- if (hdcp->state.stay_count < 2) {
+ if (hdcp->state.stay_count < 2 &&
+ !hdcp->connection.is_hdcp1_revoked) {
/* allow 2 additional retries */
callback_in_ms(0, output);
increment_stay_counter(hdcp);
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
index f730b94ac3c0..549c113abcf7 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_execution.c
@@ -34,7 +34,7 @@ static inline enum mod_hdcp_status check_receiver_id_list_ready(struct mod_hdcp
if (is_dp_hdcp(hdcp))
is_ready = HDCP_2_2_DP_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus_dp) ? 1 : 0;
else
- is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[0]) &&
+ is_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(hdcp->auth.msg.hdcp2.rxstatus[1]) &&
(HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(hdcp->auth.msg.hdcp2.rxstatus[1]) << 8 |
hdcp->auth.msg.hdcp2.rxstatus[0])) ? 1 : 0;
return is_ready ? MOD_HDCP_STATUS_SUCCESS :
@@ -46,8 +46,8 @@ static inline enum mod_hdcp_status check_hdcp2_capable(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp))
- status = (hdcp->auth.msg.hdcp2.rxcaps_dp[2] & HDCP_2_2_RX_CAPS_VERSION_VAL) &&
- HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[0]) ?
+ status = (hdcp->auth.msg.hdcp2.rxcaps_dp[0] == HDCP_2_2_RX_CAPS_VERSION_VAL) &&
+ HDCP_2_2_DP_HDCP_CAPABLE(hdcp->auth.msg.hdcp2.rxcaps_dp[2]) ?
MOD_HDCP_STATUS_SUCCESS :
MOD_HDCP_STATUS_HDCP2_NOT_CAPABLE;
else
@@ -67,7 +67,7 @@ static inline enum mod_hdcp_status check_reauthentication_request(
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
else
- ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[0]) ?
+ ret = HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(hdcp->auth.msg.hdcp2.rxstatus[1]) ?
MOD_HDCP_STATUS_HDCP2_REAUTH_REQUEST :
MOD_HDCP_STATUS_SUCCESS;
return ret;
@@ -208,7 +208,7 @@ static inline uint8_t get_device_count(struct mod_hdcp *hdcp)
static enum mod_hdcp_status check_device_count(struct mod_hdcp *hdcp)
{
/* device count must be greater than or equal to tracked hdcp displays */
- return (get_device_count(hdcp) < get_added_display_count(hdcp)) ?
+ return (get_device_count(hdcp) < get_active_display_count(hdcp)) ?
MOD_HDCP_STATUS_HDCP2_DEVICE_COUNT_MISMATCH_FAILURE :
MOD_HDCP_STATUS_SUCCESS;
}
@@ -259,6 +259,7 @@ static enum mod_hdcp_status known_hdcp2_capable_rx(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
+
if (!mod_hdcp_execute_and_set(mod_hdcp_read_hdcp2version,
&input->hdcp2version_read, &status,
hdcp, "hdcp2version_read"))
@@ -281,10 +282,7 @@ static enum mod_hdcp_status send_ake_init(struct mod_hdcp *hdcp,
event_ctx->unexpected_event = 1;
goto out;
}
- if (!mod_hdcp_execute_and_set(mod_hdcp_add_display_topology,
- &input->add_topology, &status,
- hdcp, "add_topology"))
- goto out;
+
if (!mod_hdcp_execute_and_set(mod_hdcp_hdcp2_create_session,
&input->create_session, &status,
hdcp, "create_session"))
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
index 8cae3e3aacd5..e738c7ae66ec 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp2_transition.c
@@ -47,8 +47,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_transition(struct mod_hdcp *hdcp,
}
break;
case H2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
@@ -389,8 +388,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_dp_transition(struct mod_hdcp *hdcp,
}
break;
case D2_A1_SEND_AKE_INIT:
- if (input->add_topology != PASS ||
- input->create_session != PASS ||
+ if (input->create_session != PASS ||
input->ake_init_prepare != PASS) {
/* out of sync with psp state */
adjust->hdcp2.disable = 1;
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
index ff9d54812e62..bb5130f4228d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_ddc.c
@@ -65,6 +65,7 @@ enum mod_hdcp_ddc_message_id {
MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK,
MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE,
MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
@@ -101,6 +102,7 @@ static const uint8_t hdcp_i2c_offsets[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x80,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x80,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x60,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x60,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x80,
@@ -135,6 +137,7 @@ static const uint32_t hdcp_dpcd_addrs[] = {
[MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME] = 0x692f8,
[MOD_HDCP_MESSAGE_ID_WRITE_SKE_SEND_EKS] = 0x69318,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST] = 0x69330,
+ [MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2] = 0x69340,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_SEND_ACK] = 0x693e0,
[MOD_HDCP_MESSAGE_ID_WRITE_REPEATER_AUTH_STREAM_MANAGE] = 0x693f0,
[MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY] = 0x69473,
@@ -405,7 +408,7 @@ enum mod_hdcp_status mod_hdcp_read_ake_cert(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_cert[0] = 3;
+ hdcp->auth.msg.hdcp2.ake_cert[0] = HDCP_2_2_AKE_SEND_CERT;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_CERT,
hdcp->auth.msg.hdcp2.ake_cert+1,
sizeof(hdcp->auth.msg.hdcp2.ake_cert)-1);
@@ -423,7 +426,7 @@ enum mod_hdcp_status mod_hdcp_read_h_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_h_prime[0] = 7;
+ hdcp->auth.msg.hdcp2.ake_h_prime[0] = HDCP_2_2_AKE_SEND_HPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_H_PRIME,
hdcp->auth.msg.hdcp2.ake_h_prime+1,
sizeof(hdcp->auth.msg.hdcp2.ake_h_prime)-1);
@@ -441,7 +444,7 @@ enum mod_hdcp_status mod_hdcp_read_pairing_info(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.ake_pairing_info[0] = 8;
+ hdcp->auth.msg.hdcp2.ake_pairing_info[0] = HDCP_2_2_AKE_SEND_PAIRING_INFO;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_AKE_SEND_PAIRING_INFO,
hdcp->auth.msg.hdcp2.ake_pairing_info+1,
sizeof(hdcp->auth.msg.hdcp2.ake_pairing_info)-1);
@@ -459,7 +462,7 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.lc_l_prime[0] = 10;
+ hdcp->auth.msg.hdcp2.lc_l_prime[0] = HDCP_2_2_LC_SEND_LPRIME;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_LC_SEND_L_PRIME,
hdcp->auth.msg.hdcp2.lc_l_prime+1,
sizeof(hdcp->auth.msg.hdcp2.lc_l_prime)-1);
@@ -474,14 +477,27 @@ enum mod_hdcp_status mod_hdcp_read_l_prime(struct mod_hdcp *hdcp)
enum mod_hdcp_status mod_hdcp_read_rx_id_list(struct mod_hdcp *hdcp)
{
- enum mod_hdcp_status status;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.rx_id_list[0] = 12;
- status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
- hdcp->auth.msg.hdcp2.rx_id_list+1,
- sizeof(hdcp->auth.msg.hdcp2.rx_id_list)-1);
+ uint32_t device_count = 0;
+ uint32_t rx_id_list_size = 0;
+ uint32_t bytes_read = 0;
+ hdcp->auth.msg.hdcp2.rx_id_list[0] = HDCP_2_2_REP_SEND_RECVID_LIST;
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
+ hdcp->auth.msg.hdcp2.rx_id_list+1,
+ HDCP_MAX_AUX_TRANSACTION_SIZE);
+ if (status == MOD_HDCP_STATUS_SUCCESS) {
+ bytes_read = HDCP_MAX_AUX_TRANSACTION_SIZE;
+ device_count = HDCP_2_2_DEV_COUNT_LO(hdcp->auth.msg.hdcp2.rx_id_list[2]) +
+ (HDCP_2_2_DEV_COUNT_HI(hdcp->auth.msg.hdcp2.rx_id_list[1]) << 4);
+ rx_id_list_size = MIN((21 + 5 * device_count),
+ (sizeof(hdcp->auth.msg.hdcp2.rx_id_list) - 1));
+ status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST_PART2,
+ hdcp->auth.msg.hdcp2.rx_id_list + 1 + bytes_read,
+ (rx_id_list_size - 1) / HDCP_MAX_AUX_TRANSACTION_SIZE * HDCP_MAX_AUX_TRANSACTION_SIZE);
+ }
} else {
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_SEND_RECEIVERID_LIST,
hdcp->auth.msg.hdcp2.rx_id_list,
@@ -495,7 +511,7 @@ enum mod_hdcp_status mod_hdcp_read_stream_ready(struct mod_hdcp *hdcp)
enum mod_hdcp_status status;
if (is_dp_hdcp(hdcp)) {
- hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = 17;
+ hdcp->auth.msg.hdcp2.repeater_auth_stream_ready[0] = HDCP_2_2_REP_STREAM_READY;
status = read(hdcp, MOD_HDCP_MESSAGE_ID_READ_REPEATER_AUTH_STREAM_READY,
hdcp->auth.msg.hdcp2.repeater_auth_stream_ready+1,
sizeof(hdcp->auth.msg.hdcp2.repeater_auth_stream_ready)-1);
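
A sketch of the receiver-ID-list sizing introduced in the DP path above: RxInfo splits the device count across two bytes, the list is 21 fixed bytes plus 5 bytes of receiver ID per device, and everything beyond the first AUX transaction is fetched from the second DPCD window. HDCP_MAX_AUX_TRANSACTION_SIZE is assumed here to be the DP AUX limit of 16 bytes.

#include <stdio.h>

#define MAX_AUX 16 /* assumed HDCP_MAX_AUX_TRANSACTION_SIZE */

int main(void)
{
	unsigned int lo = 4, hi = 0;               /* example RxInfo fields */
	unsigned int device_count = lo + (hi << 4);
	unsigned int list_size = 21 + 5 * device_count; /* 41 bytes */

	/* Length of the second read: the list size minus the msg-id
	 * byte, truncated to whole AUX transactions; together with the
	 * first 16-byte read this covers the entire list. */
	unsigned int part2 = (list_size - 1) / MAX_AUX * MAX_AUX;

	printf("devices=%u list=%u second_read=%u\n",
	       device_count, list_size, part2);
	return 0;
}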
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
index 724ebcee9a19..44956f9ba178 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.c
@@ -90,10 +90,14 @@ char *mod_hdcp_status_to_str(int32_t status)
return "MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED";
case MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY:
return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY";
case MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE:
return "MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE";
+ case MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED:
+ return "MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED";
case MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION:
return "MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION";
case MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE:
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
index ff91373ebada..d3192b9d0c3d 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_log.h
@@ -37,10 +37,11 @@
/* default logs */
#define HDCP_ERROR_TRACE(hdcp, status) \
HDCP_LOG_ERR(hdcp, \
- "[Link %d] WARNING %s IN STATE %s", \
+ "[Link %d] WARNING %s IN STATE %s STAY COUNT %d", \
hdcp->config.index, \
mod_hdcp_status_to_str(status), \
- mod_hdcp_state_id_to_str(hdcp->state.id))
+ mod_hdcp_state_id_to_str(hdcp->state.id), \
+ hdcp->state.stay_count)
#define HDCP_HDCP1_ENABLED_TRACE(hdcp, displayIndex) \
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 1.4 enabled on display %d", \
@@ -49,6 +50,15 @@
HDCP_LOG_VER(hdcp, \
"[Link %d] HDCP 2.2 enabled on display %d", \
hdcp->config.index, displayIndex)
+#define HDCP_HDCP1_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 1.4 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+#define HDCP_HDCP2_DISABLED_TRACE(hdcp, displayIndex) \
+ HDCP_LOG_VER(hdcp, \
+ "[Link %d] HDCP 2.2 disabled on display %d", \
+ hdcp->config.index, displayIndex)
+
/* state machine logs */
#define HDCP_REMOVE_DISPLAY_TRACE(hdcp, displayIndex) \
HDCP_LOG_FSM(hdcp, \
@@ -102,6 +112,9 @@
sizeof(hdcp->auth.msg.hdcp1.bksv)); \
HDCP_DDC_READ_TRACE(hdcp, "BCAPS", &hdcp->auth.msg.hdcp1.bcaps, \
sizeof(hdcp->auth.msg.hdcp1.bcaps)); \
+ HDCP_DDC_READ_TRACE(hdcp, "BSTATUS", \
+ (uint8_t *)&hdcp->auth.msg.hdcp1.bstatus, \
+ sizeof(hdcp->auth.msg.hdcp1.bstatus)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AN", hdcp->auth.msg.hdcp1.an, \
sizeof(hdcp->auth.msg.hdcp1.an)); \
HDCP_DDC_WRITE_TRACE(hdcp, "AKSV", hdcp->auth.msg.hdcp1.aksv, \
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
index 7911dc157d5a..836e47954938 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c
@@ -44,84 +44,78 @@ static void hdcp2_message_init(struct mod_hdcp *hdcp,
in->process.msg3_desc.msg_id = TA_HDCP_HDCP2_MSG_ID__NULL_MESSAGE;
in->process.msg3_desc.msg_size = 0;
}
-enum mod_hdcp_status mod_hdcp_remove_display_topology(struct mod_hdcp *hdcp)
-{
-
- struct psp_context *psp = hdcp->config.psp.handle;
- struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
- uint8_t i;
+enum mod_hdcp_status mod_hdcp_remove_display_from_topology(
+ struct mod_hdcp *hdcp, uint8_t index)
+ {
+ struct psp_context *psp = hdcp->config.psp.handle;
+ struct ta_dtm_shared_memory *dtm_cmd;
+ struct mod_hdcp_display *display =
+ get_active_display_at_index(hdcp, index);
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (is_display_added(&(hdcp->connection.displays[i]))) {
+ if (!display || !is_display_active(display))
+ return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
- display = &hdcp->connection.displays[i];
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 0;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
+ display->state = MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
+
+ return MOD_HDCP_STATUS_SUCCESS;
+ }
- display->state = MOD_HDCP_DISPLAY_ACTIVE;
- HDCP_TOP_REMOVE_DISPLAY_TRACE(hdcp, display->index);
- }
- }
-
- return MOD_HDCP_STATUS_SUCCESS;
-}
-
-enum mod_hdcp_status mod_hdcp_add_display_topology(struct mod_hdcp *hdcp)
+enum mod_hdcp_status mod_hdcp_add_display_to_topology(
+ struct mod_hdcp *hdcp, struct mod_hdcp_display *display)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_dtm_shared_memory *dtm_cmd;
- struct mod_hdcp_display *display = NULL;
struct mod_hdcp_link *link = &hdcp->connection.link;
- uint8_t i;
if (!psp->dtm_context.dtm_initialized) {
DRM_ERROR("Failed to add display topology, DTM TA is not initialized.");
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
return MOD_HDCP_STATUS_FAILURE;
}
dtm_cmd = (struct ta_dtm_shared_memory *)psp->dtm_context.dtm_shared_buf;
- for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state == MOD_HDCP_DISPLAY_ACTIVE) {
- display = &hdcp->connection.displays[i];
-
- memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
-
- dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
- dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
- dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
- dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
- dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
- dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
- dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
- dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
- TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
- dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
-
- psp_dtm_invoke(psp, dtm_cmd->cmd_id);
-
- if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
-
- display->state = MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED;
- HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
- }
+ memset(dtm_cmd, 0, sizeof(struct ta_dtm_shared_memory));
+
+ dtm_cmd->cmd_id = TA_DTM_COMMAND__TOPOLOGY_UPDATE_V2;
+ dtm_cmd->dtm_in_message.topology_update_v2.display_handle = display->index;
+ dtm_cmd->dtm_in_message.topology_update_v2.is_active = 1;
+ dtm_cmd->dtm_in_message.topology_update_v2.controller = display->controller;
+ dtm_cmd->dtm_in_message.topology_update_v2.ddc_line = link->ddc_line;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_be = link->dig_be;
+ dtm_cmd->dtm_in_message.topology_update_v2.dig_fe = display->dig_fe;
+ if (is_dp_hdcp(hdcp))
+ dtm_cmd->dtm_in_message.topology_update_v2.is_assr = link->dp.assr_supported;
+
+ dtm_cmd->dtm_in_message.topology_update_v2.dp_mst_vcid = display->vc_id;
+ dtm_cmd->dtm_in_message.topology_update_v2.max_hdcp_supported_version =
+ TA_DTM_HDCP_VERSION_MAX_SUPPORTED__2_2;
+ dtm_cmd->dtm_status = TA_DTM_STATUS__GENERIC_FAILURE;
+
+ psp_dtm_invoke(psp, dtm_cmd->cmd_id);
+
+ if (dtm_cmd->dtm_status != TA_DTM_STATUS__SUCCESS) {
+ display->state = MOD_HDCP_DISPLAY_INACTIVE;
+ return MOD_HDCP_STATUS_UPDATE_TOPOLOGY_FAILURE;
}
+ HDCP_TOP_ADD_DISPLAY_TRACE(hdcp, display->index);
+
return MOD_HDCP_STATUS_SUCCESS;
}
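Annotation: with the per-display variants above, iteration over the display array moves out of the PSP helpers and into the callers, which now add or remove one display at a time. A minimal caller sketch (hypothetical; names follow the helpers in this patch):

    /* hypothetical caller: push every active display to the DTM TA */
    uint8_t i;

    for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
        struct mod_hdcp_display *display = &hdcp->displays[i];

        if (is_display_active(display) &&
            mod_hdcp_add_display_to_topology(hdcp, display) !=
            MOD_HDCP_STATUS_SUCCESS)
            break;
    }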
@@ -129,7 +123,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
struct ta_hdcp_shared_memory *hdcp_cmd;
if (!psp->hdcp_context.hdcp_initialized) {
@@ -164,6 +158,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -177,6 +172,14 @@ enum mod_hdcp_status mod_hdcp_hdcp1_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP1_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP1_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP1_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -210,6 +213,10 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_rx(struct mod_hdcp *hdcp)
} else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
hdcp->connection.is_repeater = 0;
+ } else if (hdcp_cmd->out_msg.hdcp1_first_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED;
} else
return MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE;
@@ -221,7 +228,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -245,6 +252,7 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ enum mod_hdcp_status status = MOD_HDCP_STATUS_SUCCESS;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -264,10 +272,19 @@ enum mod_hdcp_status mod_hdcp_hdcp1_validate_ksvlist_vp(struct mod_hdcp *hdcp)
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
- if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
- return MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ if (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS &&
+ hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_AUTHENTICATED) {
+ status = MOD_HDCP_STATUS_SUCCESS;
+ } else if (hdcp_cmd->out_msg.hdcp1_second_part_authentication.authentication_status ==
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED) {
+ hdcp->connection.is_hdcp1_revoked = 1;
+ status = MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED;
+ } else {
+ status = MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE;
+ }
- return MOD_HDCP_STATUS_SUCCESS;
+ return status;
}
enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp *hdcp)
@@ -281,14 +298,13 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].adjust.disable)
continue;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
- hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp1_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP1_ENABLE_DP_STREAM_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -296,8 +312,8 @@ enum mod_hdcp_status mod_hdcp_hdcp1_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
return MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP1_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return MOD_HDCP_STATUS_SUCCESS;
@@ -344,7 +360,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
if (!psp->hdcp_context.hdcp_initialized) {
DRM_ERROR("Failed to create hdcp session, HDCP TA is not initialized");
@@ -385,6 +401,7 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
+ uint8_t i = 0;
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
@@ -398,6 +415,14 @@ enum mod_hdcp_status mod_hdcp_hdcp2_destroy_session(struct mod_hdcp *hdcp)
return MOD_HDCP_STATUS_HDCP2_DESTROY_SESSION_FAILURE;
HDCP_TOP_HDCP2_DESTROY_SESSION_TRACE(hdcp);
+ for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++)
+ if (is_display_encryption_enabled(
+ &hdcp->displays[i])) {
+ hdcp->displays[i].state =
+ MOD_HDCP_DISPLAY_ACTIVE;
+ HDCP_HDCP2_DISABLED_TRACE(hdcp,
+ hdcp->displays[i].index);
+ }
return MOD_HDCP_STATUS_SUCCESS;
}
@@ -473,9 +498,12 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_ake_cert(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_AKE_CERT_REVOKED;
}
- return MOD_HDCP_STATUS_FAILURE;
+ return MOD_HDCP_STATUS_HDCP2_VALIDATE_AKE_CERT_FAILURE;
}
enum mod_hdcp_status mod_hdcp_hdcp2_validate_h_prime(struct mod_hdcp *hdcp)
@@ -630,20 +658,15 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_encryption(struct mod_hdcp *hdcp)
{
struct psp_context *psp = hdcp->config.psp.handle;
struct ta_hdcp_shared_memory *hdcp_cmd;
- struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2 *msg_in;
- struct mod_hdcp_display *display = get_first_added_display(hdcp);
+ struct mod_hdcp_display *display = get_first_active_display(hdcp);
hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.hdcp_shared_buf;
memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
- msg_in = &hdcp_cmd->in_msg.hdcp2_prepare_process_authentication_message_v2;
-
- hdcp2_message_init(hdcp, msg_in);
-
if (!display)
return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND;
- hdcp_cmd->in_msg.hdcp1_enable_encryption.session_handle = hdcp->auth.id;
+ hdcp_cmd->in_msg.hdcp2_set_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_SET_ENCRYPTION;
psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
@@ -695,6 +718,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_validate_rx_id_list(struct mod_hdcp *hdcp)
hdcp->connection.is_km_stored = msg_out->process.is_km_stored ? 1 : 0;
hdcp->connection.is_repeater = msg_out->process.is_repeater ? 1 : 0;
return MOD_HDCP_STATUS_SUCCESS;
+ } else if (msg_out->process.msg1_status == TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED) {
+ hdcp->connection.is_hdcp2_revoked = 1;
+ return MOD_HDCP_STATUS_HDCP2_RX_ID_LIST_REVOKED;
}
@@ -717,10 +743,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
for (i = 0; i < MAX_NUM_OF_DISPLAYS; i++) {
- if (hdcp->connection.displays[i].state != MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED ||
- hdcp->connection.displays[i].adjust.disable)
+ if (hdcp->displays[i].adjust.disable)
continue;
- hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->connection.displays[i].index;
+ hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.display_handle = hdcp->displays[i].index;
hdcp_cmd->in_msg.hdcp2_enable_dp_stream_encryption.session_handle = hdcp->auth.id;
hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION;
@@ -729,8 +754,8 @@ enum mod_hdcp_status mod_hdcp_hdcp2_enable_dp_stream_encryption(struct mod_hdcp
if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS)
break;
- hdcp->connection.displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
- HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->connection.displays[i].index);
+ hdcp->displays[i].state = MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED;
+ HDCP_HDCP2_ENABLED_TRACE(hdcp, hdcp->displays[i].index);
}
return (hdcp_cmd->hdcp_status == TA_HDCP_STATUS__SUCCESS) ? MOD_HDCP_STATUS_SUCCESS
diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
index 82a5e997d573..1a663dbbf810 100644
--- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
+++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.h
@@ -117,6 +117,8 @@ struct ta_dtm_shared_memory {
int psp_cmd_submit_buf(struct psp_context *psp, struct amdgpu_firmware_info *ucode, struct psp_gfx_cmd_resp *cmd,
uint64_t fence_mc_addr);
+enum { PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE = 5120 };
+
enum ta_hdcp_command {
TA_HDCP_COMMAND__INITIALIZE,
TA_HDCP_COMMAND__HDCP1_CREATE_SESSION,
@@ -134,7 +136,10 @@ enum ta_hdcp_command {
TA_HDCP_COMMAND__UNUSED_3,
TA_HDCP_COMMAND__HDCP2_CREATE_SESSION_V2,
TA_HDCP_COMMAND__HDCP2_PREPARE_PROCESS_AUTHENTICATION_MSG_V2,
- TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION
+ TA_HDCP_COMMAND__HDCP2_ENABLE_DP_STREAM_ENCRYPTION,
+ TA_HDCP_COMMAND__HDCP_DESTROY_ALL_SESSIONS,
+ TA_HDCP_COMMAND__HDCP_SET_SRM,
+ TA_HDCP_COMMAND__HDCP_GET_SRM
};
enum ta_hdcp2_msg_id {
@@ -235,7 +240,8 @@ enum ta_hdcp_authentication_status {
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_PENDING = 0x06,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATION_FAILED = 0x07,
TA_HDCP_AUTHENTICATION_STATUS__HDCP22_AUTHENTICATED = 0x08,
- TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_VALIDATION_FAILED = 0x09,
+ TA_HDCP_AUTHENTICATION_STATUS__HDCP1_KSV_REVOKED = 0x0A
};
enum ta_hdcp2_msg_authentication_status {
@@ -253,7 +259,8 @@ enum ta_hdcp2_msg_authentication_status {
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SEQ_NUM,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_SIZE,
TA_HDCP2_MSG_AUTHENTICATION_STATUS__INVALID_LENGTH,
- TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__REAUTH_REQUEST,
+ TA_HDCP2_MSG_AUTHENTICATION_STATUS__RECEIVERID_REVOKED
};
enum ta_hdcp_content_type {
@@ -415,6 +422,22 @@ struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input {
uint32_t display_handle;
};
+struct ta_hdcp_cmd_set_srm_input {
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
+struct ta_hdcp_cmd_set_srm_output {
+ uint8_t valid_signature;
+ uint32_t srm_version;
+};
+
+struct ta_hdcp_cmd_get_srm_output {
+ uint32_t srm_version;
+ uint32_t srm_buf_size;
+ uint8_t srm_buf[PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE];
+};
+
/**********************************************************/
/* Common input structure for HDCP callbacks */
union ta_hdcp_cmd_input {
@@ -432,6 +455,7 @@ union ta_hdcp_cmd_input {
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_input_v2
hdcp2_prepare_process_authentication_message_v2;
struct ta_hdcp_cmd_hdcp2_enable_dp_stream_encryption_input hdcp2_enable_dp_stream_encryption;
+ struct ta_hdcp_cmd_set_srm_input hdcp_set_srm;
};
/* Common output structure for HDCP callbacks */
@@ -444,6 +468,8 @@ union ta_hdcp_cmd_output {
struct ta_hdcp_cmd_hdcp2_create_session_output_v2 hdcp2_create_session_v2;
struct ta_hdcp_cmd_hdcp2_process_prepare_authentication_message_output_v2
hdcp2_prepare_process_authentication_message_v2;
+ struct ta_hdcp_cmd_set_srm_output hdcp_set_srm;
+ struct ta_hdcp_cmd_get_srm_output hdcp_get_srm;
};
/**********************************************************/
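Annotation: an SRM (System Renewability Message) is the DCP-LLC-signed revocation list; the 5120-byte cap mirrors the first-generation SRM maximum the constant is named for, and the new SET/GET commands let the driver hand a list to the HDCP TA and read back the one in force. A hypothetical invocation, following the hdcp_cmd pattern used throughout hdcp_psp.c:

    /* sketch, not the committed implementation */
    memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory));
    hdcp_cmd->in_msg.hdcp_set_srm.srm_buf_size = srm_size;
    memcpy(hdcp_cmd->in_msg.hdcp_set_srm.srm_buf, srm, srm_size);
    hdcp_cmd->cmd_id = TA_HDCP_COMMAND__HDCP_SET_SRM;
    psp_hdcp_invoke(psp, hdcp_cmd->cmd_id);
    if (hdcp_cmd->hdcp_status != TA_HDCP_STATUS__SUCCESS ||
        !hdcp_cmd->out_msg.hdcp_set_srm.valid_signature)
        return MOD_HDCP_STATUS_FAILURE;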
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
index f2a0e1a064da..eae9309cfb24 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_hdcp.h
@@ -56,8 +56,10 @@ enum mod_hdcp_status {
MOD_HDCP_STATUS_HDCP1_NOT_CAPABLE,
MOD_HDCP_STATUS_HDCP1_R0_PRIME_PENDING,
MOD_HDCP_STATUS_HDCP1_VALIDATE_RX_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_BKSV_REVOKED,
MOD_HDCP_STATUS_HDCP1_KSV_LIST_NOT_READY,
MOD_HDCP_STATUS_HDCP1_VALIDATE_KSV_LIST_FAILURE,
+ MOD_HDCP_STATUS_HDCP1_KSV_LIST_REVOKED,
MOD_HDCP_STATUS_HDCP1_ENABLE_ENCRYPTION,
MOD_HDCP_STATUS_HDCP1_ENABLE_STREAM_ENCRYPTION_FAILURE,
MOD_HDCP_STATUS_HDCP1_MAX_CASCADE_EXCEEDED_FAILURE,
@@ -100,6 +102,7 @@ enum mod_hdcp_status {
struct mod_hdcp_displayport {
uint8_t rev;
uint8_t assr_supported;
+ uint8_t mst_supported;
};
struct mod_hdcp_hdmi {
@@ -108,14 +111,12 @@ struct mod_hdcp_hdmi {
enum mod_hdcp_operation_mode {
MOD_HDCP_MODE_OFF,
MOD_HDCP_MODE_DEFAULT,
- MOD_HDCP_MODE_DP,
- MOD_HDCP_MODE_DP_MST
+ MOD_HDCP_MODE_DP
};
enum mod_hdcp_display_state {
MOD_HDCP_DISPLAY_INACTIVE = 0,
MOD_HDCP_DISPLAY_ACTIVE,
- MOD_HDCP_DISPLAY_ACTIVE_AND_ADDED,
MOD_HDCP_DISPLAY_ENCRYPTION_ENABLED
};
@@ -155,7 +156,8 @@ struct mod_hdcp_display_adjustment {
struct mod_hdcp_link_adjustment_hdcp1 {
uint8_t disable : 1;
uint8_t postpone_encryption : 1;
- uint8_t reserved : 6;
+ uint8_t min_auth_retries_wa : 1;
+ uint8_t reserved : 5;
};
enum mod_hdcp_force_hdcp_type {
diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
index 42cbeffac640..13c57ff2abdc 100644
--- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
+++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h
@@ -34,8 +34,7 @@ struct dc_info_packet;
struct mod_vrr_params;
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry);
+ struct dc_info_packet *info_packet);
void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream,
struct dc_info_packet *info_packet, int ALLMEnabled, int ALLMValue);
diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
index 6a8a056424b8..cff3ab15fc0c 100644
--- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
+++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c
@@ -130,8 +130,7 @@ enum ColorimetryYCCDP {
};
void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
- struct dc_info_packet *info_packet,
- bool *use_vsc_sdp_for_colorimetry)
+ struct dc_info_packet *info_packet)
{
unsigned int vsc_packet_revision = vsc_packet_undefined;
unsigned int i;
@@ -139,11 +138,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
unsigned int colorimetryFormat = 0;
bool stereo3dSupport = false;
- /* Initialize first, later if infopacket is valid determine if VSC SDP
- * should be used to signal colorimetry format and pixel encoding.
- */
- *use_vsc_sdp_for_colorimetry = false;
-
if (stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE && stream->view_format != VIEW_3D_FORMAT_NONE) {
vsc_packet_revision = vsc_packet_rev1;
stereo3dSupport = true;
@@ -153,9 +147,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
if (stream->psr_version != 0)
vsc_packet_revision = vsc_packet_rev2;
- /* Update to revision 5 for extended colorimetry support for DPCD 1.4+ */
- if (stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
- stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
+ /* Update to revision 5 for extended colorimetry support */
+ if (stream->use_vsc_sdp_for_colorimetry)
vsc_packet_revision = vsc_packet_rev5;
/* VSC packet not needed based on the features
@@ -269,13 +262,6 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream,
info_packet->valid = true;
- /* If we are using VSC SDP revision 05h, use this to signal for
- * colorimetry format and pixel encoding. HW should later be
- * programmed to set MSA MISC1 bit 6 to indicate ignore
- * colorimetry format and pixel encoding in the MSA.
- */
- *use_vsc_sdp_for_colorimetry = true;
-
/* Set VSC SDP fields for pixel encoding and colorimetry format from DP 1.3 specs
* Data Bytes DB 18~16
* Bits 3:0 (Colorimetry Format) | Bits 7:4 (Pixel Encoding)
diff --git a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
index f0a153704f6e..00f132f8ad55 100644
--- a/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
+++ b/drivers/gpu/drm/amd/display/modules/vmid/vmid.c
@@ -40,14 +40,18 @@ struct core_vmid {
static void add_ptb_to_table(struct core_vmid *core_vmid, unsigned int vmid, uint64_t ptb)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
- core_vmid->num_vmids_available--;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = ptb;
+ core_vmid->num_vmids_available--;
+ }
}
static void clear_entry_from_vmid_table(struct core_vmid *core_vmid, unsigned int vmid)
{
- core_vmid->ptb_assigned_to_vmid[vmid] = 0;
- core_vmid->num_vmids_available++;
+ if (vmid < MAX_VMID) {
+ core_vmid->ptb_assigned_to_vmid[vmid] = 0;
+ core_vmid->num_vmids_available++;
+ }
}
static void evict_vmids(struct core_vmid *core_vmid)
@@ -57,7 +61,7 @@ static void evict_vmids(struct core_vmid *core_vmid)
// At this point any positions with value 0 are unused vmids, evict them
for (i = 1; i < core_vmid->num_vmid; i++) {
- if (ord & (1u << i))
+ if (!(ord & (1u << i)))
clear_entry_from_vmid_table(core_vmid, i);
}
}
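Annotation: the old test evicted exactly the wrong entries. ord has bit i set while vmid i is still referenced, so eviction must target the clear bits, as the comment already said. Illustration with hypothetical values:

    uint32_t ord = 0x6;    /* vmids 1 and 2 in use, vmid 3 idle */

    for (i = 1; i < core_vmid->num_vmid; i++)
        if (!(ord & (1u << i)))    /* true only for i == 3 */
            clear_entry_from_vmid_table(core_vmid, i);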
@@ -91,7 +95,7 @@ static int get_next_available_vmid(struct core_vmid *core_vmid)
uint8_t mod_vmid_get_for_ptb(struct mod_vmid *mod_vmid, uint64_t ptb)
{
struct core_vmid *core_vmid = MOD_VMID_TO_CORE(mod_vmid);
- unsigned int vmid = 0;
+ int vmid = 0;
// Physical address gets vmid 0
if (ptb == 0)
diff --git a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
index b6f74bf4af02..27bb8c1ab858 100644
--- a/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
+++ b/drivers/gpu/drm/amd/include/asic_reg/dce/dce_12_0_offset.h
@@ -7376,6 +7376,8 @@
#define mmCRTC4_CRTC_DRR_CONTROL 0x0f3e
#define mmCRTC4_CRTC_DRR_CONTROL_BASE_IDX 2
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0 0x395d
+#define mmDCHUBBUB_SDPIF_MMIO_CNTRL_0_BASE_IDX 2
// addressBlock: dce_dc_fmt4_dispdec
// base address: 0x2000
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..82b6cc25205e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SH_MASK_HEADER
+#define _wafl2_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
new file mode 100644
index 000000000000..4a51a90c611a
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/wafl/wafl2_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _wafl2_4_0_0_SMN_HEADER
+#define _wafl2_4_0_0_SMN_HEADER
+
+#define smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS 0x11cf0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
new file mode 100644
index 000000000000..f37712f05b03
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_sh_mask.h
@@ -0,0 +1,69 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SH_MASK_HEADER
+#define _xgmi_4_0_0_SH_MASK_HEADER
+
+//PCS_GOPX16_PCS_ERROR_STATUS
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr__SHIFT 0x0
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr__SHIFT 0x1
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr__SHIFT 0x5
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr__SHIFT 0x6
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr__SHIFT 0x7
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr__SHIFT 0x8
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr__SHIFT 0x9
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr__SHIFT 0xa
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr__SHIFT 0xb
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr__SHIFT 0xc
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr__SHIFT 0xd
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr__SHIFT 0xf
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr__SHIFT 0x10
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr__SHIFT 0x11
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr__SHIFT 0x12
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr__SHIFT 0x13
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr__SHIFT 0x14
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr__SHIFT 0x15
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum__SHIFT 0x17
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator__SHIFT 0x18
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataLossErr_MASK 0x00000001L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TrainingErr_MASK 0x00000002L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__CRCErr_MASK 0x00000020L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERExceededErr_MASK 0x00000040L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__TxMetaDataErr_MASK 0x00000080L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayBufParityErr_MASK 0x00000100L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataParityErr_MASK 0x00000200L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoOverflowErr_MASK 0x00000400L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReplayFifoUnderflowErr_MASK 0x00000800L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ElasticFifoOverflowErr_MASK 0x00001000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DeskewErr_MASK 0x00002000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__DataStartupLimitErr_MASK 0x00008000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__FCInitTimeoutErr_MASK 0x00010000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryTimeoutErr_MASK 0x00020000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialTimeoutErr_MASK 0x00040000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ReadySerialAttemptErr_MASK 0x00080000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryAttemptErr_MASK 0x00100000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__RecoveryRelockAttemptErr_MASK 0x00200000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__ClearBERAccum_MASK 0x00800000L
+#define XGMI0_PCS_GOPX16_PCS_ERROR_STATUS__BERAccumulator_MASK 0xFF000000L
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
new file mode 100644
index 000000000000..6ccbac4ce87e
--- /dev/null
+++ b/drivers/gpu/drm/amd/include/asic_reg/xgmi/xgmi_4_0_0_smn.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef _xgmi_4_0_0_SMN_HEADER
+#define _xgmi_4_0_0_SMN_HEADER
+
+#define smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS 0x11af0210
+
+#endif
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index a607b1034962..a3c238c39ef5 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -123,7 +123,7 @@ struct kgd2kfd_shared_resources {
uint32_t num_queue_per_pipe;
/* Bit n == 1 means Queue n is available for KFD */
- DECLARE_BITMAP(queue_bitmap, KGD_MAX_QUEUES);
+ DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES);
/* SDMA doorbell assignments (SOC15 and later chips only). Only
* specific doorbells are routed to each SDMA engine. Others
@@ -151,6 +151,7 @@ struct kgd2kfd_shared_resources {
/* Minor device number of the render node */
int drm_render_minor;
+
};
struct tile_config {
@@ -166,27 +167,6 @@ struct tile_config {
#define KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT 4096
-/*
- * Allocation flag domains
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_VRAM (1 << 0)
-#define ALLOC_MEM_FLAGS_GTT (1 << 1)
-#define ALLOC_MEM_FLAGS_USERPTR (1 << 2)
-#define ALLOC_MEM_FLAGS_DOORBELL (1 << 3)
-#define ALLOC_MEM_FLAGS_MMIO_REMAP (1 << 4)
-
-/*
- * Allocation flags attributes/access options.
- * NOTE: This must match the corresponding definitions in kfd_ioctl.h.
- */
-#define ALLOC_MEM_FLAGS_WRITABLE (1 << 31)
-#define ALLOC_MEM_FLAGS_EXECUTABLE (1 << 30)
-#define ALLOC_MEM_FLAGS_PUBLIC (1 << 29)
-#define ALLOC_MEM_FLAGS_NO_SUBSTITUTE (1 << 28) /* TODO */
-#define ALLOC_MEM_FLAGS_AQL_QUEUE_MEM (1 << 27)
-#define ALLOC_MEM_FLAGS_COHERENT (1 << 26) /* For GFXv9 or later */
-
/**
* struct kfd2kgd_calls
*
@@ -222,8 +202,6 @@ struct tile_config {
* @set_scratch_backing_va: Sets VA for scratch backing memory of a VMID.
* Only used for no cp scheduling mode
*
- * @get_tile_config: Returns GPU-specific tiling mode information
- *
* @set_vm_context_page_table_base: Program page table base for a VMID
*
* @invalidate_tlbs: Invalidate TLBs for a specific PASID
@@ -236,6 +214,8 @@ struct tile_config {
*
* @get_hive_id: Returns hive id of current device, 0 if xgmi is not enabled
*
+ * @get_unique_id: Returns the unique id of the current device
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -307,12 +287,11 @@ struct kfd2kgd_calls {
void (*set_scratch_backing_va)(struct kgd_dev *kgd,
uint64_t va, uint32_t vmid);
- int (*get_tile_config)(struct kgd_dev *kgd, struct tile_config *config);
-
void (*set_vm_context_page_table_base)(struct kgd_dev *kgd,
uint32_t vmid, uint64_t page_table_base);
uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
uint64_t (*get_hive_id)(struct kgd_dev *kgd);
+ uint64_t (*get_unique_id)(struct kgd_dev *kgd);
};
diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
index 99ad4ddbe12f..f6d4b0ef46ad 100644
--- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c
@@ -23,15 +23,12 @@
#include <linux/firmware.h>
#include <linux/pci.h>
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v11_0.h"
#include "smu_v12_0.h"
#include "atom.h"
-#include "amd_pcie.h"
#include "vega20_ppt.h"
#include "arcturus_ppt.h"
#include "navi10_ppt.h"
@@ -121,20 +118,20 @@ static int smu_feature_update_enable_state(struct smu_context *smu,
if (enabled) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
} else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesLow,
- feature_low);
+ feature_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DisableSmuFeaturesHigh,
- feature_high);
+ feature_high, NULL);
if (ret)
return ret;
}
@@ -195,21 +192,13 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
return -EINVAL;
if (if_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, if_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetDriverIfVersion, if_version);
if (ret)
return ret;
}
if (smu_version) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, smu_version);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetSmuVersion, smu_version);
if (ret)
return ret;
}
@@ -218,17 +207,19 @@ int smu_get_smc_version(struct smu_context *smu, uint32_t *if_version, uint32_t
}
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max)
+ uint32_t min, uint32_t max, bool lock_needed)
{
int ret = 0;
- if (min <= 0 && max <= 0)
- return -EINVAL;
-
if (!smu_clk_dpm_is_enabled(smu, clk_type))
return 0;
+ if (lock_needed)
+ mutex_lock(&smu->mutex);
ret = smu_set_soft_freq_limited_range(smu, clk_type, min, max);
+ if (lock_needed)
+ mutex_unlock(&smu->mutex);
+
return ret;
}
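Annotation: the new lock_needed flag lets paths that already hold smu->mutex reuse this helper without deadlocking on a recursive lock. A hypothetical external caller requests the locking internally:

    /* caller does not hold smu->mutex, so ask the helper to take it */
    ret = smu_set_soft_freq_range(smu, SMU_GFXCLK, min_mhz, max_mhz, true);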
@@ -251,7 +242,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -259,7 +250,7 @@ int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -335,12 +326,8 @@ int smu_get_dpm_freq_by_index(struct smu_context *smu, enum smu_clk_type clk_typ
param = (uint32_t)(((clk_id & 0xffff) << 16) | (level & 0xffff));
- ret = smu_send_smc_msg_with_param(smu,SMU_MSG_GetDpmFreqByIndex,
- param);
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &param);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
+ param, &param);
if (ret)
return ret;
@@ -542,7 +529,8 @@ int smu_update_table(struct smu_context *smu, enum smu_table_id table_index, int
ret = smu_send_smc_msg_with_param(smu, drv2smu ?
SMU_MSG_TransferTableDram2Smu :
SMU_MSG_TransferTableSmu2Dram,
- table_id | ((argument & 0xFFFF) << 16));
+ table_id | ((argument & 0xFFFF) << 16),
+ NULL);
if (ret)
return ret;
@@ -900,6 +888,7 @@ static int smu_sw_init(void *handle)
mutex_init(&smu->sensor_lock);
mutex_init(&smu->metrics_lock);
+ mutex_init(&smu->message_lock);
smu->watermarks_bitmap = 0;
smu->power_profile_mode = PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT;
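Annotation: the new message_lock presumably serializes the SMU mailbox sequence end to end, so that reading the response argument cannot race another sender. A sketch of the protected sequence, with register names assumed from the SMU v11 implementation:

    mutex_lock(&smu->message_lock);
    WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);    /* argument */
    WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, index);    /* message  */
    ret = smu_v11_0_wait_for_response(smu);              /* polls C2PMSG_90 */
    if (!ret && read_arg)
        *read_arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
    mutex_unlock(&smu->message_lock);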
@@ -943,6 +932,13 @@ static int smu_sw_init(void *handle)
return ret;
}
+ if (adev->smu.ppt_funcs->i2c_eeprom_init) {
+ ret = smu_i2c_eeprom_init(smu, &adev->pm.smu_i2c);
+
+ if (ret)
+ return ret;
+ }
+
return 0;
}
@@ -952,6 +948,9 @@ static int smu_sw_fini(void *handle)
struct smu_context *smu = &adev->smu;
int ret;
+ if (adev->smu.ppt_funcs->i2c_eeprom_fini)
+ smu_i2c_eeprom_fini(smu, &adev->pm.smu_i2c);
+
kfree(smu->irq_source);
smu->irq_source = NULL;
@@ -1113,12 +1112,12 @@ static int smu_smc_table_hw_init(struct smu_context *smu,
return ret;
}
+ ret = smu_set_driver_table_location(smu);
+ if (ret)
+ return ret;
+
/* smu_dump_pptable(smu); */
if (!amdgpu_sriov_vf(adev)) {
- ret = smu_set_driver_table_location(smu);
- if (ret)
- return ret;
-
/*
* Copy pptable bo in the vram to smc with SMU MSGs such as
* SetDriverDramAddr and TransferTableDram2Smu.
@@ -1454,29 +1453,84 @@ int smu_reset(struct smu_context *smu)
return ret;
}
+static int smu_disable_dpm(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t smu_version;
+ int ret = 0;
+ bool use_baco = !smu->is_apu &&
+ ((adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev)));
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version.\n");
+ return ret;
+ }
+
+ /*
+ * Disable all enabled SMU features.
+ * This should eventually be handled in SMU FW; as a backup,
+ * the driver can issue this call until the sequence in
+ * SMU FW is operational.
+ */
+ ret = smu_system_features_control(smu, false);
+ if (ret) {
+ pr_err("Failed to disable smu features.\n");
+ return ret;
+ }
+
+ /*
+ * Arcturus has no BACO bit in the disable-feature mask, so
+ * re-enabling the BACO bit below must be skipped on Arcturus.
+ */
+ if (adev->asic_type == CHIP_ARCTURUS) {
+ if (use_baco && (smu_version > 0x360e00))
+ return 0;
+ }
+
+ /* For baco, need to leave BACO feature enabled */
+ if (use_baco) {
+ /*
+ * SMU_FEATURE_BACO_BIT support cannot be checked the usual
+ * way at this point.
+ *
+ * 'smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)' would
+ * always return false, because 'smu_system_features_control(smu, false)'
+ * was just issued above and disabled all SMU features.
+ *
+ * 'smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT)' is
+ * therefore used for the check instead.
+ */
+ if (smu_feature_get_index(smu, SMU_FEATURE_BACO_BIT) >= 0) {
+ ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
+ if (ret) {
+ pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ return ret;
+ }
+ }
+ }
+
+ return ret;
+}
+
static int smu_suspend(void *handle)
{
- int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct smu_context *smu = &adev->smu;
- bool baco_feature_is_enabled = false;
+ int ret;
- if (!smu->pm_enabled)
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- if(!smu->is_apu)
- baco_feature_is_enabled = smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT);
-
- ret = smu_system_features_control(smu, false);
- if (ret)
- return ret;
+ if (!smu->pm_enabled)
+ return 0;
- if (baco_feature_is_enabled) {
- ret = smu_feature_set_enabled(smu, SMU_FEATURE_BACO_BIT, true);
- if (ret) {
- pr_warn("set BACO feature enabled failed, return %d\n", ret);
+ if (!amdgpu_sriov_vf(adev)) {
+ ret = smu_disable_dpm(smu);
+ if (ret)
return ret;
- }
}
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
@@ -1942,7 +1996,7 @@ int smu_set_mp1_state(struct smu_context *smu,
return 0;
}
- ret = smu_send_smc_msg(smu, msg);
+ ret = smu_send_smc_msg(smu, msg, NULL);
if (ret)
pr_err("[PrepareMp1] Failed!\n");
@@ -2006,8 +2060,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_set_watermarks_table(smu, table, clock_ranges);
- smu->watermarks_bitmap |= WATERMARKS_EXIST;
- smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+
+ if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+ smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+ }
}
mutex_unlock(&smu->mutex);
@@ -2617,12 +2674,3 @@ uint32_t smu_get_pptable_power_limit(struct smu_context *smu)
return ret;
}
-
-int smu_send_smc_msg(struct smu_context *smu,
- enum smu_message_type msg)
-{
- int ret;
-
- ret = smu_send_smc_msg_with_param(smu, msg, 0);
- return ret;
-}
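Annotation: the one-argument smu_send_smc_msg() wrapper is dropped here; since the call sites above now pass a read_arg, the replacement is presumably a thin forwarding wrapper (location and form assumed, e.g. in smu_internal.h):

    /* assumed replacement: forward with param 0 */
    #define smu_send_smc_msg(smu, msg, read_arg) \
        smu_send_smc_msg_with_param((smu), (msg), 0, (read_arg))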
diff --git a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
index 14ba6aa876e2..c6d3bef15320 100644
--- a/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/arcturus_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -42,7 +41,7 @@
#include <linux/pci.h>
#include "amdgpu_ras.h"
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_ras, eeprom_control.eeprom_accessor))->adev
+#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
#define CTF_OFFSET_EDGE 5
#define CTF_OFFSET_HOTSPOT 5
@@ -127,6 +126,7 @@ static struct smu_11_0_cmn2aisc_mapping arcturus_message_map[SMU_MSG_MAX_COUNT]
MSG_MAP(WaflTest, PPSMC_MSG_WaflTest),
MSG_MAP(SetXgmiMode, PPSMC_MSG_SetXgmiMode),
MSG_MAP(SetMemoryChannelEnable, PPSMC_MSG_SetMemoryChannelEnable),
+ MSG_MAP(DFCstateControl, PPSMC_MSG_DFCstateControl),
};
static struct smu_11_0_cmn2aisc_mapping arcturus_clk_map[SMU_CLK_COUNT] = {
@@ -373,13 +373,13 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[%s] failed to get dpm levels!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[%s] number of clk levels is invalid!\n", __func__);
return -EINVAL;
@@ -389,12 +389,12 @@ arcturus_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[%s] failed to get dpm freq by index!\n", __func__);
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[%s] clk value is invalid!\n", __func__);
return -EINVAL;
@@ -552,13 +552,13 @@ static int arcturus_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
if (ret) {
pr_err("RunAfllBtc failed!\n");
return ret;
}
- return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunDcBtc, NULL);
}
static int arcturus_populate_umd_state_clk(struct smu_context *smu)
@@ -743,7 +743,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -758,7 +759,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -773,7 +775,8 @@ static int arcturus_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1288,12 +1291,11 @@ static int arcturus_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1497,7 +1499,8 @@ static int arcturus_set_power_profile_mode(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
pr_err("Fail to set workload type %d\n", workload_type);
return ret;
@@ -2187,7 +2190,7 @@ static int arcturus_i2c_eeprom_control_init(struct i2c_adapter *control)
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &arcturus_i2c_eeprom_i2c_algo;
- snprintf(control->name, sizeof(control->name), "RAS EEPROM");
+ snprintf(control->name, sizeof(control->name), "AMDGPU EEPROM");
res = i2c_add_adapter(control);
if (res)
@@ -2214,6 +2217,27 @@ static uint32_t arcturus_get_pptable_power_limit(struct smu_context *smu)
return pptable->SocketPowerLimitAc[PPT_THROTTLER_PPT0];
}
+static int arcturus_set_df_cstate(struct smu_context *smu,
+ enum pp_df_cstate state)
+{
+ uint32_t smu_version;
+ int ret;
+
+ ret = smu_get_smc_version(smu, NULL, &smu_version);
+ if (ret) {
+ pr_err("Failed to get smu version!\n");
+ return ret;
+ }
+
+ /* PPSMC_MSG_DFCstateControl is supported by 54.15.0 and onwards */
+ if (smu_version < 0x360F00) {
+ pr_err("DFCstateControl is only supported by PMFW 54.15.0 and onwards\n");
+ return -EINVAL;
+ }
+
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
+}
+
static const struct pptable_funcs arcturus_ppt_funcs = {
/* translate smu index into arcturus specific index */
.get_smu_msg_index = arcturus_get_smu_msg_index,
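Annotation: a hypothetical caller of the new DF C-state control, using the pp_df_cstate values defined elsewhere in the tree:

    /* disallow DF C-state transitions around latency-sensitive work */
    ret = arcturus_set_df_cstate(smu, DF_CSTATE_DISALLOW);
    if (!ret) {
        /* ... critical section ... */
        ret = arcturus_set_df_cstate(smu, DF_CSTATE_ALLOW);
    }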
@@ -2277,7 +2301,6 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
@@ -2307,6 +2330,7 @@ static const struct pptable_funcs arcturus_ppt_funcs = {
.set_soft_freq_limited_range = smu_v11_0_set_soft_freq_limited_range,
.override_pcie_parameters = smu_v11_0_override_pcie_parameters,
.get_pptable_power_limit = arcturus_get_pptable_power_limit,
+ .set_df_cstate = arcturus_set_df_cstate,
};
void arcturus_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
index bf04cfefb283..7740488999df 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
@@ -1250,7 +1250,7 @@ static void smu7_set_dpm_event_sources(struct pp_hwmgr *hwmgr, uint32_t sources)
switch (sources) {
default:
pr_err("Unknown throttling event sources.");
- /* fall through */
+ fallthrough;
case 0:
protection = false;
/* src is unused */
@@ -3698,12 +3698,12 @@ static int smu7_request_link_speed_change_before_state_change(
data->force_pcie_gen = PP_PCIEGen2;
if (current_link_speed == PP_PCIEGen2)
break;
- /* fall through */
+ fallthrough;
case PP_PCIEGen2:
if (0 == amdgpu_acpi_pcie_performance_request(hwmgr->adev, PCIE_PERF_REQ_GEN2, false))
break;
+ fallthrough;
#endif
- /* fall through */
default:
data->force_pcie_gen = smu7_get_current_pcie_speed(hwmgr);
break;
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 92a65e3daff4..f29f95be1e56 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -3382,7 +3382,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
+ (DPMTABLE_OD_UPDATE_SCLK | DPMTABLE_UPDATE_SCLK | DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate SCLK during PopulateNewDPMClocksStates Function!",
@@ -3390,7 +3390,7 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
}
if (data->need_update_dpm_table &
- (DPMTABLE_OD_UPDATE_MCLK + DPMTABLE_UPDATE_MCLK)) {
+ (DPMTABLE_OD_UPDATE_MCLK | DPMTABLE_UPDATE_MCLK)) {
result = vega10_populate_all_memory_levels(hwmgr);
PP_ASSERT_WITH_CODE((0 == result),
"Failed to populate MCLK during PopulateNewDPMClocksStates Function!",
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
index 3b3ec5666051..08b6ba39a6d7 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega20_hwmgr.c
@@ -487,15 +487,16 @@ static int vega20_setup_asic_task(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = (struct amdgpu_device *)(hwmgr->adev);
int ret = 0;
+ bool use_baco = (adev->in_gpu_reset &&
+ (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) ||
+ (adev->in_runpm && amdgpu_asic_supports_baco(adev));
ret = vega20_init_sclk_threshold(hwmgr);
PP_ASSERT_WITH_CODE(!ret,
"Failed to init sclk threshold!",
return ret);
- if (adev->in_gpu_reset &&
- (amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO)) {
-
+ if (use_baco) {
ret = vega20_baco_apply_vdci_flush_workaround(hwmgr);
if (ret)
pr_err("Failed to apply vega20 baco workaround!\n");
diff --git a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
index 97b6714e83e6..657a6f17e91f 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/amdgpu_smu.h
@@ -362,6 +362,7 @@ struct smu_context
struct mutex mutex;
struct mutex sensor_lock;
struct mutex metrics_lock;
+ struct mutex message_lock;
uint64_t pool_size;
struct smu_table_context smu_table;
@@ -371,6 +372,9 @@ struct smu_context
struct amd_pp_display_configuration *display_config;
struct smu_baco_context smu_baco;
void *od_settings;
+#if defined(CONFIG_DEBUG_FS)
+ struct dentry *debugfs_sclk;
+#endif
uint32_t pstate_sclk;
uint32_t pstate_mclk;
@@ -514,8 +518,7 @@ struct pptable_funcs {
int (*set_last_dcef_min_deep_sleep_clk)(struct smu_context *smu);
int (*system_features_control)(struct smu_context *smu, bool en);
int (*send_smc_msg_with_param)(struct smu_context *smu,
- enum smu_message_type msg, uint32_t param);
- int (*read_smc_arg)(struct smu_context *smu, uint32_t *arg);
+ enum smu_message_type msg, uint32_t param, uint32_t *read_arg);
int (*init_display_count)(struct smu_context *smu, uint32_t count);
int (*set_allowed_mask)(struct smu_context *smu);
int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
@@ -707,7 +710,7 @@ int smu_get_dpm_level_count(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max, bool lock_needed);
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
- uint32_t min, uint32_t max);
+ uint32_t min, uint32_t max, bool lock_needed);
int smu_set_hard_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
int smu_get_dpm_level_range(struct smu_context *smu, enum smu_clk_type clk_type,
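
This header change is the core of the series: the separate read_smc_arg hook is folded into send_smc_msg_with_param as an optional read_arg out-parameter, so sending a message and reading its response become one operation. A caller-side sketch of the before/after shape:

	uint32_t val;

	/* before: two steps, and another sender could slip in between */
	ret = smu_send_smc_msg_with_param(smu, msg, param);
	if (!ret)
		ret = smu_read_smc_arg(smu, &val);

	/* after: one call; pass NULL when no response payload is wanted */
	ret = smu_send_smc_msg_with_param(smu, msg, param, &val);

The new message_lock introduced in smu_context (above) is what makes the combined call atomic with respect to other senders.
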
diff --git a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
index e3291259b249..f736d773f9d6 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/arcturus_ppsmc.h
@@ -110,7 +110,11 @@
//Others
#define PPSMC_MSG_SetMemoryChannelEnable 0x39
-#define PPSMC_Message_Count 0x3A
+//OOB
+#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A
+
+#define PPSMC_MSG_DFCstateControl 0x3B
+#define PPSMC_Message_Count 0x3C
typedef uint32_t PPSMC_Result;
typedef uint32_t PPSMC_Msg;
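
Since these IDs are an ABI shared with the PMFW, new messages are appended and PPSMC_Message_Count always names the next free slot (0x3B + 1 == 0x3C here). An illustrative compile-time check, not part of the patch:

	BUILD_BUG_ON(PPSMC_MSG_DFCstateControl + 1 != PPSMC_Message_Count);
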
diff --git a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
index 822cd8b5bf90..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/pp_debug.h
@@ -37,7 +37,7 @@
#define PP_ASSERT_WITH_CODE(cond, msg, code) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
code; \
} \
} while (0)
@@ -45,7 +45,7 @@
#define PP_ASSERT(cond, msg) \
do { \
if (!(cond)) { \
- pr_warn("%s\n", msg); \
+ pr_warn_ratelimited("%s\n", msg); \
} \
} while (0)
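
The PP_ASSERT* macros fire inside polling and retry paths, so an SMC hiccup could previously flood the log; pr_warn_ratelimited() caps the rate. Roughly what the ratelimited variant expands to, as a simplified sketch:

	static DEFINE_RATELIMIT_STATE(_rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (__ratelimit(&_rs))		/* allow a burst, then drop the rest */
		pr_warn("%s\n", msg);
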
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
index ac0120e384be..4b2da98afcd2 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu11_driver_if_navi10.h
@@ -701,7 +701,8 @@ typedef struct {
// APCC Settings
uint16_t PccThresholdLow;
uint16_t PccThresholdHigh;
- uint32_t PaddingAPCC[6]; //FIXME pending SPEC
+ uint32_t MGpuFanBoostLimitRpm;
+ uint32_t PaddingAPCC[5];
// Temperature Dependent Vmin
uint16_t VDDGFX_TVmin; //Celsius
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
index d5314d12628a..1c88219fe403 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0.h
@@ -28,8 +28,9 @@
#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU11_DRIVER_IF_VERSION_VG20 0x13
#define SMU11_DRIVER_IF_VERSION_ARCT 0x12
-#define SMU11_DRIVER_IF_VERSION_NV10 0x33
-#define SMU11_DRIVER_IF_VERSION_NV14 0x34
+#define SMU11_DRIVER_IF_VERSION_NV10 0x35
+#define SMU11_DRIVER_IF_VERSION_NV12 0x33
+#define SMU11_DRIVER_IF_VERSION_NV14 0x36
/* MP Apertures */
#define MP0_Public 0x03800000
@@ -182,9 +183,8 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
-
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count);
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
index b2f96a101124..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v11_0_pptable.h
@@ -39,21 +39,39 @@
#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800
#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100
+enum SMU_11_0_ODFEATURE_CAP {
+ SMU_11_0_ODCAP_GFXCLK_LIMITS = 0,
+ SMU_11_0_ODCAP_GFXCLK_CURVE,
+ SMU_11_0_ODCAP_UCLK_MAX,
+ SMU_11_0_ODCAP_POWER_LIMIT,
+ SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT,
+ SMU_11_0_ODCAP_FAN_SPEED_MIN,
+ SMU_11_0_ODCAP_TEMPERATURE_FAN,
+ SMU_11_0_ODCAP_TEMPERATURE_SYSTEM,
+ SMU_11_0_ODCAP_MEMORY_TIMING_TUNE,
+ SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL,
+ SMU_11_0_ODCAP_AUTO_UV_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_ENGINE,
+ SMU_11_0_ODCAP_AUTO_OC_MEMORY,
+ SMU_11_0_ODCAP_FAN_CURVE,
+ SMU_11_0_ODCAP_COUNT,
+};
+
enum SMU_11_0_ODFEATURE_ID {
- SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << 0, //GFXCLK Limit feature
- SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << 1, //GFXCLK Curve feature
- SMU_11_0_ODFEATURE_UCLK_MAX = 1 << 2, //UCLK Limit feature
- SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << 3, //Power Limit feature
- SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << 4, //Fan Acoustic RPM feature
- SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << 5, //Minimum Fan Speed feature
- SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << 6, //Fan Target Temperature Limit feature
- SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << 7, //Operating Temperature Limit feature
- SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << 8, //AC Timing Tuning feature
- SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << 9, //Zero RPM feature
- SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << 10, //Auto Under Volt GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << 11, //Auto Over Clock GFXCLK feature
- SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << 12, //Auto Over Clock MCLK feature
- SMU_11_0_ODFEATURE_FAN_CURVE = 1 << 13, //VICTOR TODO
+ SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature
+ SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature
+ SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature
+ SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature
+ SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature
+ SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature
+ SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature
+ SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature
+ SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature
+ SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature
+ SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature
+ SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature
SMU_11_0_ODFEATURE_COUNT = 14,
};
#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features
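
The new ODCAP enum exists because od_table->cap[] is indexed by capability position, while the old ODFEATURE constants are mask bits; passing a mask as an index reads the wrong slot for anything above bit 0, and for high bits likely lands past the 32-entry table. For example:

	ok    = od_table->cap[SMU_11_0_ODCAP_UCLK_MAX];     /* cap[2]                */
	wrong = od_table->cap[SMU_11_0_ODFEATURE_UCLK_MAX]; /* cap[1 << 2] == cap[4] */
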
diff --git a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
index d79e54b5ebf6..7fbebc1979cf 100644
--- a/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/powerplay/inc/smu_v12_0.h
@@ -40,14 +40,13 @@ struct smu_12_0_cmn2aisc_mapping {
int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg);
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg);
-
int smu_v12_0_wait_for_response(struct smu_context *smu);
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param);
+ uint32_t param,
+ uint32_t *read_arg);
int smu_v12_0_check_fw_status(struct smu_context *smu);
diff --git a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
index 19a9846b730e..d66dfa7410b6 100644
--- a/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/navi10_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include <linux/pci.h>
#include "amdgpu.h"
@@ -31,7 +30,6 @@
#include "amdgpu_atomfirmware.h"
#include "smu_v11_0.h"
#include "smu11_driver_if_navi10.h"
-#include "soc15_common.h"
#include "atom.h"
#include "navi10_ppt.h"
#include "smu_v11_0_pptable.h"
@@ -661,14 +659,14 @@ static int navi10_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 1, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -686,14 +684,14 @@ static int navi10_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerUpJpeg, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownJpeg, NULL);
if (ret)
return ret;
}
@@ -736,9 +734,9 @@ static bool navi10_is_support_fine_grained_dpm(struct smu_context *smu, enum smu
return dpm_desc->SnapToDiscrete == 0 ? true : false;
}
-static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_ID feature)
+static inline bool navi10_od_feature_is_supported(struct smu_11_0_overdrive_table *od_table, enum SMU_11_0_ODFEATURE_CAP cap)
{
- return od_table->cap[feature];
+ return od_table->cap[cap];
}
static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_table,
@@ -846,7 +844,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_SCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
break;
size += sprintf(buf + size, "OD_SCLK:\n");
size += sprintf(buf + size, "0: %uMhz\n1: %uMhz\n", od_table->GfxclkFmin, od_table->GfxclkFmax);
@@ -854,7 +852,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_MCLK:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
break;
size += sprintf(buf + size, "OD_MCLK:\n");
size += sprintf(buf + size, "1: %uMHz\n", od_table->UclkFmax);
@@ -862,7 +860,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
case SMU_OD_VDDC_CURVE:
if (!smu->od_enabled || !od_table || !od_settings)
break;
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE))
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
break;
size += sprintf(buf + size, "OD_VDDC_CURVE:\n");
for (i = 0; i < 3; i++) {
@@ -887,7 +885,7 @@ static int navi10_print_clk_levels(struct smu_context *smu,
break;
size = sprintf(buf, "%s:\n", "OD_RANGE");
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
&min_value, NULL);
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
@@ -896,14 +894,14 @@ static int navi10_print_clk_levels(struct smu_context *smu,
min_value, max_value);
}
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
&min_value, &max_value);
size += sprintf(buf + size, "MCLK: %7uMhz %10uMhz\n",
min_value, max_value);
}
- if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
&min_value, &max_value);
size += sprintf(buf + size, "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
@@ -970,7 +968,7 @@ static int navi10_force_clk_levels(struct smu_context *smu,
if (ret)
return size;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return size;
break;
@@ -1042,7 +1040,7 @@ static int navi10_pre_display_config_changed(struct smu_context *smu)
int ret = 0;
uint32_t max_freq = 0;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
if (ret)
return ret;
@@ -1063,19 +1061,11 @@ static int navi10_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
- ret = smu_write_watermarks_table(smu);
- if (ret)
- return ret;
-
- smu->watermarks_bitmap |= WATERMARKS_LOADED;
- }
-
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
if (ret)
return ret;
}
@@ -1102,7 +1092,7 @@ static int navi10_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -1128,7 +1118,7 @@ static int navi10_unforce_dpm_levels(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -1400,7 +1390,7 @@ static int navi10_set_power_profile_mode(struct smu_context *smu, long *input, u
if (workload_type < 0)
return -EINVAL;
smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type, NULL);
return ret;
}
@@ -1465,7 +1455,8 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
*clock_ranges)
{
int i;
+ int ret = 0;
Watermarks_t *table = watermarks;
if (!table || !clock_ranges)
@@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+ /* pass data to smu controller */
+ if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_write_watermarks_table(smu);
+ if (ret) {
+ pr_err("Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
return 0;
}
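
navi10_set_watermarks_table now marks the table as existing as soon as it is filled and pushes it to the SMU right away, once, instead of deferring the write to display_config_changed. The resulting bitmap flow, as a sketch:

	smu->watermarks_bitmap |= WATERMARKS_EXIST;	/* table contents valid */
	if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
		if (!smu_write_watermarks_table(smu))	/* first upload only */
			smu->watermarks_bitmap |= WATERMARKS_LOADED;
	}
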
@@ -1674,10 +1678,10 @@ static int navi10_set_standard_performance_level(struct smu_context *smu)
return navi10_set_performance_level(smu, AMD_DPM_FORCED_LEVEL_AUTO);
}
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1742,10 +1746,10 @@ static int navi10_set_peak_performance_level(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -1855,12 +1859,11 @@ static int navi10_get_power_limit(struct smu_context *smu,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetPptLimit,
- power_src << 16);
+ power_src << 16, &asic_default_power_limit);
if (ret) {
pr_err("[%s] get PPT limit failed!", __func__);
return ret;
}
- smu_read_smc_arg(smu, &asic_default_power_limit);
} else {
/* the last hope to figure out the ppt limit */
if (!pptable) {
@@ -1900,7 +1903,8 @@ static int navi10_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
if (ret)
return ret;
@@ -1946,13 +1950,13 @@ static int navi10_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetVoltageByDpm,
- param);
+ param,
+ &value);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, &value);
*voltage = (uint16_t)value;
return 0;
@@ -2056,7 +2060,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
switch (type) {
case PP_OD_EDIT_SCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_LIMITS)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
pr_warn("GFXCLK_LIMITS not supported!\n");
return -ENOTSUPP;
}
@@ -2102,7 +2106,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_MCLK_VDDC_TABLE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_UCLK_MAX)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
pr_warn("UCLK_MAX not supported!\n");
return -ENOTSUPP;
}
@@ -2143,7 +2147,7 @@ static int navi10_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABL
}
break;
case PP_OD_EDIT_VDDC_CURVE:
- if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODFEATURE_GFXCLK_CURVE)) {
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
pr_warn("GFXCLK_CURVE not supported!\n");
return -ENOTSUPP;
}
@@ -2209,7 +2213,7 @@ static int navi10_run_btc(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc);
+ ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);
if (ret)
pr_err("RunBtc failed!\n");
@@ -2221,9 +2225,9 @@ static int navi10_dummy_pstate_control(struct smu_context *smu, bool enable)
int result = 0;
if (!enable)
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_DISABLE_DUMMY_PSTATE_CHANGE, NULL);
else
- result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE);
+ result = smu_send_smc_msg(smu, SMU_MSG_DAL_ENABLE_DUMMY_PSTATE_CHANGE, NULL);
return result;
}
@@ -2332,7 +2336,6 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
index 861e6410363b..7bf52ecba01d 100644
--- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c
@@ -24,7 +24,6 @@
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
-#include "soc15_common.h"
#include "smu_v12_0_ppsmc.h"
#include "smu12_driver_if.h"
#include "smu_v12_0.h"
@@ -111,8 +110,8 @@ static struct smu_12_0_cmn2aisc_mapping renoir_clk_map[SMU_CLK_COUNT] = {
CLK_MAP(GFXCLK, CLOCK_GFXCLK),
CLK_MAP(SCLK, CLOCK_GFXCLK),
CLK_MAP(SOCCLK, CLOCK_SOCCLK),
- CLK_MAP(UCLK, CLOCK_UMCCLK),
- CLK_MAP(MCLK, CLOCK_UMCCLK),
+ CLK_MAP(UCLK, CLOCK_FCLK),
+ CLK_MAP(MCLK, CLOCK_FCLK),
};
static struct smu_12_0_cmn2aisc_mapping renoir_table_map[SMU_TABLE_COUNT] = {
@@ -280,7 +279,7 @@ static int renoir_print_clk_levels(struct smu_context *smu,
break;
case SMU_MCLK:
count = NUM_MEMCLK_DPM_LEVELS;
- cur_value = metrics.ClockFrequency[CLOCK_UMCCLK];
+ cur_value = metrics.ClockFrequency[CLOCK_FCLK];
break;
case SMU_DCEFCLK:
count = NUM_DCFCLK_DPM_LEVELS;
@@ -342,14 +341,14 @@ static int renoir_dpm_set_uvd_enable(struct smu_context *smu, bool enable)
if (enable) {
/* vcn dpm on is a prerequisite for vcn power gate messages */
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn, 0, NULL);
if (ret)
return ret;
}
power_gate->vcn_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_VCN_PG_BIT)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ ret = smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
if (ret)
return ret;
}
@@ -367,14 +366,14 @@ static int renoir_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
if (enable) {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
if (ret)
return ret;
}
power_gate->jpeg_gated = false;
} else {
if (smu_feature_is_enabled(smu, SMU_FEATURE_JPEG_PG_BIT)) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
if (ret)
return ret;
}
@@ -423,7 +422,7 @@ static int renoir_force_dpm_limit_value(struct smu_context *smu, bool highest)
return ret;
force_freq = highest ? max_freq : min_freq;
- ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, force_freq, force_freq, false);
if (ret)
return ret;
}
@@ -456,7 +455,7 @@ static int renoir_unforce_dpm_levels(struct smu_context *smu) {
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq);
+ ret = smu_set_soft_freq_range(smu, clk_type, min_freq, max_freq, false);
if (ret)
return ret;
}
@@ -622,22 +621,24 @@ static int renoir_force_clk_levels(struct smu_context *smu,
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
soft_max_level == 0 ? min_freq :
- soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq);
+ soft_max_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : max_freq,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
soft_min_level == 2 ? max_freq :
- soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq);
+ soft_min_level == 1 ? RENOIR_UMD_PSTATE_GFXCLK : min_freq,
+ NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -645,10 +646,10 @@ static int renoir_force_clk_levels(struct smu_context *smu,
case SMU_FCLK:
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_min_level, min_freq);
GET_DPM_CUR_FREQ(clk_table, clk_type, soft_max_level, max_freq);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max_freq, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min_freq, NULL);
if (ret)
return ret;
break;
@@ -672,14 +673,19 @@ static int renoir_set_power_profile_mode(struct smu_context *smu, long *input, u
/* conv PP_SMC_POWER_PROFILE* to WORKLOAD_PPLIB_*_BIT */
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0) {
- pr_err("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
+ /*
+ * TODO: If a case needs to switch to the powersave/default power mode,
+ * consider entering WORKLOAD_COMPUTE/WORKLOAD_CUSTOM for power saving.
+ */
+ pr_err_once("Unsupported power profile mode %d on RENOIR\n",smu->power_profile_mode);
return -EINVAL;
}
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ 1 << workload_type,
+ NULL);
if (ret) {
- pr_err("Fail to set workload type %d\n", workload_type);
+ pr_err_once("Fail to set workload type %d\n", workload_type);
return ret;
}
@@ -697,7 +703,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_SCLK, sclk_freq, sclk_freq, false);
if (ret)
return ret;
@@ -705,7 +711,7 @@ static int renoir_set_peak_clock_by_device(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq);
+ ret = smu_set_soft_freq_range(smu, SMU_UCLK, uclk_freq, uclk_freq, false);
if (ret)
return ret;
@@ -806,9 +812,10 @@ static int renoir_set_watermarks_table(
clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
}
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
/* pass data to smu controller */
- if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
ret = smu_write_watermarks_table(smu);
if (ret) {
pr_err("Failed to update WMTABLE!");
@@ -909,7 +916,6 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.powergate_vcn = smu_v12_0_powergate_vcn,
.powergate_jpeg = smu_v12_0_powergate_jpeg,
.send_smc_msg_with_param = smu_v12_0_send_msg_with_param,
- .read_smc_arg = smu_v12_0_read_arg,
.set_gfx_cgpg = smu_v12_0_set_gfx_cgpg,
.gfx_off_control = smu_v12_0_gfx_off_control,
.init_smc_tables = smu_v12_0_init_smc_tables,
diff --git a/drivers/gpu/drm/amd/powerplay/smu_internal.h b/drivers/gpu/drm/amd/powerplay/smu_internal.h
index 7bd200ffcda8..6900877de845 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_internal.h
+++ b/drivers/gpu/drm/amd/powerplay/smu_internal.h
@@ -79,12 +79,13 @@
#define smu_set_default_od_settings(smu, initialize) \
((smu)->ppt_funcs->set_default_od_settings ? (smu)->ppt_funcs->set_default_od_settings((smu), (initialize)) : 0)
-int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg);
+#define smu_send_smc_msg_with_param(smu, msg, param, read_arg) \
+ ((smu)->ppt_funcs->send_smc_msg_with_param ? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param), (read_arg)) : 0)
+
+static inline int smu_send_smc_msg(struct smu_context *smu, enum smu_message_type msg, uint32_t *read_arg)
+{
+ return smu_send_smc_msg_with_param(smu, msg, 0, read_arg);
+}
-#define smu_send_smc_msg_with_param(smu, msg, param) \
- ((smu)->ppt_funcs->send_smc_msg_with_param? (smu)->ppt_funcs->send_smc_msg_with_param((smu), (msg), (param)) : 0)
-#define smu_read_smc_arg(smu, arg) \
- ((smu)->ppt_funcs->read_smc_arg? (smu)->ppt_funcs->read_smc_arg((smu), (arg)) : 0)
#define smu_alloc_dpm_context(smu) \
((smu)->ppt_funcs->alloc_dpm_context ? (smu)->ppt_funcs->alloc_dpm_context((smu)) : 0)
#define smu_init_display_count(smu, count) \
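
smu_send_smc_msg is now a trivial inline over the macro, forwarding param 0 plus the optional read_arg. Two caller shapes from elsewhere in this series, for illustration:

	uint32_t speed;

	/* fire-and-forget message, no response payload: */
	ret = smu_send_smc_msg(smu, SMU_MSG_RunBtc, NULL);

	/* response read back under the same message_lock as the send: */
	ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, &speed);
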
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
index 0dc49479a7eb..4fd77c7cfc80 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v11_0.c
@@ -26,7 +26,6 @@
#define SMU_11_0_PARTIAL_PPTABLE
-#include "pp_debug.h"
#include "amdgpu.h"
#include "amdgpu_smu.h"
#include "smu_internal.h"
@@ -64,7 +63,7 @@ static int smu_v11_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v11_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -92,7 +91,8 @@ static int smu_v11_0_wait_for_response(struct smu_context *smu)
int
smu_v11_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -101,11 +101,12 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v11_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -115,10 +116,21 @@ smu_v11_0_send_msg_with_param(struct smu_context *smu,
smu_v11_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v11_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("failed send message: %10s (%d) \tparam: 0x%08x response %#x\n",
smu_get_message_name(smu, msg), index, param, ret);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v11_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("failed to read message arg: %10s (%d) \tparam: 0x%08x response %#x\n",
+ smu_get_message_name(smu, msg), index, param, ret);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
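
With message_lock held across the whole exchange, the wait/clear/write/wait/read sequence is a single critical section, so a concurrent sender can no longer overwrite the mailbox between a message and its argument readback. The protected shape, register details elided:

	mutex_lock(&smu->message_lock);
	ret = smu_v11_0_wait_for_response(smu);	/* drain any previous message */
	if (!ret) {
		/* clear the response register, write param and message index */
		ret = smu_v11_0_wait_for_response(smu);
		if (!ret && read_arg)
			ret = smu_v11_0_read_arg(smu, read_arg);
	}
	mutex_unlock(&smu->message_lock);
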
@@ -262,6 +274,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
case CHIP_NAVI10:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV10;
break;
+ case CHIP_NAVI12:
+ smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV12;
+ break;
case CHIP_NAVI14:
smu->smc_if_version = SMU11_DRIVER_IF_VERSION_NV14;
break;
@@ -671,12 +686,14 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrHigh,
- address_high);
+ address_high,
+ NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetSystemVirtualDramAddrLow,
- address_low);
+ address_low,
+ NULL);
if (ret)
return ret;
@@ -685,15 +702,15 @@ int smu_v11_0_notify_memory_pool_location(struct smu_context *smu)
address_low = (uint32_t)lower_32_bits(address);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrHigh,
- address_high);
+ address_high, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramAddrLow,
- address_low);
+ address_low, NULL);
if (ret)
return ret;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_DramLogSetDramSize,
- (uint32_t)memory_pool->size);
+ (uint32_t)memory_pool->size, NULL);
if (ret)
return ret;
@@ -757,7 +774,7 @@ int smu_v11_0_set_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk)
int ret;
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinDeepSleepDcefclk, clk);
+ SMU_MSG_SetMinDeepSleepDcefclk, clk, NULL);
if (ret)
pr_err("SMU11 attempt to set divider for DCEFCLK Failed!");
@@ -784,11 +801,13 @@ int smu_v11_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
@@ -802,11 +821,13 @@ int smu_v11_0_set_tool_table_location(struct smu_context *smu)
if (tool_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrHigh,
- upper_32_bits(tool_table->mc_address));
+ upper_32_bits(tool_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetToolsDramAddrLow,
- lower_32_bits(tool_table->mc_address));
+ lower_32_bits(tool_table->mc_address),
+ NULL);
}
return ret;
@@ -819,7 +840,7 @@ int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count)
if (!smu->pm_enabled)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, count, NULL);
return ret;
}
@@ -837,12 +858,12 @@ int smu_v11_0_set_allowed_mask(struct smu_context *smu)
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
- feature_mask[1]);
+ feature_mask[1], NULL);
if (ret)
goto failed;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0]);
+ feature_mask[0], NULL);
if (ret)
goto failed;
@@ -862,17 +883,11 @@ int smu_v11_0_get_enabled_mask(struct smu_context *smu,
return -EINVAL;
if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -894,10 +909,13 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures));
+ SMU_MSG_DisableAllSmuFeatures), NULL);
if (ret)
return ret;
+ bitmap_zero(feature->enabled, feature->feature_num);
+ bitmap_zero(feature->supported, feature->feature_num);
+
if (en) {
ret = smu_feature_get_enabled_mask(smu, feature_mask, 2);
if (ret)
@@ -907,9 +925,6 @@ int smu_v11_0_system_features_control(struct smu_context *smu,
feature->feature_num);
bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
feature->feature_num);
- } else {
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
}
return ret;
@@ -923,7 +938,7 @@ int smu_v11_0_notify_display_change(struct smu_context *smu)
return ret;
if (smu_feature_is_enabled(smu, SMU_FEATURE_DPM_UCLK_BIT) &&
smu->adev->gmc.vram_type == AMDGPU_VRAM_TYPE_HBM)
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetUclkFastSwitch, 1, NULL);
return ret;
}
@@ -947,30 +962,24 @@ smu_v11_0_get_max_sustainable_clock(struct smu_context *smu, uint32_t *clock,
return -EINVAL;
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDcModeMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] Failed to get max DC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
- if (ret)
- return ret;
-
if (*clock != 0)
return 0;
/* if DC limit is zero, return AC limit */
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq,
- clk_id << 16);
+ clk_id << 16, clock);
if (ret) {
pr_err("[GetMaxSustainableClock] failed to get max AC clock from SMC!");
return ret;
}
- ret = smu_read_smc_arg(smu, clock);
-
- return ret;
+ return 0;
}
int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
@@ -978,8 +987,12 @@ int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu)
struct smu_11_0_max_sustainable_clocks *max_sustainable_clocks;
int ret = 0;
- max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
+ if (!smu->smu_table.max_sustainable_clocks)
+ max_sustainable_clocks = kzalloc(sizeof(struct smu_11_0_max_sustainable_clocks),
GFP_KERNEL);
+ else
+ max_sustainable_clocks = smu->smu_table.max_sustainable_clocks;
+
smu->smu_table.max_sustainable_clocks = (void *)max_sustainable_clocks;
max_sustainable_clocks->uclock = smu->smu_table.boot_values.uclk / 100;
@@ -1102,7 +1115,7 @@ int smu_v11_0_set_power_limit(struct smu_context *smu, uint32_t n)
return -EOPNOTSUPP;
}
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetPptLimit, n, NULL);
if (ret) {
pr_err("[%s] Set power limit Failed!\n", __func__);
return ret;
@@ -1132,11 +1145,7 @@ int smu_v11_0_get_current_clk_freq(struct smu_context *smu,
ret = smu_get_current_clk_freq_by_table(smu, clk_id, &freq);
else {
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmClockFreq,
- (asic_clk_id << 16));
- if (ret)
- return ret;
-
- ret = smu_read_smc_arg(smu, &freq);
+ (asic_clk_id << 16), &freq);
if (ret)
return ret;
}
@@ -1371,9 +1380,9 @@ int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable)
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
else
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
break;
default:
break;
@@ -1511,7 +1520,8 @@ int smu_v11_0_set_xgmi_pstate(struct smu_context *smu,
int ret = 0;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetXgmiMode,
- pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3);
+ pstate ? XGMI_MODE_PSTATE_D0 : XGMI_MODE_PSTATE_D3,
+ NULL);
return ret;
}
@@ -1624,14 +1634,14 @@ int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu)
{
int ret = 0;
- ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME);
+ ret = smu_send_smc_msg(smu, SMU_MSG_BacoAudioD3PME, NULL);
return ret;
}
static int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, enum smu_v11_0_baco_seq baco_seq)
{
- return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_ArmD3, baco_seq, NULL);
}
bool smu_v11_0_baco_is_support(struct smu_context *smu)
@@ -1700,12 +1710,12 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
data |= 0x80000000;
WREG32_SOC15(THM, 0, mmTHM_BACO_CNTL, data);
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 0, NULL);
} else {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_EnterBaco, 1, NULL);
}
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco);
+ ret = smu_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
goto out;
@@ -1773,19 +1783,13 @@ int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
param = (clk_id & 0xffff) << 16;
if (max) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMaxDpmFreq, param, max);
if (ret)
goto failed;
}
if (min) {
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param);
- if (ret)
- goto failed;
- ret = smu_read_smc_arg(smu, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetMinDpmFreq, param, min);
if (ret)
goto failed;
}
@@ -1807,7 +1811,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
@@ -1815,7 +1819,7 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
if (min > 0) {
param = (uint32_t)((clk_id << 16) | (min & 0xffff));
ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinByFreq,
- param);
+ param, NULL);
if (ret)
return ret;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
index 870e6db2907e..169ebdad87b8 100644
--- a/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/powerplay/smu_v12_0.c
@@ -20,7 +20,6 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -50,7 +49,7 @@ int smu_v12_0_send_msg_without_waiting(struct smu_context *smu,
return 0;
}
-int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
+static int smu_v12_0_read_arg(struct smu_context *smu, uint32_t *arg)
{
struct amdgpu_device *adev = smu->adev;
@@ -78,7 +77,8 @@ int smu_v12_0_wait_for_response(struct smu_context *smu)
int
smu_v12_0_send_msg_with_param(struct smu_context *smu,
enum smu_message_type msg,
- uint32_t param)
+ uint32_t param,
+ uint32_t *read_arg)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0, index = 0;
@@ -87,11 +87,12 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
if (index < 0)
return index;
+ mutex_lock(&smu->message_lock);
ret = smu_v12_0_wait_for_response(smu);
if (ret) {
pr_err("Msg issuing pre-check failed and "
"SMU may be not in the right state!\n");
- return ret;
+ goto out;
}
WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
@@ -101,10 +102,21 @@ smu_v12_0_send_msg_with_param(struct smu_context *smu,
smu_v12_0_send_msg_without_waiting(smu, (uint16_t)index);
ret = smu_v12_0_wait_for_response(smu);
- if (ret)
+ if (ret) {
pr_err("Failed to send message 0x%x, response 0x%x param 0x%x\n",
index, ret, param);
-
+ goto out;
+ }
+ if (read_arg) {
+ ret = smu_v12_0_read_arg(smu, read_arg);
+ if (ret) {
+ pr_err("Failed to read message arg 0x%x, response 0x%x param 0x%x\n",
+ index, ret, param);
+ goto out;
+ }
+ }
+out:
+ mutex_unlock(&smu->message_lock);
return ret;
}
@@ -163,9 +175,9 @@ int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownSdma, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpSdma, NULL);
}
int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
@@ -174,9 +186,9 @@ int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerDownVcn, NULL);
else
- return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn);
+ return smu_send_smc_msg(smu, SMU_MSG_PowerUpVcn, NULL);
}
int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
@@ -185,9 +197,9 @@ int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate)
return 0;
if (gate)
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerDownJpeg, 0, NULL);
else
- return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg, 0, NULL);
}
int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
@@ -196,7 +208,9 @@ int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable)
return 0;
return smu_v12_0_send_msg_with_param(smu,
- SMU_MSG_SetGfxCGPG, enable ? 1 : 0);
+ SMU_MSG_SetGfxCGPG,
+ enable ? 1 : 0,
+ NULL);
}
int smu_v12_0_read_sensor(struct smu_context *smu,
@@ -262,10 +276,10 @@ int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable)
int ret = 0, timeout = 500;
if (enable) {
- ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_AllowGfxOff, NULL);
} else {
- ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff);
+ ret = smu_send_smc_msg(smu, SMU_MSG_DisallowGfxOff, NULL);
/* confirm gfx is back to "on" state, timeout is 0.5 second */
while (!(smu_v12_0_get_gfxoff_status(smu) == 2)) {
@@ -331,17 +345,11 @@ int smu_v12_0_get_enabled_mask(struct smu_context *smu,
if (!feature_mask || num < 2)
return -EINVAL;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_high);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
if (ret)
return ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow);
- if (ret)
- return ret;
- ret = smu_read_smc_arg(smu, &feature_mask_low);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
if (ret)
return ret;
@@ -388,14 +396,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMaxGfxclkFrequency, max);
if (ret) {
pr_err("Attempt to get max GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, max);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -419,14 +424,11 @@ int smu_v12_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type c
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetMinGfxclkFrequency, min);
if (ret) {
pr_err("Attempt to get min GX frequency from SMC Failed !\n");
goto failed;
}
- ret = smu_read_smc_arg(smu, min);
- if (ret)
- goto failed;
break;
case SMU_UCLK:
case SMU_FCLK:
@@ -450,7 +452,7 @@ failed:
}
int smu_v12_0_mode2_reset(struct smu_context *smu)
{
- return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2);
+ return smu_v12_0_send_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, SMU_RESET_MODE_2, NULL);
}
int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type,
@@ -458,45 +460,42 @@ int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_
{
int ret = 0;
- if (max < min)
- return -EINVAL;
-
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk, max, NULL);
if (ret)
return ret;
break;
case SMU_FCLK:
case SMU_MCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinFclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxFclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_SOCCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinSocclkByFreq, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxSocclkByFreq, max, NULL);
if (ret)
return ret;
break;
case SMU_VCLK:
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinVcn, min, NULL);
if (ret)
return ret;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max);
+ ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxVcn, max, NULL);
if (ret)
return ret;
break;
@@ -515,11 +514,13 @@ int smu_v12_0_set_driver_table_location(struct smu_context *smu)
if (driver_table->mc_address) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrHigh,
- upper_32_bits(driver_table->mc_address));
+ upper_32_bits(driver_table->mc_address),
+ NULL);
if (!ret)
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetDriverDramAddrLow,
- lower_32_bits(driver_table->mc_address));
+ lower_32_bits(driver_table->mc_address),
+ NULL);
}
return ret;
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
index 49e5ef3e3876..16aa171971d3 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/vega20_smumgr.c
@@ -33,6 +33,8 @@
#include "smu7_smumgr.h"
#include "vega20_hwmgr.h"
+#include "smu_v11_0_i2c.h"
+
/* MP Apertures */
#define MP0_Public 0x03800000
#define MP0_SRAM 0x03900000
@@ -406,6 +408,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
struct vega20_smumgr *priv;
unsigned long tools_size = 0x19000;
int ret = 0;
+ struct amdgpu_device *adev = hwmgr->adev;
struct cgs_firmware_info info = {0};
@@ -505,6 +508,10 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
+ ret = smu_v11_0_i2c_eeprom_control_init(&adev->pm.smu_i2c);
+ if (ret)
+ goto err4;
+
return 0;
err4:
@@ -537,6 +544,9 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
{
struct vega20_smumgr *priv =
(struct vega20_smumgr *)(hwmgr->smu_backend);
+ struct amdgpu_device *adev = hwmgr->adev;
+
+ smu_v11_0_i2c_eeprom_control_fini(&adev->pm.smu_i2c);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
@@ -560,6 +570,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
kfree(hwmgr->smu_backend);
hwmgr->smu_backend = NULL;
}
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
index 4ad8d6c14ee5..49ff3756bd9f 100644
--- a/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
+++ b/drivers/gpu/drm/amd/powerplay/vega20_ppt.c
@@ -21,7 +21,6 @@
*
*/
-#include "pp_debug.h"
#include <linux/firmware.h>
#include "amdgpu.h"
#include "amdgpu_smu.h"
@@ -587,7 +586,7 @@ static int vega20_check_powerplay_table(struct smu_context *smu)
static int vega20_run_btc_afll(struct smu_context *smu)
{
- return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc);
+ return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
}
#define FEATURE_MASK(feature) (1ULL << feature)
@@ -670,13 +669,13 @@ vega20_set_single_dpm_table(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | 0xFF));
+ (clk_id << 16 | 0xFF),
+ &num_of_levels);
if (ret) {
pr_err("[GetNumOfDpmLevel] failed to get dpm levels!");
return ret;
}
- smu_read_smc_arg(smu, &num_of_levels);
if (!num_of_levels) {
pr_err("[GetNumOfDpmLevel] number of clk levels is invalid!");
return -EINVAL;
@@ -687,12 +686,12 @@ vega20_set_single_dpm_table(struct smu_context *smu,
for (i = 0; i < num_of_levels; i++) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetDpmFreqByIndex,
- (clk_id << 16 | i));
+ (clk_id << 16 | i),
+ &clk);
if (ret) {
pr_err("[GetDpmFreqByIndex] failed to get dpm freq by index!");
return ret;
}
- smu_read_smc_arg(smu, &clk);
if (!clk) {
pr_err("[GetDpmFreqByIndex] clk value is invalid!");
return -EINVAL;
@@ -1200,7 +1199,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_GFXCLK << 16) | (freq & 0xffff));
+ (PPCLK_GFXCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s gfxclk !\n",
max ? "max" : "min");
@@ -1215,7 +1215,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_UCLK << 16) | (freq & 0xffff));
+ (PPCLK_UCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s memclk !\n",
max ? "max" : "min");
@@ -1230,7 +1231,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_SOCCLK << 16) | (freq & 0xffff));
+ (PPCLK_SOCCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s socclk !\n",
max ? "max" : "min");
@@ -1245,7 +1247,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
single_dpm_table->dpm_state.soft_min_level;
ret = smu_send_smc_msg_with_param(smu,
(max ? SMU_MSG_SetSoftMaxByFreq : SMU_MSG_SetSoftMinByFreq),
- (PPCLK_FCLK << 16) | (freq & 0xffff));
+ (PPCLK_FCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set soft %s fclk !\n",
max ? "max" : "min");
@@ -1260,7 +1263,8 @@ static int vega20_upload_dpm_level(struct smu_context *smu, bool max,
if (!max) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_DCEFCLK << 16) | (freq & 0xffff));
+ (PPCLK_DCEFCLK << 16) | (freq & 0xffff),
+ NULL);
if (ret) {
pr_err("Failed to set hard min dcefclk !\n");
return ret;
@@ -1421,7 +1425,9 @@ static int vega20_force_clk_levels(struct smu_context *smu,
}
ret = smu_send_smc_msg_with_param(smu,
- SMU_MSG_SetMinLinkDpmByIndex, soft_min_level);
+ SMU_MSG_SetMinLinkDpmByIndex,
+ soft_min_level,
+ NULL);
if (ret)
pr_err("Failed to set min link dpm level!\n");
@@ -1477,13 +1483,13 @@ static int vega20_overdrive_get_gfx_clk_base_voltage(struct smu_context *smu,
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_GetAVFSVoltageByDpm,
- ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq));
+ ((AVFS_CURVE << 24) | (OD8_HOTCURVE_TEMPERATURE << 16) | freq),
+ voltage);
if (ret) {
pr_err("[GetBaseVoltage] failed to get GFXCLK AVFS voltage from SMU!");
return ret;
}
- smu_read_smc_arg(smu, voltage);
*voltage = *voltage / VOLTAGE_SCALE;
return 0;
@@ -1956,8 +1962,10 @@ static int vega20_set_power_profile_mode(struct smu_context *smu, long *input, u
workload_type = smu_workload_get_type(smu, smu->power_profile_mode);
if (workload_type < 0)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_SetWorkloadMask,
- 1 << workload_type);
+ smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetWorkloadMask,
+ 1 << workload_type,
+ NULL);
return ret;
}
@@ -2029,7 +2037,8 @@ vega20_set_uclk_to_highest_dpm_level(struct smu_context *smu,
dpm_table->dpm_state.hard_min_level = dpm_table->dpm_levels[dpm_table->count - 1].value;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | dpm_table->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2047,7 +2056,7 @@ static int vega20_pre_display_config_changed(struct smu_context *smu)
if (!smu->smu_dpm.dpm_context)
return -EINVAL;
- smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0);
+ smu_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays, 0, NULL);
ret = vega20_set_uclk_to_highest_dpm_level(smu,
&dpm_table->mem_table);
if (ret)
@@ -2074,7 +2083,8 @@ static int vega20_display_config_changed(struct smu_context *smu)
smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
smu_send_smc_msg_with_param(smu,
SMU_MSG_NumOfDisplays,
- smu->display_config->num_display);
+ smu->display_config->num_display,
+ NULL);
}
return ret;
@@ -2247,7 +2257,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
if (smu_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
- min_clocks.dcef_clock_in_sr/100);
+ min_clocks.dcef_clock_in_sr/100,
+ NULL);
if (ret) {
pr_err("Attempt to set divider for DCEFCLK Failed!");
return ret;
@@ -2262,7 +2273,8 @@ vega20_notify_smc_display_config(struct smu_context *smu)
memtable->dpm_state.hard_min_level = min_clocks.memory_clock/100;
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_SetHardMinByFreq,
- (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level);
+ (PPCLK_UCLK << 16) | memtable->dpm_state.hard_min_level,
+ NULL);
if (ret) {
pr_err("[%s] Set hard min uclk failed!", __func__);
return ret;
@@ -2853,8 +2865,10 @@ static int vega20_set_thermal_fan_table(struct smu_context *smu)
struct smu_table_context *table_context = &smu->smu_table;
PPTable_t *pptable = table_context->driver_pptable;
- ret = smu_send_smc_msg_with_param(smu, SMU_MSG_SetFanTemperatureTarget,
- (uint32_t)pptable->FanTargetTemperature);
+ ret = smu_send_smc_msg_with_param(smu,
+ SMU_MSG_SetFanTemperatureTarget,
+ (uint32_t)pptable->FanTargetTemperature,
+ NULL);
return ret;
}
@@ -2864,15 +2878,13 @@ static int vega20_get_fan_speed_rpm(struct smu_context *smu,
{
int ret;
- ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm);
+ ret = smu_send_smc_msg(smu, SMU_MSG_GetCurrentRpm, speed);
if (ret) {
pr_err("Attempt to get current RPM from SMC Failed!\n");
return ret;
}
- smu_read_smc_arg(smu, speed);
-
return 0;
}
@@ -3137,7 +3149,7 @@ static int vega20_set_df_cstate(struct smu_context *smu,
return -EINVAL;
}
- return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state);
+ return smu_send_smc_msg_with_param(smu, SMU_MSG_DFCstateControl, state, NULL);
}
static int vega20_update_pcie_parameters(struct smu_context *smu,
@@ -3155,7 +3167,8 @@ static int vega20_update_pcie_parameters(struct smu_context *smu,
pptable->PcieLaneCount[i] : pcie_width_cap);
ret = smu_send_smc_msg_with_param(smu,
SMU_MSG_OverridePcieParameters,
- smu_pcie_arg);
+ smu_pcie_arg,
+ NULL);
}
return ret;
@@ -3229,7 +3242,6 @@ static const struct pptable_funcs vega20_ppt_funcs = {
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.system_features_control = smu_v11_0_system_features_control,
.send_smc_msg_with_param = smu_v11_0_send_msg_with_param,
- .read_smc_arg = smu_v11_0_read_arg,
.init_display_count = smu_v11_0_init_display_count,
.set_allowed_mask = smu_v11_0_set_allowed_mask,
.get_enabled_mask = smu_v11_0_get_enabled_mask,
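
The hunks above all follow one pattern: the SMC response read-back that used to be a separate smu_read_smc_arg() call is now an output pointer on the send itself, passed as NULL when no response is wanted. A minimal sketch of the two calling styles (function body and variable names illustrative):

	static int example_query_levels(struct smu_context *smu, int clk_id)
	{
		uint32_t num_of_levels = 0;
		int ret;

		/* send the message and read back the response in one call */
		ret = smu_send_smc_msg_with_param(smu, SMU_MSG_GetDpmFreqByIndex,
						  (clk_id << 16) | 0xFF,
						  &num_of_levels);
		if (ret)
			return ret;

		/* fire-and-forget messages pass NULL for the read-back pointer */
		return smu_send_smc_msg(smu, SMU_MSG_RunAfllBtc, NULL);
	}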
diff --git a/drivers/gpu/drm/arc/arcpgu_crtc.c b/drivers/gpu/drm/arc/arcpgu_crtc.c
index 8ae1e1f97a73..be7c29cec318 100644
--- a/drivers/gpu/drm/arc/arcpgu_crtc.c
+++ b/drivers/gpu/drm/arc/arcpgu_crtc.c
@@ -9,7 +9,6 @@
#include <drm/drm_device.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <linux/clk.h>
@@ -138,24 +137,9 @@ static void arc_pgu_crtc_atomic_disable(struct drm_crtc *crtc,
~ARCPGU_CTRL_ENABLE_MASK);
}
-static void arc_pgu_crtc_atomic_begin(struct drm_crtc *crtc,
- struct drm_crtc_state *state)
-{
- struct drm_pending_vblank_event *event = crtc->state->event;
-
- if (event) {
- crtc->state->event = NULL;
-
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irq(&crtc->dev->event_lock);
- }
-}
-
static const struct drm_crtc_helper_funcs arc_pgu_crtc_helper_funcs = {
.mode_valid = arc_pgu_crtc_mode_valid,
.mode_set_nofb = arc_pgu_crtc_mode_set_nofb,
- .atomic_begin = arc_pgu_crtc_atomic_begin,
.atomic_enable = arc_pgu_crtc_atomic_enable,
.atomic_disable = arc_pgu_crtc_atomic_disable,
};
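
The atomic_begin hook removed here only existed to complete pending page-flip events for a driver without vblank support. The atomic helpers now do this on the driver's behalf: when a CRTC state has no_vblank set (which the helpers derive from the device's vblank support; note the matching no_vblank removal in ast below), drm_atomic_helper_fake_vblank() dispatches the event during the commit. Roughly what the helper does, sketched and simplified (this is not code from this patch):

	if (new_crtc_state->no_vblank && new_crtc_state->event) {
		spin_lock_irq(&crtc->dev->event_lock);
		drm_crtc_send_vblank_event(crtc, new_crtc_state->event);
		new_crtc_state->event = NULL;
		spin_unlock_irq(&crtc->dev->event_lock);
	}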
diff --git a/drivers/gpu/drm/arc/arcpgu_hdmi.c b/drivers/gpu/drm/arc/arcpgu_hdmi.c
index 8fd7094beece..52839934f2fb 100644
--- a/drivers/gpu/drm/arc/arcpgu_hdmi.c
+++ b/drivers/gpu/drm/arc/arcpgu_hdmi.c
@@ -40,7 +40,7 @@ int arcpgu_drm_hdmi_init(struct drm_device *drm, struct device_node *np)
return ret;
/* Link drm_bridge to encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
drm_encoder_cleanup(encoder);
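
drm_bridge_attach() gains a flags argument here, and the bridge drivers further down receive it in their .attach hooks; DRM_BRIDGE_ATTACH_NO_CONNECTOR asks the bridge not to create its own connector. A sketch of both sides of the new contract, following the pattern the later hunks use (names illustrative):

	/* bridge side: the attach hook now takes the flags */
	static int my_bridge_attach(struct drm_bridge *bridge,
				    enum drm_bridge_attach_flags flags)
	{
		if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
			/* bridges that still create their own connector
			 * reject the flag until they are converted */
			return -EINVAL;
		}
		/* ... create and attach the drm_connector as before ... */
		return 0;
	}

	/* encoder side: callers pass flags through (0 keeps old behaviour) */
	ret = drm_bridge_attach(encoder, bridge, NULL, 0);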
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index ac8a78bfda03..f2dc371bd8e5 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -129,18 +129,12 @@ int armada_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fbh, &armada_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fbh, 1);
+ ret = drm_fb_helper_init(dev, fbh);
if (ret) {
DRM_ERROR("failed to initialize drm fb helper\n");
goto err_fb_helper;
}
- ret = drm_fb_helper_single_add_all_connectors(fbh);
- if (ret) {
- DRM_ERROR("failed to add fb connectors\n");
- goto err_fb_setup;
- }
-
ret = drm_fb_helper_initial_config(fbh, 32);
if (ret) {
DRM_ERROR("failed to set initial config\n");
diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h
index f5d8780776ae..656d591b154b 100644
--- a/drivers/gpu/drm/ast/ast_drv.h
+++ b/drivers/gpu/drm/ast/ast_drv.h
@@ -121,6 +121,7 @@ struct ast_private {
unsigned int next_index;
} cursor;
+ struct drm_encoder encoder;
struct drm_plane primary_plane;
struct drm_plane cursor_plane;
@@ -238,13 +239,8 @@ struct ast_crtc {
u8 offset_x, offset_y;
};
-struct ast_encoder {
- struct drm_encoder base;
-};
-
#define to_ast_crtc(x) container_of(x, struct ast_crtc, base)
#define to_ast_connector(x) container_of(x, struct ast_connector, base)
-#define to_ast_encoder(x) container_of(x, struct ast_encoder, base)
struct ast_vbios_stdtable {
u8 misc;
diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c
index b79f484e9bd2..18a0a4ce00f6 100644
--- a/drivers/gpu/drm/ast/ast_main.c
+++ b/drivers/gpu/drm/ast/ast_main.c
@@ -388,31 +388,9 @@ static int ast_get_dram_info(struct drm_device *dev)
return 0;
}
-enum drm_mode_status ast_mode_config_mode_valid(struct drm_device *dev,
- const struct drm_display_mode *mode)
-{
- static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGBA8888 */
-
- struct ast_private *ast = dev->dev_private;
- unsigned long fbsize, fbpages, max_fbpages;
-
- /* To support double buffering, a framebuffer may not
- * consume more than half of the available VRAM.
- */
- max_fbpages = (ast->vram_size / 2) >> PAGE_SHIFT;
-
- fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
- fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
-
- if (fbpages > max_fbpages)
- return MODE_MEM;
-
- return MODE_OK;
-}
-
static const struct drm_mode_config_funcs ast_mode_funcs = {
.fb_create = drm_gem_fb_create,
- .mode_valid = ast_mode_config_mode_valid,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
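
The removed driver code documents the policy: to support double buffering, a framebuffer may consume at most half of the available VRAM. drm_vram_helper_mode_valid() provides that check generically; assuming the helper keeps the same half-of-VRAM rule, it amounts to roughly:

	/* sketch of the check the generic helper performs */
	max_fbpages = (vram_size / 2) >> PAGE_SHIFT;
	fbsize = mode->hdisplay * mode->vdisplay * 4;	/* 4 bytes/pixel max */
	if (DIV_ROUND_UP(fbsize, PAGE_SIZE) > max_fbpages)
		return MODE_MEM;
	return MODE_OK;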
diff --git a/drivers/gpu/drm/ast/ast_mode.c b/drivers/gpu/drm/ast/ast_mode.c
index 34608f0499eb..cdd6c46d6557 100644
--- a/drivers/gpu/drm/ast/ast_mode.c
+++ b/drivers/gpu/drm/ast/ast_mode.c
@@ -40,6 +40,7 @@
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "ast_drv.h"
#include "ast_tables.h"
@@ -833,8 +834,6 @@ static void ast_crtc_helper_atomic_flush(struct drm_crtc *crtc,
struct ast_vbios_mode_info *vbios_mode_info;
struct drm_display_mode *adjusted_mode;
- crtc->state->no_vblank = true;
-
ast_state = to_ast_crtc_state(crtc->state);
format = ast_state->format;
@@ -959,28 +958,18 @@ err_kfree:
* Encoder
*/
-static void ast_encoder_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
- kfree(encoder);
-}
-
-static const struct drm_encoder_funcs ast_enc_funcs = {
- .destroy = ast_encoder_destroy,
-};
-
static int ast_encoder_init(struct drm_device *dev)
{
- struct ast_encoder *ast_encoder;
+ struct ast_private *ast = dev->dev_private;
+ struct drm_encoder *encoder = &ast->encoder;
+ int ret;
- ast_encoder = kzalloc(sizeof(struct ast_encoder), GFP_KERNEL);
- if (!ast_encoder)
- return -ENOMEM;
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_DAC);
+ if (ret)
+ return ret;
- drm_encoder_init(dev, &ast_encoder->base, &ast_enc_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
+ encoder->possible_crtcs = 1;
- ast_encoder->base.possible_crtcs = 1;
return 0;
}
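
Two changes combine here: the encoder moves from a kzalloc()'d wrapper struct into ast_private, and drm_simple_encoder_init() replaces the hand-rolled funcs, since its default .destroy is drm_encoder_cleanup(). The resulting pattern, condensed from the hunks above:

	struct ast_private {
		struct drm_encoder encoder;	/* embedded, no kzalloc()/kfree() */
		/* ... */
	};

	ret = drm_simple_encoder_init(dev, &ast->encoder, DRM_MODE_ENCODER_DAC);
	if (ret)
		return ret;
	ast->encoder.possible_crtcs = 1;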
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 121b62682d80..e2019fe97fff 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -114,7 +114,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
}
if (bridge) {
- ret = drm_bridge_attach(&output->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL, 0);
if (!ret)
return 0;
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 10460878414e..addb0568c1af 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -23,7 +23,6 @@ static void bochs_unload(struct drm_device *dev)
bochs_kms_fini(bochs);
bochs_mm_fini(bochs);
- bochs_hw_fini(dev);
kfree(bochs);
dev->dev_private = NULL;
}
@@ -69,6 +68,7 @@ static struct drm_driver bochs_driver = {
.major = 1,
.minor = 0,
DRM_GEM_VRAM_DRIVER,
+ .release = bochs_unload,
};
/* ---------------------------------------------------------------------- */
@@ -148,9 +148,9 @@ static void bochs_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
+ drm_dev_unplug(dev);
drm_atomic_helper_shutdown(dev);
- drm_dev_unregister(dev);
- bochs_unload(dev);
+ bochs_hw_fini(dev);
drm_dev_put(dev);
}
diff --git a/drivers/gpu/drm/bochs/bochs_hw.c b/drivers/gpu/drm/bochs/bochs_hw.c
index b615b7dfdd9d..952199cc0462 100644
--- a/drivers/gpu/drm/bochs/bochs_hw.c
+++ b/drivers/gpu/drm/bochs/bochs_hw.c
@@ -4,6 +4,7 @@
#include <linux/pci.h>
+#include <drm/drm_drv.h>
#include <drm/drm_fourcc.h>
#include "bochs.h"
@@ -194,6 +195,8 @@ void bochs_hw_fini(struct drm_device *dev)
{
struct bochs_device *bochs = dev->dev_private;
+ /* TODO: shut down existing vram mappings */
+
if (bochs->mmio)
iounmap(bochs->mmio);
if (bochs->ioports)
@@ -207,6 +210,11 @@ void bochs_hw_fini(struct drm_device *dev)
void bochs_hw_setmode(struct bochs_device *bochs,
struct drm_display_mode *mode)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
bochs->xres = mode->hdisplay;
bochs->yres = mode->vdisplay;
bochs->bpp = 32;
@@ -232,11 +240,18 @@ void bochs_hw_setmode(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_ENABLE,
VBE_DISPI_ENABLED | VBE_DISPI_LFB_ENABLED);
+
+ drm_dev_exit(idx);
}
void bochs_hw_setformat(struct bochs_device *bochs,
const struct drm_format_info *format)
{
+ int idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
+
DRM_DEBUG_DRIVER("format %c%c%c%c\n",
(format->format >> 0) & 0xff,
(format->format >> 8) & 0xff,
@@ -256,13 +271,18 @@ void bochs_hw_setformat(struct bochs_device *bochs,
__func__, format->format);
break;
}
+
+ drm_dev_exit(idx);
}
void bochs_hw_setbase(struct bochs_device *bochs,
int x, int y, int stride, u64 addr)
{
unsigned long offset;
- unsigned int vx, vy, vwidth;
+ unsigned int vx, vy, vwidth, idx;
+
+ if (!drm_dev_enter(bochs->dev, &idx))
+ return;
bochs->stride = stride;
offset = (unsigned long)addr +
@@ -277,4 +297,6 @@ void bochs_hw_setbase(struct bochs_device *bochs,
bochs_dispi_write(bochs, VBE_DISPI_INDEX_VIRT_WIDTH, vwidth);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_X_OFFSET, vx);
bochs_dispi_write(bochs, VBE_DISPI_INDEX_Y_OFFSET, vy);
+
+ drm_dev_exit(idx);
}
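
Together with the drm_dev_unplug() call added to bochs_pci_remove() above, these drm_dev_enter()/drm_dev_exit() brackets turn hardware access into a no-op once the device is gone, instead of touching unmapped registers. The recurring guard pattern, sketched:

	void my_hw_access(struct bochs_device *bochs)
	{
		int idx;

		if (!drm_dev_enter(bochs->dev, &idx))
			return;		/* device was unplugged */

		/* ... program hardware registers ... */

		drm_dev_exit(idx);
	}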
diff --git a/drivers/gpu/drm/bochs/bochs_kms.c b/drivers/gpu/drm/bochs/bochs_kms.c
index 3f0006c2470d..8066d7d370d5 100644
--- a/drivers/gpu/drm/bochs/bochs_kms.c
+++ b/drivers/gpu/drm/bochs/bochs_kms.c
@@ -7,7 +7,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "bochs.h"
@@ -57,16 +56,8 @@ static void bochs_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct bochs_device *bochs = pipe->crtc.dev->dev_private;
- struct drm_crtc *crtc = &pipe->crtc;
bochs_plane_update(bochs, pipe->plane.state);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs bochs_pipe_funcs = {
@@ -92,32 +83,11 @@ static int bochs_connector_get_modes(struct drm_connector *connector)
return count;
}
-static enum drm_mode_status bochs_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct bochs_device *bochs =
- container_of(connector, struct bochs_device, connector);
- unsigned long size = mode->hdisplay * mode->vdisplay * 4;
-
- /*
- * Make sure we can fit two framebuffers into video memory.
- * This allows up to 1600x1200 with 16 MB (default size).
- * If you want more try this:
- * 'qemu -vga std -global VGA.vgamem_mb=32 $otherargs'
- */
- if (size * 2 > bochs->fb_size)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
static const struct drm_connector_helper_funcs bochs_connector_connector_helper_funcs = {
.get_modes = bochs_connector_get_modes,
- .mode_valid = bochs_connector_mode_valid,
};
static const struct drm_connector_funcs bochs_connector_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.fill_modes = drm_helper_probe_single_connector_modes,
.destroy = drm_connector_cleanup,
.reset = drm_atomic_helper_connector_reset,
@@ -157,6 +127,7 @@ bochs_gem_fb_create(struct drm_device *dev, struct drm_file *file,
const struct drm_mode_config_funcs bochs_mode_funcs = {
.fb_create = bochs_gem_fb_create,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -192,6 +163,9 @@ int bochs_kms_init(struct bochs_device *bochs)
void bochs_kms_fini(struct bochs_device *bochs)
{
+ if (!bochs->dev->mode_config.num_connector)
+ return;
+
drm_atomic_helper_shutdown(bochs->dev);
drm_mode_config_cleanup(bochs->dev);
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index 0b9ca5862455..aaed2347ace9 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -27,13 +27,16 @@ config DRM_CDNS_DSI
Support Cadence DPI to DSI bridge. This is an internal
bridge and is meant to be directly embedded in a SoC.
-config DRM_DUMB_VGA_DAC
- tristate "Dumb VGA DAC Bridge support"
+config DRM_DISPLAY_CONNECTOR
+ tristate "Display connector support"
depends on OF
- select DRM_KMS_HELPER
help
- Support for non-programmable RGB to VGA DAC bridges, such as ADI
- ADV7123, TI THS8134 and THS8135 or passive resistor ladder DACs.
+ Driver for display connectors with support for DDC and hot-plug
+ detection. Most display controllers handle display connectors
+ internally and don't need this driver, but the DRM subsystem is
+ moving towards separating connector handling from display controllers
+ on ARM-based platforms. Saying Y here when this driver is not needed
+ will not cause any issue.
config DRM_LVDS_CODEC
tristate "Transparent LVDS encoders and decoders support"
@@ -72,6 +75,17 @@ config DRM_PARADE_PS8622
---help---
Parade eDP-LVDS bridge chip driver.
+config DRM_PARADE_PS8640
+ tristate "Parade PS8640 MIPI DSI to eDP Converter"
+ depends on OF
+ select DRM_KMS_HELPER
+ select DRM_MIPI_DSI
+ select DRM_PANEL
+ help
+ Choose this option if you have a PS8640 for display.
+ The PS8640 is a high-performance and low-power
+ MIPI DSI to eDP converter.
+
config DRM_SIL_SII8620
tristate "Silicon Image SII8620 HDMI/MHL bridge"
depends on OF
@@ -87,6 +101,7 @@ config DRM_SII902X
select DRM_KMS_HELPER
select REGMAP_I2C
select I2C_MUX
+ select SND_SOC_HDMI_CODEC if SND_SOC
---help---
Silicon Image sii902x bridge chip driver.
@@ -98,6 +113,14 @@ config DRM_SII9234
It is an I2C driver, that detects connection of MHL bridge
and starts encapsulation of HDMI signal.
+config DRM_SIMPLE_BRIDGE
+ tristate "Simple DRM bridge support"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Support for non-programmable DRM bridges, such as ADI ADV7123, TI
+ THS8134 and THS8135 or passive resistor ladder DACs.
+
config DRM_THINE_THC63LVD1024
tristate "Thine THC63LVD1024 LVDS decoder bridge"
depends on OF
@@ -122,6 +145,16 @@ config DRM_TOSHIBA_TC358767
---help---
Toshiba TC358767 eDP bridge chip driver.
+config DRM_TOSHIBA_TC358768
+ tristate "Toshiba TC358768 MIPI DSI bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358768AXBG/TC358778XBG DSI bridge chip driver.
+
config DRM_TI_TFP410
tristate "TI TFP410 DVI/HDMI bridge"
depends on OF
@@ -139,6 +172,14 @@ config DRM_TI_SN65DSI86
help
Texas Instruments SN65DSI86 DSI to eDP Bridge driver
+config DRM_TI_TPD12S015
+ tristate "TI TPD12S015 HDMI level shifter and ESD protection"
+ depends on OF
+ select DRM_KMS_HELPER
+ help
+ Texas Instruments TPD12S015 HDMI level shifter and ESD protection
+ driver.
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index cd16ce830270..6fb062b5b0f0 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -1,19 +1,23 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_CDNS_DSI) += cdns-dsi.o
-obj-$(CONFIG_DRM_DUMB_VGA_DAC) += dumb-vga-dac.o
+obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o
obj-$(CONFIG_DRM_LVDS_CODEC) += lvds-codec.o
obj-$(CONFIG_DRM_MEGACHIPS_STDPXXXX_GE_B850V3_FW) += megachips-stdpxxxx-ge-b850v3-fw.o
obj-$(CONFIG_DRM_NXP_PTN3460) += nxp-ptn3460.o
obj-$(CONFIG_DRM_PARADE_PS8622) += parade-ps8622.o
+obj-$(CONFIG_DRM_PARADE_PS8640) += parade-ps8640.o
obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
+obj-$(CONFIG_DRM_SIMPLE_BRIDGE) += simple-bridge.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358768) += tc358768.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
+obj-$(CONFIG_DRM_TI_TPD12S015) += ti-tpd12s015.o
obj-y += analogix/
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/Kconfig b/drivers/gpu/drm/bridge/adv7511/Kconfig
index 8a56ff81f4fb..47d4eb9e845d 100644
--- a/drivers/gpu/drm/bridge/adv7511/Kconfig
+++ b/drivers/gpu/drm/bridge/adv7511/Kconfig
@@ -4,8 +4,9 @@ config DRM_I2C_ADV7511
depends on OF
select DRM_KMS_HELPER
select REGMAP_I2C
+ select DRM_MIPI_DSI
help
- Support for the Analog Device ADV7511(W) and ADV7513 HDMI encoders.
+ Support for the Analog Devices ADV7511(W)/13/33/35 HDMI encoders.
config DRM_I2C_ADV7511_AUDIO
bool "ADV7511 HDMI Audio driver"
@@ -15,16 +16,8 @@ config DRM_I2C_ADV7511_AUDIO
Support the ADV7511 HDMI Audio interface. This is used in
conjunction with the AV7511 HDMI driver.
-config DRM_I2C_ADV7533
- bool "ADV7533 encoder"
- depends on DRM_I2C_ADV7511
- select DRM_MIPI_DSI
- default y
- help
- Support for the Analog Devices ADV7533 DSI to HDMI encoder.
-
config DRM_I2C_ADV7511_CEC
- bool "ADV7511/33 HDMI CEC driver"
+ bool "ADV7511/33/35 HDMI CEC driver"
depends on DRM_I2C_ADV7511
select CEC_CORE
default y
diff --git a/drivers/gpu/drm/bridge/adv7511/Makefile b/drivers/gpu/drm/bridge/adv7511/Makefile
index b46ebeb35fd4..d8ceb534b51f 100644
--- a/drivers/gpu/drm/bridge/adv7511/Makefile
+++ b/drivers/gpu/drm/bridge/adv7511/Makefile
@@ -1,6 +1,5 @@
# SPDX-License-Identifier: GPL-2.0-only
-adv7511-y := adv7511_drv.o
+adv7511-y := adv7511_drv.o adv7533.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_AUDIO) += adv7511_audio.o
adv7511-$(CONFIG_DRM_I2C_ADV7511_CEC) += adv7511_cec.o
-adv7511-$(CONFIG_DRM_I2C_ADV7533) += adv7533.o
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511.o
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h
index 52b2adfdc877..a9bb734366ae 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511.h
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h
@@ -320,6 +320,7 @@ struct adv7511_video_config {
enum adv7511_type {
ADV7511,
ADV7533,
+ ADV7535,
};
#define ADV7511_MAX_ADDRS 3
@@ -393,7 +394,6 @@ static inline int adv7511_cec_init(struct device *dev, struct adv7511 *adv7511)
}
#endif
-#ifdef CONFIG_DRM_I2C_ADV7533
void adv7533_dsi_power_on(struct adv7511 *adv);
void adv7533_dsi_power_off(struct adv7511 *adv);
void adv7533_mode_set(struct adv7511 *adv, const struct drm_display_mode *mode);
@@ -402,44 +402,6 @@ int adv7533_patch_cec_registers(struct adv7511 *adv);
int adv7533_attach_dsi(struct adv7511 *adv);
void adv7533_detach_dsi(struct adv7511 *adv);
int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv);
-#else
-static inline void adv7533_dsi_power_on(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_dsi_power_off(struct adv7511 *adv)
-{
-}
-
-static inline void adv7533_mode_set(struct adv7511 *adv,
- const struct drm_display_mode *mode)
-{
-}
-
-static inline int adv7533_patch_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_patch_cec_registers(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline int adv7533_attach_dsi(struct adv7511 *adv)
-{
- return -ENODEV;
-}
-
-static inline void adv7533_detach_dsi(struct adv7511 *adv)
-{
-}
-
-static inline int adv7533_parse_dt(struct device_node *np, struct adv7511 *adv)
-{
- return -ENODEV;
-}
-#endif
#ifdef CONFIG_DRM_I2C_ADV7511_AUDIO
int adv7511_audio_init(struct device *dev, struct adv7511 *adv7511);
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 9e13e466e72c..87b58c1acff4 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -367,7 +367,7 @@ static void adv7511_power_on(struct adv7511 *adv7511)
*/
regcache_sync(adv7511->regmap);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_on(adv7511);
adv7511->powered = true;
}
@@ -387,7 +387,7 @@ static void __adv7511_power_off(struct adv7511 *adv7511)
static void adv7511_power_off(struct adv7511 *adv7511)
{
__adv7511_power_off(adv7511);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_dsi_power_off(adv7511);
adv7511->powered = false;
}
@@ -761,7 +761,7 @@ static void adv7511_mode_set(struct adv7511 *adv7511,
regmap_update_bits(adv7511->regmap, 0x17,
0x60, (vsync_polarity << 6) | (hsync_polarity << 5));
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_mode_set(adv7511, adj_mode);
drm_mode_copy(&adv7511->curr_mode, adj_mode);
@@ -847,11 +847,17 @@ static void adv7511_bridge_mode_set(struct drm_bridge *bridge,
adv7511_mode_set(adv, mode, adj_mode);
}
-static int adv7511_bridge_attach(struct drm_bridge *bridge)
+static int adv7511_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct adv7511 *adv = bridge_to_adv7511(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -874,7 +880,7 @@ static int adv7511_bridge_attach(struct drm_bridge *bridge)
&adv7511_connector_helper_funcs);
drm_connector_attach_encoder(&adv->connector, bridge->encoder);
- if (adv->type == ADV7533)
+ if (adv->type == ADV7533 || adv->type == ADV7535)
ret = adv7533_attach_dsi(adv);
if (adv->i2c_main->irq)
@@ -952,7 +958,7 @@ static bool adv7511_cec_register_volatile(struct device *dev, unsigned int reg)
struct i2c_client *i2c = to_i2c_client(dev);
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
reg -= ADV7533_REG_CEC_OFFSET;
switch (reg) {
@@ -994,7 +1000,7 @@ static int adv7511_init_cec_regmap(struct adv7511 *adv)
goto err;
}
- if (adv->type == ADV7533) {
+ if (adv->type == ADV7533 || adv->type == ADV7535) {
ret = adv7533_patch_cec_registers(adv);
if (ret)
goto err;
@@ -1242,7 +1248,7 @@ static int adv7511_remove(struct i2c_client *i2c)
{
struct adv7511 *adv7511 = i2c_get_clientdata(i2c);
- if (adv7511->type == ADV7533)
+ if (adv7511->type == ADV7533 || adv7511->type == ADV7535)
adv7533_detach_dsi(adv7511);
i2c_unregister_device(adv7511->i2c_cec);
if (adv7511->cec_clk)
@@ -1266,9 +1272,8 @@ static const struct i2c_device_id adv7511_i2c_ids[] = {
{ "adv7511", ADV7511 },
{ "adv7511w", ADV7511 },
{ "adv7513", ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ "adv7533", ADV7533 },
-#endif
+ { "adv7535", ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(i2c, adv7511_i2c_ids);
@@ -1277,9 +1282,8 @@ static const struct of_device_id adv7511_of_ids[] = {
{ .compatible = "adi,adv7511", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7511w", .data = (void *)ADV7511 },
{ .compatible = "adi,adv7513", .data = (void *)ADV7511 },
-#ifdef CONFIG_DRM_I2C_ADV7533
{ .compatible = "adi,adv7533", .data = (void *)ADV7533 },
-#endif
+ { .compatible = "adi,adv7535", .data = (void *)ADV7535 },
{ }
};
MODULE_DEVICE_TABLE(of, adv7511_of_ids);
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
index 56f55c53abfd..2bc6e4f85171 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx6345.c
@@ -210,8 +210,7 @@ static int anx6345_dp_link_training(struct anx6345 *anx6345)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx6345->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
+ dpcd[0] = dp_bw;
err = regmap_write(anx6345->map[I2C_IDX_DPTX],
SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
if (err)
@@ -520,11 +519,17 @@ static const struct drm_connector_funcs anx6345_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx6345_bridge_attach(struct drm_bridge *bridge)
+static int anx6345_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx6345 *anx6345 = bridge_to_anx6345(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -712,16 +717,20 @@ static int anx6345_i2c_probe(struct i2c_client *client,
DRM_DEBUG("No panel found\n");
/* 1.2V digital core power regulator */
- anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12-supply");
+ anx6345->dvdd12 = devm_regulator_get(dev, "dvdd12");
if (IS_ERR(anx6345->dvdd12)) {
- DRM_ERROR("dvdd12-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd12) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd12 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd12));
return PTR_ERR(anx6345->dvdd12);
}
/* 2.5V digital core power regulator */
- anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25-supply");
+ anx6345->dvdd25 = devm_regulator_get(dev, "dvdd25");
if (IS_ERR(anx6345->dvdd25)) {
- DRM_ERROR("dvdd25-supply not found\n");
+ if (PTR_ERR(anx6345->dvdd25) != -EPROBE_DEFER)
+ DRM_ERROR("Failed to get dvdd25 supply (%ld)\n",
+ PTR_ERR(anx6345->dvdd25));
return PTR_ERR(anx6345->dvdd25);
}
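
The regulator rename above is subtle: the id passed to devm_regulator_get() is the bare supply name, and the regulator core itself appends "-supply" when resolving the device-tree property. Passing "dvdd12-supply" therefore made the core look for a nonexistent "dvdd12-supply-supply" property. Sketch of the corrected lookup:

	/* core resolves this id against the "dvdd12-supply" DT property */
	regulator = devm_regulator_get(dev, "dvdd12");
	if (IS_ERR(regulator)) {
		/* stay quiet on probe deferral, report real failures */
		if (PTR_ERR(regulator) != -EPROBE_DEFER)
			dev_err(dev, "failed to get dvdd12 supply\n");
		return PTR_ERR(regulator);
	}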
diff --git a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
index 41867be03751..0d5a5ad0c9ee 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix-anx78xx.c
@@ -722,10 +722,9 @@ static int anx78xx_dp_link_training(struct anx78xx *anx78xx)
if (err)
return err;
- dpcd[0] = drm_dp_max_link_rate(anx78xx->dpcd);
- dpcd[0] = drm_dp_link_rate_to_bw_code(dpcd[0]);
err = regmap_write(anx78xx->map[I2C_IDX_TX_P0],
- SP_DP_MAIN_LINK_BW_SET_REG, dpcd[0]);
+ SP_DP_MAIN_LINK_BW_SET_REG,
+ anx78xx->dpcd[DP_MAX_LINK_RATE]);
if (err)
return err;
@@ -887,11 +886,17 @@ static const struct drm_connector_funcs anx78xx_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int anx78xx_bridge_attach(struct drm_bridge *bridge)
+static int anx78xx_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct anx78xx *anx78xx = bridge_to_anx78xx(bridge);
int err;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
index 6effe532f820..9ded2cef57dd 100644
--- a/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
+++ b/drivers/gpu/drm/bridge/analogix/analogix_dp_core.c
@@ -1216,13 +1216,19 @@ static const struct drm_connector_funcs analogix_dp_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int analogix_dp_bridge_attach(struct drm_bridge *bridge)
+static int analogix_dp_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_encoder *encoder = dp->encoder;
struct drm_connector *connector = NULL;
int ret = 0;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
@@ -1289,19 +1295,21 @@ struct drm_crtc *analogix_dp_get_new_crtc(struct analogix_dp_device *dp,
return conn_state->crtc;
}
-static void analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_pre_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Don't touch the panel if we're coming back from PSR */
if (old_crtc_state && old_crtc_state->self_refresh_active)
return;
@@ -1366,20 +1374,22 @@ out_dp_clk_pre:
return ret;
}
-static void analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *old_crtc_state;
int timeout_loop = 0;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- old_crtc_state = drm_atomic_get_old_crtc_state(state, crtc);
+ old_crtc_state = drm_atomic_get_old_crtc_state(old_state, crtc);
/* Not a full enable, just disable PSR and continue */
if (old_crtc_state && old_crtc_state->self_refresh_active) {
ret = analogix_dp_disable_psr(dp);
@@ -1440,18 +1450,20 @@ static void analogix_dp_bridge_disable(struct drm_bridge *bridge)
dp->dpms_mode = DRM_MODE_DPMS_OFF;
}
-static void analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state = NULL;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
goto out;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state)
goto out;
@@ -1463,20 +1475,21 @@ out:
analogix_dp_bridge_disable(bridge);
}
-static
-void analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+static void
+analogix_dp_bridge_atomic_post_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *old_state = old_bridge_state->base.state;
struct analogix_dp_device *dp = bridge->driver_private;
struct drm_crtc *crtc;
struct drm_crtc_state *new_crtc_state;
int ret;
- crtc = analogix_dp_get_new_crtc(dp, state);
+ crtc = analogix_dp_get_new_crtc(dp, old_state);
if (!crtc)
return;
- new_crtc_state = drm_atomic_get_new_crtc_state(state, crtc);
+ new_crtc_state = drm_atomic_get_new_crtc_state(old_state, crtc);
if (!new_crtc_state || !new_crtc_state->self_refresh_active)
return;
@@ -1563,6 +1576,9 @@ static void analogix_dp_bridge_mode_set(struct drm_bridge *bridge,
}
static const struct drm_bridge_funcs analogix_dp_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_pre_enable = analogix_dp_bridge_atomic_pre_enable,
.atomic_enable = analogix_dp_bridge_atomic_enable,
.atomic_disable = analogix_dp_bridge_atomic_disable,
@@ -1588,7 +1604,7 @@ static int analogix_dp_create_bridge(struct drm_device *drm_dev,
bridge->driver_private = dp;
bridge->funcs = &analogix_dp_bridge_funcs;
- ret = drm_bridge_attach(dp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(dp->encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach drm bridge\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/bridge/cdns-dsi.c b/drivers/gpu/drm/bridge/cdns-dsi.c
index b7c97f060241..69c3892caee5 100644
--- a/drivers/gpu/drm/bridge/cdns-dsi.c
+++ b/drivers/gpu/drm/bridge/cdns-dsi.c
@@ -644,7 +644,8 @@ static int cdns_dsi_check_conf(struct cdns_dsi *dsi,
return 0;
}
-static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
+static int cdns_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct cdns_dsi_input *input = bridge_to_cdns_dsi_input(bridge);
struct cdns_dsi *dsi = input_to_dsi(input);
@@ -656,7 +657,8 @@ static int cdns_dsi_bridge_attach(struct drm_bridge *bridge)
return -ENOTSUPP;
}
- return drm_bridge_attach(bridge->encoder, output->bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, output->bridge, bridge,
+ flags);
}
static enum drm_mode_status
diff --git a/drivers/gpu/drm/bridge/display-connector.c b/drivers/gpu/drm/bridge/display-connector.c
new file mode 100644
index 000000000000..4d278573cdb9
--- /dev/null
+++ b/drivers/gpu/drm/bridge/display-connector.c
@@ -0,0 +1,295 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2019 Laurent Pinchart <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_edid.h>
+
+struct display_connector {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+};
+
+static inline struct display_connector *
+to_display_connector(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct display_connector, bridge);
+}
+
+static int display_connector_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ return flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR ? 0 : -EINVAL;
+}
+
+static enum drm_connector_status
+display_connector_detect(struct drm_bridge *bridge)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ if (conn->hpd_gpio) {
+ if (gpiod_get_value_cansleep(conn->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+ }
+
+ if (conn->bridge.ddc && drm_probe_ddc(conn->bridge.ddc))
+ return connector_status_connected;
+
+ switch (conn->bridge.type) {
+ case DRM_MODE_CONNECTOR_DVIA:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ /*
+ * For DVI and HDMI connectors a DDC probe failure indicates
+ * that no cable is connected.
+ */
+ return connector_status_disconnected;
+
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_VGA:
+ default:
+ /*
+ * Composite and S-Video connectors have no other detection
+ * means than the HPD GPIO. For VGA connectors, even if we have
+ * an I2C bus, we can't assume that the cable is disconnected
+ * if drm_probe_ddc fails, as some cables don't wire the DDC
+ * pins.
+ */
+ return connector_status_unknown;
+ }
+}
+
+static struct edid *display_connector_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct display_connector *conn = to_display_connector(bridge);
+
+ return drm_get_edid(connector, conn->bridge.ddc);
+}
+
+static const struct drm_bridge_funcs display_connector_bridge_funcs = {
+ .attach = display_connector_attach,
+ .detect = display_connector_detect,
+ .get_edid = display_connector_get_edid,
+};
+
+static irqreturn_t display_connector_hpd_irq(int irq, void *arg)
+{
+ struct display_connector *conn = arg;
+ struct drm_bridge *bridge = &conn->bridge;
+
+ drm_bridge_hpd_notify(bridge, display_connector_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int display_connector_probe(struct platform_device *pdev)
+{
+ struct display_connector *conn;
+ unsigned int type;
+ const char *label;
+ int ret;
+
+ conn = devm_kzalloc(&pdev->dev, sizeof(*conn), GFP_KERNEL);
+ if (!conn)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, conn);
+
+ type = (uintptr_t)of_device_get_match_data(&pdev->dev);
+
+ /* Get the exact connector type. */
+ switch (type) {
+ case DRM_MODE_CONNECTOR_DVII: {
+ bool analog, digital;
+
+ analog = of_property_read_bool(pdev->dev.of_node, "analog");
+ digital = of_property_read_bool(pdev->dev.of_node, "digital");
+ if (analog && !digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVIA;
+ } else if (!analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVID;
+ } else if (analog && digital) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_DVII;
+ } else {
+ dev_err(&pdev->dev, "DVI connector with no type\n");
+ return -EINVAL;
+ }
+ break;
+ }
+
+ case DRM_MODE_CONNECTOR_HDMIA: {
+ const char *hdmi_type;
+
+ ret = of_property_read_string(pdev->dev.of_node, "type",
+ &hdmi_type);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "HDMI connector with no type\n");
+ return -EINVAL;
+ }
+
+ if (!strcmp(hdmi_type, "a") || !strcmp(hdmi_type, "c") ||
+ !strcmp(hdmi_type, "d") || !strcmp(hdmi_type, "e")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ } else if (!strcmp(hdmi_type, "b")) {
+ conn->bridge.type = DRM_MODE_CONNECTOR_HDMIB;
+ } else {
+ dev_err(&pdev->dev,
+ "Unsupported HDMI connector type '%s'\n",
+ hdmi_type);
+ return -EINVAL;
+ }
+
+ break;
+ }
+
+ default:
+ conn->bridge.type = type;
+ break;
+ }
+
+ /* All the supported connector types support interlaced modes. */
+ conn->bridge.interlace_allowed = true;
+
+ /* Get the optional connector label. */
+ of_property_read_string(pdev->dev.of_node, "label", &label);
+
+ /*
+ * Get the HPD GPIO for DVI and HDMI connectors. If the GPIO can provide
+ * edge interrupts, register an interrupt handler.
+ */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA) {
+ conn->hpd_gpio = devm_gpiod_get_optional(&pdev->dev, "hpd",
+ GPIOD_IN);
+ if (IS_ERR(conn->hpd_gpio)) {
+ if (PTR_ERR(conn->hpd_gpio) != -EPROBE_DEFER)
+ dev_err(&pdev->dev,
+ "Unable to retrieve HPD GPIO\n");
+ return PTR_ERR(conn->hpd_gpio);
+ }
+
+ conn->hpd_irq = gpiod_to_irq(conn->hpd_gpio);
+ } else {
+ conn->hpd_irq = -EINVAL;
+ }
+
+ if (conn->hpd_irq >= 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, conn->hpd_irq,
+ NULL, display_connector_hpd_irq,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "HPD", conn);
+ if (ret) {
+ dev_info(&pdev->dev,
+ "Failed to request HPD edge interrupt, falling back to polling\n");
+ conn->hpd_irq = -EINVAL;
+ }
+ }
+
+ /* Retrieve the DDC I2C adapter for DVI, HDMI and VGA connectors. */
+ if (type == DRM_MODE_CONNECTOR_DVII ||
+ type == DRM_MODE_CONNECTOR_HDMIA ||
+ type == DRM_MODE_CONNECTOR_VGA) {
+ struct device_node *phandle;
+
+ phandle = of_parse_phandle(pdev->dev.of_node, "ddc-i2c-bus", 0);
+ if (phandle) {
+ conn->bridge.ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!conn->bridge.ddc)
+ return -EPROBE_DEFER;
+ } else {
+ dev_dbg(&pdev->dev,
+ "No I2C bus specified, disabling EDID readout\n");
+ }
+ }
+
+ conn->bridge.funcs = &display_connector_bridge_funcs;
+ conn->bridge.of_node = pdev->dev.of_node;
+
+ if (conn->bridge.ddc)
+ conn->bridge.ops |= DRM_BRIDGE_OP_EDID
+ | DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_gpio)
+ conn->bridge.ops |= DRM_BRIDGE_OP_DETECT;
+ if (conn->hpd_irq >= 0)
+ conn->bridge.ops |= DRM_BRIDGE_OP_HPD;
+
+ dev_dbg(&pdev->dev,
+ "Found %s display connector '%s' %s DDC bus and %s HPD GPIO (ops 0x%x)\n",
+ drm_get_connector_type_name(conn->bridge.type),
+ label ? label : "<unlabelled>",
+ conn->bridge.ddc ? "with" : "without",
+ conn->hpd_gpio ? "with" : "without",
+ conn->bridge.ops);
+
+ drm_bridge_add(&conn->bridge);
+
+ return 0;
+}
+
+static int display_connector_remove(struct platform_device *pdev)
+{
+ struct display_connector *conn = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&conn->bridge);
+
+ if (!IS_ERR(conn->bridge.ddc))
+ i2c_put_adapter(conn->bridge.ddc);
+
+ return 0;
+}
+
+static const struct of_device_id display_connector_match[] = {
+ {
+ .compatible = "composite-video-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_Composite,
+ }, {
+ .compatible = "dvi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_DVII,
+ }, {
+ .compatible = "hdmi-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_HDMIA,
+ }, {
+ .compatible = "svideo-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_SVIDEO,
+ }, {
+ .compatible = "vga-connector",
+ .data = (void *)DRM_MODE_CONNECTOR_VGA,
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, display_connector_match);
+
+static struct platform_driver display_connector_driver = {
+ .probe = display_connector_probe,
+ .remove = display_connector_remove,
+ .driver = {
+ .name = "display-connector",
+ .of_match_table = display_connector_match,
+ },
+};
+module_platform_driver(display_connector_driver);
+
+MODULE_AUTHOR("Laurent Pinchart <[email protected]>");
+MODULE_DESCRIPTION("Display connector driver");
+MODULE_LICENSE("GPL");
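
This driver only makes sense with a display controller that attaches it with DRM_BRIDGE_ATTACH_NO_CONNECTOR and builds the connector from the bridge chain. A sketch of that consumer side (it assumes the drm_bridge_connector helper from the same series, which is not part of this diff):

	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);
	if (ret)
		return ret;

	connector = drm_bridge_connector_init(drm, encoder);
	if (IS_ERR(connector))
		return PTR_ERR(connector);

	drm_connector_attach_encoder(connector, encoder);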
diff --git a/drivers/gpu/drm/bridge/dumb-vga-dac.c b/drivers/gpu/drm/bridge/dumb-vga-dac.c
deleted file mode 100644
index cc33dc411b9e..000000000000
--- a/drivers/gpu/drm/bridge/dumb-vga-dac.c
+++ /dev/null
@@ -1,300 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-or-later
-/*
- * Copyright (C) 2015-2016 Free Electrons
- * Copyright (C) 2015-2016 NextThing Co
- *
- * Maxime Ripard <[email protected]>
- */
-
-#include <linux/module.h>
-#include <linux/of_device.h>
-#include <linux/of_graph.h>
-#include <linux/regulator/consumer.h>
-
-#include <drm/drm_atomic_helper.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_print.h>
-#include <drm/drm_probe_helper.h>
-
-struct dumb_vga {
- struct drm_bridge bridge;
- struct drm_connector connector;
-
- struct i2c_adapter *ddc;
- struct regulator *vdd;
-};
-
-static inline struct dumb_vga *
-drm_bridge_to_dumb_vga(struct drm_bridge *bridge)
-{
- return container_of(bridge, struct dumb_vga, bridge);
-}
-
-static inline struct dumb_vga *
-drm_connector_to_dumb_vga(struct drm_connector *connector)
-{
- return container_of(connector, struct dumb_vga, connector);
-}
-
-static int dumb_vga_get_modes(struct drm_connector *connector)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
- struct edid *edid;
- int ret;
-
- if (!vga->ddc)
- goto fallback;
-
- edid = drm_get_edid(connector, vga->ddc);
- if (!edid) {
- DRM_INFO("EDID readout failed, falling back to standard modes\n");
- goto fallback;
- }
-
- drm_connector_update_edid_property(connector, edid);
- ret = drm_add_edid_modes(connector, edid);
- kfree(edid);
- return ret;
-
-fallback:
- /*
- * In case we cannot retrieve the EDIDs (broken or missing i2c
- * bus), fallback on the XGA standards
- */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anyone can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
-}
-
-static const struct drm_connector_helper_funcs dumb_vga_con_helper_funcs = {
- .get_modes = dumb_vga_get_modes,
-};
-
-static enum drm_connector_status
-dumb_vga_connector_detect(struct drm_connector *connector, bool force)
-{
- struct dumb_vga *vga = drm_connector_to_dumb_vga(connector);
-
- /*
- * Even if we have an I2C bus, we can't assume that the cable
- * is disconnected if drm_probe_ddc fails. Some cables don't
- * wire the DDC pins, or the I2C bus might not be working at
- * all.
- */
- if (vga->ddc && drm_probe_ddc(vga->ddc))
- return connector_status_connected;
-
- return connector_status_unknown;
-}
-
-static const struct drm_connector_funcs dumb_vga_con_funcs = {
- .detect = dumb_vga_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = drm_connector_cleanup,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static int dumb_vga_attach(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret;
-
- if (!bridge->encoder) {
- DRM_ERROR("Missing encoder\n");
- return -ENODEV;
- }
-
- drm_connector_helper_add(&vga->connector,
- &dumb_vga_con_helper_funcs);
- ret = drm_connector_init_with_ddc(bridge->dev, &vga->connector,
- &dumb_vga_con_funcs,
- DRM_MODE_CONNECTOR_VGA,
- vga->ddc);
- if (ret) {
- DRM_ERROR("Failed to initialize connector\n");
- return ret;
- }
-
- drm_connector_attach_encoder(&vga->connector,
- bridge->encoder);
-
- return 0;
-}
-
-static void dumb_vga_enable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
- int ret = 0;
-
- if (vga->vdd)
- ret = regulator_enable(vga->vdd);
-
- if (ret)
- DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
-}
-
-static void dumb_vga_disable(struct drm_bridge *bridge)
-{
- struct dumb_vga *vga = drm_bridge_to_dumb_vga(bridge);
-
- if (vga->vdd)
- regulator_disable(vga->vdd);
-}
-
-static const struct drm_bridge_funcs dumb_vga_bridge_funcs = {
- .attach = dumb_vga_attach,
- .enable = dumb_vga_enable,
- .disable = dumb_vga_disable,
-};
-
-static struct i2c_adapter *dumb_vga_retrieve_ddc(struct device *dev)
-{
- struct device_node *phandle, *remote;
- struct i2c_adapter *ddc;
-
- remote = of_graph_get_remote_node(dev->of_node, 1, -1);
- if (!remote)
- return ERR_PTR(-EINVAL);
-
- phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
- of_node_put(remote);
- if (!phandle)
- return ERR_PTR(-ENODEV);
-
- ddc = of_get_i2c_adapter_by_node(phandle);
- of_node_put(phandle);
- if (!ddc)
- return ERR_PTR(-EPROBE_DEFER);
-
- return ddc;
-}
-
-static int dumb_vga_probe(struct platform_device *pdev)
-{
- struct dumb_vga *vga;
-
- vga = devm_kzalloc(&pdev->dev, sizeof(*vga), GFP_KERNEL);
- if (!vga)
- return -ENOMEM;
- platform_set_drvdata(pdev, vga);
-
- vga->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
- if (IS_ERR(vga->vdd)) {
- int ret = PTR_ERR(vga->vdd);
- if (ret == -EPROBE_DEFER)
- return -EPROBE_DEFER;
- vga->vdd = NULL;
- dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
- }
-
- vga->ddc = dumb_vga_retrieve_ddc(&pdev->dev);
- if (IS_ERR(vga->ddc)) {
- if (PTR_ERR(vga->ddc) == -ENODEV) {
- dev_dbg(&pdev->dev,
- "No i2c bus specified. Disabling EDID readout\n");
- vga->ddc = NULL;
- } else {
- dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
- return PTR_ERR(vga->ddc);
- }
- }
-
- vga->bridge.funcs = &dumb_vga_bridge_funcs;
- vga->bridge.of_node = pdev->dev.of_node;
- vga->bridge.timings = of_device_get_match_data(&pdev->dev);
-
- drm_bridge_add(&vga->bridge);
-
- return 0;
-}
-
-static int dumb_vga_remove(struct platform_device *pdev)
-{
- struct dumb_vga *vga = platform_get_drvdata(pdev);
-
- drm_bridge_remove(&vga->bridge);
-
- if (vga->ddc)
- i2c_put_adapter(vga->ddc);
-
- return 0;
-}
-
-/*
- * We assume the ADV7123 DAC is the "default" for historical reasons
- * Information taken from the ADV7123 datasheet, revision D.
- * NOTE: the ADV7123EP seems to have other timings and need a new timings
- * set if used.
- */
-static const struct drm_bridge_timings default_dac_timings = {
- /* Timing specifications, datasheet page 7 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- .setup_time_ps = 500,
- .hold_time_ps = 1500,
-};
-
-/*
- * Information taken from the THS8134, THS8134A, THS8134B datasheet named
- * "SLVS205D", dated May 1990, revised March 2000.
- */
-static const struct drm_bridge_timings ti_ths8134_dac_timings = {
- /* From timing diagram, datasheet page 9 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 12 */
- .setup_time_ps = 3000,
- /* I guess this means latched input */
- .hold_time_ps = 0,
-};
-
-/*
- * Information taken from the THS8135 datasheet named "SLAS343B", dated
- * May 2001, revised April 2013.
- */
-static const struct drm_bridge_timings ti_ths8135_dac_timings = {
- /* From timing diagram, datasheet page 14 */
- .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
- /* From datasheet, page 16 */
- .setup_time_ps = 2000,
- .hold_time_ps = 500,
-};
-
-static const struct of_device_id dumb_vga_match[] = {
- {
- .compatible = "dumb-vga-dac",
- .data = NULL,
- },
- {
- .compatible = "adi,adv7123",
- .data = &default_dac_timings,
- },
- {
- .compatible = "ti,ths8135",
- .data = &ti_ths8135_dac_timings,
- },
- {
- .compatible = "ti,ths8134",
- .data = &ti_ths8134_dac_timings,
- },
- {},
-};
-MODULE_DEVICE_TABLE(of, dumb_vga_match);
-
-static struct platform_driver dumb_vga_driver = {
- .probe = dumb_vga_probe,
- .remove = dumb_vga_remove,
- .driver = {
- .name = "dumb-vga-dac",
- .of_match_table = dumb_vga_match,
- },
-};
-module_platform_driver(dumb_vga_driver);
-
-MODULE_AUTHOR("Maxime Ripard <[email protected]>");
-MODULE_DESCRIPTION("Dumb VGA DAC bridge driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/lvds-codec.c b/drivers/gpu/drm/bridge/lvds-codec.c
index 5f04cc11227e..24fb1befdfa2 100644
--- a/drivers/gpu/drm/bridge/lvds-codec.c
+++ b/drivers/gpu/drm/bridge/lvds-codec.c
@@ -21,19 +21,23 @@ struct lvds_codec {
u32 connector_type;
};
-static int lvds_codec_attach(struct drm_bridge *bridge)
+static inline struct lvds_codec *to_lvds_codec(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ return container_of(bridge, struct lvds_codec, bridge);
+}
+
+static int lvds_codec_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
return drm_bridge_attach(bridge->encoder, lvds_codec->panel_bridge,
- bridge);
+ bridge, flags);
}
static void lvds_codec_enable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 0);
@@ -41,14 +45,13 @@ static void lvds_codec_enable(struct drm_bridge *bridge)
static void lvds_codec_disable(struct drm_bridge *bridge)
{
- struct lvds_codec *lvds_codec = container_of(bridge,
- struct lvds_codec, bridge);
+ struct lvds_codec *lvds_codec = to_lvds_codec(bridge);
if (lvds_codec->powerdown_gpio)
gpiod_set_value_cansleep(lvds_codec->powerdown_gpio, 1);
}
-static struct drm_bridge_funcs funcs = {
+static const struct drm_bridge_funcs funcs = {
.attach = lvds_codec_attach,
.enable = lvds_codec_enable,
.disable = lvds_codec_disable,
diff --git a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
index e8a49f6146c6..6200f12a37e6 100644
--- a/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
+++ b/drivers/gpu/drm/bridge/megachips-stdpxxxx-ge-b850v3-fw.c
@@ -206,13 +206,19 @@ static irqreturn_t ge_b850v3_lvds_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int ge_b850v3_lvds_attach(struct drm_bridge *bridge)
+static int ge_b850v3_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct drm_connector *connector = &ge_b850v3_lvds_ptr->connector;
struct i2c_client *stdp4028_i2c
= ge_b850v3_lvds_ptr->stdp4028_i2c;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/nxp-ptn3460.c b/drivers/gpu/drm/bridge/nxp-ptn3460.c
index 57ff01339559..438e566ce0a4 100644
--- a/drivers/gpu/drm/bridge/nxp-ptn3460.c
+++ b/drivers/gpu/drm/bridge/nxp-ptn3460.c
@@ -236,11 +236,17 @@ static const struct drm_connector_funcs ptn3460_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ptn3460_bridge_attach(struct drm_bridge *bridge)
+static int ptn3460_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ptn3460_bridge *ptn_bridge = bridge_to_ptn3460(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/panel.c b/drivers/gpu/drm/bridge/panel.c
index f66777e24968..8461ee8304ba 100644
--- a/drivers/gpu/drm/bridge/panel.c
+++ b/drivers/gpu/drm/bridge/panel.c
@@ -53,12 +53,16 @@ static const struct drm_connector_funcs panel_bridge_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int panel_bridge_attach(struct drm_bridge *bridge)
+static int panel_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
struct drm_connector *connector = &panel_bridge->connector;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
DRM_ERROR("Missing encoder\n");
return -ENODEV;
@@ -120,6 +124,14 @@ static void panel_bridge_post_disable(struct drm_bridge *bridge)
drm_panel_unprepare(panel_bridge->panel);
}
+static int panel_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ struct panel_bridge *panel_bridge = drm_bridge_to_panel_bridge(bridge);
+
+ return drm_panel_get_modes(panel_bridge->panel, connector);
+}
+
static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.attach = panel_bridge_attach,
.detach = panel_bridge_detach,
@@ -127,6 +139,11 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
.enable = panel_bridge_enable,
.disable = panel_bridge_disable,
.post_disable = panel_bridge_post_disable,
+ .get_modes = panel_bridge_get_modes,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt,
};
/**
@@ -151,7 +168,7 @@ static const struct drm_bridge_funcs panel_bridge_bridge_funcs = {
* known type. Calling this function with a panel whose connector type is
* DRM_MODE_CONNECTOR_Unknown will return NULL.
*
- * See devm_drm_panel_bridge_add() for an automatically manged version of this
+ * See devm_drm_panel_bridge_add() for an automatically managed version of this
* function.
*/
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel)
@@ -196,6 +213,8 @@ struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
#ifdef CONFIG_OF
panel_bridge->bridge.of_node = panel->dev->of_node;
#endif
+ panel_bridge->bridge.ops = DRM_BRIDGE_OP_MODES;
+ panel_bridge->bridge.type = connector_type;
drm_bridge_add(&panel_bridge->bridge);
diff --git a/drivers/gpu/drm/bridge/parade-ps8622.c b/drivers/gpu/drm/bridge/parade-ps8622.c
index 10c47c008b40..d789ea2a7fb9 100644
--- a/drivers/gpu/drm/bridge/parade-ps8622.c
+++ b/drivers/gpu/drm/bridge/parade-ps8622.c
@@ -476,11 +476,17 @@ static const struct drm_connector_funcs ps8622_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int ps8622_attach(struct drm_bridge *bridge)
+static int ps8622_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct ps8622_bridge *ps8622 = bridge_to_ps8622(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
if (!bridge->encoder) {
DRM_ERROR("Parent encoder object not found");
return -ENODEV;
diff --git a/drivers/gpu/drm/bridge/parade-ps8640.c b/drivers/gpu/drm/bridge/parade-ps8640.c
new file mode 100644
index 000000000000..d3a53442d449
--- /dev/null
+++ b/drivers/gpu/drm/bridge/parade-ps8640.c
@@ -0,0 +1,349 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2016 MediaTek Inc.
+ */
+
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_bridge.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define PAGE2_GPIO_H 0xa7
+#define PS_GPIO9 BIT(1)
+#define PAGE2_I2C_BYPASS 0xea
+#define I2C_BYPASS_EN 0xd0
+#define PAGE2_MCS_EN 0xf3
+#define MCS_EN BIT(0)
+#define PAGE3_SET_ADD 0xfe
+#define VDO_CTL_ADD 0x13
+#define VDO_DIS 0x18
+#define VDO_EN 0x1c
+#define DP_NUM_LANES 4
+
+/*
+ * PS8640 uses multiple addresses:
+ * page[0]: for DP control
+ * page[1]: for VIDEO Bridge
+ * page[2]: for control top
+ * page[3]: for DSI Link Control1
+ * page[4]: for MIPI Phy
+ * page[5]: for VPLL
+ * page[6]: for DSI Link Control2
+ * page[7]: for SPI ROM mapping
+ */
+enum page_addr_offset {
+ PAGE0_DP_CNTL = 0,
+ PAGE1_VDO_BDG,
+ PAGE2_TOP_CNTL,
+ PAGE3_DSI_CNTL1,
+ PAGE4_MIPI_PHY,
+ PAGE5_VPLL,
+ PAGE6_DSI_CNTL2,
+ PAGE7_SPI_CNTL,
+ MAX_DEVS
+};
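+
+/*
+ * Each page is reached through its own dummy I2C client, created in
+ * probe at the main client's address plus the page offset above.
+ */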
+
+enum ps8640_vdo_control {
+ DISABLE = VDO_DIS,
+ ENABLE = VDO_EN,
+};
+
+struct ps8640 {
+ struct drm_bridge bridge;
+ struct drm_bridge *panel_bridge;
+ struct mipi_dsi_device *dsi;
+ struct i2c_client *page[MAX_DEVS];
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *gpio_reset;
+ struct gpio_desc *gpio_powerdown;
+};
+
+static inline struct ps8640 *bridge_to_ps8640(struct drm_bridge *e)
+{
+ return container_of(e, struct ps8640, bridge);
+}
+
+static int ps8640_bridge_vdo_control(struct ps8640 *ps_bridge,
+ const enum ps8640_vdo_control ctrl)
+{
+ struct i2c_client *client = ps_bridge->page[PAGE3_DSI_CNTL1];
+ u8 vdo_ctrl_buf[] = { VDO_CTL_ADD, ctrl };
+ int ret;
+
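+	/*
+	 * A single block write to PAGE3_SET_ADD carries the pair
+	 * { VDO_CTL_ADD, ctrl } that switches the video output on or off.
+	 */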
+ ret = i2c_smbus_write_i2c_block_data(client, PAGE3_SET_ADD,
+ sizeof(vdo_ctrl_buf),
+ vdo_ctrl_buf);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void ps8640_pre_enable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct i2c_client *client = ps_bridge->page[PAGE2_TOP_CNTL];
+ unsigned long timeout;
+ int ret, status;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0) {
+ DRM_ERROR("cannot enable regulators %d\n", ret);
+ return;
+ }
+
+ gpiod_set_value(ps_bridge->gpio_powerdown, 0);
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ usleep_range(2000, 2500);
+ gpiod_set_value(ps_bridge->gpio_reset, 0);
+
+ /*
+ * Wait for the ps8640 embedded MCU to be ready
+ * First wait 200ms and then check the MCU ready flag every 20ms
+ */
+ msleep(200);
+
+ timeout = jiffies + msecs_to_jiffies(200) + 1;
+
+ while (time_is_after_jiffies(timeout)) {
+ status = i2c_smbus_read_byte_data(client, PAGE2_GPIO_H);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_GPIO_H: %d\n", status);
+ goto err_regulators_disable;
+ }
+ if ((status & PS_GPIO9) == PS_GPIO9)
+ break;
+
+ msleep(20);
+ }
+
+ msleep(50);
+
+ /*
+ * The Manufacturer Command Set (MCS) is a device dependent interface
+ * intended for factory programming of the display module default
+ * parameters. Once the display module is configured, the MCS shall be
+ * disabled by the manufacturer. Once disabled, all MCS commands are
+ * ignored by the display interface.
+ */
+ status = i2c_smbus_read_byte_data(client, PAGE2_MCS_EN);
+ if (status < 0) {
+ DRM_ERROR("failed read PAGE2_MCS_EN: %d\n", status);
+ goto err_regulators_disable;
+ }
+
+ ret = i2c_smbus_write_byte_data(client, PAGE2_MCS_EN,
+ status & ~MCS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_MCS_EN: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, ENABLE);
+ if (ret) {
+ DRM_ERROR("failed to enable VDO: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+	/* Switch on the I2C bypass to access the eDP panel's EDID over I2C */
+ ret = i2c_smbus_write_byte_data(client, PAGE2_I2C_BYPASS,
+ I2C_BYPASS_EN);
+ if (ret < 0) {
+ DRM_ERROR("failed write PAGE2_I2C_BYPASS: %d\n", ret);
+ goto err_regulators_disable;
+ }
+
+ return;
+
+err_regulators_disable:
+ regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+}
+
+static void ps8640_post_disable(struct drm_bridge *bridge)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ int ret;
+
+ ret = ps8640_bridge_vdo_control(ps_bridge, DISABLE);
+ if (ret < 0)
+ DRM_ERROR("failed to disable VDO: %d\n", ret);
+
+ gpiod_set_value(ps_bridge->gpio_reset, 1);
+ gpiod_set_value(ps_bridge->gpio_powerdown, 1);
+ ret = regulator_bulk_disable(ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret < 0)
+ DRM_ERROR("cannot disable regulators %d\n", ret);
+}
+
+static int ps8640_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct ps8640 *ps_bridge = bridge_to_ps8640(bridge);
+ struct device *dev = &ps_bridge->page[0]->dev;
+ struct device_node *in_ep, *dsi_node;
+ struct mipi_dsi_device *dsi;
+ struct mipi_dsi_host *host;
+ int ret;
+ const struct mipi_dsi_device_info info = { .type = "ps8640",
+ .channel = 0,
+ .node = NULL,
+ };
+ /* port@0 is ps8640 dsi input port */
+ in_ep = of_graph_get_endpoint_by_regs(dev->of_node, 0, -1);
+ if (!in_ep)
+ return -ENODEV;
+
+ dsi_node = of_graph_get_remote_port_parent(in_ep);
+ of_node_put(in_ep);
+ if (!dsi_node)
+ return -ENODEV;
+
+ host = of_find_mipi_dsi_host_by_node(dsi_node);
+ of_node_put(dsi_node);
+ if (!host)
+ return -ENODEV;
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ dev_err(dev, "failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ return ret;
+ }
+
+ ps_bridge->dsi = dsi;
+
+ dsi->host = host;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO |
+ MIPI_DSI_MODE_VIDEO_SYNC_PULSE;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = DP_NUM_LANES;
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ goto err_dsi_attach;
+
+ /* Attach the panel-bridge to the dsi bridge */
+ return drm_bridge_attach(bridge->encoder, ps_bridge->panel_bridge,
+ &ps_bridge->bridge, flags);
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+ return ret;
+}
+
+static const struct drm_bridge_funcs ps8640_bridge_funcs = {
+ .attach = ps8640_bridge_attach,
+ .post_disable = ps8640_post_disable,
+ .pre_enable = ps8640_pre_enable,
+};
+
+static int ps8640_probe(struct i2c_client *client)
+{
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ struct ps8640 *ps_bridge;
+ struct drm_panel *panel;
+ int ret;
+ u32 i;
+
+ ps_bridge = devm_kzalloc(dev, sizeof(*ps_bridge), GFP_KERNEL);
+ if (!ps_bridge)
+ return -ENOMEM;
+
+ /* port@1 is ps8640 output port */
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &panel, NULL);
+ if (ret < 0)
+ return ret;
+ if (!panel)
+ return -ENODEV;
+
+ panel->connector_type = DRM_MODE_CONNECTOR_eDP;
+
+ ps_bridge->panel_bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(ps_bridge->panel_bridge))
+ return PTR_ERR(ps_bridge->panel_bridge);
+
+ ps_bridge->supplies[0].supply = "vdd33";
+ ps_bridge->supplies[1].supply = "vdd12";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ps_bridge->supplies),
+ ps_bridge->supplies);
+ if (ret)
+ return ret;
+
+ ps_bridge->gpio_powerdown = devm_gpiod_get(&client->dev, "powerdown",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_powerdown))
+ return PTR_ERR(ps_bridge->gpio_powerdown);
+
+ /*
+ * Assert the reset to avoid the bridge being initialized prematurely
+ */
+ ps_bridge->gpio_reset = devm_gpiod_get(&client->dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(ps_bridge->gpio_reset))
+ return PTR_ERR(ps_bridge->gpio_reset);
+
+ ps_bridge->bridge.funcs = &ps8640_bridge_funcs;
+ ps_bridge->bridge.of_node = dev->of_node;
+
+ ps_bridge->page[PAGE0_DP_CNTL] = client;
+
+ for (i = 1; i < ARRAY_SIZE(ps_bridge->page); i++) {
+ ps_bridge->page[i] = devm_i2c_new_dummy_device(&client->dev,
+ client->adapter,
+ client->addr + i);
+ if (IS_ERR(ps_bridge->page[i])) {
+ dev_err(dev, "failed i2c dummy device, address %02x\n",
+ client->addr + i);
+ return PTR_ERR(ps_bridge->page[i]);
+ }
+ }
+
+ i2c_set_clientdata(client, ps_bridge);
+
+ drm_bridge_add(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static int ps8640_remove(struct i2c_client *client)
+{
+ struct ps8640 *ps_bridge = i2c_get_clientdata(client);
+
+ drm_bridge_remove(&ps_bridge->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id ps8640_match[] = {
+ { .compatible = "parade,ps8640" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ps8640_match);
+
+static struct i2c_driver ps8640_driver = {
+ .probe_new = ps8640_probe,
+ .remove = ps8640_remove,
+ .driver = {
+ .name = "ps8640",
+ .of_match_table = ps8640_match,
+ },
+};
+module_i2c_driver(ps8640_driver);
+
+MODULE_AUTHOR("Jitao Shi <[email protected]>");
+MODULE_AUTHOR("CK Hu <[email protected]>");
+MODULE_AUTHOR("Enric Balletbo i Serra <[email protected]>");
+MODULE_DESCRIPTION("PARADE ps8640 DSI-eDP converter driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/sii902x.c b/drivers/gpu/drm/bridge/sii902x.c
index b70e8c5cf2e1..6dad025f8da7 100644
--- a/drivers/gpu/drm/bridge/sii902x.c
+++ b/drivers/gpu/drm/bridge/sii902x.c
@@ -399,12 +399,18 @@ out:
mutex_unlock(&sii902x->mutex);
}
-static int sii902x_bridge_attach(struct drm_bridge *bridge)
+static int sii902x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii902x *sii902x = bridge_to_sii902x(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
drm_connector_helper_add(&sii902x->connector,
&sii902x_connector_helper_funcs);
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 4c0eef406eb1..92acd336aa89 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -2202,7 +2202,8 @@ static inline struct sii8620 *bridge_to_sii8620(struct drm_bridge *bridge)
return container_of(bridge, struct sii8620, bridge);
}
-static int sii8620_attach(struct drm_bridge *bridge)
+static int sii8620_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c
new file mode 100644
index 000000000000..a2dca7a3ef03
--- /dev/null
+++ b/drivers/gpu/drm/bridge/simple-bridge.c
@@ -0,0 +1,342 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015-2016 Free Electrons
+ * Copyright (C) 2015-2016 NextThing Co
+ *
+ * Maxime Ripard <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_print.h>
+#include <drm/drm_probe_helper.h>
+
+struct simple_bridge_info {
+ const struct drm_bridge_timings *timings;
+ unsigned int connector_type;
+};
+
+struct simple_bridge {
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+
+ const struct simple_bridge_info *info;
+
+ struct i2c_adapter *ddc;
+ struct regulator *vdd;
+ struct gpio_desc *enable;
+};
+
+static inline struct simple_bridge *
+drm_bridge_to_simple_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct simple_bridge, bridge);
+}
+
+static inline struct simple_bridge *
+drm_connector_to_simple_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct simple_bridge, connector);
+}
+
+static int simple_bridge_get_modes(struct drm_connector *connector)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+ struct edid *edid;
+ int ret;
+
+ if (!sbridge->ddc)
+ goto fallback;
+
+ edid = drm_get_edid(connector, sbridge->ddc);
+ if (!edid) {
+ DRM_INFO("EDID readout failed, falling back to standard modes\n");
+ goto fallback;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ ret = drm_add_edid_modes(connector, edid);
+ kfree(edid);
+ return ret;
+
+fallback:
+ /*
+	 * In case we cannot retrieve the EDID (broken or missing i2c
+	 * bus), fall back on the standard XGA modes.
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+
+ /* And prefer a mode pretty much anyone can handle */
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return ret;
+}
+
+static const struct drm_connector_helper_funcs simple_bridge_con_helper_funcs = {
+ .get_modes = simple_bridge_get_modes,
+};
+
+static enum drm_connector_status
+simple_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct simple_bridge *sbridge = drm_connector_to_simple_bridge(connector);
+
+ /*
+ * Even if we have an I2C bus, we can't assume that the cable
+ * is disconnected if drm_probe_ddc fails. Some cables don't
+ * wire the DDC pins, or the I2C bus might not be working at
+ * all.
+ */
+ if (sbridge->ddc && drm_probe_ddc(sbridge->ddc))
+ return connector_status_connected;
+
+ return connector_status_unknown;
+}
+
+static const struct drm_connector_funcs simple_bridge_con_funcs = {
+ .detect = simple_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static int simple_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
+ if (!bridge->encoder) {
+ DRM_ERROR("Missing encoder\n");
+ return -ENODEV;
+ }
+
+ drm_connector_helper_add(&sbridge->connector,
+ &simple_bridge_con_helper_funcs);
+ ret = drm_connector_init_with_ddc(bridge->dev, &sbridge->connector,
+ &simple_bridge_con_funcs,
+ sbridge->info->connector_type,
+ sbridge->ddc);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_attach_encoder(&sbridge->connector,
+ bridge->encoder);
+
+ return 0;
+}
+
+static void simple_bridge_enable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+ int ret;
+
+ if (sbridge->vdd) {
+ ret = regulator_enable(sbridge->vdd);
+ if (ret)
+ DRM_ERROR("Failed to enable vdd regulator: %d\n", ret);
+ }
+
+ gpiod_set_value_cansleep(sbridge->enable, 1);
+}
+
+static void simple_bridge_disable(struct drm_bridge *bridge)
+{
+ struct simple_bridge *sbridge = drm_bridge_to_simple_bridge(bridge);
+
+ gpiod_set_value_cansleep(sbridge->enable, 0);
+
+ if (sbridge->vdd)
+ regulator_disable(sbridge->vdd);
+}
+
+static const struct drm_bridge_funcs simple_bridge_bridge_funcs = {
+ .attach = simple_bridge_attach,
+ .enable = simple_bridge_enable,
+ .disable = simple_bridge_disable,
+};
+
+static struct i2c_adapter *simple_bridge_retrieve_ddc(struct device *dev)
+{
+ struct device_node *phandle, *remote;
+ struct i2c_adapter *ddc;
+
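+	/*
+	 * The connector node (remote end of output port 1) references the
+	 * DDC bus through the standard ddc-i2c-bus phandle.
+	 */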
+ remote = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!remote)
+ return ERR_PTR(-EINVAL);
+
+ phandle = of_parse_phandle(remote, "ddc-i2c-bus", 0);
+ of_node_put(remote);
+ if (!phandle)
+ return ERR_PTR(-ENODEV);
+
+ ddc = of_get_i2c_adapter_by_node(phandle);
+ of_node_put(phandle);
+ if (!ddc)
+ return ERR_PTR(-EPROBE_DEFER);
+
+ return ddc;
+}
+
+static int simple_bridge_probe(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge;
+
+ sbridge = devm_kzalloc(&pdev->dev, sizeof(*sbridge), GFP_KERNEL);
+ if (!sbridge)
+ return -ENOMEM;
+ platform_set_drvdata(pdev, sbridge);
+
+ sbridge->info = of_device_get_match_data(&pdev->dev);
+
+ sbridge->vdd = devm_regulator_get_optional(&pdev->dev, "vdd");
+ if (IS_ERR(sbridge->vdd)) {
+ int ret = PTR_ERR(sbridge->vdd);
+ if (ret == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ sbridge->vdd = NULL;
+ dev_dbg(&pdev->dev, "No vdd regulator found: %d\n", ret);
+ }
+
+ sbridge->enable = devm_gpiod_get_optional(&pdev->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(sbridge->enable)) {
+ if (PTR_ERR(sbridge->enable) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "Unable to retrieve enable GPIO\n");
+ return PTR_ERR(sbridge->enable);
+ }
+
+ sbridge->ddc = simple_bridge_retrieve_ddc(&pdev->dev);
+ if (IS_ERR(sbridge->ddc)) {
+ if (PTR_ERR(sbridge->ddc) == -ENODEV) {
+ dev_dbg(&pdev->dev,
+ "No i2c bus specified. Disabling EDID readout\n");
+ sbridge->ddc = NULL;
+ } else {
+ dev_err(&pdev->dev, "Couldn't retrieve i2c bus\n");
+ return PTR_ERR(sbridge->ddc);
+ }
+ }
+
+ sbridge->bridge.funcs = &simple_bridge_bridge_funcs;
+ sbridge->bridge.of_node = pdev->dev.of_node;
+ sbridge->bridge.timings = sbridge->info->timings;
+
+ drm_bridge_add(&sbridge->bridge);
+
+ return 0;
+}
+
+static int simple_bridge_remove(struct platform_device *pdev)
+{
+ struct simple_bridge *sbridge = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&sbridge->bridge);
+
+ if (sbridge->ddc)
+ i2c_put_adapter(sbridge->ddc);
+
+ return 0;
+}
+
+/*
+ * We assume the ADV7123 DAC is the "default" for historical reasons.
+ * Information taken from the ADV7123 datasheet, revision D.
+ * NOTE: the ADV7123EP seems to have other timings and needs a new
+ * timings set if used.
+ */
+static const struct drm_bridge_timings default_bridge_timings = {
+ /* Timing specifications, datasheet page 7 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ .setup_time_ps = 500,
+ .hold_time_ps = 1500,
+};
+
+/*
+ * Information taken from the THS8134, THS8134A, THS8134B datasheet named
+ * "SLVS205D", dated May 1990, revised March 2000.
+ */
+static const struct drm_bridge_timings ti_ths8134_bridge_timings = {
+ /* From timing diagram, datasheet page 9 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 12 */
+ .setup_time_ps = 3000,
+ /* I guess this means latched input */
+ .hold_time_ps = 0,
+};
+
+/*
+ * Information taken from the THS8135 datasheet named "SLAS343B", dated
+ * May 2001, revised April 2013.
+ */
+static const struct drm_bridge_timings ti_ths8135_bridge_timings = {
+ /* From timing diagram, datasheet page 14 */
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE,
+ /* From datasheet, page 16 */
+ .setup_time_ps = 2000,
+ .hold_time_ps = 500,
+};
+
+static const struct of_device_id simple_bridge_match[] = {
+ {
+ .compatible = "dumb-vga-dac",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "adi,adv7123",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &default_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,opa362",
+ .data = &(const struct simple_bridge_info) {
+ .connector_type = DRM_MODE_CONNECTOR_Composite,
+ },
+ }, {
+ .compatible = "ti,ths8135",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8135_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ }, {
+ .compatible = "ti,ths8134",
+ .data = &(const struct simple_bridge_info) {
+ .timings = &ti_ths8134_bridge_timings,
+ .connector_type = DRM_MODE_CONNECTOR_VGA,
+ },
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, simple_bridge_match);
+
+static struct platform_driver simple_bridge_driver = {
+ .probe = simple_bridge_probe,
+ .remove = simple_bridge_remove,
+ .driver = {
+ .name = "simple-bridge",
+ .of_match_table = simple_bridge_match,
+ },
+};
+module_platform_driver(simple_bridge_driver);
+
+MODULE_AUTHOR("Maxime Ripard <[email protected]>");
+MODULE_DESCRIPTION("Simple DRM bridge driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
index 67fca439bbfb..f85c15ad8486 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-hdmi.c
@@ -1814,13 +1814,32 @@ static void hdmi_av_composer(struct dw_hdmi *hdmi,
int hblank, vblank, h_de_hs, v_de_vs, hsync_len, vsync_len;
unsigned int vdisplay, hdisplay;
- vmode->mtmdsclock = vmode->mpixelclock = mode->clock * 1000;
+ vmode->mpixelclock = mode->clock * 1000;
dev_dbg(hdmi->dev, "final pixclk = %d\n", vmode->mpixelclock);
+ vmode->mtmdsclock = vmode->mpixelclock;
+
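+	/*
+	 * Deep color scales the TMDS clock by bpc/8; YUV422 is exempt as it
+	 * carries 12 bpc at the pixel clock rate. For example, a 148.5 MHz
+	 * pixel clock at 12 bpc yields a 222.75 MHz TMDS clock.
+	 */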
+ if (!hdmi_bus_fmt_is_yuv422(hdmi->hdmi_data.enc_out_bus_format)) {
+ switch (hdmi_bus_fmt_color_depth(
+ hdmi->hdmi_data.enc_out_bus_format)) {
+ case 16:
+ vmode->mtmdsclock = vmode->mpixelclock * 2;
+ break;
+ case 12:
+ vmode->mtmdsclock = vmode->mpixelclock * 3 / 2;
+ break;
+ case 10:
+ vmode->mtmdsclock = vmode->mpixelclock * 5 / 4;
+ break;
+ }
+ }
+
if (hdmi_bus_fmt_is_yuv420(hdmi->hdmi_data.enc_out_bus_format))
vmode->mtmdsclock /= 2;
+ dev_dbg(hdmi->dev, "final tmdsclock = %d\n", vmode->mtmdsclock);
+
/* Set up HDMI_FC_INVIDCONF */
inv_val = (hdmi->hdmi_data.hdcp_enable ||
(dw_hdmi_support_scdc(hdmi) &&
@@ -2078,11 +2097,10 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
hdmi->hdmi_data.video_mode.mpixelrepetitionoutput = 0;
hdmi->hdmi_data.video_mode.mpixelrepetitioninput = 0;
- /* TOFIX: Get input format from plat data or fallback to RGB888 */
if (hdmi->plat_data->input_bus_format)
hdmi->hdmi_data.enc_in_bus_format =
hdmi->plat_data->input_bus_format;
- else
+ else if (hdmi->hdmi_data.enc_in_bus_format == MEDIA_BUS_FMT_FIXED)
hdmi->hdmi_data.enc_in_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
/* TOFIX: Get input encoding from plat data or fallback to none */
@@ -2092,8 +2110,8 @@ static int dw_hdmi_setup(struct dw_hdmi *hdmi, struct drm_display_mode *mode)
else
hdmi->hdmi_data.enc_in_encoding = V4L2_YCBCR_ENC_DEFAULT;
- /* TOFIX: Default to RGB888 output format */
- hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+ if (hdmi->hdmi_data.enc_out_bus_format == MEDIA_BUS_FMT_FIXED)
+ hdmi->hdmi_data.enc_out_bus_format = MEDIA_BUS_FMT_RGB888_1X24;
hdmi->hdmi_data.pix_repet_factor = 0;
hdmi->hdmi_data.hdcp_enable = 0;
@@ -2371,7 +2389,279 @@ static const struct drm_connector_helper_funcs dw_hdmi_connector_helper_funcs =
.atomic_check = dw_hdmi_connector_atomic_check,
};
-static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
+/*
+ * Possible output formats :
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48,
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36,
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30,
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+ * - MEDIA_BUS_FMT_YUV16_1X48,
+ * - MEDIA_BUS_FMT_RGB161616_1X48,
+ * - MEDIA_BUS_FMT_UYVY12_1X24,
+ * - MEDIA_BUS_FMT_YUV12_1X36,
+ * - MEDIA_BUS_FMT_RGB121212_1X36,
+ * - MEDIA_BUS_FMT_UYVY10_1X20,
+ * - MEDIA_BUS_FMT_YUV10_1X30,
+ * - MEDIA_BUS_FMT_RGB101010_1X30,
+ * - MEDIA_BUS_FMT_UYVY8_1X16,
+ * - MEDIA_BUS_FMT_YUV8_1X24,
+ * - MEDIA_BUS_FMT_RGB888_1X24,
+ */
+
+/* Can return a maximum of 11 possible output formats for a mode/connector */
+#define MAX_OUTPUT_SEL_FORMATS 11
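+/*
+ * 11 is the worst case of the non-4:2:0 path below: a 16-bit pair, the
+ * 12-bit and 10-bit triplets, two 8-bit YUV formats and the RGB888
+ * fallback (2 + 3 + 3 + 2 + 1).
+ */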
+
+static u32 *dw_hdmi_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_display_info *info = &conn->display_info;
+ struct drm_display_mode *mode = &crtc_state->mode;
+ u8 max_bpc = conn_state->max_requested_bpc;
+ bool is_hdmi2_sink = info->hdmi.scdc.supported ||
+ (info->color_formats & DRM_COLOR_FORMAT_YCRCB420);
+ u32 *output_fmts;
+ unsigned int i = 0;
+
+ *num_output_fmts = 0;
+
+ output_fmts = kcalloc(MAX_OUTPUT_SEL_FORMATS, sizeof(*output_fmts),
+ GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+	/* If dw-hdmi is the only bridge, avoid negotiating with ourselves */
+ if (list_is_singular(&bridge->encoder->bridge_chain)) {
+ *num_output_fmts = 1;
+ output_fmts[0] = MEDIA_BUS_FMT_FIXED;
+
+ return output_fmts;
+ }
+
+ /*
+	 * If the current mode enforces 4:2:0, force the output bus format
+	 * to 4:2:0 and do not add the YUV422/444/RGB formats.
+ */
+ if (conn->ycbcr_420_allowed &&
+ (drm_mode_is_420_only(info, mode) ||
+ (is_hdmi2_sink && drm_mode_is_420_also(info, mode)))) {
+
+ /* Order bus formats from 16bit to 8bit if supported */
+ if (max_bpc >= 16 && info->bpc == 16 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_48))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY16_0_5X48;
+
+ if (max_bpc >= 12 && info->bpc >= 12 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_36))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY12_0_5X36;
+
+ if (max_bpc >= 10 && info->bpc >= 10 &&
+ (info->hdmi.y420_dc_modes & DRM_EDID_YCBCR420_DC_30))
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY10_0_5X30;
+
+ /* Default 8bit fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_UYYVYY8_0_5X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+ }
+
+ /*
+	 * Order bus formats from 16-bit to 8-bit and from YUV422 to RGB
+	 * if supported. In any case the default RGB888 format is added.
+ */
+
+ if (max_bpc >= 16 && info->bpc == 16) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ }
+
+ if (max_bpc >= 12 && info->bpc >= 12) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ }
+
+ if (max_bpc >= 10 && info->bpc >= 10) {
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ }
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB422)
+ output_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+
+ if (info->color_formats & DRM_COLOR_FORMAT_YCRCB444)
+ output_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+
+ /* Default 8bit RGB fallback */
+ output_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+
+ *num_output_fmts = i;
+
+ return output_fmts;
+}
+
+/*
+ * Possible input formats :
+ * - MEDIA_BUS_FMT_RGB888_1X24
+ * - MEDIA_BUS_FMT_YUV8_1X24
+ * - MEDIA_BUS_FMT_UYVY8_1X16
+ * - MEDIA_BUS_FMT_UYYVYY8_0_5X24
+ * - MEDIA_BUS_FMT_RGB101010_1X30
+ * - MEDIA_BUS_FMT_YUV10_1X30
+ * - MEDIA_BUS_FMT_UYVY10_1X20
+ * - MEDIA_BUS_FMT_UYYVYY10_0_5X30
+ * - MEDIA_BUS_FMT_RGB121212_1X36
+ * - MEDIA_BUS_FMT_YUV12_1X36
+ * - MEDIA_BUS_FMT_UYVY12_1X24
+ * - MEDIA_BUS_FMT_UYYVYY12_0_5X36
+ * - MEDIA_BUS_FMT_RGB161616_1X48
+ * - MEDIA_BUS_FMT_YUV16_1X48
+ * - MEDIA_BUS_FMT_UYYVYY16_0_5X48
+ */
+
+/* Can return a maximum of 3 possible input formats for an output format */
+#define MAX_INPUT_SEL_FORMATS 3
+
+static u32 *dw_hdmi_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+ unsigned int i = 0;
+
+ *num_input_fmts = 0;
+
+ input_fmts = kcalloc(MAX_INPUT_SEL_FORMATS, sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
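+	/*
+	 * For each output format the identical input format is listed first,
+	 * followed by the alternatives the internal colorspace converter can
+	 * translate from.
+	 */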
+ switch (output_fmt) {
+ /* If MEDIA_BUS_FMT_FIXED is tested, return default bus format */
+ case MEDIA_BUS_FMT_FIXED:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ /* 8bit */
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ break;
+ case MEDIA_BUS_FMT_YUV8_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+ case MEDIA_BUS_FMT_UYVY8_1X16:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY8_1X16;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV8_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB888_1X24;
+ break;
+
+ /* 10bit */
+ case MEDIA_BUS_FMT_RGB101010_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ break;
+ case MEDIA_BUS_FMT_YUV10_1X30:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+ case MEDIA_BUS_FMT_UYVY10_1X20:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY10_1X20;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV10_1X30;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB101010_1X30;
+ break;
+
+ /* 12bit */
+ case MEDIA_BUS_FMT_RGB121212_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ break;
+ case MEDIA_BUS_FMT_YUV12_1X36:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+ case MEDIA_BUS_FMT_UYVY12_1X24:
+ input_fmts[i++] = MEDIA_BUS_FMT_UYVY12_1X24;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV12_1X36;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB121212_1X36;
+ break;
+
+ /* 16bit */
+ case MEDIA_BUS_FMT_RGB161616_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ break;
+ case MEDIA_BUS_FMT_YUV16_1X48:
+ input_fmts[i++] = MEDIA_BUS_FMT_YUV16_1X48;
+ input_fmts[i++] = MEDIA_BUS_FMT_RGB161616_1X48;
+ break;
+
+	/* YUV 4:2:0 */
+ case MEDIA_BUS_FMT_UYYVYY8_0_5X24:
+ case MEDIA_BUS_FMT_UYYVYY10_0_5X30:
+ case MEDIA_BUS_FMT_UYYVYY12_0_5X36:
+ case MEDIA_BUS_FMT_UYYVYY16_0_5X48:
+ input_fmts[i++] = output_fmt;
+ break;
+ }
+
+ *num_input_fmts = i;
+
+ if (*num_input_fmts == 0) {
+ kfree(input_fmts);
+ input_fmts = NULL;
+ }
+
+ return input_fmts;
+}
+
+static int dw_hdmi_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dw_hdmi *hdmi = bridge->driver_private;
+
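+	/*
+	 * Latch the bus formats negotiated through the atomic bridge state;
+	 * dw_hdmi_setup() then uses them instead of the RGB888 defaults.
+	 */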
+ hdmi->hdmi_data.enc_out_bus_format =
+ bridge_state->output_bus_cfg.format;
+
+ hdmi->hdmi_data.enc_in_bus_format =
+ bridge_state->input_bus_cfg.format;
+
+ dev_dbg(hdmi->dev, "input format 0x%04x, output format 0x%04x\n",
+ bridge_state->input_bus_cfg.format,
+ bridge_state->output_bus_cfg.format);
+
+ return 0;
+}
+
+static int dw_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_hdmi *hdmi = bridge->driver_private;
struct drm_encoder *encoder = bridge->encoder;
@@ -2379,6 +2669,11 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
struct cec_connector_info conn_info;
struct cec_notifier *notifier;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
connector->interlace_allowed = 1;
connector->polled = DRM_CONNECTOR_POLL_HPD;
@@ -2389,6 +2684,14 @@ static int dw_hdmi_bridge_attach(struct drm_bridge *bridge)
DRM_MODE_CONNECTOR_HDMIA,
hdmi->ddc);
+ /*
+ * drm_connector_attach_max_bpc_property() requires the
+ * connector to have a state.
+ */
+ drm_atomic_helper_connector_reset(connector);
+
+ drm_connector_attach_max_bpc_property(connector, 8, 16);
+
if (hdmi->version >= 0x200a && hdmi->plat_data->use_drm_infoframe)
drm_object_attach_property(&connector->base,
connector->dev->mode_config.hdr_output_metadata_property, 0);
@@ -2473,8 +2776,14 @@ static void dw_hdmi_bridge_enable(struct drm_bridge *bridge)
}
static const struct drm_bridge_funcs dw_hdmi_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.attach = dw_hdmi_bridge_attach,
.detach = dw_hdmi_bridge_detach,
+ .atomic_check = dw_hdmi_bridge_atomic_check,
+ .atomic_get_output_bus_fmts = dw_hdmi_bridge_atomic_get_output_bus_fmts,
+ .atomic_get_input_bus_fmts = dw_hdmi_bridge_atomic_get_input_bus_fmts,
.enable = dw_hdmi_bridge_enable,
.disable = dw_hdmi_bridge_disable,
.mode_set = dw_hdmi_bridge_mode_set,
@@ -2943,6 +3252,12 @@ __dw_hdmi_probe(struct platform_device *pdev,
hdmi->bridge.of_node = pdev->dev.of_node;
#endif
+ if (hdmi->version >= 0x200a)
+ hdmi->connector.ycbcr_420_allowed =
+ hdmi->plat_data->ycbcr_420_allowed;
+ else
+ hdmi->connector.ycbcr_420_allowed = false;
+
memset(&pdevinfo, 0, sizeof(pdevinfo));
pdevinfo.parent = dev;
pdevinfo.id = PLATFORM_DEVID_AUTO;
@@ -3076,7 +3391,7 @@ struct dw_hdmi *dw_hdmi_bind(struct platform_device *pdev,
if (IS_ERR(hdmi))
return hdmi;
- ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &hdmi->bridge, NULL, 0);
if (ret) {
dw_hdmi_remove(hdmi);
DRM_ERROR("Failed to initialize bridge with drm\n");
diff --git a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
index b18351b6760a..5ef0f154aa7b 100644
--- a/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
+++ b/drivers/gpu/drm/bridge/synopsys/dw-mipi-dsi.c
@@ -824,7 +824,8 @@ static void dw_mipi_dsi_bridge_post_disable(struct drm_bridge *bridge)
* This needs to be fixed in the drm_bridge framework and the API
* needs to be updated to manage our own call chains...
*/
- dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
+ if (dsi->panel_bridge->funcs->post_disable)
+ dsi->panel_bridge->funcs->post_disable(dsi->panel_bridge);
if (phy_ops->power_off)
phy_ops->power_off(dsi->plat_data->priv_data);
@@ -935,7 +936,8 @@ dw_mipi_dsi_bridge_mode_valid(struct drm_bridge *bridge,
return mode_status;
}
-static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
+static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct dw_mipi_dsi *dsi = bridge_to_dsi(bridge);
@@ -948,7 +950,8 @@ static int dw_mipi_dsi_bridge_attach(struct drm_bridge *bridge)
bridge->encoder->encoder_type = DRM_MODE_ENCODER_DSI;
/* Attach the panel-bridge to the dsi bridge */
- return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge);
+ return drm_bridge_attach(bridge->encoder, dsi->panel_bridge, bridge,
+ flags);
}
static const struct drm_bridge_funcs dw_mipi_dsi_bridge_funcs = {
@@ -1119,7 +1122,7 @@ int dw_mipi_dsi_bind(struct dw_mipi_dsi *dsi, struct drm_encoder *encoder)
{
int ret;
- ret = drm_bridge_attach(encoder, &dsi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, &dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
index 96207fcfde19..5ac1430fab04 100644
--- a/drivers/gpu/drm/bridge/tc358764.c
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -349,12 +349,18 @@ static void tc358764_enable(struct drm_bridge *bridge)
dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
}
-static int tc358764_attach(struct drm_bridge *bridge)
+static int tc358764_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
ret = drm_connector_init(drm, &ctx->connector,
&tc358764_connector_funcs,
@@ -369,7 +375,6 @@ static int tc358764_attach(struct drm_bridge *bridge)
drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
drm_panel_attach(ctx->panel, &ctx->connector);
ctx->connector.funcs->reset(&ctx->connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
drm_connector_register(&ctx->connector);
return 0;
@@ -378,10 +383,8 @@ static int tc358764_attach(struct drm_bridge *bridge)
static void tc358764_detach(struct drm_bridge *bridge)
{
struct tc358764 *ctx = bridge_to_tc358764(bridge);
- struct drm_device *drm = bridge->dev;
drm_connector_unregister(&ctx->connector);
- drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
drm_panel_detach(ctx->panel);
ctx->panel = NULL;
drm_connector_put(&ctx->connector);
diff --git a/drivers/gpu/drm/bridge/tc358767.c b/drivers/gpu/drm/bridge/tc358767.c
index 3709e5ace724..e4c0ea03ae3a 100644
--- a/drivers/gpu/drm/bridge/tc358767.c
+++ b/drivers/gpu/drm/bridge/tc358767.c
@@ -31,6 +31,7 @@
#include <drm/drm_edid.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
/* Registers */
@@ -297,7 +298,7 @@ static inline int tc_poll_timeout(struct tc_data *tc, unsigned int addr,
static int tc_aux_wait_busy(struct tc_data *tc)
{
- return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 1000, 100000);
+ return tc_poll_timeout(tc, DP0_AUXSTATUS, AUX_BUSY, 0, 100, 100000);
}
static int tc_aux_write_data(struct tc_data *tc, const void *data,
@@ -640,7 +641,7 @@ static int tc_aux_link_setup(struct tc_data *tc)
if (ret)
goto err;
- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 100, 100000);
if (ret == -ETIMEDOUT) {
dev_err(tc->dev, "Timeout waiting for PHY to become ready");
return ret;
@@ -876,7 +877,7 @@ static int tc_wait_link_training(struct tc_data *tc)
int ret;
ret = tc_poll_timeout(tc, DP0_LTSTAT, LT_LOOPDONE,
- LT_LOOPDONE, 1, 1000);
+ LT_LOOPDONE, 500, 100000);
if (ret) {
dev_err(tc->dev, "Link training timeout waiting for LT_LOOPDONE!\n");
return ret;
@@ -949,7 +950,7 @@ static int tc_main_link_enable(struct tc_data *tc)
dp_phy_ctrl &= ~(DP_PHY_RST | PHY_M1_RST | PHY_M0_RST);
ret = regmap_write(tc->regmap, DP_PHY_CTRL, dp_phy_ctrl);
- ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 1, 1000);
+ ret = tc_poll_timeout(tc, DP_PHY_CTRL, PHY_RDY, PHY_RDY, 500, 100000);
if (ret) {
dev_err(dev, "timeout waiting for phy become ready");
return ret;
@@ -1403,13 +1404,19 @@ static const struct drm_connector_funcs tc_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tc_bridge_attach(struct drm_bridge *bridge)
+static int tc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
u32 bus_format = MEDIA_BUS_FMT_RGB888_1X24;
struct tc_data *tc = bridge_to_tc(bridge);
struct drm_device *drm = bridge->dev;
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
/* Create DP/eDP connector */
drm_connector_helper_add(&tc->connector, &tc_connector_helper_funcs);
ret = drm_connector_init(drm, &tc->connector, &tc_connector_funcs,
diff --git a/drivers/gpu/drm/bridge/tc358768.c b/drivers/gpu/drm/bridge/tc358768.c
new file mode 100644
index 000000000000..1b39e8d37834
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358768.c
@@ -0,0 +1,1046 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Texas Instruments Incorporated - http://www.ti.com
+ * Author: Peter Ujfalusi <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/device.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <video/mipi_display.h>
+#include <video/videomode.h>
+
+/* Global (16-bit addressable) */
+#define TC358768_CHIPID 0x0000
+#define TC358768_SYSCTL 0x0002
+#define TC358768_CONFCTL 0x0004
+#define TC358768_VSDLY 0x0006
+#define TC358768_DATAFMT 0x0008
+#define TC358768_GPIOEN 0x000E
+#define TC358768_GPIODIR 0x0010
+#define TC358768_GPIOIN 0x0012
+#define TC358768_GPIOOUT 0x0014
+#define TC358768_PLLCTL0 0x0016
+#define TC358768_PLLCTL1 0x0018
+#define TC358768_CMDBYTE 0x0022
+#define TC358768_PP_MISC 0x0032
+#define TC358768_DSITX_DT 0x0050
+#define TC358768_FIFOSTATUS 0x00F8
+
+/* Debug (16-bit addressable) */
+#define TC358768_VBUFCTRL 0x00E0
+#define TC358768_DBG_WIDTH 0x00E2
+#define TC358768_DBG_VBLANK 0x00E4
+#define TC358768_DBG_DATA 0x00E8
+
+/* TX PHY (32-bit addressable) */
+#define TC358768_CLW_DPHYCONTTX 0x0100
+#define TC358768_D0W_DPHYCONTTX 0x0104
+#define TC358768_D1W_DPHYCONTTX 0x0108
+#define TC358768_D2W_DPHYCONTTX 0x010C
+#define TC358768_D3W_DPHYCONTTX 0x0110
+#define TC358768_CLW_CNTRL 0x0140
+#define TC358768_D0W_CNTRL 0x0144
+#define TC358768_D1W_CNTRL 0x0148
+#define TC358768_D2W_CNTRL 0x014C
+#define TC358768_D3W_CNTRL 0x0150
+
+/* TX PPI (32-bit addressable) */
+#define TC358768_STARTCNTRL 0x0204
+#define TC358768_DSITXSTATUS 0x0208
+#define TC358768_LINEINITCNT 0x0210
+#define TC358768_LPTXTIMECNT 0x0214
+#define TC358768_TCLK_HEADERCNT 0x0218
+#define TC358768_TCLK_TRAILCNT 0x021C
+#define TC358768_THS_HEADERCNT 0x0220
+#define TC358768_TWAKEUP 0x0224
+#define TC358768_TCLK_POSTCNT 0x0228
+#define TC358768_THS_TRAILCNT 0x022C
+#define TC358768_HSTXVREGCNT 0x0230
+#define TC358768_HSTXVREGEN 0x0234
+#define TC358768_TXOPTIONCNTRL 0x0238
+#define TC358768_BTACNTRL1 0x023C
+
+/* TX CTRL (32-bit addressable) */
+#define TC358768_DSI_CONTROL 0x040C
+#define TC358768_DSI_STATUS 0x0410
+#define TC358768_DSI_INT 0x0414
+#define TC358768_DSI_INT_ENA 0x0418
+#define TC358768_DSICMD_RDFIFO 0x0430
+#define TC358768_DSI_ACKERR 0x0434
+#define TC358768_DSI_ACKERR_INTENA 0x0438
+#define TC358768_DSI_ACKERR_HALT 0x043c
+#define TC358768_DSI_RXERR 0x0440
+#define TC358768_DSI_RXERR_INTENA 0x0444
+#define TC358768_DSI_RXERR_HALT 0x0448
+#define TC358768_DSI_ERR 0x044C
+#define TC358768_DSI_ERR_INTENA 0x0450
+#define TC358768_DSI_ERR_HALT 0x0454
+#define TC358768_DSI_CONFW 0x0500
+#define TC358768_DSI_LPCMD 0x0500
+#define TC358768_DSI_RESET 0x0504
+#define TC358768_DSI_INT_CLR 0x050C
+#define TC358768_DSI_START 0x0518
+
+/* DSITX CTRL (16-bit addressable) */
+#define TC358768_DSICMD_TX 0x0600
+#define TC358768_DSICMD_TYPE 0x0602
+#define TC358768_DSICMD_WC 0x0604
+#define TC358768_DSICMD_WD0 0x0610
+#define TC358768_DSICMD_WD1 0x0612
+#define TC358768_DSICMD_WD2 0x0614
+#define TC358768_DSICMD_WD3 0x0616
+#define TC358768_DSI_EVENT 0x0620
+#define TC358768_DSI_VSW 0x0622
+#define TC358768_DSI_VBPR 0x0624
+#define TC358768_DSI_VACT 0x0626
+#define TC358768_DSI_HSW 0x0628
+#define TC358768_DSI_HBPR 0x062A
+#define TC358768_DSI_HACT 0x062C
+
+/* TC358768_DSI_CONTROL (0x040C) register */
+#define TC358768_DSI_CONTROL_DIS_MODE BIT(15)
+#define TC358768_DSI_CONTROL_TXMD BIT(7)
+#define TC358768_DSI_CONTROL_HSCKMD BIT(5)
+#define TC358768_DSI_CONTROL_EOTDIS BIT(0)
+
+/* TC358768_DSI_CONFW (0x0500) register */
+#define TC358768_DSI_CONFW_MODE_SET (5 << 29)
+#define TC358768_DSI_CONFW_MODE_CLR (6 << 29)
+#define TC358768_DSI_CONFW_ADDR_DSI_CONTROL (0x3 << 24)
+
+static const char * const tc358768_supplies[] = {
+ "vddc", "vddmipi", "vddio"
+};
+
+struct tc358768_dsi_output {
+ struct mipi_dsi_device *dev;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+};
+
+struct tc358768_priv {
+ struct device *dev;
+ struct regmap *regmap;
+ struct gpio_desc *reset_gpio;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(tc358768_supplies)];
+ struct clk *refclk;
+ int enabled;
+ int error;
+
+ struct mipi_dsi_host dsi_host;
+ struct drm_bridge bridge;
+ struct tc358768_dsi_output output;
+
+ u32 pd_lines; /* number of Parallel Port Input Data Lines */
+ u32 dsi_lanes; /* number of DSI Lanes */
+
+ /* Parameters for PLL programming */
+ u32 fbd; /* PLL feedback divider */
+ u32 prd; /* PLL input divider */
+	u32 frs; /* PLL Frequency range for HSCK (post divider) */
+
+ u32 dsiclk; /* pll_clk / 2 */
+};
+
+static inline struct tc358768_priv *
+dsi_host_to_tc358768(struct mipi_dsi_host *host)
+{
+ return container_of(host, struct tc358768_priv, dsi_host);
+}
+
+static inline struct tc358768_priv *
+bridge_to_tc358768(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tc358768_priv, bridge);
+}
+
+static int tc358768_clear_error(struct tc358768_priv *priv)
+{
+ int ret = priv->error;
+
+ priv->error = 0;
+ return ret;
+}
+
+static void tc358768_write(struct tc358768_priv *priv, u32 reg, u32 val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+	/*
+	 * Registers below 0x100 and from 0x600 up are 16 bits wide; the
+	 * rest are 32 bits and take two 16-bit regmap words.
+	 */
+ if (reg < 0x100 || reg >= 0x600)
+ count = 1;
+
+ priv->error = regmap_bulk_write(priv->regmap, reg, &val, count);
+}
+
+static void tc358768_read(struct tc358768_priv *priv, u32 reg, u32 *val)
+{
+ size_t count = 2;
+
+ if (priv->error)
+ return;
+
+ /* 16-bit register? */
+ if (reg < 0x100 || reg >= 0x600) {
+ *val = 0;
+ count = 1;
+ }
+
+ priv->error = regmap_bulk_read(priv->regmap, reg, val, count);
+}
+
+static void tc358768_update_bits(struct tc358768_priv *priv, u32 reg, u32 mask,
+ u32 val)
+{
+ u32 tmp, orig;
+
+ tc358768_read(priv, reg, &orig);
+ tmp = orig & ~mask;
+ tmp |= val & mask;
+ if (tmp != orig)
+ tc358768_write(priv, reg, tmp);
+}
+
+static int tc358768_sw_reset(struct tc358768_priv *priv)
+{
+ /* Assert Reset */
+ tc358768_write(priv, TC358768_SYSCTL, 1);
+ /* Release Reset, Exit Sleep */
+ tc358768_write(priv, TC358768_SYSCTL, 0);
+
+ return tc358768_clear_error(priv);
+}
+
+static void tc358768_hw_enable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (priv->enabled)
+ return;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(priv->supplies), priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error enabling regulators (%d)\n", ret);
+
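+	/* If we control RESX, give the supplies time to settle first. */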
+ if (priv->reset_gpio)
+ usleep_range(200, 300);
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * DEASSERT (value = 0) the reset_gpio to enable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 0);
+
+ /* wait for encoder clocks to stabilize */
+ usleep_range(1000, 2000);
+
+ priv->enabled = true;
+}
+
+static void tc358768_hw_disable(struct tc358768_priv *priv)
+{
+ int ret;
+
+ if (!priv->enabled)
+ return;
+
+ /*
+ * The RESX is active low (GPIO_ACTIVE_LOW).
+ * ASSERT (value = 1) the reset_gpio to disable the chip
+ */
+ gpiod_set_value_cansleep(priv->reset_gpio, 1);
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "error disabling regulators (%d)\n", ret);
+
+ priv->enabled = false;
+}
+
+static u32 tc358768_pll_to_pclk(struct tc358768_priv *priv, u32 pll_clk)
+{
+ return (u32)div_u64((u64)pll_clk * priv->dsi_lanes, priv->pd_lines);
+}
+
+static u32 tc358768_pclk_to_pll(struct tc358768_priv *priv, u32 pclk)
+{
+ return (u32)div_u64((u64)pclk * priv->pd_lines, priv->dsi_lanes);
+}
+
+static int tc358768_calc_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode,
+ bool verify_only)
+{
+ const u32 frs_limits[] = {
+ 1000000000,
+ 500000000,
+ 250000000,
+ 125000000,
+ 62500000
+ };
+ unsigned long refclk;
+ u32 prd, target_pll, i, max_pll, min_pll;
+ u32 frs, best_diff, best_pll, best_prd, best_fbd;
+
+ target_pll = tc358768_pclk_to_pll(priv, mode->clock * 1000);
+
+ /* pll_clk = RefClk * [(FBD + 1)/ (PRD + 1)] * [1 / (2^FRS)] */
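+	/*
+	 * For example (illustrative values only): a 26 MHz refclk with
+	 * FBD = 24, PRD = 0 and FRS = 0 gives
+	 * pll_clk = 26 MHz * 25 / 1 / 1 = 650 MHz.
+	 */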
+
+ for (i = 0; i < ARRAY_SIZE(frs_limits); i++)
+ if (target_pll >= frs_limits[i])
+ break;
+
+ if (i == ARRAY_SIZE(frs_limits) || i == 0)
+ return -EINVAL;
+
+ frs = i - 1;
+ max_pll = frs_limits[i - 1];
+ min_pll = frs_limits[i];
+
+ refclk = clk_get_rate(priv->refclk);
+
+ best_diff = UINT_MAX;
+ best_pll = 0;
+ best_prd = 0;
+ best_fbd = 0;
+
+ for (prd = 0; prd < 16; ++prd) {
+ u32 divisor = (prd + 1) * (1 << frs);
+ u32 fbd;
+
+ for (fbd = 0; fbd < 512; ++fbd) {
+ u32 pll, diff;
+
+ pll = (u32)div_u64((u64)refclk * (fbd + 1), divisor);
+
+ if (pll >= max_pll || pll < min_pll)
+ continue;
+
+ diff = max(pll, target_pll) - min(pll, target_pll);
+
+ if (diff < best_diff) {
+ best_diff = diff;
+ best_pll = pll;
+ best_prd = prd;
+ best_fbd = fbd;
+
+ if (best_diff == 0)
+ goto found;
+ }
+ }
+ }
+
+ if (best_diff == UINT_MAX) {
+ dev_err(priv->dev, "could not find suitable PLL setup\n");
+ return -EINVAL;
+ }
+
+found:
+ if (verify_only)
+ return 0;
+
+ priv->fbd = best_fbd;
+ priv->prd = best_prd;
+ priv->frs = frs;
+ priv->dsiclk = best_pll / 2;
+
+ return 0;
+}
+
+static int tc358768_dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct drm_bridge *bridge;
+ struct drm_panel *panel;
+ struct device_node *ep;
+ int ret;
+
+ if (dev->lanes > 4) {
+ dev_err(priv->dev, "unsupported number of data lanes(%u)\n",
+ dev->lanes);
+ return -EINVAL;
+ }
+
+ /*
+ * tc358768 supports both Video and Pulse mode, but the driver only
+ * implements Video (event) mode currently
+ */
+ if (!(dev->mode_flags & MIPI_DSI_MODE_VIDEO)) {
+ dev_err(priv->dev, "Only MIPI_DSI_MODE_VIDEO is supported\n");
+ return -ENOTSUPP;
+ }
+
+ /*
+ * tc358768 supports RGB888, RGB666, RGB666_PACKED and RGB565, but only
+ * RGB888 is verified.
+ */
+ if (dev->format != MIPI_DSI_FMT_RGB888) {
+ dev_warn(priv->dev, "Only MIPI_DSI_FMT_RGB888 tested!\n");
+ return -ENOTSUPP;
+ }
+
+ ret = drm_of_find_panel_or_bridge(host->dev->of_node, 1, 0, &panel,
+ &bridge);
+ if (ret)
+ return ret;
+
+ if (panel) {
+ bridge = drm_panel_bridge_add_typed(panel,
+ DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR(bridge))
+ return PTR_ERR(bridge);
+ }
+
+ priv->output.dev = dev;
+ priv->output.bridge = bridge;
+ priv->output.panel = panel;
+
+ priv->dsi_lanes = dev->lanes;
+
+ /* get input ep (port0/endpoint0) */
+ ret = -EINVAL;
+ ep = of_graph_get_endpoint_by_regs(host->dev->of_node, 0, 0);
+ if (ep) {
+ ret = of_property_read_u32(ep, "data-lines", &priv->pd_lines);
+
+ of_node_put(ep);
+ }
+
+ if (ret)
+ priv->pd_lines = mipi_dsi_pixel_format_to_bpp(dev->format);
+
+ drm_bridge_add(&priv->bridge);
+
+ return 0;
+}
+
+static int tc358768_dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dev)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+
+ drm_bridge_remove(&priv->bridge);
+ if (priv->output.panel)
+ drm_panel_bridge_remove(priv->output.bridge);
+
+ return 0;
+}
+
+static ssize_t tc358768_dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct tc358768_priv *priv = dsi_host_to_tc358768(host);
+ struct mipi_dsi_packet packet;
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return -ENODEV;
+ }
+
+ if (msg->rx_len) {
+ dev_warn(priv->dev, "MIPI rx is not supported\n");
+ return -ENOTSUPP;
+ }
+
+ if (msg->tx_len > 8) {
+ dev_warn(priv->dev, "Maximum 8 byte MIPI tx is supported\n");
+ return -ENOTSUPP;
+ }
+
+ ret = mipi_dsi_create_packet(&packet, msg);
+ if (ret)
+ return ret;
+
+ if (mipi_dsi_packet_format_is_short(msg->type)) {
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x10 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, 0);
+ tc358768_write(priv, TC358768_DSICMD_WD0,
+ (packet.header[2] << 8) | packet.header[1]);
+ } else {
+ int i;
+
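+		/*
+		 * Long packet: the payload goes out through WD0..WD3 as
+		 * little-endian 16-bit words, hence the 8-byte limit
+		 * checked above.
+		 */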
+ tc358768_write(priv, TC358768_DSICMD_TYPE,
+ (0x40 << 8) | (packet.header[0] & 0x3f));
+ tc358768_write(priv, TC358768_DSICMD_WC, packet.payload_length);
+ for (i = 0; i < packet.payload_length; i += 2) {
+ u16 val = packet.payload[i];
+
+ if (i + 1 < packet.payload_length)
+ val |= packet.payload[i + 1] << 8;
+
+ tc358768_write(priv, TC358768_DSICMD_WD0 + i, val);
+ }
+ }
+
+ /* start transfer */
+ tc358768_write(priv, TC358768_DSICMD_TX, 1);
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+ else
+ ret = packet.size;
+
+ return ret;
+}
+
+static const struct mipi_dsi_host_ops tc358768_dsi_host_ops = {
+ .attach = tc358768_dsi_host_attach,
+ .detach = tc358768_dsi_host_detach,
+ .transfer = tc358768_dsi_host_transfer,
+};
+
+static int tc358768_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (!drm_core_check_feature(bridge->dev, DRIVER_ATOMIC)) {
+ dev_err(priv->dev, "needs atomic updates support\n");
+ return -ENOTSUPP;
+ }
+
+ return drm_bridge_attach(bridge->encoder, priv->output.bridge, bridge,
+ flags);
+}
+
+static enum drm_mode_status
+tc358768_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ if (tc358768_calc_pll(priv, mode, true))
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static void tc358768_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ /* set FrmStop */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(15), BIT(15));
+
+ /* wait at least for one frame */
+ msleep(50);
+
+ /* clear PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), 0);
+
+ /* set RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(14), BIT(14));
+
+ ret = tc358768_clear_error(priv);
+ if (ret)
+ dev_warn(priv->dev, "Software disable failed: %d\n", ret);
+}
+
+static void tc358768_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+
+ tc358768_hw_disable(priv);
+}
+
+static int tc358768_setup_pll(struct tc358768_priv *priv,
+ const struct drm_display_mode *mode)
+{
+ u32 fbd, prd, frs;
+ int ret;
+
+ ret = tc358768_calc_pll(priv, mode, false);
+ if (ret) {
+ dev_err(priv->dev, "PLL calculation failed: %d\n", ret);
+ return ret;
+ }
+
+ fbd = priv->fbd;
+ prd = priv->prd;
+ frs = priv->frs;
+
+ dev_dbg(priv->dev, "PLL: refclk %lu, fbd %u, prd %u, frs %u\n",
+ clk_get_rate(priv->refclk), fbd, prd, frs);
+ dev_dbg(priv->dev, "PLL: pll_clk: %u, DSIClk %u, DSIByteClk %u\n",
+ priv->dsiclk * 2, priv->dsiclk, priv->dsiclk / 4);
+ dev_dbg(priv->dev, "PLL: pclk %u (panel: %u)\n",
+ tc358768_pll_to_pclk(priv, priv->dsiclk * 2),
+ mode->clock * 1000);
+
+ /* PRD[15:12] FBD[8:0] */
+ tc358768_write(priv, TC358768_PLLCTL0, (prd << 12) | fbd);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(1) | BIT(0));
+
+ /* wait for lock */
+ usleep_range(1000, 2000);
+
+ /* FRS[11:10] LBWS[9:8] CKEN[4] RESETB[1] EN[0] */
+ tc358768_write(priv, TC358768_PLLCTL1,
+ (frs << 10) | (0x2 << 8) | BIT(4) | BIT(1) | BIT(0));
+
+ return tc358768_clear_error(priv);
+}
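+
+/*
+ * Clock relationship sketch with illustrative numbers (not from a real
+ * panel): for a DSI clock of 500 MHz, the PLL runs at 1 GHz (dsiclk * 2),
+ * each lane carries 1 Gbit/s (DDR), and the DSI byte clock used for the
+ * D-PHY counters below is 125 MHz (dsiclk / 4), matching the dev_dbg()
+ * output above.
+ */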
+
+#define TC358768_PRECISION 1000
+static u32 tc358768_ns_to_cnt(u32 ns, u32 period_nsk)
+{
+ return (ns * TC358768_PRECISION + period_nsk) / period_nsk;
+}
+
+static u32 tc358768_to_ns(u32 nsk)
+{
+ return (nsk / TC358768_PRECISION);
+}
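+
+/*
+ * Worked example of the fixed-point helpers above ("nsk" values are
+ * nanoseconds scaled by TC358768_PRECISION): a 125 MHz byte clock has a
+ * period of 8 ns, i.e. period_nsk = 8000, so
+ *
+ *	tc358768_ns_to_cnt(100 * 1000, 8000) = (100000000 + 8000) / 8000
+ *					     = 12501
+ *
+ * which rounds the 100 us LP-11 line initialization time up to whole
+ * byte-clock cycles (illustrative numbers only).
+ */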
+
+static void tc358768_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ struct mipi_dsi_device *dsi_dev = priv->output.dev;
+ u32 val, val2, lptxcnt, hact, data_type;
+ const struct drm_display_mode *mode;
+ u32 dsibclk_nsk, dsiclk_nsk, ui_nsk, phy_delay_nsk;
+ u32 dsiclk, dsibclk;
+ int ret, i;
+
+ tc358768_hw_enable(priv);
+
+ ret = tc358768_sw_reset(priv);
+ if (ret) {
+ dev_err(priv->dev, "Software reset failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ mode = &bridge->encoder->crtc->state->adjusted_mode;
+ ret = tc358768_setup_pll(priv, mode);
+ if (ret) {
+ dev_err(priv->dev, "PLL setup failed: %d\n", ret);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ dsiclk = priv->dsiclk;
+ dsibclk = dsiclk / 4;
+
+ /* Data Format Control Register */
+ val = BIT(2) | BIT(1) | BIT(0); /* rdswap_en | dsitx_en | txdt_en */
+ switch (dsi_dev->format) {
+ case MIPI_DSI_FMT_RGB888:
+ val |= (0x3 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_24;
+ break;
+ case MIPI_DSI_FMT_RGB666:
+ val |= (0x4 << 4);
+ hact = mode->hdisplay * 3;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB666_PACKED:
+ val |= (0x4 << 4) | BIT(3);
+ hact = mode->hdisplay * 18 / 8;
+ data_type = MIPI_DSI_PIXEL_STREAM_3BYTE_18;
+ break;
+
+ case MIPI_DSI_FMT_RGB565:
+ val |= (0x5 << 4);
+ hact = mode->hdisplay * 2;
+ data_type = MIPI_DSI_PACKED_PIXEL_STREAM_16;
+ break;
+ default:
+ dev_err(priv->dev, "Invalid data format (%u)\n",
+ dsi_dev->format);
+ tc358768_hw_disable(priv);
+ return;
+ }
+
+ /* VSDly[9:0] */
+ tc358768_write(priv, TC358768_VSDLY, 1);
+
+ tc358768_write(priv, TC358768_DATAFMT, val);
+ tc358768_write(priv, TC358768_DSITX_DT, data_type);
+
+ /* Enable D-PHY (HiZ->LP11) */
+ tc358768_write(priv, TC358768_CLW_CNTRL, 0x0000);
+ /* Enable lanes */
+ for (i = 0; i < dsi_dev->lanes; i++)
+ tc358768_write(priv, TC358768_D0W_CNTRL + i * 4, 0x0000);
+
+ /* DSI Timings */
+ dsibclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION,
+ dsibclk);
+ dsiclk_nsk = (u32)div_u64((u64)1000000000 * TC358768_PRECISION, dsiclk);
+ ui_nsk = dsiclk_nsk / 2;
+ phy_delay_nsk = dsibclk_nsk + 2 * dsiclk_nsk;
+ dev_dbg(priv->dev, "dsiclk_nsk: %u\n", dsiclk_nsk);
+ dev_dbg(priv->dev, "ui_nsk: %u\n", ui_nsk);
+ dev_dbg(priv->dev, "dsibclk_nsk: %u\n", dsibclk_nsk);
+ dev_dbg(priv->dev, "phy_delay_nsk: %u\n", phy_delay_nsk);
+
+ /* LP11 > 100us for D-PHY Rx Init */
+ val = tc358768_ns_to_cnt(100 * 1000, dsibclk_nsk) - 1;
+ dev_dbg(priv->dev, "LINEINITCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LINEINITCNT, val);
+
+ /* LPTimeCnt > 50ns */
+ val = tc358768_ns_to_cnt(50, dsibclk_nsk) - 1;
+ lptxcnt = val;
+ dev_dbg(priv->dev, "LPTXTIMECNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_LPTXTIMECNT, val);
+
+ /* 38ns < TCLK_PREPARE < 95ns */
+ val = tc358768_ns_to_cnt(65, dsibclk_nsk) - 1;
+ /* TCLK_ZERO > 300ns */
+ val2 = tc358768_ns_to_cnt(300 + tc358768_to_ns(3 * ui_nsk),
+ dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk - dsibclk_nsk)) << 8;
+ dev_dbg(priv->dev, "TCLK_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_HEADERCNT, val);
+
+ /* TCLK_TRAIL > 60ns + 3*UI */
+ val = 60 + tc358768_to_ns(3 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "TCLK_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_TRAILCNT, val);
+
+ /* 40ns + 4*UI < THS_PREPARE < 85ns + 6*UI */
+ val = 50 + tc358768_to_ns(4 * ui_nsk);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ /* THS_ZERO > 145ns + 10*UI */
+ val2 = tc358768_ns_to_cnt(145 - tc358768_to_ns(ui_nsk), dsibclk_nsk);
+ val |= (val2 - tc358768_to_ns(phy_delay_nsk)) << 8;
+ dev_dbg(priv->dev, "THS_HEADERCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_HEADERCNT, val);
+
+ /* TWAKEUP > 1ms in lptxcnt steps */
+ val = tc358768_ns_to_cnt(1020000, dsibclk_nsk);
+ val = val / (lptxcnt + 1) - 1;
+ dev_dbg(priv->dev, "TWAKEUP: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TWAKEUP, val);
+
+ /* TCLK_POSTCNT > 60ns + 52*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(52 * ui_nsk),
+ dsibclk_nsk) - 3;
+ dev_dbg(priv->dev, "TCLK_POSTCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_TCLK_POSTCNT, val);
+
+ /* 60ns + 4*UI < THS_TRAIL < 105ns + 12*UI */
+ val = tc358768_ns_to_cnt(60 + tc358768_to_ns(15 * ui_nsk),
+ dsibclk_nsk) - 5;
+ dev_dbg(priv->dev, "THS_TRAILCNT: 0x%x\n", val);
+ tc358768_write(priv, TC358768_THS_TRAILCNT, val);
+
+ val = BIT(0);
+ for (i = 0; i < dsi_dev->lanes; i++)
+ val |= BIT(i + 1);
+ tc358768_write(priv, TC358768_HSTXVREGEN, val);
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ tc358768_write(priv, TC358768_TXOPTIONCNTRL, 0x1);
+
+ /* TXTAGOCNT[26:16] RXTASURECNT[10:0] */
+ val = tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk * 4);
+ val = tc358768_ns_to_cnt(val, dsibclk_nsk) - 1;
+ val2 = tc358768_ns_to_cnt(tc358768_to_ns((lptxcnt + 1) * dsibclk_nsk),
+ dsibclk_nsk) - 2;
+ val |= val2 << 16;
+ dev_dbg(priv->dev, "BTACNTRL1: 0x%x\n", val);
+ tc358768_write(priv, TC358768_BTACNTRL1, val);
+
+ /* START[0] */
+ tc358768_write(priv, TC358768_STARTCNTRL, 1);
+
+ /* Set event mode */
+ tc358768_write(priv, TC358768_DSI_EVENT, 1);
+
+ /* vsw (+ vbp) */
+ tc358768_write(priv, TC358768_DSI_VSW,
+ mode->vtotal - mode->vsync_start);
+ /* vbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_VBPR, 0);
+ /* vact */
+ tc358768_write(priv, TC358768_DSI_VACT, mode->vdisplay);
+
+ /* (hsw + hbp) * byteclk * ndl / pclk */
+ val = (u32)div_u64((mode->htotal - mode->hsync_start) *
+ ((u64)priv->dsiclk / 4) * priv->dsi_lanes,
+ mode->clock * 1000);
+ tc358768_write(priv, TC358768_DSI_HSW, val);
+ /* hbp (not used in event mode) */
+ tc358768_write(priv, TC358768_DSI_HBPR, 0);
+ /* hact (bytes) */
+ tc358768_write(priv, TC358768_DSI_HACT, hact);
+
+ /* VSYNC polarity */
+ if (!(mode->flags & DRM_MODE_FLAG_NVSYNC))
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(5), BIT(5));
+ /* HSYNC polarity */
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ tc358768_update_bits(priv, TC358768_PP_MISC, BIT(0), BIT(0));
+
+ /* Start DSI Tx */
+ tc358768_write(priv, TC358768_DSI_START, 0x1);
+
+ /* Configure DSI_Control register */
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_TXMD | TC358768_DSI_CONTROL_HSCKMD |
+ 0x3 << 1 | TC358768_DSI_CONTROL_EOTDIS;
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_SET | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= (dsi_dev->lanes - 1) << 1;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_MODE_LPM))
+ val |= TC358768_DSI_CONTROL_TXMD;
+
+ if (!(dsi_dev->mode_flags & MIPI_DSI_CLOCK_NON_CONTINUOUS))
+ val |= TC358768_DSI_CONTROL_HSCKMD;
+
+ if (dsi_dev->mode_flags & MIPI_DSI_MODE_EOT_PACKET)
+ val |= TC358768_DSI_CONTROL_EOTDIS;
+
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ val = TC358768_DSI_CONFW_MODE_CLR | TC358768_DSI_CONFW_ADDR_DSI_CONTROL;
+ val |= TC358768_DSI_CONTROL_DIS_MODE; /* DSI mode */
+ tc358768_write(priv, TC358768_DSI_CONFW, val);
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge pre_enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static void tc358768_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tc358768_priv *priv = bridge_to_tc358768(bridge);
+ int ret;
+
+ if (!priv->enabled) {
+ dev_err(priv->dev, "Bridge is not enabled\n");
+ return;
+ }
+
+ /* clear FrmStop and RstPtr */
+ tc358768_update_bits(priv, TC358768_PP_MISC, 0x3 << 14, 0);
+
+ /* set PP_en */
+ tc358768_update_bits(priv, TC358768_CONFCTL, BIT(6), BIT(6));
+
+ ret = tc358768_clear_error(priv);
+ if (ret) {
+ dev_err(priv->dev, "Bridge enable failed: %d\n", ret);
+ tc358768_bridge_disable(bridge);
+ tc358768_bridge_post_disable(bridge);
+ }
+}
+
+static const struct drm_bridge_funcs tc358768_bridge_funcs = {
+ .attach = tc358768_bridge_attach,
+ .mode_valid = tc358768_bridge_mode_valid,
+ .pre_enable = tc358768_bridge_pre_enable,
+ .enable = tc358768_bridge_enable,
+ .disable = tc358768_bridge_disable,
+ .post_disable = tc358768_bridge_post_disable,
+};
+
+static const struct drm_bridge_timings default_tc358768_timings = {
+ .input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
+ | DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE
+ | DRM_BUS_FLAG_DE_HIGH,
+};
+
+static bool tc358768_is_reserved_reg(unsigned int reg)
+{
+ switch (reg) {
+ case 0x114 ... 0x13f:
+ case 0x200:
+ case 0x20c:
+ case 0x400 ... 0x408:
+ case 0x41c ... 0x42f:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool tc358768_writeable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_CHIPID:
+ case TC358768_FIFOSTATUS:
+ case TC358768_DSITXSTATUS ... (TC358768_DSITXSTATUS + 2):
+ case TC358768_DSI_CONTROL ... (TC358768_DSI_INT_ENA + 2):
+ case TC358768_DSICMD_RDFIFO ... (TC358768_DSI_ERR_HALT + 2):
+ return false;
+ default:
+ return true;
+ }
+}
+
+static bool tc358768_readable_reg(struct device *dev, unsigned int reg)
+{
+ if (tc358768_is_reserved_reg(reg))
+ return false;
+
+ switch (reg) {
+ case TC358768_STARTCNTRL:
+ case TC358768_DSI_CONFW ... (TC358768_DSI_CONFW + 2):
+ case TC358768_DSI_INT_CLR ... (TC358768_DSI_INT_CLR + 2):
+ case TC358768_DSI_START ... (TC358768_DSI_START + 2):
+ case TC358768_DBG_DATA:
+ return false;
+ default:
+ return true;
+ }
+}
+
+static const struct regmap_config tc358768_regmap_config = {
+ .name = "tc358768",
+ .reg_bits = 16,
+ .val_bits = 16,
+ .max_register = TC358768_DSI_HACT,
+ .cache_type = REGCACHE_NONE,
+ .writeable_reg = tc358768_writeable_reg,
+ .readable_reg = tc358768_readable_reg,
+ .reg_format_endian = REGMAP_ENDIAN_BIG,
+ .val_format_endian = REGMAP_ENDIAN_BIG,
+};
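+
+/*
+ * Illustrative example of the register layout above: with 16-bit
+ * big-endian registers and values, a write such as
+ *
+ *	regmap_write(priv->regmap, 0x0204, 0x1234);
+ *
+ * goes out on the I2C bus as the byte sequence 0x02 0x04 0x12 0x34
+ * (register high/low, then value high/low). In practice the driver goes
+ * through its tc358768_write() wrapper.
+ */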
+
+static const struct i2c_device_id tc358768_i2c_ids[] = {
+ { "tc358768", 0 },
+ { "tc358778", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, tc358768_i2c_ids);
+
+static const struct of_device_id tc358768_of_ids[] = {
+ { .compatible = "toshiba,tc358768", },
+ { .compatible = "toshiba,tc358778", },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358768_of_ids);
+
+static int tc358768_get_regulators(struct tc358768_priv *priv)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(priv->supplies); ++i)
+ priv->supplies[i].supply = tc358768_supplies[i];
+
+ ret = devm_regulator_bulk_get(priv->dev, ARRAY_SIZE(priv->supplies),
+ priv->supplies);
+ if (ret < 0)
+ dev_err(priv->dev, "failed to get regulators: %d\n", ret);
+
+ return ret;
+}
+
+static int tc358768_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct tc358768_priv *priv;
+ struct device *dev = &client->dev;
+ struct device_node *np = dev->of_node;
+ int ret;
+
+ if (!np)
+ return -ENODEV;
+
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+ priv->dev = dev;
+
+ ret = tc358768_get_regulators(priv);
+ if (ret)
+ return ret;
+
+ priv->refclk = devm_clk_get(dev, "refclk");
+ if (IS_ERR(priv->refclk))
+ return PTR_ERR(priv->refclk);
+
+ /*
+ * RESX is low active, to disable tc358768 initially (keep in reset)
+ * the gpio line must be LOW. This is the ASSERTED state of
+ * GPIO_ACTIVE_LOW (GPIOD_OUT_HIGH == ASSERTED).
+ */
+ priv->reset_gpio = devm_gpiod_get_optional(dev, "reset",
+ GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->reset_gpio))
+ return PTR_ERR(priv->reset_gpio);
+
+ priv->regmap = devm_regmap_init_i2c(client, &tc358768_regmap_config);
+ if (IS_ERR(priv->regmap)) {
+ dev_err(dev, "Failed to init regmap\n");
+ return PTR_ERR(priv->regmap);
+ }
+
+ priv->dsi_host.dev = dev;
+ priv->dsi_host.ops = &tc358768_dsi_host_ops;
+
+ priv->bridge.funcs = &tc358768_bridge_funcs;
+ priv->bridge.timings = &default_tc358768_timings;
+ priv->bridge.of_node = np;
+
+ i2c_set_clientdata(client, priv);
+
+ return mipi_dsi_host_register(&priv->dsi_host);
+}
+
+static int tc358768_i2c_remove(struct i2c_client *client)
+{
+ struct tc358768_priv *priv = i2c_get_clientdata(client);
+
+ mipi_dsi_host_unregister(&priv->dsi_host);
+
+ return 0;
+}
+
+static struct i2c_driver tc358768_driver = {
+ .driver = {
+ .name = "tc358768",
+ .of_match_table = tc358768_of_ids,
+ },
+ .id_table = tc358768_i2c_ids,
+ .probe = tc358768_i2c_probe,
+ .remove = tc358768_i2c_remove,
+};
+module_i2c_driver(tc358768_driver);
+
+MODULE_AUTHOR("Peter Ujfalusi <[email protected]>");
+MODULE_DESCRIPTION("TC358768AXBG/TC358778XBG DSI bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/thc63lvd1024.c b/drivers/gpu/drm/bridge/thc63lvd1024.c
index 3d74129b2995..97d8129760e9 100644
--- a/drivers/gpu/drm/bridge/thc63lvd1024.c
+++ b/drivers/gpu/drm/bridge/thc63lvd1024.c
@@ -42,11 +42,12 @@ static inline struct thc63_dev *to_thc63(struct drm_bridge *bridge)
return container_of(bridge, struct thc63_dev, bridge);
}
-static int thc63_attach(struct drm_bridge *bridge)
+static int thc63_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct thc63_dev *thc63 = to_thc63(bridge);
- return drm_bridge_attach(bridge->encoder, thc63->next, bridge);
+ return drm_bridge_attach(bridge->encoder, thc63->next, bridge, flags);
}
static enum drm_mode_status thc63_mode_valid(struct drm_bridge *bridge,
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
index 9a2dd986afa5..6ad688b320ae 100644
--- a/drivers/gpu/drm/bridge/ti-sn65dsi86.c
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -51,6 +51,7 @@
#define SN_ENH_FRAME_REG 0x5A
#define VSTREAM_ENABLE BIT(3)
#define SN_DATA_FORMAT_REG 0x5B
+#define BPP_18_RGB BIT(0)
#define SN_HPD_DISABLE_REG 0x5C
#define HPD_DISABLE BIT(0)
#define SN_AUX_WDATA_REG(x) (0x64 + (x))
@@ -100,6 +101,7 @@ struct ti_sn_bridge {
struct drm_panel *panel;
struct gpio_desc *enable_gpio;
struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
+ int dp_lanes;
};
static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
@@ -264,7 +266,8 @@ static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
pdata->supplies);
}
-static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+static int ti_sn_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
int ret, val;
struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
@@ -275,6 +278,11 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
.node = NULL,
};
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init(bridge->dev, &pdata->connector,
&ti_sn_bridge_connector_funcs,
DRM_MODE_CONNECTOR_eDP);
@@ -312,7 +320,7 @@ static int ti_sn_bridge_attach(struct drm_bridge *bridge)
goto err_dsi_host;
}
- /* TODO: setting to 4 lanes always for now */
+ /* TODO: setting to 4 MIPI lanes always for now */
dsi->lanes = 4;
dsi->format = MIPI_DSI_FMT_RGB888;
dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
@@ -417,6 +425,32 @@ static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
REFCLK_FREQ(i));
}
+static void ti_sn_bridge_set_dsi_rate(struct ti_sn_bridge *pdata)
+{
+ unsigned int bit_rate_mhz, clk_freq_mhz;
+ unsigned int val;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ /* set DSIA clk frequency */
+ bit_rate_mhz = (mode->clock / 1000) *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+
+ /* for each increment in val, frequency increases by 5MHz */
+ val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
+ (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
+ regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+}
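+
+/*
+ * Worked example with illustrative numbers: a 148.5 MHz pixel clock with
+ * RGB888 (24 bpp) over 4 DSI lanes gives bit_rate_mhz = 148 * 24 = 3552
+ * and clk_freq_mhz = 3552 / (4 * 2) = 444. Assuming MIN_DSI_CLK_FREQ_MHZ
+ * is 40, SN_DSIA_CLK_FREQ_REG is programmed with 8 + (404 / 5) = 88,
+ * i.e. a 440 MHz DSIA clock setting.
+ */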
+
+static unsigned int ti_sn_bridge_get_bpp(struct ti_sn_bridge *pdata)
+{
+ if (pdata->connector.display_info.bpc <= 6)
+ return 18;
+ else
+ return 24;
+}
+
/**
* LUT index corresponds to the register value and
* LUT values correspond to the DP data rates supported
@@ -426,32 +460,106 @@ static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
};
-static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+static int ti_sn_bridge_calc_min_dp_rate_idx(struct ti_sn_bridge *pdata)
{
- unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
- unsigned int val, i;
+ unsigned int bit_rate_khz, dp_rate_mhz;
+ unsigned int i;
struct drm_display_mode *mode =
&pdata->bridge.encoder->crtc->state->adjusted_mode;
- /* set DSIA clk frequency */
- bit_rate_mhz = (mode->clock / 1000) *
- mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
- clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+ /* Calculate minimum bit rate based on our pixel clock. */
+ bit_rate_khz = mode->clock * ti_sn_bridge_get_bpp(pdata);
- /* for each increment in val, frequency increases by 5MHz */
- val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
- (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
- regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+ /* Calculate minimum DP data rate, taking 80% as per DP spec */
+ dp_rate_mhz = DIV_ROUND_UP(bit_rate_khz * DP_CLK_FUDGE_NUM,
+ 1000 * pdata->dp_lanes * DP_CLK_FUDGE_DEN);
- /* set DP data rate */
- dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
- DP_CLK_FUDGE_DEN;
- for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+ for (i = 1; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
break;
- regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
- DP_DATARATE_MASK, DP_DATARATE(i));
+ return i;
+}
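+
+/*
+ * Worked example with illustrative numbers: a 148.5 MHz pixel clock at
+ * 24 bpp needs bit_rate_khz = 148500 * 24 = 3564000. With 4 DP lanes,
+ * and assuming DP_CLK_FUDGE_NUM / DP_CLK_FUDGE_DEN encode the 10/8
+ * channel-coding overhead mentioned above, dp_rate_mhz =
+ * DIV_ROUND_UP(3564000 * 10, 1000 * 4 * 8) = 1114, so the loop above
+ * returns index 1 (1620 MHz), the lowest LUT entry that exceeds the
+ * requirement.
+ */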
+
+static void ti_sn_bridge_read_valid_rates(struct ti_sn_bridge *pdata,
+ bool rate_valid[])
+{
+ unsigned int rate_per_200khz;
+ unsigned int rate_mhz;
+ u8 dpcd_val;
+ int ret;
+ int i, j;
+
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_EDP_DPCD_REV, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read eDP rev (%d), assuming 1.1\n", ret);
+ dpcd_val = DP_EDP_11;
+ }
+
+ if (dpcd_val >= DP_EDP_14) {
+ /* eDP 1.4 devices must provide a custom table */
+ __le16 sink_rates[DP_MAX_SUPPORTED_RATES];
+
+ ret = drm_dp_dpcd_read(&pdata->aux, DP_SUPPORTED_LINK_RATES,
+ sink_rates, sizeof(sink_rates));
+
+ if (ret != sizeof(sink_rates)) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read supported rate table (%d)\n", ret);
+
+ /* By zeroing we'll fall back to DP_MAX_LINK_RATE. */
+ memset(sink_rates, 0, sizeof(sink_rates));
+ }
+
+ for (i = 0; i < ARRAY_SIZE(sink_rates); i++) {
+ rate_per_200khz = le16_to_cpu(sink_rates[i]);
+
+ if (!rate_per_200khz)
+ break;
+
+ rate_mhz = rate_per_200khz * 200 / 1000;
+ for (j = 0;
+ j < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ j++) {
+ if (ti_sn_bridge_dp_rate_lut[j] == rate_mhz)
+ rate_valid[j] = true;
+ }
+ }
+
+ for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut); i++) {
+ if (rate_valid[i])
+ return;
+ }
+ DRM_DEV_ERROR(pdata->dev,
+ "No matching eDP rates in table; falling back\n");
+ }
+
+ /* On older eDP versions, the best we can do is use DP_MAX_LINK_RATE */
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LINK_RATE, &dpcd_val);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read max rate (%d); assuming 5.4 GHz\n",
+ ret);
+ dpcd_val = DP_LINK_BW_5_4;
+ }
+
+ switch (dpcd_val) {
+ default:
+ DRM_DEV_ERROR(pdata->dev,
+ "Unexpected max rate (%#x); assuming 5.4 GHz\n",
+ (int)dpcd_val);
+ /* fall through */
+ case DP_LINK_BW_5_4:
+ rate_valid[7] = 1;
+ /* fall through */
+ case DP_LINK_BW_2_7:
+ rate_valid[4] = 1;
+ /* fall through */
+ case DP_LINK_BW_1_62:
+ rate_valid[1] = 1;
+ break;
+ }
}
static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
@@ -493,24 +601,30 @@ static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
usleep_range(10000, 10500); /* 10ms delay recommended by spec */
}
-static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+static unsigned int ti_sn_get_max_lanes(struct ti_sn_bridge *pdata)
{
- struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
- unsigned int val;
+ u8 data;
int ret;
- /* DSI_A lane config */
- val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
- regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
- CHA_DSI_LANES_MASK, val);
+ ret = drm_dp_dpcd_readb(&pdata->aux, DP_MAX_LANE_COUNT, &data);
+ if (ret != 1) {
+ DRM_DEV_ERROR(pdata->dev,
+ "Can't read lane count (%d); assuming 4\n", ret);
+ return 4;
+ }
- /* DP lane config */
- val = DP_NUM_LANES(pdata->dsi->lanes - 1);
- regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
- val);
+ return data & DP_LANE_COUNT_MASK;
+}
- /* set dsi/dp clk frequency value */
- ti_sn_bridge_set_dsi_dp_rate(pdata);
+static int ti_sn_link_training(struct ti_sn_bridge *pdata, int dp_rate_idx,
+ const char **last_err_str)
+{
+ unsigned int val;
+ int ret;
+
+ /* set dp clk frequency value */
+ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+ DP_DATARATE_MASK, DP_DATARATE(dp_rate_idx));
/* enable DP PLL */
regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
@@ -519,10 +633,62 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
val & DPPLL_SRC_DP_PLL_LOCK, 1000,
50 * 1000);
if (ret) {
- DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
- return;
+ *last_err_str = "DP_PLL_LOCK polling failed";
+ goto exit;
+ }
+
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ *last_err_str = "Training complete polling failed";
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ *last_err_str = "Link training failed, link is off";
+ ret = -EIO;
}
+exit:
+ /* Disable the PLL if we failed */
+ if (ret)
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+ return ret;
+}
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ bool rate_valid[ARRAY_SIZE(ti_sn_bridge_dp_rate_lut)] = { };
+ const char *last_err_str = "No supported DP rate";
+ int dp_rate_idx;
+ unsigned int val;
+ int ret = -EINVAL;
+
+ /*
+ * Run with the maximum number of lanes that the DP sink supports.
+ *
+	 * Depending on use cases, we might want to revisit this later because:
+ * - It's plausible that someone may have run fewer lines to the
+ * sink than the sink actually supports, assuming that the lines
+ * will just be driven at a higher rate.
+ * - The DP spec seems to indicate that it's more important to minimize
+ * the number of lanes than the link rate.
+ *
+ * If we do revisit, it would be important to measure the power impact.
+ */
+ pdata->dp_lanes = ti_sn_get_max_lanes(pdata);
+
+ /* DSI_A lane config */
+ val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+ CHA_DSI_LANES_MASK, val);
+
+ /* set dsi clk frequency value */
+ ti_sn_bridge_set_dsi_rate(pdata);
+
/**
* The SN65DSI86 only supports ASSR Display Authentication method and
* this method is enabled by default. An eDP panel must support this
@@ -532,17 +698,30 @@ static void ti_sn_bridge_enable(struct drm_bridge *bridge)
drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
- /* Semi auto link training mode */
- regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
- ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
- val == ML_TX_MAIN_LINK_OFF ||
- val == ML_TX_NORMAL_MODE, 1000,
- 500 * 1000);
+ /* Set the DP output format (18 bpp or 24 bpp) */
+ val = (ti_sn_bridge_get_bpp(pdata) == 18) ? BPP_18_RGB : 0;
+ regmap_update_bits(pdata->regmap, SN_DATA_FORMAT_REG, BPP_18_RGB, val);
+
+ /* DP lane config */
+ val = DP_NUM_LANES(min(pdata->dp_lanes, 3));
+ regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+ val);
+
+ ti_sn_bridge_read_valid_rates(pdata, rate_valid);
+
+ /* Train until we run out of rates */
+ for (dp_rate_idx = ti_sn_bridge_calc_min_dp_rate_idx(pdata);
+ dp_rate_idx < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut);
+ dp_rate_idx++) {
+ if (!rate_valid[dp_rate_idx])
+ continue;
+
+ ret = ti_sn_link_training(pdata, dp_rate_idx, &last_err_str);
+ if (!ret)
+ break;
+ }
if (ret) {
- DRM_ERROR("Training complete polling failed (%d)\n", ret);
- return;
- } else if (val == ML_TX_MAIN_LINK_OFF) {
- DRM_ERROR("Link training failed, link is off\n");
+ DRM_DEV_ERROR(pdata->dev, "%s (%d)\n", last_err_str, ret);
return;
}
diff --git a/drivers/gpu/drm/bridge/ti-tfp410.c b/drivers/gpu/drm/bridge/ti-tfp410.c
index 6f6d6d1e60ae..e3eb6364c0f7 100644
--- a/drivers/gpu/drm/bridge/ti-tfp410.c
+++ b/drivers/gpu/drm/bridge/ti-tfp410.c
@@ -4,14 +4,12 @@
* Author: Jyri Sarha <[email protected]>
*/
-#include <linux/delay.h>
-#include <linux/fwnode.h>
#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
-#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of_graph.h>
#include <linux/platform_device.h>
+#include <linux/workqueue.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
@@ -24,16 +22,13 @@
struct tfp410 {
struct drm_bridge bridge;
struct drm_connector connector;
- unsigned int connector_type;
u32 bus_format;
- struct i2c_adapter *ddc;
- struct gpio_desc *hpd;
- int hpd_irq;
struct delayed_work hpd_work;
struct gpio_desc *powerdown;
struct drm_bridge_timings timings;
+ struct drm_bridge *next_bridge;
struct device *dev;
};
@@ -56,13 +51,18 @@ static int tfp410_get_modes(struct drm_connector *connector)
struct edid *edid;
int ret;
- if (!dvi->ddc)
- goto fallback;
+ edid = drm_bridge_get_edid(dvi->next_bridge, connector);
+ if (IS_ERR_OR_NULL(edid)) {
+ if (edid != ERR_PTR(-ENOTSUPP))
+ DRM_INFO("EDID read failed. Fallback to standard modes\n");
- edid = drm_get_edid(connector, dvi->ddc);
- if (!edid) {
- DRM_INFO("EDID read failed. Fallback to standard modes\n");
- goto fallback;
+ /*
+	 * No EDID; fall back on the XGA standard modes and prefer a mode
+ * pretty much anything can handle.
+ */
+ ret = drm_add_modes_noedid(connector, 1920, 1200);
+ drm_set_preferred_mode(connector, 1024, 768);
+ return ret;
}
drm_connector_update_edid_property(connector, edid);
@@ -72,15 +72,6 @@ static int tfp410_get_modes(struct drm_connector *connector)
kfree(edid);
return ret;
-
-fallback:
- /* No EDID, fallback on the XGA standard modes */
- ret = drm_add_modes_noedid(connector, 1920, 1200);
-
- /* And prefer a mode pretty much anything can handle */
- drm_set_preferred_mode(connector, 1024, 768);
-
- return ret;
}
static const struct drm_connector_helper_funcs tfp410_con_helper_funcs = {
@@ -92,21 +83,7 @@ tfp410_connector_detect(struct drm_connector *connector, bool force)
{
struct tfp410 *dvi = drm_connector_to_tfp410(connector);
- if (dvi->hpd) {
- if (gpiod_get_value_cansleep(dvi->hpd))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- if (dvi->ddc) {
- if (drm_probe_ddc(dvi->ddc))
- return connector_status_connected;
- else
- return connector_status_disconnected;
- }
-
- return connector_status_unknown;
+ return drm_bridge_detect(dvi->next_bridge);
}
static const struct drm_connector_funcs tfp410_con_funcs = {
@@ -118,41 +95,84 @@ static const struct drm_connector_funcs tfp410_con_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static int tfp410_attach(struct drm_bridge *bridge)
+static void tfp410_hpd_work_func(struct work_struct *work)
+{
+ struct tfp410 *dvi;
+
+ dvi = container_of(work, struct tfp410, hpd_work.work);
+
+ if (dvi->bridge.dev)
+ drm_helper_hpd_irq_event(dvi->bridge.dev);
+}
+
+static void tfp410_hpd_callback(void *arg, enum drm_connector_status status)
+{
+ struct tfp410 *dvi = arg;
+
+ mod_delayed_work(system_wq, &dvi->hpd_work,
+ msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
+}
+
+static int tfp410_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
int ret;
+ ret = drm_bridge_attach(bridge->encoder, dvi->next_bridge, bridge,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0)
+ return ret;
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
+ return 0;
+
if (!bridge->encoder) {
dev_err(dvi->dev, "Missing encoder\n");
return -ENODEV;
}
- if (dvi->hpd_irq >= 0)
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_DETECT)
dvi->connector.polled = DRM_CONNECTOR_POLL_HPD;
else
dvi->connector.polled = DRM_CONNECTOR_POLL_CONNECT | DRM_CONNECTOR_POLL_DISCONNECT;
+ if (dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
+ drm_bridge_hpd_enable(dvi->next_bridge, tfp410_hpd_callback,
+ dvi);
+ }
+
drm_connector_helper_add(&dvi->connector,
&tfp410_con_helper_funcs);
ret = drm_connector_init_with_ddc(bridge->dev, &dvi->connector,
&tfp410_con_funcs,
- dvi->connector_type,
- dvi->ddc);
+ dvi->next_bridge->type,
+ dvi->next_bridge->ddc);
if (ret) {
- dev_err(dvi->dev, "drm_connector_init() failed: %d\n", ret);
+ dev_err(dvi->dev, "drm_connector_init_with_ddc() failed: %d\n",
+ ret);
return ret;
}
drm_display_info_set_bus_formats(&dvi->connector.display_info,
&dvi->bus_format, 1);
- drm_connector_attach_encoder(&dvi->connector,
- bridge->encoder);
+ drm_connector_attach_encoder(&dvi->connector, bridge->encoder);
return 0;
}
+static void tfp410_detach(struct drm_bridge *bridge)
+{
+ struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
+
+ if (dvi->connector.dev && dvi->next_bridge->ops & DRM_BRIDGE_OP_HPD) {
+ drm_bridge_hpd_disable(dvi->next_bridge);
+ cancel_delayed_work_sync(&dvi->hpd_work);
+ }
+}
+
static void tfp410_enable(struct drm_bridge *bridge)
{
struct tfp410 *dvi = drm_bridge_to_tfp410(bridge);
@@ -167,31 +187,25 @@ static void tfp410_disable(struct drm_bridge *bridge)
gpiod_set_value_cansleep(dvi->powerdown, 1);
}
-static const struct drm_bridge_funcs tfp410_bridge_funcs = {
- .attach = tfp410_attach,
- .enable = tfp410_enable,
- .disable = tfp410_disable,
-};
-
-static void tfp410_hpd_work_func(struct work_struct *work)
+static enum drm_mode_status tfp410_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
{
- struct tfp410 *dvi;
+ if (mode->clock < 25000)
+ return MODE_CLOCK_LOW;
- dvi = container_of(work, struct tfp410, hpd_work.work);
+ if (mode->clock > 165000)
+ return MODE_CLOCK_HIGH;
- if (dvi->bridge.dev)
- drm_helper_hpd_irq_event(dvi->bridge.dev);
+ return MODE_OK;
}
-static irqreturn_t tfp410_hpd_irq_thread(int irq, void *arg)
-{
- struct tfp410 *dvi = arg;
-
- mod_delayed_work(system_wq, &dvi->hpd_work,
- msecs_to_jiffies(HOTPLUG_DEBOUNCE_MS));
-
- return IRQ_HANDLED;
-}
+static const struct drm_bridge_funcs tfp410_bridge_funcs = {
+ .attach = tfp410_attach,
+ .detach = tfp410_detach,
+ .enable = tfp410_enable,
+ .disable = tfp410_disable,
+ .mode_valid = tfp410_mode_valid,
+};
static const struct drm_bridge_timings tfp410_default_timings = {
.input_bus_flags = DRM_BUS_FLAG_PIXDATA_SAMPLE_POSEDGE
@@ -270,51 +284,9 @@ static int tfp410_parse_timings(struct tfp410 *dvi, bool i2c)
return 0;
}
-static int tfp410_get_connector_properties(struct tfp410 *dvi)
-{
- struct device_node *connector_node, *ddc_phandle;
- int ret = 0;
-
- /* port@1 is the connector node */
- connector_node = of_graph_get_remote_node(dvi->dev->of_node, 1, -1);
- if (!connector_node)
- return -ENODEV;
-
- if (of_device_is_compatible(connector_node, "hdmi-connector"))
- dvi->connector_type = DRM_MODE_CONNECTOR_HDMIA;
- else
- dvi->connector_type = DRM_MODE_CONNECTOR_DVID;
-
- dvi->hpd = fwnode_gpiod_get_index(&connector_node->fwnode,
- "hpd", 0, GPIOD_IN, "hpd");
- if (IS_ERR(dvi->hpd)) {
- ret = PTR_ERR(dvi->hpd);
- dvi->hpd = NULL;
- if (ret == -ENOENT)
- ret = 0;
- else
- goto fail;
- }
-
- ddc_phandle = of_parse_phandle(connector_node, "ddc-i2c-bus", 0);
- if (!ddc_phandle)
- goto fail;
-
- dvi->ddc = of_get_i2c_adapter_by_node(ddc_phandle);
- if (dvi->ddc)
- dev_info(dvi->dev, "Connector's ddc i2c bus found\n");
- else
- ret = -EPROBE_DEFER;
-
- of_node_put(ddc_phandle);
-
-fail:
- of_node_put(connector_node);
- return ret;
-}
-
static int tfp410_init(struct device *dev, bool i2c)
{
+ struct device_node *node;
struct tfp410 *dvi;
int ret;
@@ -326,21 +298,31 @@ static int tfp410_init(struct device *dev, bool i2c)
dvi = devm_kzalloc(dev, sizeof(*dvi), GFP_KERNEL);
if (!dvi)
return -ENOMEM;
+
+ dvi->dev = dev;
dev_set_drvdata(dev, dvi);
dvi->bridge.funcs = &tfp410_bridge_funcs;
dvi->bridge.of_node = dev->of_node;
dvi->bridge.timings = &dvi->timings;
- dvi->dev = dev;
+ dvi->bridge.type = DRM_MODE_CONNECTOR_DVID;
ret = tfp410_parse_timings(dvi, i2c);
if (ret)
- goto fail;
+ return ret;
- ret = tfp410_get_connector_properties(dvi);
- if (ret)
- goto fail;
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(dev->of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ dvi->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+ if (!dvi->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the powerdown GPIO. */
dvi->powerdown = devm_gpiod_get_optional(dev, "powerdown",
GPIOD_OUT_HIGH);
if (IS_ERR(dvi->powerdown)) {
@@ -348,48 +330,18 @@ static int tfp410_init(struct device *dev, bool i2c)
return PTR_ERR(dvi->powerdown);
}
- if (dvi->hpd)
- dvi->hpd_irq = gpiod_to_irq(dvi->hpd);
- else
- dvi->hpd_irq = -ENXIO;
-
- if (dvi->hpd_irq >= 0) {
- INIT_DELAYED_WORK(&dvi->hpd_work, tfp410_hpd_work_func);
-
- ret = devm_request_threaded_irq(dev, dvi->hpd_irq,
- NULL, tfp410_hpd_irq_thread, IRQF_TRIGGER_RISING |
- IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "hdmi-hpd", dvi);
- if (ret) {
- DRM_ERROR("failed to register hpd interrupt\n");
- goto fail;
- }
- }
-
+ /* Register the DRM bridge. */
drm_bridge_add(&dvi->bridge);
return 0;
-fail:
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
- return ret;
}
static int tfp410_fini(struct device *dev)
{
struct tfp410 *dvi = dev_get_drvdata(dev);
- if (dvi->hpd_irq >= 0)
- cancel_delayed_work_sync(&dvi->hpd_work);
-
drm_bridge_remove(&dvi->bridge);
- if (dvi->ddc)
- i2c_put_adapter(dvi->ddc);
- if (dvi->hpd)
- gpiod_put(dvi->hpd);
-
return 0;
}
diff --git a/drivers/gpu/drm/bridge/ti-tpd12s015.c b/drivers/gpu/drm/bridge/ti-tpd12s015.c
new file mode 100644
index 000000000000..514cbf0eac75
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-tpd12s015.c
@@ -0,0 +1,211 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * TPD12S015 HDMI ESD protection & level shifter chip driver
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated
+ *
+ * Based on the omapdrm-specific encoder-opa362 driver
+ *
+ * Copyright (C) 2013 Texas Instruments Incorporated
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/platform_device.h>
+
+#include <drm/drm_bridge.h>
+
+struct tpd12s015_device {
+ struct drm_bridge bridge;
+
+ struct gpio_desc *ct_cp_hpd_gpio;
+ struct gpio_desc *ls_oe_gpio;
+ struct gpio_desc *hpd_gpio;
+ int hpd_irq;
+
+ struct drm_bridge *next_bridge;
+};
+
+static inline struct tpd12s015_device *to_tpd12s015(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tpd12s015_device, bridge);
+}
+
+static int tpd12s015_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+ int ret;
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ ret = drm_bridge_attach(bridge->encoder, tpd->next_bridge,
+ bridge, flags);
+ if (ret < 0)
+ return ret;
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 1);
+
+ /* DC-DC converter needs at most 300us to get to 90% of 5V. */
+ usleep_range(300, 1000);
+
+ return 0;
+}
+
+static void tpd12s015_detach(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ls_oe_gpio, 0);
+}
+
+static enum drm_connector_status tpd12s015_detect(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ if (gpiod_get_value_cansleep(tpd->hpd_gpio))
+ return connector_status_connected;
+ else
+ return connector_status_disconnected;
+}
+
+static void tpd12s015_hpd_enable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 1);
+}
+
+static void tpd12s015_hpd_disable(struct drm_bridge *bridge)
+{
+ struct tpd12s015_device *tpd = to_tpd12s015(bridge);
+
+ gpiod_set_value_cansleep(tpd->ct_cp_hpd_gpio, 0);
+}
+
+static const struct drm_bridge_funcs tpd12s015_bridge_funcs = {
+ .attach = tpd12s015_attach,
+ .detach = tpd12s015_detach,
+ .detect = tpd12s015_detect,
+ .hpd_enable = tpd12s015_hpd_enable,
+ .hpd_disable = tpd12s015_hpd_disable,
+};
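+
+/*
+ * Note on the ops model (illustrative): because this bridge reports
+ * DRM_BRIDGE_OP_DETECT (and DRM_BRIDGE_OP_HPD when the HPD GPIO has an
+ * IRQ, see probe below), a connector built on top of the chain, e.g.
+ * with drm_bridge_connector_init(drm, encoder), gets its detection and
+ * hot-plug handling from tpd12s015_detect() and the
+ * drm_bridge_hpd_notify() call in the ISR below, with no connector code
+ * in this driver.
+ */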
+
+static irqreturn_t tpd12s015_hpd_isr(int irq, void *data)
+{
+ struct tpd12s015_device *tpd = data;
+ struct drm_bridge *bridge = &tpd->bridge;
+
+ drm_bridge_hpd_notify(bridge, tpd12s015_detect(bridge));
+
+ return IRQ_HANDLED;
+}
+
+static int tpd12s015_probe(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd;
+ struct device_node *node;
+ struct gpio_desc *gpio;
+ int ret;
+
+ tpd = devm_kzalloc(&pdev->dev, sizeof(*tpd), GFP_KERNEL);
+ if (!tpd)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, tpd);
+
+ tpd->bridge.funcs = &tpd12s015_bridge_funcs;
+ tpd->bridge.of_node = pdev->dev.of_node;
+ tpd->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
+ tpd->bridge.ops = DRM_BRIDGE_OP_DETECT;
+
+ /* Get the next bridge, connected to port@1. */
+ node = of_graph_get_remote_node(pdev->dev.of_node, 1, -1);
+ if (!node)
+ return -ENODEV;
+
+ tpd->next_bridge = of_drm_find_bridge(node);
+ of_node_put(node);
+
+ if (!tpd->next_bridge)
+ return -EPROBE_DEFER;
+
+ /* Get the control and HPD GPIOs. */
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ct_cp_hpd_gpio = gpio;
+
+ gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
+ GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->ls_oe_gpio = gpio;
+
+ gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2, GPIOD_IN);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ tpd->hpd_gpio = gpio;
+
+ /* Register the IRQ if the HPD GPIO is IRQ-capable. */
+ tpd->hpd_irq = gpiod_to_irq(tpd->hpd_gpio);
+ if (tpd->hpd_irq > 0) {
+ ret = devm_request_threaded_irq(&pdev->dev, tpd->hpd_irq, NULL,
+ tpd12s015_hpd_isr,
+ IRQF_TRIGGER_RISING |
+ IRQF_TRIGGER_FALLING |
+ IRQF_ONESHOT,
+ "tpd12s015 hpd", tpd);
+ if (ret)
+ return ret;
+
+ tpd->bridge.ops |= DRM_BRIDGE_OP_HPD;
+ }
+
+ /* Register the DRM bridge. */
+ drm_bridge_add(&tpd->bridge);
+
+ return 0;
+}
+
+static int __exit tpd12s015_remove(struct platform_device *pdev)
+{
+ struct tpd12s015_device *tpd = platform_get_drvdata(pdev);
+
+ drm_bridge_remove(&tpd->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tpd12s015_of_match[] = {
+ { .compatible = "ti,tpd12s015", },
+ {},
+};
+
+MODULE_DEVICE_TABLE(of, tpd12s015_of_match);
+
+static struct platform_driver tpd12s015_driver = {
+ .probe = tpd12s015_probe,
+ .remove = __exit_p(tpd12s015_remove),
+ .driver = {
+ .name = "tpd12s015",
+ .of_match_table = tpd12s015_of_match,
+ },
+};
+
+module_platform_driver(tpd12s015_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("TPD12S015 HDMI level shifter and ESD protection driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/cirrus/cirrus.c b/drivers/gpu/drm/cirrus/cirrus.c
index 248c9f765c45..d2ff63ce8eaf 100644
--- a/drivers/gpu/drm/cirrus/cirrus.c
+++ b/drivers/gpu/drm/cirrus/cirrus.c
@@ -38,7 +38,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
#define DRIVER_NAME "cirrus"
#define DRIVER_DESC "qemu cirrus vga"
@@ -152,9 +151,13 @@ static int cirrus_pitch(struct drm_framebuffer *fb)
static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
{
+ int idx;
u32 addr;
u8 tmp;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return;
+
addr = offset >> 2;
wreg_crt(cirrus, 0x0c, (u8)((addr >> 8) & 0xff));
wreg_crt(cirrus, 0x0d, (u8)(addr & 0xff));
@@ -169,6 +172,8 @@ static void cirrus_set_start_address(struct cirrus_device *cirrus, u32 offset)
tmp &= 0x7f;
tmp |= (addr >> 12) & 0x80;
wreg_crt(cirrus, 0x1d, tmp);
+
+ drm_dev_exit(idx);
}
static int cirrus_mode_set(struct cirrus_device *cirrus,
@@ -177,9 +182,12 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
{
int hsyncstart, hsyncend, htotal, hdispend;
int vtotal, vdispend;
- int tmp;
+ int tmp, idx;
int sr07 = 0, hdr = 0;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ return -1;
+
htotal = mode->htotal / 8;
hsyncend = mode->hsync_end / 8;
hsyncstart = mode->hsync_start / 8;
@@ -265,6 +273,7 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
hdr = 0xc5;
break;
default:
+ drm_dev_exit(idx);
return -1;
}
@@ -293,6 +302,8 @@ static int cirrus_mode_set(struct cirrus_device *cirrus,
/* Unblank (needed on S3 resume, vgabios doesn't do it then) */
outb(0x20, 0x3c0);
+
+ drm_dev_exit(idx);
return 0;
}
@@ -301,10 +312,16 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
{
struct cirrus_device *cirrus = fb->dev->dev_private;
void *vmap;
+ int idx, ret;
+
+ ret = -ENODEV;
+ if (!drm_dev_enter(&cirrus->dev, &idx))
+ goto out;
+ ret = -ENOMEM;
vmap = drm_gem_shmem_vmap(fb->obj[0]);
if (!vmap)
- return -ENOMEM;
+ goto out_dev_exit;
if (cirrus->cpp == fb->format->cpp[0])
drm_fb_memcpy_dstclip(cirrus->vram,
@@ -324,7 +341,12 @@ static int cirrus_fb_blit_rect(struct drm_framebuffer *fb,
WARN_ON_ONCE("cpp mismatch");
drm_gem_shmem_vunmap(fb->obj[0], vmap);
- return 0;
+ ret = 0;
+
+out_dev_exit:
+ drm_dev_exit(idx);
+out:
+ return ret;
}
static int cirrus_fb_blit_fullscreen(struct drm_framebuffer *fb)
@@ -434,13 +456,6 @@ static void cirrus_pipe_update(struct drm_simple_display_pipe *pipe,
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
cirrus_fb_blit_rect(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs cirrus_pipe_funcs = {
@@ -510,6 +525,14 @@ static void cirrus_mode_config_init(struct cirrus_device *cirrus)
/* ------------------------------------------------------------------ */
+static void cirrus_release(struct drm_device *dev)
+{
+ struct cirrus_device *cirrus = dev->dev_private;
+
+ drm_mode_config_cleanup(dev);
+ kfree(cirrus);
+}
+
DEFINE_DRM_GEM_FOPS(cirrus_fops);
static struct drm_driver cirrus_driver = {
@@ -523,6 +546,7 @@ static struct drm_driver cirrus_driver = {
.fops = &cirrus_fops,
DRM_GEM_SHMEM_DRIVER_OPS,
+ .release = cirrus_release,
};
static int cirrus_pci_probe(struct pci_dev *pdev,
@@ -606,12 +630,11 @@ static void cirrus_pci_remove(struct pci_dev *pdev)
struct drm_device *dev = pci_get_drvdata(pdev);
struct cirrus_device *cirrus = dev->dev_private;
- drm_dev_unregister(dev);
- drm_mode_config_cleanup(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
iounmap(cirrus->mmio);
iounmap(cirrus->vram);
drm_dev_put(dev);
- kfree(cirrus);
pci_release_regions(pdev);
}
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index d33691512a8e..9ccfbf213d72 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -30,6 +30,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_uapi.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_debugfs.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
@@ -1018,6 +1019,122 @@ static void drm_atomic_connector_print_state(struct drm_printer *p,
}
/**
+ * drm_atomic_get_bridge_state - get bridge state
+ * @state: global atomic state object
+ * @bridge: bridge to get state object for
+ *
+ * This function returns the bridge state for the given bridge, allocating it
+ * if needed. It will also grab the relevant bridge lock to make sure that the
+ * state is consistent.
+ *
+ * Returns:
+ *
+ * Either the allocated state or the error code encoded into the pointer. When
+ * the error is EDEADLK then the w/w mutex code has detected a deadlock and the
+ * entire atomic sequence must be restarted.
+ */
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_private_obj_state(state, &bridge->base);
+ if (IS_ERR(obj_state))
+ return ERR_CAST(obj_state);
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_bridge_state);
+
+/**
+ * drm_atomic_get_old_bridge_state - get old bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the old bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_old_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_old_bridge_state);
+
+/**
+ * drm_atomic_get_new_bridge_state - get new bridge state, if it exists
+ * @state: global atomic state object
+ * @bridge: bridge to grab
+ *
+ * This function returns the new bridge state for the given bridge, or NULL if
+ * the bridge is not part of the global atomic state.
+ */
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge)
+{
+ struct drm_private_state *obj_state;
+
+ obj_state = drm_atomic_get_new_private_obj_state(state, &bridge->base);
+ if (!obj_state)
+ return NULL;
+
+ return drm_priv_to_bridge_state(obj_state);
+}
+EXPORT_SYMBOL(drm_atomic_get_new_bridge_state);
+
+/**
+ * drm_atomic_add_encoder_bridges - add bridges attached to an encoder
+ * @state: atomic state
+ * @encoder: DRM encoder
+ *
+ * This function adds all bridges attached to @encoder. This is needed to add
+ * bridge states to @state and make them available when
+ * &drm_bridge_funcs.atomic_check(), &drm_bridge_funcs.atomic_pre_enable(),
+ * &drm_bridge_funcs.atomic_enable(),
+ * &drm_bridge_funcs.atomic_disable() and
+ * &drm_bridge_funcs.atomic_post_disable() are called.
+ *
+ * Returns:
+ * 0 on success, or -EDEADLK or -ENOMEM on failure. When the error is EDEADLK
+ * then the w/w mutex code has detected a deadlock and the entire atomic
+ * sequence must be restarted. All other errors are fatal.
+ */
+int
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_state *bridge_state;
+ struct drm_bridge *bridge;
+
+ if (!encoder)
+ return 0;
+
+ DRM_DEBUG_ATOMIC("Adding all bridges for [encoder:%d:%s] to %p\n",
+ encoder->base.id, encoder->name, state);
+
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ /* Skip bridges that don't implement the atomic state hooks. */
+ if (!bridge->funcs->atomic_duplicate_state)
+ continue;
+
+ bridge_state = drm_atomic_get_bridge_state(state, bridge);
+ if (IS_ERR(bridge_state))
+ return PTR_ERR(bridge_state);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_add_encoder_bridges);
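+
+/*
+ * Usage sketch (hypothetical driver code): drivers that do not go through
+ * drm_atomic_helper_check_modeset() can add the bridge states themselves
+ * from their atomic check path, e.g.:
+ *
+ *	ret = drm_atomic_add_encoder_bridges(state, encoder);
+ *	if (ret)
+ *		return ret;
+ */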
+
+/**
* drm_atomic_add_affected_connectors - add connectors for CRTC
* @state: atomic state
* @crtc: DRM CRTC
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 4511c2e07bb9..85d163f16801 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -437,12 +437,12 @@ mode_fixup(struct drm_atomic_state *state)
funcs = encoder->helper_private;
bridge = drm_bridge_chain_get_first_bridge(encoder);
- ret = drm_bridge_chain_mode_fixup(bridge,
- &new_crtc_state->mode,
- &new_crtc_state->adjusted_mode);
- if (!ret) {
- DRM_DEBUG_ATOMIC("Bridge fixup failed\n");
- return -EINVAL;
+ ret = drm_atomic_bridge_chain_check(bridge,
+ new_crtc_state,
+ new_conn_state);
+ if (ret) {
+ DRM_DEBUG_ATOMIC("Bridge atomic check failed\n");
+ return ret;
}
if (funcs && funcs->atomic_check) {
@@ -583,6 +583,7 @@ mode_valid(struct drm_atomic_state *state)
* &drm_crtc_state.connectors_changed is set when a connector is added or
* removed from the CRTC. &drm_crtc_state.active_changed is set when
* &drm_crtc_state.active changes, which is used for DPMS.
+ * &drm_crtc_state.no_vblank is set from the result of drm_dev_has_vblank().
* See also: drm_atomic_crtc_needs_modeset()
*
* IMPORTANT:
@@ -649,6 +650,11 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return -EINVAL;
}
+
+		new_crtc_state->no_vblank = !drm_dev_has_vblank(dev);
}
ret = handle_conflicting_encoders(state, false);
@@ -730,6 +736,26 @@ drm_atomic_helper_check_modeset(struct drm_device *dev,
return ret;
}
+ /*
+ * Iterate over all connectors again, and add all affected bridges to
+ * the state.
+ */
+ for_each_oldnew_connector_in_state(state, connector,
+ old_connector_state,
+ new_connector_state, i) {
+ struct drm_encoder *encoder;
+
+ encoder = old_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+
+ encoder = new_connector_state->best_encoder;
+ ret = drm_atomic_add_encoder_bridges(state, encoder);
+ if (ret)
+ return ret;
+ }
+
ret = mode_valid(state);
if (ret)
return ret;
@@ -2215,7 +2241,9 @@ EXPORT_SYMBOL(drm_atomic_helper_wait_for_dependencies);
* when a job is queued, and any change to the pipeline that does not touch the
* connector is leading to timeouts when calling
* drm_atomic_helper_wait_for_vblanks() or
- * drm_atomic_helper_wait_for_flip_done().
+ * drm_atomic_helper_wait_for_flip_done(). In addition to writeback
+ * connectors, this function can also fake VBLANK events for CRTCs without
+ * VBLANK interrupt.
*
* This is part of the atomic helper support for nonblocking commits, see
* drm_atomic_helper_setup_commit() for an overview.
@@ -3508,3 +3536,44 @@ fail:
return ret;
}
EXPORT_SYMBOL(drm_atomic_helper_legacy_gamma_set);
+
+/**
+ * drm_atomic_helper_bridge_propagate_bus_fmt() - Propagate output format to
+ * the input end of a bridge
+ * @bridge: bridge control structure
+ * @bridge_state: new bridge state
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ * @output_fmt: tested output bus format
+ * @num_input_fmts: will contain the size of the returned array
+ *
+ * This helper is a pluggable implementation of the
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts operation for bridges that don't
+ * modify the bus configuration between their input and their output. It
+ * returns an array of input formats with a single element set to @output_fmt.
+ *
+ * RETURNS:
+ * a valid format array of size @num_input_fmts, or NULL if the allocation
+ * failed
+ */
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts;
+
+ input_fmts = kzalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_propagate_bus_fmt);
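+
+/*
+ * Usage sketch (hypothetical bridge driver): a bridge that does not touch
+ * the bus format can plug the helper above directly into its funcs:
+ *
+ *	static const struct drm_bridge_funcs foo_bridge_funcs = {
+ *		...
+ *		.atomic_get_input_bus_fmts =
+ *			drm_atomic_helper_bridge_propagate_bus_fmt,
+ *	};
+ */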
diff --git a/drivers/gpu/drm/drm_atomic_state_helper.c b/drivers/gpu/drm/drm_atomic_state_helper.c
index 7cf3cf936547..8fce6a115dfe 100644
--- a/drivers/gpu/drm/drm_atomic_state_helper.c
+++ b/drivers/gpu/drm/drm_atomic_state_helper.c
@@ -26,6 +26,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_connector.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
@@ -551,3 +552,104 @@ void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj
memcpy(state, obj->state, sizeof(*state));
}
EXPORT_SYMBOL(__drm_atomic_helper_private_obj_duplicate_state);
+
+/**
+ * __drm_atomic_helper_bridge_duplicate_state() - Copy atomic bridge state
+ * @bridge: bridge object
+ * @state: atomic bridge state
+ *
+ * Copies atomic state from a bridge's current state and resets inferred values.
+ * This is useful for drivers that subclass the bridge state.
+ */
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ __drm_atomic_helper_private_obj_duplicate_state(&bridge->base,
+ &state->base);
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_duplicate_state() - Duplicate a bridge state object
+ * @bridge: bridge object
+ *
+ * Allocates a new bridge state and initializes it with the current bridge
+ * state values. This helper is meant to be used as a bridge
+ * &drm_bridge_funcs.atomic_duplicate_state hook for bridges that don't
+ * subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *new;
+
+ if (WARN_ON(!bridge->base.state))
+ return NULL;
+
+ new = kzalloc(sizeof(*new), GFP_KERNEL);
+ if (new)
+ __drm_atomic_helper_bridge_duplicate_state(bridge, new);
+
+ return new;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_duplicate_state);
+
+/**
+ * drm_atomic_helper_bridge_destroy_state() - Destroy a bridge state object
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to destroy
+ *
+ * Destroys a bridge state previously created by
+ * &drm_atomic_helper_bridge_reset() or
+ * &drm_atomic_helper_bridge_duplicate_state(). This helper is meant to be
+ * used as a bridge &drm_bridge_funcs.atomic_destroy_state hook for bridges
+ * that don't subclass the bridge state.
+ */
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ kfree(state);
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_destroy_state);
+
+/**
+ * __drm_atomic_helper_bridge_reset() - Initialize a bridge state to its
+ * default
+ * @bridge: the bridge this state refers to
+ * @state: bridge state to initialize
+ *
+ * Initializes the bridge state to default values. This is meant to be called
+ * by the bridge &drm_bridge_funcs.atomic_reset hook for bridges that subclass
+ * the bridge state.
+ */
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state)
+{
+ memset(state, 0, sizeof(*state));
+ state->bridge = bridge;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_bridge_reset);
+
+/**
+ * drm_atomic_helper_bridge_reset() - Allocate and initialize a bridge state
+ * to its default
+ * @bridge: the bridge this state refers to
+ *
+ * Allocates the bridge state and initializes it to default values. This helper
+ * is meant to be used as a bridge &drm_bridge_funcs.atomic_reset hook for
+ * bridges that don't subclass the bridge state.
+ */
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge)
+{
+ struct drm_bridge_state *bridge_state;
+
+ bridge_state = kzalloc(sizeof(*bridge_state), GFP_KERNEL);
+ if (!bridge_state)
+ return ERR_PTR(-ENOMEM);
+
+ __drm_atomic_helper_bridge_reset(bridge, bridge_state);
+ return bridge_state;
+}
+EXPORT_SYMBOL(drm_atomic_helper_bridge_reset);
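+
+/*
+ * Usage sketch (hypothetical bridge driver): bridges that don't subclass
+ * the bridge state typically wire the three helpers above up together:
+ *
+ *	static const struct drm_bridge_funcs foo_bridge_funcs = {
+ *		.atomic_reset = drm_atomic_helper_bridge_reset,
+ *		.atomic_duplicate_state =
+ *			drm_atomic_helper_bridge_duplicate_state,
+ *		.atomic_destroy_state =
+ *			drm_atomic_helper_bridge_destroy_state,
+ *		...
+ *	};
+ */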
diff --git a/drivers/gpu/drm/drm_auth.c b/drivers/gpu/drm/drm_auth.c
index cc9acd986c68..531b876d0ed8 100644
--- a/drivers/gpu/drm/drm_auth.c
+++ b/drivers/gpu/drm/drm_auth.c
@@ -153,11 +153,6 @@ static int drm_new_set_master(struct drm_device *dev, struct drm_file *fpriv)
return -ENOMEM;
}
- if (dev->driver->master_create) {
- ret = dev->driver->master_create(dev, fpriv->master);
- if (ret)
- goto out_err;
- }
fpriv->is_master = 1;
fpriv->authenticated = 1;
@@ -332,9 +327,6 @@ static void drm_master_destroy(struct kref *kref)
if (drm_core_check_feature(dev, DRIVER_MODESET))
drm_lease_destroy(master);
- if (dev->driver->master_destroy)
- dev->driver->master_destroy(dev, master);
-
drm_legacy_master_rmmaps(dev, master);
idr_destroy(&master->magic_map);
diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c
index c2cf0c90fa26..afdec8e5fc68 100644
--- a/drivers/gpu/drm/drm_bridge.c
+++ b/drivers/gpu/drm/drm_bridge.c
@@ -25,6 +25,7 @@
#include <linux/module.h>
#include <linux/mutex.h>
+#include <drm/drm_atomic_state_helper.h>
#include <drm/drm_bridge.h>
#include <drm/drm_encoder.h>
@@ -38,26 +39,56 @@
* encoder chain.
*
* A bridge is always attached to a single &drm_encoder at a time, but can be
- * either connected to it directly, or through an intermediate bridge::
+ * either connected to it directly, or through a chain of bridges::
*
- * encoder ---> bridge B ---> bridge A
+ * [ CRTC ---> ] Encoder ---> Bridge A ---> Bridge B
*
- * Here, the output of the encoder feeds to bridge B, and that furthers feeds to
- * bridge A.
+ * Here, the output of the encoder feeds to bridge A, and that further feeds to
+ * bridge B. Bridge chains can be arbitrarily long, and shall be fully linear:
+ * Chaining multiple bridges to the output of a bridge, or the same bridge to
+ * the output of different bridges, is not supported.
*
- * The driver using the bridge is responsible to make the associations between
- * the encoder and bridges. Once these links are made, the bridges will
- * participate along with encoder functions to perform mode_set/enable/disable
- * through the ops provided in &drm_bridge_funcs.
+ * Display drivers are responsible for linking encoders with the first bridge
+ * in each chain. This is done by acquiring the appropriate bridge with
+ * of_drm_find_bridge() or drm_of_find_panel_or_bridge(), or creating it for a
+ * panel with drm_panel_bridge_add_typed() (or the managed version
+ * devm_drm_panel_bridge_add_typed()). Once acquired, the bridge shall be
+ * attached to the encoder with a call to drm_bridge_attach().
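+ *
+ * As a rough sketch (error paths trimmed, and assuming the encoder and the
+ * DT node used to look the bridge up already exist), the acquisition and
+ * attachment sequence may look like::
+ *
+ *	struct drm_bridge *bridge;
+ *	int ret;
+ *
+ *	bridge = of_drm_find_bridge(remote_node);
+ *	if (!bridge)
+ *		return -EPROBE_DEFER;
+ *
+ *	ret = drm_bridge_attach(encoder, bridge, NULL, 0);
+ *	if (ret < 0)
+ *		return ret;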
*
- * drm_bridge, like drm_panel, aren't drm_mode_object entities like planes,
+ * Bridges are responsible for linking themselves with the next bridge in the
+ * chain, if any. This is done the same way as for encoders, with the call to
+ * drm_bridge_attach() occurring in the &drm_bridge_funcs.attach operation.
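+ *
+ * A bridge in the middle of a chain would then, from its own
+ * &drm_bridge_funcs.attach implementation, chain up the next bridge in the
+ * same way (sketch, the ``my_`` names are hypothetical)::
+ *
+ *	static int my_bridge_attach(struct drm_bridge *bridge,
+ *				    enum drm_bridge_attach_flags flags)
+ *	{
+ *		struct my_bridge *mybr = bridge_to_my_bridge(bridge);
+ *
+ *		return drm_bridge_attach(bridge->encoder, mybr->next_bridge,
+ *					 bridge, flags);
+ *	}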
+ *
+ * Once these links are created, the bridges can participate along with encoder
+ * functions to perform mode validation and fixup (through
+ * drm_bridge_chain_mode_valid() and drm_atomic_bridge_chain_check()), mode
+ * setting (through drm_bridge_chain_mode_set()), enable (through
+ * drm_atomic_bridge_chain_pre_enable() and drm_atomic_bridge_chain_enable())
+ * and disable (through drm_atomic_bridge_chain_disable() and
+ * drm_atomic_bridge_chain_post_disable()). Those functions call the
+ * corresponding operations provided in &drm_bridge_funcs in sequence for all
+ * bridges in the chain.
+ *
+ * For display drivers that use the atomic helpers
+ * drm_atomic_helper_check_modeset(),
+ * drm_atomic_helper_commit_modeset_enables() and
+ * drm_atomic_helper_commit_modeset_disables() (either directly in hand-rolled
+ * commit check and commit tail handlers, or through the higher-level
+ * drm_atomic_helper_check() and drm_atomic_helper_commit_tail() or
+ * drm_atomic_helper_commit_tail_rpm() helpers), this is done transparently and
+ * requires no intervention from the driver. For other drivers, the relevant
+ * DRM bridge chain functions shall be called manually.
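+ *
+ * As a sketch, a hand-rolled commit tail would bracket the driver's own CRTC
+ * programming with the chain calls, for instance on the disable path
+ * (``my_crtc_disable()`` is hypothetical)::
+ *
+ *	drm_atomic_bridge_chain_disable(bridge, old_state);
+ *	my_crtc_disable(crtc);
+ *	drm_atomic_bridge_chain_post_disable(bridge, old_state);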
+ *
+ * Bridges also participate in implementing the &drm_connector at the end of
+ * the bridge chain. Display drivers may use the drm_bridge_connector_init()
+ * helper to create the &drm_connector, or implement it manually on top of the
+ * connector-related operations exposed by the bridge (see the overview
+ * documentation of bridge operations for more details).
+ *
+ * &drm_bridge, like &drm_panel, aren't &drm_mode_object entities like planes,
* CRTCs, encoders or connectors and hence are not visible to userspace. They
* just provide additional hooks to get the desired output at the end of the
* encoder chain.
- *
- * Bridges can also be chained up using the &drm_bridge.chain_node field.
- *
- * Both legacy CRTC helpers and the new atomic modeset helpers support bridges.
*/
static DEFINE_MUTEX(bridge_lock);
@@ -70,6 +101,8 @@ static LIST_HEAD(bridge_list);
*/
void drm_bridge_add(struct drm_bridge *bridge)
{
+ mutex_init(&bridge->hpd_mutex);
+
mutex_lock(&bridge_lock);
list_add_tail(&bridge->list, &bridge_list);
mutex_unlock(&bridge_lock);
@@ -86,15 +119,43 @@ void drm_bridge_remove(struct drm_bridge *bridge)
mutex_lock(&bridge_lock);
list_del_init(&bridge->list);
mutex_unlock(&bridge_lock);
+
+ mutex_destroy(&bridge->hpd_mutex);
}
EXPORT_SYMBOL(drm_bridge_remove);
+static struct drm_private_state *
+drm_bridge_atomic_duplicate_priv_state(struct drm_private_obj *obj)
+{
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_duplicate_state(bridge);
+ return state ? &state->base : NULL;
+}
+
+static void
+drm_bridge_atomic_destroy_priv_state(struct drm_private_obj *obj,
+ struct drm_private_state *s)
+{
+ struct drm_bridge_state *state = drm_priv_to_bridge_state(s);
+ struct drm_bridge *bridge = drm_priv_to_bridge(obj);
+
+ bridge->funcs->atomic_destroy_state(bridge, state);
+}
+
+static const struct drm_private_state_funcs drm_bridge_priv_state_funcs = {
+ .atomic_duplicate_state = drm_bridge_atomic_duplicate_priv_state,
+ .atomic_destroy_state = drm_bridge_atomic_destroy_priv_state,
+};
+
/**
* drm_bridge_attach - attach the bridge to an encoder's chain
*
* @encoder: DRM encoder
* @bridge: bridge to attach
* @previous: previous bridge in the chain (optional)
+ * @flags: DRM_BRIDGE_ATTACH_* flags
*
* Called by a kms driver to link the bridge to an encoder's chain. The previous
* argument specifies the previous bridge in the chain. If NULL, the bridge is
@@ -112,7 +173,8 @@ EXPORT_SYMBOL(drm_bridge_remove);
* Zero on success, error code on failure
*/
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous)
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags)
{
int ret;
@@ -134,16 +196,36 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
list_add(&bridge->chain_node, &encoder->bridge_chain);
if (bridge->funcs->attach) {
- ret = bridge->funcs->attach(bridge);
- if (ret < 0) {
- list_del(&bridge->chain_node);
- bridge->dev = NULL;
- bridge->encoder = NULL;
- return ret;
+ ret = bridge->funcs->attach(bridge, flags);
+ if (ret < 0)
+ goto err_reset_bridge;
+ }
+
+ if (bridge->funcs->atomic_reset) {
+ struct drm_bridge_state *state;
+
+ state = bridge->funcs->atomic_reset(bridge);
+ if (IS_ERR(state)) {
+ ret = PTR_ERR(state);
+ goto err_detach_bridge;
}
+
+ drm_atomic_private_obj_init(bridge->dev, &bridge->base,
+ &state->base,
+ &drm_bridge_priv_state_funcs);
}
return 0;
+
+err_detach_bridge:
+ if (bridge->funcs->detach)
+ bridge->funcs->detach(bridge);
+
+err_reset_bridge:
+ bridge->dev = NULL;
+ bridge->encoder = NULL;
+ list_del(&bridge->chain_node);
+ return ret;
}
EXPORT_SYMBOL(drm_bridge_attach);
@@ -155,6 +237,9 @@ void drm_bridge_detach(struct drm_bridge *bridge)
if (WARN_ON(!bridge->dev))
return;
+ if (bridge->funcs->atomic_reset)
+ drm_atomic_private_obj_fini(&bridge->base);
+
if (bridge->funcs->detach)
bridge->funcs->detach(bridge);
@@ -163,14 +248,92 @@ void drm_bridge_detach(struct drm_bridge *bridge)
}
/**
- * DOC: bridge callbacks
+ * DOC: bridge operations
+ *
+ * Bridge drivers expose operations through the &drm_bridge_funcs structure.
+ * The DRM internals (atomic and CRTC helpers) use the helpers defined in
+ * drm_bridge.c to call bridge operations. Those operations are divided into
+ * three big categories to support different parts of the bridge usage.
+ *
+ * - The encoder-related operations support control of the bridges in the
+ * chain, and are roughly counterparts to the &drm_encoder_helper_funcs
+ * operations. They are used by the legacy CRTC and the atomic modeset
+ * helpers to perform mode validation, fixup and setting, and enable and
+ * disable the bridge automatically.
+ *
+ * The enable and disable operations are split in
+ * &drm_bridge_funcs.pre_enable, &drm_bridge_funcs.enable,
+ * &drm_bridge_funcs.disable and &drm_bridge_funcs.post_disable to provide
+ * finer-grained control.
+ *
+ * Bridge drivers may implement the legacy version of those operations, or
+ * the atomic version (prefixed with atomic\_), in which case they shall also
+ * implement the atomic state bookkeeping operations
+ * (&drm_bridge_funcs.atomic_duplicate_state,
+ * &drm_bridge_funcs.atomic_destroy_state and &drm_bridge_funcs.atomic_reset).
+ * Mixing atomic and non-atomic versions of the operations is not supported.
+ *
+ * - The bus format negotiation operations
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts allow bridge drivers to
+ * negotiate the formats transmitted between bridges in the chain when
+ * multiple formats are supported. Negotiation for formats is performed
+ * transparently for display drivers by the atomic modeset helpers. Only
+ * atomic versions of those operations exist, bridge drivers that need to
+ * implement them shall thus also implement the atomic version of the
+ * encoder-related operations. This feature is not supported by the legacy
+ * CRTC helpers.
+ *
+ * - The connector-related operations support implementing a &drm_connector
+ * based on a chain of bridges. DRM bridges traditionally create a
+ * &drm_connector for bridges meant to be used at the end of the chain. This
+ * puts additional burden on bridge drivers, especially for bridges that may
+ * be used in the middle of a chain or at the end of it. Furthermore, it
+ * requires all operations of the &drm_connector to be handled by a single
+ * bridge, which doesn't always match the hardware architecture.
+ *
+ * To simplify bridge drivers and make the connector implementation more
+ * flexible, a new model allows bridges to unconditionally skip creation of
+ * &drm_connector and instead expose &drm_bridge_funcs operations to support
+ * an externally-implemented &drm_connector. Those operations are
+ * &drm_bridge_funcs.detect, &drm_bridge_funcs.get_modes,
+ * &drm_bridge_funcs.get_edid, &drm_bridge_funcs.hpd_notify,
+ * &drm_bridge_funcs.hpd_enable and &drm_bridge_funcs.hpd_disable. When
+ * implemented, display drivers shall create a &drm_connector instance for
+ * each chain of bridges, and implement those connector instances based on
+ * the bridge connector operations.
*
- * The &drm_bridge_funcs ops are populated by the bridge driver. The DRM
- * internals (atomic and CRTC helpers) use the helpers defined in drm_bridge.c
- * These helpers call a specific &drm_bridge_funcs op for all the bridges
- * during encoder configuration.
+ * Bridge drivers shall implement the connector-related operations for all
+ * the features that the bridge hardware supports. For instance, if a bridge
+ * supports reading EDID, the &drm_bridge_funcs.get_edid operation shall be
+ * implemented. This however doesn't mean that the DDC lines are wired to the
+ * bridge on a particular platform, as they could also be connected to an I2C
+ * controller of the SoC. Support for the connector-related operations on the
+ * running platform is reported through the &drm_bridge.ops flags. Bridge
+ * drivers shall detect which operations they can support on the platform
+ * (usually this information is provided by ACPI or DT), and set the
+ * &drm_bridge.ops flags for all supported operations. A flag shall only be
+ * set if the corresponding &drm_bridge_funcs operation is implemented, but
+ * an implemented operation doesn't necessarily imply that the corresponding
+ * flag will be set. Display drivers shall use the &drm_bridge.ops flags to
+ * decide which bridge to delegate a connector operation to. This mechanism
+ * allows providing a single static const &drm_bridge_funcs instance in
+ * bridge drivers, improving security by storing function pointers in
+ * read-only memory.
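+ *
+ * A (hypothetical) bridge driver probe could, for instance, report its
+ * platform-dependent capabilities as::
+ *
+ *	bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_MODES;
+ *	if (mybr->hpd_gpio)
+ *		bridge->ops |= DRM_BRIDGE_OP_HPD;
+ *	if (mybr->ddc)
+ *		bridge->ops |= DRM_BRIDGE_OP_EDID;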
*
- * For detailed specification of the bridge callbacks see &drm_bridge_funcs.
+ * In order to ease transition, bridge drivers may support both the old and
+ * new models by making connector creation optional and implementing the
+ * connector-related bridge operations. Connector creation is then controlled
+ * by the flags argument to the drm_bridge_attach() function. Display drivers
+ * that support the new model and create connectors themselves shall set the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag, and bridge drivers shall then skip
+ * connector creation. For intermediate bridges in the chain, the flag shall
+ * be passed to the drm_bridge_attach() call for the downstream bridge.
+ * Bridge drivers that implement only the new model shall return an error
+ * from their &drm_bridge_funcs.attach handler when the
+ * %DRM_BRIDGE_ATTACH_NO_CONNECTOR flag is not set. New display drivers
+ * should use the new model, and convert the bridge drivers they use if
+ * needed, in order to gradually transition to the new model.
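+ *
+ * A bridge driver that implements only the new model can, as a sketch,
+ * reject legacy attachment at the top of its attach handler::
+ *
+ *	if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) {
+ *		DRM_ERROR("Fix bridge driver to make connector optional!");
+ *		return -EINVAL;
+ *	}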
*/
/**
@@ -409,10 +572,19 @@ void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_disable)
- iter->funcs->atomic_disable(iter, old_state);
- else if (iter->funcs->disable)
+ if (iter->funcs->atomic_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_disable(iter, old_bridge_state);
+ } else if (iter->funcs->disable) {
iter->funcs->disable(iter);
+ }
if (iter == bridge)
break;
@@ -443,10 +615,20 @@ void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_post_disable)
- bridge->funcs->atomic_post_disable(bridge, old_state);
- else if (bridge->funcs->post_disable)
+ if (bridge->funcs->atomic_post_disable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_post_disable(bridge,
+ old_bridge_state);
+ } else if (bridge->funcs->post_disable) {
bridge->funcs->post_disable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_post_disable);
@@ -475,10 +657,19 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
- if (iter->funcs->atomic_pre_enable)
- iter->funcs->atomic_pre_enable(iter, old_state);
- else if (iter->funcs->pre_enable)
+ if (iter->funcs->atomic_pre_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ iter);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ iter->funcs->atomic_pre_enable(iter, old_bridge_state);
+ } else if (iter->funcs->pre_enable) {
iter->funcs->pre_enable(iter);
+ }
if (iter == bridge)
break;
@@ -508,14 +699,498 @@ void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
encoder = bridge->encoder;
list_for_each_entry_from(bridge, &encoder->bridge_chain, chain_node) {
- if (bridge->funcs->atomic_enable)
- bridge->funcs->atomic_enable(bridge, old_state);
- else if (bridge->funcs->enable)
+ if (bridge->funcs->atomic_enable) {
+ struct drm_bridge_state *old_bridge_state;
+
+ old_bridge_state =
+ drm_atomic_get_old_bridge_state(old_state,
+ bridge);
+ if (WARN_ON(!old_bridge_state))
+ return;
+
+ bridge->funcs->atomic_enable(bridge, old_bridge_state);
+ } else if (bridge->funcs->enable) {
bridge->funcs->enable(bridge);
+ }
}
}
EXPORT_SYMBOL(drm_atomic_bridge_chain_enable);
+static int drm_atomic_bridge_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ if (bridge->funcs->atomic_check) {
+ struct drm_bridge_state *bridge_state;
+ int ret;
+
+ bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ bridge);
+ if (WARN_ON(!bridge_state))
+ return -EINVAL;
+
+ ret = bridge->funcs->atomic_check(bridge, bridge_state,
+ crtc_state, conn_state);
+ if (ret)
+ return ret;
+ } else if (bridge->funcs->mode_fixup) {
+ if (!bridge->funcs->mode_fixup(bridge, &crtc_state->mode,
+ &crtc_state->adjusted_mode))
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int select_bus_fmt_recursive(struct drm_bridge *first_bridge,
+ struct drm_bridge *cur_bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 out_bus_fmt)
+{
+ struct drm_bridge_state *cur_state;
+ unsigned int num_in_bus_fmts, i;
+ struct drm_bridge *prev_bridge;
+ u32 *in_bus_fmts;
+ int ret;
+
+ prev_bridge = drm_bridge_get_prev_bridge(cur_bridge);
+ cur_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ cur_bridge);
+
+ /*
+ * If bus format negotiation is not supported by this bridge, let's
+ * pass MEDIA_BUS_FMT_FIXED to the previous bridge in the chain and
+ * hope that it can handle this situation gracefully (by providing
+ * appropriate default values).
+ */
+ if (!cur_bridge->funcs->atomic_get_input_bus_fmts) {
+ if (cur_bridge != first_bridge) {
+ ret = select_bus_fmt_recursive(first_bridge,
+ prev_bridge, crtc_state,
+ conn_state,
+ MEDIA_BUS_FMT_FIXED);
+ if (ret)
+ return ret;
+ }
+
+ /*
+ * Driver does not implement the atomic state hooks, but that's
+ * fine, as long as it does not access the bridge state.
+ */
+ if (cur_state) {
+ cur_state->input_bus_cfg.format = MEDIA_BUS_FMT_FIXED;
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ return 0;
+ }
+
+ /*
+ * If the driver implements ->atomic_get_input_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!cur_state))
+ return -EINVAL;
+
+ in_bus_fmts = cur_bridge->funcs->atomic_get_input_bus_fmts(cur_bridge,
+ cur_state,
+ crtc_state,
+ conn_state,
+ out_bus_fmt,
+ &num_in_bus_fmts);
+ if (!num_in_bus_fmts)
+ return -ENOTSUPP;
+ else if (!in_bus_fmts)
+ return -ENOMEM;
+
+ if (first_bridge == cur_bridge) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[0];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ kfree(in_bus_fmts);
+ return 0;
+ }
+
+ for (i = 0; i < num_in_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(first_bridge, prev_bridge,
+ crtc_state, conn_state,
+ in_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ if (!ret) {
+ cur_state->input_bus_cfg.format = in_bus_fmts[i];
+ cur_state->output_bus_cfg.format = out_bus_fmt;
+ }
+
+ kfree(in_bus_fmts);
+ return ret;
+}
+
+/*
+ * This function is called by &drm_atomic_bridge_chain_check() just before
+ * calling &drm_bridge_funcs.atomic_check() on all elements of the chain.
+ * It performs bus format negotiation between bridge elements. The negotiation
+ * happens in reverse order, starting from the last element in the chain up to
+ * @bridge.
+ *
+ * Negotiation starts by retrieving supported output bus formats on the last
+ * bridge element and testing them one by one. The test is recursive, meaning
+ * that for each tested output format, the whole chain will be walked backward,
+ * and each element will have to choose an input bus format that can be
+ * transcoded to the requested output format. When a bridge element does not
+ * support transcoding into a specific output format, -ENOTSUPP is returned and
+ * the next bridge element will have to try a different format. If none of the
+ * combinations work, -ENOTSUPP is returned and the atomic modeset will fail.
+ *
+ * This implementation relies on
+ * &drm_bridge_funcs.atomic_get_output_bus_fmts() and
+ * &drm_bridge_funcs.atomic_get_input_bus_fmts() to gather supported
+ * input/output formats.
+ *
+ * When &drm_bridge_funcs.atomic_get_output_bus_fmts() is not implemented by
+ * the last element of the chain, &drm_atomic_bridge_chain_select_bus_fmts()
+ * tries a single format: &drm_connector.display_info.bus_formats[0] if
+ * available, MEDIA_BUS_FMT_FIXED otherwise.
+ *
+ * When &drm_bridge_funcs.atomic_get_input_bus_fmts() is not implemented,
+ * &drm_atomic_bridge_chain_select_bus_fmts() skips the negotiation on the
+ * bridge element that lacks this hook and asks the previous element in the
+ * chain to try MEDIA_BUS_FMT_FIXED. It's up to bridge drivers to decide what
+ * to do in that case (fail if they want to enforce bus format negotiation, or
+ * provide a reasonable default if they need to support pipelines where not
+ * all elements support bus format negotiation).
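+ *
+ * As an illustration, a (hypothetical) bridge that only accepts RGB888 on
+ * its input could implement the hook along these lines (validation of the
+ * requested output format is skipped for brevity):
+ *
+ *	static u32 *
+ *	my_get_input_bus_fmts(struct drm_bridge *bridge,
+ *			      struct drm_bridge_state *bridge_state,
+ *			      struct drm_crtc_state *crtc_state,
+ *			      struct drm_connector_state *conn_state,
+ *			      u32 output_fmt, unsigned int *num_input_fmts)
+ *	{
+ *		u32 *input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ *
+ *		if (!input_fmts) {
+ *			*num_input_fmts = 0;
+ *			return NULL;
+ *		}
+ *
+ *		input_fmts[0] = MEDIA_BUS_FMT_RGB888_1X24;
+ *		*num_input_fmts = 1;
+ *		return input_fmts;
+ *	}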
+ */
+static int
+drm_atomic_bridge_chain_select_bus_fmts(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder = bridge->encoder;
+ struct drm_bridge_state *last_bridge_state;
+ unsigned int i, num_out_bus_fmts;
+ struct drm_bridge *last_bridge;
+ u32 *out_bus_fmts;
+ int ret = 0;
+
+ last_bridge = list_last_entry(&encoder->bridge_chain,
+ struct drm_bridge, chain_node);
+ last_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ last_bridge);
+
+ if (last_bridge->funcs->atomic_get_output_bus_fmts) {
+ const struct drm_bridge_funcs *funcs = last_bridge->funcs;
+
+ /*
+ * If the driver implements ->atomic_get_output_bus_fmts() it
+ * should also implement the atomic state hooks.
+ */
+ if (WARN_ON(!last_bridge_state))
+ return -EINVAL;
+
+ out_bus_fmts = funcs->atomic_get_output_bus_fmts(last_bridge,
+ last_bridge_state,
+ crtc_state,
+ conn_state,
+ &num_out_bus_fmts);
+ if (!num_out_bus_fmts)
+ return -ENOTSUPP;
+ else if (!out_bus_fmts)
+ return -ENOMEM;
+ } else {
+ num_out_bus_fmts = 1;
+ out_bus_fmts = kmalloc(sizeof(*out_bus_fmts), GFP_KERNEL);
+ if (!out_bus_fmts)
+ return -ENOMEM;
+
+ if (conn->display_info.num_bus_formats &&
+ conn->display_info.bus_formats)
+ out_bus_fmts[0] = conn->display_info.bus_formats[0];
+ else
+ out_bus_fmts[0] = MEDIA_BUS_FMT_FIXED;
+ }
+
+ for (i = 0; i < num_out_bus_fmts; i++) {
+ ret = select_bus_fmt_recursive(bridge, last_bridge, crtc_state,
+ conn_state, out_bus_fmts[i]);
+ if (ret != -ENOTSUPP)
+ break;
+ }
+
+ kfree(out_bus_fmts);
+
+ return ret;
+}
+
+static void
+drm_atomic_bridge_propagate_bus_flags(struct drm_bridge *bridge,
+ struct drm_connector *conn,
+ struct drm_atomic_state *state)
+{
+ struct drm_bridge_state *bridge_state, *next_bridge_state;
+ struct drm_bridge *next_bridge;
+ u32 output_flags = 0;
+
+ bridge_state = drm_atomic_get_new_bridge_state(state, bridge);
+
+ /* No bridge state attached to this bridge => nothing to propagate. */
+ if (!bridge_state)
+ return;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+
+ /*
+ * Let's try to apply the most common case here, that is, propagate
+ * display_info flags for the last bridge, and propagate the input
+ * flags of the next bridge element to the output end of the current
+ * bridge when the bridge is not the last one.
+ * There are exceptions to this rule, like when signal inversion is
+ * happening at the board level, but that's something drivers can deal
+ * with from their &drm_bridge_funcs.atomic_check() implementation by
+ * simply overriding the flags value we've set here.
+ */
+ if (!next_bridge) {
+ output_flags = conn->display_info.bus_flags;
+ } else {
+ next_bridge_state = drm_atomic_get_new_bridge_state(state,
+ next_bridge);
+ /*
+		 * No bridge state attached to the next bridge, just leave the
+		 * flags set to 0.
+ */
+ if (next_bridge_state)
+ output_flags = next_bridge_state->input_bus_cfg.flags;
+ }
+
+ bridge_state->output_bus_cfg.flags = output_flags;
+
+ /*
+	 * Propagate the output flags to the input end of the bridge. Again, it's
+ * not necessarily what all bridges want, but that's what most of them
+ * do, and by doing that by default we avoid forcing drivers to
+ * duplicate the "dummy propagation" logic.
+ */
+ bridge_state->input_bus_cfg.flags = output_flags;
+}
+
+/**
+ * drm_atomic_bridge_chain_check() - Do an atomic check on the bridge chain
+ * @bridge: bridge control structure
+ * @crtc_state: new CRTC state
+ * @conn_state: new connector state
+ *
+ * First triggers a bus format negotiation, then calls the
+ * &drm_bridge_funcs.atomic_check() (falling back on
+ * &drm_bridge_funcs.mode_fixup()) op for all the bridges in the encoder chain,
+ * starting from the last bridge and moving to the first. These operations are
+ * called before &drm_encoder_helper_funcs.atomic_check().
+ *
+ * RETURNS:
+ * 0 on success, a negative error code on failure
+ */
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *conn = conn_state->connector;
+ struct drm_encoder *encoder;
+ struct drm_bridge *iter;
+ int ret;
+
+ if (!bridge)
+ return 0;
+
+ ret = drm_atomic_bridge_chain_select_bus_fmts(bridge, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+
+ encoder = bridge->encoder;
+ list_for_each_entry_reverse(iter, &encoder->bridge_chain, chain_node) {
+ int ret;
+
+ /*
+ * Bus flags are propagated by default. If a bridge needs to
+ * tweak the input bus flags for any reason, it should happen
+ * in its &drm_bridge_funcs.atomic_check() implementation such
+ * that preceding bridges in the chain can propagate the new
+ * bus flags.
+ */
+ drm_atomic_bridge_propagate_bus_flags(iter, conn,
+ crtc_state->state);
+
+ ret = drm_atomic_bridge_check(iter, crtc_state, conn_state);
+ if (ret)
+ return ret;
+
+ if (iter == bridge)
+ break;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_atomic_bridge_chain_check);
+
+/**
+ * drm_bridge_detect - check if anything is attached to the bridge output
+ * @bridge: bridge control structure
+ *
+ * If the bridge supports output detection, as reported by the
+ * DRM_BRIDGE_OP_DETECT bridge ops flag, call &drm_bridge_funcs.detect for the
+ * bridge and return the connection status. Otherwise return
+ * connector_status_unknown.
+ *
+ * RETURNS:
+ * The detection status on success, or connector_status_unknown if the bridge
+ * doesn't support output detection.
+ */
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_DETECT))
+ return connector_status_unknown;
+
+ return bridge->funcs->detect(bridge);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_detect);
+
+/**
+ * drm_bridge_get_modes - fill all modes currently valid for the sink into
+ * the connector
+ * @bridge: bridge control structure
+ * @connector: the connector to fill with modes
+ *
+ * If the bridge supports output modes retrieval, as reported by the
+ * DRM_BRIDGE_OP_MODES bridge ops flag, call &drm_bridge_funcs.get_modes to
+ * fill the connector with all valid modes and return the number of modes
+ * added. Otherwise return 0.
+ *
+ * RETURNS:
+ * The number of modes added to the connector.
+ */
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_MODES))
+ return 0;
+
+ return bridge->funcs->get_modes(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_modes);
+
+/**
+ * drm_bridge_get_edid - get the EDID data of the connected display
+ * @bridge: bridge control structure
+ * @connector: the connector to read EDID for
+ *
+ * If the bridge supports output EDID retrieval, as reported by the
+ * DRM_BRIDGE_OP_EDID bridge ops flag, call &drm_bridge_funcs.get_edid to
+ * get the EDID and return it. Otherwise return ERR_PTR(-ENOTSUPP).
+ *
+ * RETURNS:
+ * The retrieved EDID on success, or an error pointer otherwise.
+ */
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_EDID))
+ return ERR_PTR(-ENOTSUPP);
+
+ return bridge->funcs->get_edid(bridge, connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_get_edid);
+
+/**
+ * drm_bridge_hpd_enable - enable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ * @cb: hot-plug detection callback
+ * @data: data to be passed to the hot-plug detection callback
+ *
+ * Call &drm_bridge_funcs.hpd_enable if implemented and register the given @cb
+ * and @data as hot plug notification callback. From now on the @cb will be
+ * called with @data when an output status change is detected by the bridge,
+ * until hot plug notification gets disabled with drm_bridge_hpd_disable().
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ *
+ * Only one hot plug detection callback can be registered at a time; it is an
+ * error to call this function when hot plug detection is already enabled for
+ * the bridge.
+ */
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+
+ if (WARN(bridge->hpd_cb, "Hot plug detection already enabled\n"))
+ goto unlock;
+
+ bridge->hpd_cb = cb;
+ bridge->hpd_data = data;
+
+ if (bridge->funcs->hpd_enable)
+ bridge->funcs->hpd_enable(bridge);
+
+unlock:
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_enable);
+
+/**
+ * drm_bridge_hpd_disable - disable hot plug detection for the bridge
+ * @bridge: bridge control structure
+ *
+ * Call &drm_bridge_funcs.hpd_disable if implemented and unregister the hot
+ * plug detection callback previously registered with drm_bridge_hpd_enable().
+ * Once this function returns the callback will not be called by the bridge
+ * when an output status change occurs.
+ *
+ * Hot plug detection is supported only if the DRM_BRIDGE_OP_HPD flag is set in
+ * bridge->ops. This function shall not be called when the flag is not set.
+ */
+void drm_bridge_hpd_disable(struct drm_bridge *bridge)
+{
+ if (!(bridge->ops & DRM_BRIDGE_OP_HPD))
+ return;
+
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->funcs->hpd_disable)
+ bridge->funcs->hpd_disable(bridge);
+
+ bridge->hpd_cb = NULL;
+ bridge->hpd_data = NULL;
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_disable);
+
+/**
+ * drm_bridge_hpd_notify - notify hot plug detection events
+ * @bridge: bridge control structure
+ * @status: output connection status
+ *
+ * Bridge drivers shall call this function to report hot plug events when they
+ * detect a change in the output status, when hot plug detection has been
+ * enabled by drm_bridge_hpd_enable().
+ *
+ * This function shall be called in a context that can sleep.
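+ *
+ * As a sketch, a (hypothetical) bridge driver could report status changes
+ * from a threaded interrupt handler::
+ *
+ *	static irqreturn_t my_bridge_hpd_irq(int irq, void *arg)
+ *	{
+ *		struct my_bridge *mybr = arg;
+ *
+ *		drm_bridge_hpd_notify(&mybr->bridge,
+ *				      my_bridge_read_status(mybr));
+ *		return IRQ_HANDLED;
+ *	}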
+ */
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
+{
+ mutex_lock(&bridge->hpd_mutex);
+ if (bridge->hpd_cb)
+ bridge->hpd_cb(bridge->hpd_data, status);
+ mutex_unlock(&bridge->hpd_mutex);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_hpd_notify);
+
#ifdef CONFIG_OF
/**
* of_drm_find_bridge - find the bridge corresponding to the device node in
diff --git a/drivers/gpu/drm/drm_bridge_connector.c b/drivers/gpu/drm/drm_bridge_connector.c
new file mode 100644
index 000000000000..c6994fe673f3
--- /dev/null
+++ b/drivers/gpu/drm/drm_bridge_connector.c
@@ -0,0 +1,379 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Laurent Pinchart <[email protected]>
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+
+#include <drm/drm_atomic_state_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
+#include <drm/drm_connector.h>
+#include <drm/drm_device.h>
+#include <drm/drm_edid.h>
+#include <drm/drm_modeset_helper_vtables.h>
+#include <drm/drm_probe_helper.h>
+
+/**
+ * DOC: overview
+ *
+ * The DRM bridge connector helper object provides a DRM connector
+ * implementation that wraps a chain of &struct drm_bridge. The connector
+ * operations are fully implemented based on the operations of the bridges in
+ * the chain, and don't require any intervention from the display controller
+ * driver at runtime.
+ *
+ * To use the helper, display controller drivers create a bridge connector with
+ * a call to drm_bridge_connector_init(). This associates the newly created
+ * connector with the chain of bridges passed to the function and registers it
+ * with the DRM device. At that point the connector becomes fully usable, no
+ * further operation is needed.
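+ *
+ * A minimal sketch, assuming an encoder whose bridge chain has already been
+ * attached with the DRM_BRIDGE_ATTACH_NO_CONNECTOR flag::
+ *
+ *	struct drm_connector *connector;
+ *
+ *	connector = drm_bridge_connector_init(drm, encoder);
+ *	if (IS_ERR(connector))
+ *		return PTR_ERR(connector);
+ *
+ *	drm_connector_attach_encoder(connector, encoder);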
+ *
+ * The DRM bridge connector operations are implemented based on the operations
+ * provided by the bridges in the chain. Each connector operation is delegated
+ * to the bridge closest to the connector (at the end of the chain) that
+ * provides the relevant functionality.
+ *
+ * To make use of this helper, all bridges in the chain shall report bridge
+ * operation flags (&drm_bridge->ops) and bridge output type
+ * (&drm_bridge->type), and support the DRM_BRIDGE_ATTACH_NO_CONNECTOR attach
+ * flag (none of the bridges shall create a DRM connector directly).
+ */
+
+/**
+ * struct drm_bridge_connector - A connector backed by a chain of bridges
+ */
+struct drm_bridge_connector {
+ /**
+ * @base: The base DRM connector
+ */
+ struct drm_connector base;
+ /**
+ * @encoder:
+ *
+ * The encoder at the start of the bridges chain.
+ */
+ struct drm_encoder *encoder;
+ /**
+ * @bridge_edid:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * EDID read support, if any (see &DRM_BRIDGE_OP_EDID).
+ */
+ struct drm_bridge *bridge_edid;
+ /**
+ * @bridge_hpd:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * hot-plug detection notification, if any (see &DRM_BRIDGE_OP_HPD).
+ */
+ struct drm_bridge *bridge_hpd;
+ /**
+ * @bridge_detect:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector detection, if any (see &DRM_BRIDGE_OP_DETECT).
+ */
+ struct drm_bridge *bridge_detect;
+ /**
+ * @bridge_modes:
+ *
+ * The last bridge in the chain (closest to the connector) that provides
+ * connector modes detection, if any (see &DRM_BRIDGE_OP_MODES).
+ */
+ struct drm_bridge *bridge_modes;
+};
+
+#define to_drm_bridge_connector(x) \
+ container_of(x, struct drm_bridge_connector, base)
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Hot-Plug Handling
+ */
+
+static void drm_bridge_connector_hpd_notify(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /* Notify all bridges in the pipeline of hotplug events. */
+ drm_for_each_bridge_in_chain(bridge_connector->encoder, bridge) {
+ if (bridge->funcs->hpd_notify)
+ bridge->funcs->hpd_notify(bridge, status);
+ }
+}
+
+static void drm_bridge_connector_hpd_cb(void *cb_data,
+ enum drm_connector_status status)
+{
+ struct drm_bridge_connector *drm_bridge_connector = cb_data;
+ struct drm_connector *connector = &drm_bridge_connector->base;
+ struct drm_device *dev = connector->dev;
+ enum drm_connector_status old_status;
+
+ mutex_lock(&dev->mode_config.mutex);
+ old_status = connector->status;
+ connector->status = status;
+ mutex_unlock(&dev->mode_config.mutex);
+
+ if (old_status == status)
+ return;
+
+ drm_bridge_connector_hpd_notify(connector, status);
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+/**
+ * drm_bridge_connector_enable_hpd - Enable hot-plug detection for the connector
+ * @connector: The DRM bridge connector
+ *
+ * This function enables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their resume handler.
+ */
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_enable(hpd, drm_bridge_connector_hpd_cb,
+ bridge_connector);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_enable_hpd);
+
+/**
+ * drm_bridge_connector_disable_hpd - Disable hot-plug detection for the
+ * connector
+ * @connector: The DRM bridge connector
+ *
+ * This function disables hot-plug detection for the given bridge connector.
+ * This is typically used by display drivers in their suspend handler.
+ */
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ if (hpd)
+ drm_bridge_hpd_disable(hpd);
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_disable_hpd);
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Functions
+ */
+
+static enum drm_connector_status
+drm_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *detect = bridge_connector->bridge_detect;
+ enum drm_connector_status status;
+
+ if (detect) {
+ status = detect->funcs->detect(detect);
+
+ drm_bridge_connector_hpd_notify(connector, status);
+ } else {
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DPI:
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_DSI:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
+ }
+
+ return status;
+}
+
+static void drm_bridge_connector_destroy(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+
+ if (bridge_connector->bridge_hpd) {
+ struct drm_bridge *hpd = bridge_connector->bridge_hpd;
+
+ drm_bridge_hpd_disable(hpd);
+ }
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+
+ kfree(bridge_connector);
+}
+
+static const struct drm_connector_funcs drm_bridge_connector_funcs = {
+ .reset = drm_atomic_helper_connector_reset,
+ .detect = drm_bridge_connector_detect,
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_bridge_connector_destroy,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Helper Functions
+ */
+
+static int drm_bridge_connector_get_modes_edid(struct drm_connector *connector,
+ struct drm_bridge *bridge)
+{
+ enum drm_connector_status status;
+ struct edid *edid;
+ int n;
+
+ status = drm_bridge_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
+
+ edid = bridge->funcs->get_edid(bridge, connector);
+ if (!edid || !drm_edid_is_valid(edid)) {
+ kfree(edid);
+ goto no_edid;
+ }
+
+ drm_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
+
+ kfree(edid);
+ return n;
+
+no_edid:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
+
+static int drm_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct drm_bridge_connector *bridge_connector =
+ to_drm_bridge_connector(connector);
+ struct drm_bridge *bridge;
+
+ /*
+	 * If the display exposes EDID, then we parse that in the normal way to
+	 * build a table of supported modes.
+ */
+ bridge = bridge_connector->bridge_edid;
+ if (bridge)
+ return drm_bridge_connector_get_modes_edid(connector, bridge);
+
+ /*
+ * Otherwise if the display pipeline reports modes (e.g. with a fixed
+ * resolution panel or an analog TV output), query it.
+ */
+ bridge = bridge_connector->bridge_modes;
+ if (bridge)
+ return bridge->funcs->get_modes(bridge, connector);
+
+ /*
+ * We can't retrieve modes, which can happen for instance for a DVI or
+ * VGA output with the DDC bus unconnected. The KMS core will add the
+ * default modes.
+ */
+ return 0;
+}
+
+static const struct drm_connector_helper_funcs drm_bridge_connector_helper_funcs = {
+ .get_modes = drm_bridge_connector_get_modes,
+ /* No need for .mode_valid(), the bridges are checked by the core. */
+};
+
+/* -----------------------------------------------------------------------------
+ * Bridge Connector Initialisation
+ */
+
+/**
+ * drm_bridge_connector_init - Initialise a connector for a chain of bridges
+ * @drm: the DRM device
+ * @encoder: the encoder where the bridge chain starts
+ *
+ * Allocate, initialise and register a &drm_bridge_connector with the @drm
+ * device. The connector is associated with a chain of bridges that starts at
+ * the @encoder. All bridges in the chain shall report bridge operation flags
+ * (&drm_bridge->ops) and bridge output type (&drm_bridge->type), and none of
+ * them may create a DRM connector directly.
+ *
+ * Returns a pointer to the new connector on success, or an error pointer
+ * otherwise.
+ */
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder)
+{
+ struct drm_bridge_connector *bridge_connector;
+ struct drm_connector *connector;
+ struct i2c_adapter *ddc = NULL;
+ struct drm_bridge *bridge;
+ int connector_type;
+
+ bridge_connector = kzalloc(sizeof(*bridge_connector), GFP_KERNEL);
+ if (!bridge_connector)
+ return ERR_PTR(-ENOMEM);
+
+ bridge_connector->encoder = encoder;
+
+ /*
+ * TODO: Handle doublescan_allowed, stereo_allowed and
+ * ycbcr_420_allowed.
+ */
+ connector = &bridge_connector->base;
+ connector->interlace_allowed = true;
+
+ /*
+ * Initialise connector status handling. First locate the furthest
+ * bridges in the pipeline that support HPD and output detection. Then
+ * initialise the connector polling mode, using HPD if available and
+ * falling back to polling if supported. If neither HPD nor output
+	 * detection is available, we don't support hotplug detection at all.
+ */
+ connector_type = DRM_MODE_CONNECTOR_Unknown;
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->interlace_allowed)
+ connector->interlace_allowed = false;
+
+ if (bridge->ops & DRM_BRIDGE_OP_EDID)
+ bridge_connector->bridge_edid = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_HPD)
+ bridge_connector->bridge_hpd = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_DETECT)
+ bridge_connector->bridge_detect = bridge;
+ if (bridge->ops & DRM_BRIDGE_OP_MODES)
+ bridge_connector->bridge_modes = bridge;
+
+ if (!drm_bridge_get_next_bridge(bridge))
+ connector_type = bridge->type;
+
+ if (bridge->ddc)
+ ddc = bridge->ddc;
+ }
+
+ if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
+ kfree(bridge_connector);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drm_connector_init_with_ddc(drm, connector, &drm_bridge_connector_funcs,
+ connector_type, ddc);
+ drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
+
+ if (bridge_connector->bridge_hpd)
+ connector->polled = DRM_CONNECTOR_POLL_HPD;
+ else if (bridge_connector->bridge_detect)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT
+ | DRM_CONNECTOR_POLL_DISCONNECT;
+
+ return connector;
+}
+EXPORT_SYMBOL_GPL(drm_bridge_connector_init);
diff --git a/drivers/gpu/drm/drm_bufs.c b/drivers/gpu/drm/drm_bufs.c
index 8ce9d73fab4f..dcabf5698333 100644
--- a/drivers/gpu/drm/drm_bufs.c
+++ b/drivers/gpu/drm/drm_bufs.c
@@ -134,7 +134,7 @@ static int drm_map_handle(struct drm_device *dev, struct drm_hash_item *hash,
shift, add);
}
-/**
+/*
* Core function to create a range of memory available for mapping by a
* non-root process.
*
@@ -149,7 +149,6 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
{
struct drm_local_map *map;
struct drm_map_list *list;
- drm_dma_handle_t *dmah;
unsigned long user_token;
int ret;
@@ -324,14 +323,14 @@ static int drm_addmap_core(struct drm_device *dev, resource_size_t offset,
* As we're limiting the address to 2^32-1 (or less),
* casting it down to 32 bits is no problem, but we
* need to point to a 64bit variable first. */
- dmah = drm_pci_alloc(dev, map->size, map->size);
- if (!dmah) {
+ map->handle = dma_alloc_coherent(&dev->pdev->dev,
+ map->size,
+ &map->offset,
+ GFP_KERNEL);
+ if (!map->handle) {
kfree(map);
return -ENOMEM;
}
- map->handle = dmah->vaddr;
- map->offset = (unsigned long)dmah->busaddr;
- kfree(dmah);
break;
default:
kfree(map);
@@ -399,7 +398,7 @@ struct drm_local_map *drm_legacy_findmap(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_legacy_findmap);
-/**
+/*
* Ioctl to specify a range of memory that is available for mapping by a
* non-root process.
*
@@ -500,7 +499,7 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove a map private from list and deallocate resources if the mapping
* isn't in use.
*
@@ -513,7 +512,6 @@ int drm_legacy_getmap_ioctl(struct drm_device *dev, void *data,
int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
{
struct drm_map_list *r_list = NULL, *list_t;
- drm_dma_handle_t dmah;
int found = 0;
struct drm_master *master;
@@ -554,10 +552,10 @@ int drm_legacy_rmmap_locked(struct drm_device *dev, struct drm_local_map *map)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -661,7 +659,7 @@ int drm_legacy_rmmap_ioctl(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Cleanup after an error on one of the addbufs() functions.
*
* \param dev DRM device.
@@ -696,7 +694,7 @@ static void drm_cleanup_buf_error(struct drm_device *dev,
}
#if IS_ENABLED(CONFIG_AGP)
-/**
+/*
* Add AGP buffers for DMA transfers.
*
* \param dev struct drm_device to which the buffers are to be added.
@@ -1232,7 +1230,7 @@ static int drm_legacy_addbufs_sg(struct drm_device *dev,
return 0;
}
-/**
+/*
* Add buffers for DMA transfers (ioctl).
*
* \param inode device inode.
@@ -1273,7 +1271,7 @@ int drm_legacy_addbufs(struct drm_device *dev, void *data,
return ret;
}
-/**
+/*
* Get information about the buffer mappings.
*
 * This was originally meant for debugging purposes, or by a sophisticated
@@ -1364,7 +1362,7 @@ int drm_legacy_infobufs(struct drm_device *dev, void *data,
return __drm_legacy_infobufs(dev, data, &request->count, copy_one_buf);
}
-/**
+/*
* Specifies a low and high water mark for buffer allocation
*
* \param inode device inode.
@@ -1413,7 +1411,7 @@ int drm_legacy_markbufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unreserve the buffers in list, previously reserved using drmDMA.
*
* \param inode device inode.
@@ -1465,7 +1463,7 @@ int drm_legacy_freebufs(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Maps all of the DMA buffers into client-virtual space (ioctl).
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_client.c b/drivers/gpu/drm/drm_client.c
index b031b45aa8ef..6b0c6ef8b9b3 100644
--- a/drivers/gpu/drm/drm_client.c
+++ b/drivers/gpu/drm/drm_client.c
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright 2018 Noralf Trønnes
*/
diff --git a/drivers/gpu/drm/drm_client_modeset.c b/drivers/gpu/drm/drm_client_modeset.c
index 6d4a29e99ae2..7443114bd713 100644
--- a/drivers/gpu/drm/drm_client_modeset.c
+++ b/drivers/gpu/drm/drm_client_modeset.c
@@ -951,7 +951,8 @@ bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation)
* depending on the hardware this may require the framebuffer
* to be in a specific tiling format.
*/
- if ((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180 ||
+ if (((*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_0 &&
+ (*rotation & DRM_MODE_ROTATE_MASK) != DRM_MODE_ROTATE_180) ||
!plane->rotation_property)
return false;
@@ -1094,15 +1095,17 @@ out:
}
/**
- * drm_client_modeset_commit_force() - Force commit CRTC configuration
+ * drm_client_modeset_commit_locked() - Force commit CRTC configuration
* @client: DRM client
*
- * Commit modeset configuration to crtcs without checking if there is a DRM master.
+ * Commit modeset configuration to crtcs without checking if there is a DRM
+ * master. The assumption is that the caller already holds an internal DRM
+ * master reference acquired with drm_master_internal_acquire().
*
* Returns:
* Zero on success or negative error code on failure.
*/
-int drm_client_modeset_commit_force(struct drm_client_dev *client)
+int drm_client_modeset_commit_locked(struct drm_client_dev *client)
{
struct drm_device *dev = client->dev;
int ret;
@@ -1116,7 +1119,7 @@ int drm_client_modeset_commit_force(struct drm_client_dev *client)
return ret;
}
-EXPORT_SYMBOL(drm_client_modeset_commit_force);
+EXPORT_SYMBOL(drm_client_modeset_commit_locked);
/**
* drm_client_modeset_commit() - Commit CRTC configuration
@@ -1135,7 +1138,7 @@ int drm_client_modeset_commit(struct drm_client_dev *client)
if (!drm_master_internal_acquire(dev))
return -EBUSY;
- ret = drm_client_modeset_commit_force(client);
+ ret = drm_client_modeset_commit_locked(client);
drm_master_internal_release(dev);
diff --git a/drivers/gpu/drm/drm_connector.c b/drivers/gpu/drm/drm_connector.c
index 2166000ed057..644f0ad10671 100644
--- a/drivers/gpu/drm/drm_connector.c
+++ b/drivers/gpu/drm/drm_connector.c
@@ -112,6 +112,21 @@ void drm_connector_ida_destroy(void)
}
/**
+ * drm_get_connector_type_name - return a string for connector type
+ * @type: The connector type (DRM_MODE_CONNECTOR_*)
+ *
+ * Returns: the name of the connector type, or NULL if the type is not valid.
+ */
+const char *drm_get_connector_type_name(unsigned int type)
+{
+ if (type < ARRAY_SIZE(drm_connector_enum_list))
+ return drm_connector_enum_list[type].name;
+
+ return NULL;
+}
+EXPORT_SYMBOL(drm_get_connector_type_name);
+
+/**
* drm_connector_get_cmdline_mode - reads the user's cmdline mode
 * @connector: connector to query
*
@@ -140,6 +155,13 @@ static void drm_connector_get_cmdline_mode(struct drm_connector *connector)
connector->force = mode->force;
}
+ if (mode->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN) {
+ DRM_INFO("cmdline forces connector %s panel_orientation to %d\n",
+ connector->name, mode->panel_orientation);
+ drm_connector_set_panel_orientation(connector,
+ mode->panel_orientation);
+ }
+
DRM_DEBUG_KMS("cmdline mode for connector %s %s %dx%d@%dHz%s%s%s\n",
connector->name, mode->name,
mode->xres, mode->yres,
@@ -1139,7 +1161,8 @@ static const struct drm_prop_enum_list dp_colorspaces[] = {
* coordinates, so if userspace rotates the picture to adjust for
* the orientation it must also apply the same transformation to the
* touchscreen input coordinates. This property is initialized by calling
- * drm_connector_init_panel_orientation_property().
+ * drm_connector_set_panel_orientation() or
+ * drm_connector_set_panel_orientation_with_quirk()
*
* scaling mode:
* This property defines how a non-native mode is upscaled to the native
@@ -2046,38 +2069,41 @@ void drm_connector_set_vrr_capable_property(
EXPORT_SYMBOL(drm_connector_set_vrr_capable_property);
/**
- * drm_connector_init_panel_orientation_property -
- * initialize the connecters panel_orientation property
- * @connector: connector for which to init the panel-orientation property.
- * @width: width in pixels of the panel, used for panel quirk detection
- * @height: height in pixels of the panel, used for panel quirk detection
+ * drm_connector_set_panel_orientation - sets the connector's panel_orientation
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ *
+ * This function sets the connector's panel_orientation and attaches
+ * a "panel orientation" property to the connector.
*
- * This function should only be called for built-in panels, after setting
- * connector->display_info.panel_orientation first (if known).
+ * Calling this function on a connector where the panel_orientation has
+ * already been set is a no-op (e.g. the orientation has been overridden with
+ * a kernel commandline option).
*
- * This function will check for platform specific (e.g. DMI based) quirks
- * overriding display_info.panel_orientation first, then if panel_orientation
- * is not DRM_MODE_PANEL_ORIENTATION_UNKNOWN it will attach the
- * "panel orientation" property to the connector.
+ * It is allowed to call this function with a panel_orientation of
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN, in which case it is a no-op.
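+ *
+ * A panel driver that reads the orientation from the device tree could, as a
+ * sketch (error handling omitted), do::
+ *
+ *	enum drm_panel_orientation orientation;
+ *
+ *	of_drm_get_panel_orientation(np, &orientation);
+ *	drm_connector_set_panel_orientation(connector, orientation);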
*
* Returns:
* Zero on success, negative errno on failure.
*/
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height)
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation)
{
struct drm_device *dev = connector->dev;
struct drm_display_info *info = &connector->display_info;
struct drm_property *prop;
- int orientation_quirk;
- orientation_quirk = drm_get_panel_orientation_quirk(width, height);
- if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- info->panel_orientation = orientation_quirk;
+ /* Already set? */
+ if (info->panel_orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ return 0;
- if (info->panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ /* Don't attach the property if the orientation is unknown */
+ if (panel_orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
return 0;
+ info->panel_orientation = panel_orientation;
+
prop = dev->mode_config.panel_orientation_property;
if (!prop) {
prop = drm_property_create_enum(dev, DRM_MODE_PROP_IMMUTABLE,
@@ -2094,7 +2120,37 @@ int drm_connector_init_panel_orientation_property(
info->panel_orientation);
return 0;
}
-EXPORT_SYMBOL(drm_connector_init_panel_orientation_property);
+EXPORT_SYMBOL(drm_connector_set_panel_orientation);
+
+/**
+ * drm_connector_set_panel_orientation_with_quirk -
+ * set the connector's panel_orientation after checking for quirks
+ * @connector: connector for which to set the panel-orientation property.
+ * @panel_orientation: drm_panel_orientation value to set
+ * @width: width in pixels of the panel, used for panel quirk detection
+ * @height: height in pixels of the panel, used for panel quirk detection
+ *
+ * Like drm_connector_set_panel_orientation(), but with a check for platform
+ * specific (e.g. DMI based) quirks overriding the passed in panel_orientation.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height)
+{
+ int orientation_quirk;
+
+ orientation_quirk = drm_get_panel_orientation_quirk(width, height);
+ if (orientation_quirk != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
+ panel_orientation = orientation_quirk;
+
+ return drm_connector_set_panel_orientation(connector,
+ panel_orientation);
+}
+EXPORT_SYMBOL(drm_connector_set_panel_orientation_with_quirk);
int drm_connector_set_obj_prop(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 1f802d8e5681..c99be950bf17 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -47,7 +47,7 @@ struct drm_ctx_list {
/** \name Context bitmap support */
/*@{*/
-/**
+/*
* Free a handle from the context bitmap.
*
* \param dev DRM device.
@@ -68,7 +68,7 @@ void drm_legacy_ctxbitmap_free(struct drm_device * dev, int ctx_handle)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* Context bitmap allocation.
*
* \param dev DRM device.
@@ -88,7 +88,7 @@ static int drm_legacy_ctxbitmap_next(struct drm_device * dev)
return ret;
}
-/**
+/*
* Context bitmap initialization.
*
* \param dev DRM device.
@@ -104,7 +104,7 @@ void drm_legacy_ctxbitmap_init(struct drm_device * dev)
idr_init(&dev->ctx_idr);
}
-/**
+/*
* Context bitmap cleanup.
*
* \param dev DRM device.
@@ -163,7 +163,7 @@ void drm_legacy_ctxbitmap_flush(struct drm_device *dev, struct drm_file *file)
/** \name Per Context SAREA Support */
/*@{*/
-/**
+/*
* Get per-context SAREA.
*
* \param inode device inode.
@@ -211,7 +211,7 @@ int drm_legacy_getsareactx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Set per-context SAREA.
*
* \param inode device inode.
@@ -263,7 +263,7 @@ int drm_legacy_setsareactx(struct drm_device *dev, void *data,
/** \name The actual DRM context handling routines */
/*@{*/
-/**
+/*
* Switch context.
*
* \param dev DRM device.
@@ -290,7 +290,7 @@ static int drm_context_switch(struct drm_device * dev, int old, int new)
return 0;
}
-/**
+/*
* Complete context switch.
*
* \param dev DRM device.
@@ -318,7 +318,7 @@ static int drm_context_switch_complete(struct drm_device *dev,
return 0;
}
-/**
+/*
* Reserve contexts.
*
* \param inode device inode.
@@ -351,7 +351,7 @@ int drm_legacy_resctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Add context.
*
* \param inode device inode.
@@ -404,7 +404,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Get context.
*
* \param inode device inode.
@@ -428,7 +428,7 @@ int drm_legacy_getctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Switch context.
*
* \param inode device inode.
@@ -452,7 +452,7 @@ int drm_legacy_switchctx(struct drm_device *dev, void *data,
return drm_context_switch(dev, dev->last_context, ctx->handle);
}
-/**
+/*
* New context.
*
* \param inode device inode.
@@ -478,7 +478,7 @@ int drm_legacy_newctx(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Remove context.
*
* \param inode device inode.
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index 93a4eec429e8..a4d36aca45ea 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -244,10 +244,6 @@ drm_crtc_prepare_encoders(struct drm_device *dev)
/* Disable unused encoders */
if (encoder->crtc == NULL)
drm_encoder_disable(encoder);
- /* Disable encoders whose CRTC is about to change */
- if (encoder_funcs->get_crtc &&
- encoder->crtc != (*encoder_funcs->get_crtc)(encoder))
- drm_encoder_disable(encoder);
}
}
diff --git a/drivers/gpu/drm/drm_crtc_internal.h b/drivers/gpu/drm/drm_crtc_internal.h
index c7d5e4c21423..16f2413403aa 100644
--- a/drivers/gpu/drm/drm_crtc_internal.h
+++ b/drivers/gpu/drm/drm_crtc_internal.h
@@ -216,6 +216,8 @@ int drm_mode_rmfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
int drm_mode_getfb(struct drm_device *dev,
void *data, struct drm_file *file_priv);
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv);
int drm_mode_dirtyfb_ioctl(struct drm_device *dev,
void *data, struct drm_file *file_priv);
diff --git a/drivers/gpu/drm/drm_debugfs.c b/drivers/gpu/drm/drm_debugfs.c
index eab0f2687cd6..4e673d318503 100644
--- a/drivers/gpu/drm/drm_debugfs.c
+++ b/drivers/gpu/drm/drm_debugfs.c
@@ -182,8 +182,7 @@ int drm_debugfs_create_files(const struct drm_info_list *files, int count,
for (i = 0; i < count; i++) {
u32 features = files[i].driver_features;
- if (features != 0 &&
- (dev->driver->driver_features & features) != features)
+ if (features && !drm_core_check_all_features(dev, features))
continue;
tmp = kmalloc(sizeof(struct drm_info_node), GFP_KERNEL);
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index e22b812c4b80..5d67a41f7c3a 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -372,7 +372,7 @@ void drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
- debugfs_create_file("control", S_IRUGO, crc_ent, crtc,
+ debugfs_create_file("control", S_IRUGO | S_IWUSR, crc_ent, crtc,
&drm_crtc_crc_control_fops);
debugfs_create_file("data", S_IRUGO, crc_ent, crtc,
&drm_crtc_crc_data_fops);
diff --git a/drivers/gpu/drm/drm_dma.c b/drivers/gpu/drm/drm_dma.c
index e45b07890c5a..a7add55a85b4 100644
--- a/drivers/gpu/drm/drm_dma.c
+++ b/drivers/gpu/drm/drm_dma.c
@@ -42,10 +42,10 @@
#include "drm_legacy.h"
/**
- * Initialize the DMA data.
+ * drm_legacy_dma_setup() - Initialize the DMA data.
*
- * \param dev DRM device.
- * \return zero on success or a negative value on failure.
+ * @dev: DRM device.
+ * Return: zero on success or a negative value on failure.
*
* Allocate and initialize a drm_device_dma structure.
*/
@@ -71,9 +71,9 @@ int drm_legacy_dma_setup(struct drm_device *dev)
}
/**
- * Cleanup the DMA resources.
+ * drm_legacy_dma_takedown() - Cleanup the DMA resources.
*
- * \param dev DRM device.
+ * @dev: DRM device.
*
* Free all pages associated with DMA buffers, the buffers and pages lists, and
* finally the drm_device::dma structure itself.
@@ -120,10 +120,10 @@ void drm_legacy_dma_takedown(struct drm_device *dev)
}
/**
- * Free a buffer.
+ * drm_legacy_free_buffer() - Free a buffer.
*
- * \param dev DRM device.
- * \param buf buffer to free.
+ * @dev: DRM device.
+ * @buf: buffer to free.
*
* Resets the fields of \p buf.
*/
@@ -139,9 +139,10 @@ void drm_legacy_free_buffer(struct drm_device *dev, struct drm_buf * buf)
}
/**
- * Reclaim the buffers.
+ * drm_legacy_reclaim_buffers() - Reclaim the buffers.
*
- * \param file_priv DRM file private.
+ * @dev: DRM device.
+ * @file_priv: DRM file private.
*
* Frees each buffer associated with \p file_priv not already on the hardware.
*/
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index a5364b5192b8..c6fbe6e6bc9d 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -362,6 +362,65 @@ int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
EXPORT_SYMBOL(drm_dp_dpcd_read_link_status);
/**
+ * drm_dp_send_real_edid_checksum() - send back the real EDID checksum value
+ * @aux: DisplayPort AUX channel
+ * @real_edid_checksum: real EDID checksum for the last block
+ *
+ * Returns:
+ * True on success
+ */
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum)
+{
+ u8 link_edid_read = 0, auto_test_req = 0, test_resp = 0;
+
+ if (drm_dp_dpcd_read(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+ auto_test_req &= DP_AUTOMATED_TEST_REQUEST;
+
+ if (drm_dp_dpcd_read(aux, DP_TEST_REQUEST, &link_edid_read, 1) < 1) {
+ DRM_ERROR("DPCD failed read at register 0x%x\n",
+ DP_TEST_REQUEST);
+ return false;
+ }
+ link_edid_read &= DP_TEST_LINK_EDID_READ;
+
+ if (!auto_test_req || !link_edid_read) {
+ DRM_DEBUG_KMS("Source DUT does not support TEST_EDID_READ\n");
+ return false;
+ }
+
+ if (drm_dp_dpcd_write(aux, DP_DEVICE_SERVICE_IRQ_VECTOR,
+ &auto_test_req, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_DEVICE_SERVICE_IRQ_VECTOR);
+ return false;
+ }
+
+	/* send back the checksum for the last EDID extension block data */
+ if (drm_dp_dpcd_write(aux, DP_TEST_EDID_CHECKSUM,
+ &real_edid_checksum, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_EDID_CHECKSUM);
+ return false;
+ }
+
+ test_resp |= DP_TEST_EDID_CHECKSUM_WRITE;
+ if (drm_dp_dpcd_write(aux, DP_TEST_RESPONSE, &test_resp, 1) < 1) {
+ DRM_ERROR("DPCD failed write at register 0x%x\n",
+ DP_TEST_RESPONSE);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(drm_dp_send_real_edid_checksum);
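As a hedged illustration of the intended call site (not part of this patch), a DP driver could answer a TEST_EDID_READ automated-test request with the checksum the core computed for the failed read; mydrv_* and the exact hook are placeholders, while connector->real_edid_checksum is the field added later in this series:

#include <drm/drm_connector.h>
#include <drm/drm_dp_helper.h>
#include <drm/drm_print.h>

static void mydrv_ack_edid_test(struct drm_connector *connector,
				struct drm_dp_aux *aux)
{
	/* reply with the checksum computed for the last EDID block */
	if (!drm_dp_send_real_edid_checksum(aux,
					    connector->real_edid_checksum))
		DRM_DEBUG_KMS("EDID checksum test response failed\n");
}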
+
+/**
* drm_dp_downstream_max_clock() - extract branch device max
* pixel rate for legacy VGA
* converter or max TMDS clock
@@ -470,8 +529,7 @@ void drm_dp_downstream_debug(struct seq_file *m,
int len;
uint8_t rev[2];
int type = port_cap[0] & DP_DS_PORT_TYPE_MASK;
- bool branch_device = dpcd[DP_DOWNSTREAMPORT_PRESENT] &
- DP_DWN_STRM_PORT_PRESENT;
+ bool branch_device = drm_dp_is_branch(dpcd);
seq_printf(m, "\tDP branch device present: %s\n",
branch_device ? "yes" : "no");
@@ -1222,6 +1280,85 @@ drm_dp_get_quirks(const struct drm_dp_dpcd_ident *ident, bool is_branch)
#undef DEVICE_ID_ANY
#undef DEVICE_ID
+struct edid_quirk {
+ u8 mfg_id[2];
+ u8 prod_id[2];
+ u32 quirks;
+};
+
+#define MFG(first, second) { (first), (second) }
+#define PROD_ID(first, second) { (first), (second) }
+
+/*
+ * Some devices have unreliable OUIDs where they don't set the device ID
+ * correctly, and as a result we need to use the EDID for finding additional
+ * DP quirks in such cases.
+ */
+static const struct edid_quirk edid_quirk_list[] = {
+ /* Optional 4K AMOLED panel in the ThinkPad X1 Extreme 2nd Generation
+ * only supports DPCD backlight controls
+ */
+ { MFG(0x4c, 0x83), PROD_ID(0x41, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ /*
+	 * Some Dell CML 2020 systems have panels that support both AUX and PWM
+ * backlight control, and some only support AUX backlight control. All
+ * said panels start up in AUX mode by default, and we don't have any
+ * support for disabling HDR mode on these panels which would be
+ * required to switch to PWM backlight control mode (plus, I'm not
+ * even sure we want PWM backlight controls over DPCD backlight
+ * controls anyway...). Until we have a better way of detecting these,
+ * force DPCD backlight mode on all of them.
+ */
+ { MFG(0x06, 0xaf), PROD_ID(0x9b, 0x32), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x06, 0xaf), PROD_ID(0xeb, 0x41), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xc7, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+ { MFG(0x4d, 0x10), PROD_ID(0xe6, 0x14), BIT(DP_QUIRK_FORCE_DPCD_BACKLIGHT) },
+};
+
+#undef MFG
+#undef PROD_ID
+
+/**
+ * drm_dp_get_edid_quirks() - Check the EDID of a DP device to find additional
+ * DP-specific quirks
+ * @edid: The EDID to check
+ *
+ * While OUIDs are meant to be used to recognize a DisplayPort device, many
+ * manufacturers neglect to fill in the dev-ID, making it impossible to rely
+ * on OUIDs alone when determining quirks in some cases. This function can be
+ * used to check the EDID and look
+ * up any additional DP quirks. The bits returned by this function correspond
+ * to the quirk bits in &drm_dp_quirk.
+ *
+ * Returns: a bitmask of quirks, if any. The driver can check this using
+ * drm_dp_has_quirk().
+ */
+u32 drm_dp_get_edid_quirks(const struct edid *edid)
+{
+ const struct edid_quirk *quirk;
+ u32 quirks = 0;
+ int i;
+
+ if (!edid)
+ return 0;
+
+ for (i = 0; i < ARRAY_SIZE(edid_quirk_list); i++) {
+ quirk = &edid_quirk_list[i];
+ if (memcmp(quirk->mfg_id, edid->mfg_id,
+ sizeof(edid->mfg_id)) == 0 &&
+ memcmp(quirk->prod_id, edid->prod_code,
+ sizeof(edid->prod_code)) == 0)
+ quirks |= quirk->quirks;
+ }
+
+ DRM_DEBUG_KMS("DP sink: EDID mfg %*phD prod-ID %*phD quirks: 0x%04x\n",
+ (int)sizeof(edid->mfg_id), edid->mfg_id,
+ (int)sizeof(edid->prod_code), edid->prod_code, quirks);
+
+ return quirks;
+}
+EXPORT_SYMBOL(drm_dp_get_edid_quirks);
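A sketch of the intended pairing with drm_dp_has_quirk(), which gains an edid_quirks argument in this series (see the drm_dp_mst_topology.c hunk below); the helper name is hypothetical:

#include <drm/drm_dp_helper.h>
#include <drm/drm_edid.h>

static bool mydrv_wants_dpcd_backlight(const struct drm_dp_desc *desc,
				       const struct edid *edid)
{
	/* a real driver would cache this once per detection cycle */
	u32 edid_quirks = drm_dp_get_edid_quirks(edid);

	return drm_dp_has_quirk(desc, edid_quirks,
				DP_QUIRK_FORCE_DPCD_BACKLIGHT);
}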
+
/**
* drm_dp_read_desc - read sink/branch descriptor from DPCD
* @aux: DisplayPort AUX channel
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 20cdaf3146b8..4b255e25e4a1 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -736,6 +736,10 @@ static bool drm_dp_sideband_msg_build(struct drm_dp_sideband_msg_rx *msg,
if (msg->curchunk_idx >= msg->curchunk_len) {
/* do CRC */
crc4 = drm_dp_msg_data_crc4(msg->chunk, msg->curchunk_len - 1);
+ if (crc4 != msg->chunk[msg->curchunk_len - 1])
+ print_hex_dump(KERN_DEBUG, "wrong crc",
+ DUMP_PREFIX_NONE, 16, 1,
+ msg->chunk, msg->curchunk_len, false);
/* copy chunk into bigger msg */
memcpy(&msg->msg[msg->curlen], msg->chunk, msg->curchunk_len - 1);
msg->curlen += msg->curchunk_len - 1;
@@ -1035,7 +1039,8 @@ static bool drm_dp_sideband_parse_req(struct drm_dp_sideband_msg_rx *raw,
}
}
-static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
+static void build_dpcd_write(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes, u8 *bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1045,17 +1050,14 @@ static int build_dpcd_write(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_write.num_bytes = num_bytes;
req.u.dpcd_write.bytes = bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
-static int build_link_address(struct drm_dp_sideband_msg_tx *msg)
+static void build_link_address(struct drm_dp_sideband_msg_tx *msg)
{
struct drm_dp_sideband_msg_req_body req;
req.req_type = DP_LINK_ADDRESS;
drm_dp_encode_sideband_req(&req, msg);
- return 0;
}
static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
@@ -1067,7 +1069,8 @@ static int build_clear_payload_id_table(struct drm_dp_sideband_msg_tx *msg)
return 0;
}
-static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int port_num)
+static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg,
+ int port_num)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1078,10 +1081,11 @@ static int build_enum_path_resources(struct drm_dp_sideband_msg_tx *msg, int por
return 0;
}
-static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_num,
- u8 vcpi, uint16_t pbn,
- u8 number_sdp_streams,
- u8 *sdp_stream_sink)
+static void build_allocate_payload(struct drm_dp_sideband_msg_tx *msg,
+ int port_num,
+ u8 vcpi, uint16_t pbn,
+ u8 number_sdp_streams,
+ u8 *sdp_stream_sink)
{
struct drm_dp_sideband_msg_req_body req;
memset(&req, 0, sizeof(req));
@@ -1094,11 +1098,10 @@ static int build_allocate_payload(struct drm_dp_sideband_msg_tx *msg, int port_n
number_sdp_streams);
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
-static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
- int port_num, bool power_up)
+static void build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
+ int port_num, bool power_up)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1110,7 +1113,6 @@ static int build_power_updown_phy(struct drm_dp_sideband_msg_tx *msg,
req.u.port_num.port_number = port_num;
drm_dp_encode_sideband_req(&req, msg);
msg->path_msg = true;
- return 0;
}
static int drm_dp_mst_assign_payload_id(struct drm_dp_mst_topology_mgr *mgr,
@@ -2061,7 +2063,7 @@ ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
* sideband messaging as drm_dp_dpcd_write() does for local
* devices via actual AUX CH.
*
- * Return: 0 on success, negative error code on failure.
+ * Return: number of bytes written on success, negative error code on failure.
*/
ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size)
@@ -2073,29 +2075,27 @@ ssize_t drm_dp_mst_dpcd_write(struct drm_dp_aux *aux,
offset, size, buffer);
}
-static void drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
+static int drm_dp_check_mstb_guid(struct drm_dp_mst_branch *mstb, u8 *guid)
{
- int ret;
+ int ret = 0;
memcpy(mstb->guid, guid, 16);
if (!drm_dp_validate_guid(mstb->mgr, mstb->guid)) {
if (mstb->port_parent) {
- ret = drm_dp_send_dpcd_write(
- mstb->mgr,
- mstb->port_parent,
- DP_GUID,
- 16,
- mstb->guid);
+ ret = drm_dp_send_dpcd_write(mstb->mgr,
+ mstb->port_parent,
+ DP_GUID, 16, mstb->guid);
} else {
-
- ret = drm_dp_dpcd_write(
- mstb->mgr->aux,
- DP_GUID,
- mstb->guid,
- 16);
+ ret = drm_dp_dpcd_write(mstb->mgr->aux,
+ DP_GUID, mstb->guid, 16);
}
}
+
+ if (ret < 16 && ret > 0)
+ return -EPROTO;
+
+ return ret == 16 ? 0 : ret;
}
static void build_mst_prop_path(const struct drm_dp_mst_branch *mstb,
@@ -2178,7 +2178,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
drm_connector_set_tile_property(port->connector);
}
- mgr->cbs->register_connector(port->connector);
+ drm_connector_register(port->connector);
return;
error:
@@ -2645,7 +2645,8 @@ static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
return false;
}
-static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
+static void build_dpcd_read(struct drm_dp_sideband_msg_tx *msg,
+ u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -2654,8 +2655,6 @@ static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32
req.u.dpcd_read.dpcd_address = offset;
req.u.dpcd_read.num_bytes = num_bytes;
drm_dp_encode_sideband_req(&req, msg);
-
- return 0;
}
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
@@ -2881,7 +2880,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_link_address_ack_reply *reply;
struct drm_dp_mst_port *port, *tmp;
- int i, len, ret, port_mask = 0;
+ int i, ret, port_mask = 0;
bool changed = false;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2889,7 +2888,7 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_link_address(txmsg);
+ build_link_address(txmsg);
mstb->link_address_sent = true;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2910,7 +2909,15 @@ static int drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("link address reply: %d\n", reply->nports);
drm_dp_dump_link_address(reply);
- drm_dp_check_mstb_guid(mstb, reply->guid);
+ ret = drm_dp_check_mstb_guid(mstb, reply->guid);
+ if (ret) {
+ char buf[64];
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, buf, sizeof(buf));
+ DRM_ERROR("GUID check on %s failed: %d\n",
+ buf, ret);
+ goto out;
+ }
for (i = 0; i < reply->nports; i++) {
port_mask |= BIT(reply->ports[i].port_number);
@@ -2951,14 +2958,14 @@ void drm_dp_send_clear_payload_id_table(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
if (!txmsg)
return;
txmsg->dst = mstb;
- len = build_clear_payload_id_table(txmsg);
+ build_clear_payload_id_table(txmsg);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -2976,7 +2983,6 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_enum_path_resources_ack_reply *path_res;
struct drm_dp_sideband_msg_tx *txmsg;
- int len;
int ret;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
@@ -2984,7 +2990,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
return -ENOMEM;
txmsg->dst = mstb;
- len = build_enum_path_resources(txmsg, port->port_num);
+ build_enum_path_resources(txmsg, port->port_num);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3068,7 +3074,7 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
{
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
- int len, ret, port_num;
+ int ret, port_num;
u8 sinks[DRM_DP_MAX_SDP_STREAMS];
int i;
@@ -3093,9 +3099,9 @@ static int drm_dp_payload_send_msg(struct drm_dp_mst_topology_mgr *mgr,
sinks[i] = i;
txmsg->dst = mstb;
- len = build_allocate_payload(txmsg, port_num,
- id,
- pbn, port->num_sdp_streams, sinks);
+ build_allocate_payload(txmsg, port_num,
+ id,
+ pbn, port->num_sdp_streams, sinks);
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3124,7 +3130,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up)
{
struct drm_dp_sideband_msg_tx *txmsg;
- int len, ret;
+ int ret;
port = drm_dp_mst_topology_get_port_validated(mgr, port);
if (!port)
@@ -3137,7 +3143,7 @@ int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
}
txmsg->dst = port->parent;
- len = build_power_updown_phy(txmsg, port->port_num, power_up);
+ build_power_updown_phy(txmsg, port->port_num, power_up);
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(port->parent, txmsg);
@@ -3359,7 +3365,6 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret = 0;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3374,7 +3379,7 @@ static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_read(txmsg, port->port_num, offset, size);
+ build_dpcd_read(txmsg, port->port_num, offset, size);
txmsg->dst = port->parent;
drm_dp_queue_down_tx(mgr, txmsg);
@@ -3412,7 +3417,6 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes)
{
- int len;
int ret;
struct drm_dp_sideband_msg_tx *txmsg;
struct drm_dp_mst_branch *mstb;
@@ -3427,18 +3431,15 @@ static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
goto fail_put;
}
- len = build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
+ build_dpcd_write(txmsg, port->port_num, offset, size, bytes);
txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
- if (ret > 0) {
- if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
- ret = -EIO;
- else
- ret = 0;
- }
+ if (ret > 0 && txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK)
+ ret = -EIO;
+
kfree(txmsg);
fail_put:
drm_dp_mst_topology_put_mstb(mstb);
@@ -3499,9 +3500,9 @@ static int drm_dp_get_vc_payload_bw(u8 dp_link_bw, u8 dp_link_count)
int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool mst_state)
{
int ret = 0;
- int i = 0;
struct drm_dp_mst_branch *mstb = NULL;
+ mutex_lock(&mgr->payload_lock);
mutex_lock(&mgr->lock);
if (mst_state == mgr->mst_state)
goto out_unlock;
@@ -3509,6 +3510,8 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
mgr->mst_state = mst_state;
/* set the device into MST mode */
if (mst_state) {
+ struct drm_dp_payload reset_pay;
+
WARN_ON(mgr->mst_primary);
/* get dpcd info */
@@ -3538,17 +3541,15 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
drm_dp_mst_topology_get_mstb(mgr->mst_primary);
ret = drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL,
- DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
- if (ret < 0) {
+ DP_MST_EN |
+ DP_UP_REQ_EN |
+ DP_UPSTREAM_IS_SRC);
+ if (ret < 0)
goto out_unlock;
- }
- {
- struct drm_dp_payload reset_pay;
- reset_pay.start_slot = 0;
- reset_pay.num_slots = 0x3f;
- drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
- }
+ reset_pay.start_slot = 0;
+ reset_pay.num_slots = 0x3f;
+ drm_dp_dpcd_write_payload(mgr, 0, &reset_pay);
queue_work(system_long_wq, &mgr->work);
@@ -3560,27 +3561,19 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
/* this can fail if the device is gone */
drm_dp_dpcd_writeb(mgr->aux, DP_MSTM_CTRL, 0);
ret = 0;
- mutex_lock(&mgr->payload_lock);
- memset(mgr->payloads, 0, mgr->max_payloads * sizeof(struct drm_dp_payload));
+ memset(mgr->payloads, 0,
+ mgr->max_payloads * sizeof(mgr->payloads[0]));
+ memset(mgr->proposed_vcpis, 0,
+ mgr->max_payloads * sizeof(mgr->proposed_vcpis[0]));
mgr->payload_mask = 0;
set_bit(0, &mgr->payload_mask);
- for (i = 0; i < mgr->max_payloads; i++) {
- struct drm_dp_vcpi *vcpi = mgr->proposed_vcpis[i];
-
- if (vcpi) {
- vcpi->vcpi = 0;
- vcpi->num_slots = 0;
- }
- mgr->proposed_vcpis[i] = NULL;
- }
mgr->vcpi_mask = 0;
- mutex_unlock(&mgr->payload_lock);
-
mgr->payload_id_table_cleared = false;
}
out_unlock:
mutex_unlock(&mgr->lock);
+ mutex_unlock(&mgr->payload_lock);
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
return ret;
@@ -3681,7 +3674,12 @@ int drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
DRM_DEBUG_KMS("dpcd read failed - undocked during suspend?\n");
goto out_fail;
}
- drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+
+ ret = drm_dp_check_mstb_guid(mgr->mst_primary, guid);
+ if (ret) {
+ DRM_DEBUG_KMS("check mstb failed - undocked during suspend?\n");
+ goto out_fail;
+ }
/*
* For the final step of resuming the topology, we need to bring the
@@ -3708,7 +3706,7 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
{
int len;
u8 replyblock[32];
- int replylen, origlen, curreply;
+ int replylen, curreply;
int ret;
struct drm_dp_sideband_msg_rx *msg;
int basereg = up ? DP_SIDEBAND_MSG_UP_REQ_BASE : DP_SIDEBAND_MSG_DOWN_REP_BASE;
@@ -3728,7 +3726,6 @@ static bool drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up)
}
replylen = msg->curchunk_len + msg->curchunk_hdrlen;
- origlen = replylen;
replylen -= len;
curreply = len;
while (replylen > 0) {
@@ -3838,7 +3835,8 @@ drm_dp_mst_process_up_req(struct drm_dp_mst_topology_mgr *mgr,
else if (msg->req_type == DP_RESOURCE_STATUS_NOTIFY)
guid = msg->u.resource_stat.guid;
- mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
+ if (guid)
+ mstb = drm_dp_get_mst_branch_device_by_guid(mgr, guid);
} else {
mstb = drm_dp_get_mst_branch_device(mgr, hdr->lct, hdr->rad);
}
@@ -4623,15 +4621,34 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
int ret;
ret = drm_dp_dpcd_read(mgr->aux, DP_DPCD_REV, buf, DP_RECEIVER_CAP_SIZE);
+ if (ret) {
+ seq_printf(m, "dpcd read failed\n");
+ goto out;
+ }
seq_printf(m, "dpcd: %*ph\n", DP_RECEIVER_CAP_SIZE, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_FAUX_CAP, buf, 2);
+ if (ret) {
+ seq_printf(m, "faux/mst read failed\n");
+ goto out;
+ }
seq_printf(m, "faux/mst: %*ph\n", 2, buf);
+
ret = drm_dp_dpcd_read(mgr->aux, DP_MSTM_CTRL, buf, 1);
+ if (ret) {
+ seq_printf(m, "mst ctrl read failed\n");
+ goto out;
+ }
seq_printf(m, "mst ctrl: %*ph\n", 1, buf);
/* dump the standard OUI branch header */
ret = drm_dp_dpcd_read(mgr->aux, DP_BRANCH_OUI, buf, DP_BRANCH_OUI_HEADER_SIZE);
+ if (ret) {
+ seq_printf(m, "branch oui read failed\n");
+ goto out;
+ }
seq_printf(m, "branch oui: %*phN devid: ", 3, buf);
+
for (i = 0x3; i < 0x8 && buf[i]; i++)
seq_printf(m, "%c", buf[i]);
seq_printf(m, " revision: hw: %x.%x sw: %x.%x\n",
@@ -4640,6 +4657,7 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
seq_printf(m, "payload table: %*ph\n", DP_PAYLOAD_TABLE_SIZE, buf);
}
+out:
mutex_unlock(&mgr->lock);
}
@@ -4655,11 +4673,23 @@ static void drm_dp_tx_work(struct work_struct *work)
mutex_unlock(&mgr->qlock);
}
+static inline void drm_dp_destroy_connector(struct drm_dp_mst_port *port)
+{
+ if (!port->connector)
+ return;
+
+ if (port->mgr->cbs->destroy_connector) {
+ port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ } else {
+ drm_connector_unregister(port->connector);
+ drm_connector_put(port->connector);
+ }
+}
+
static inline void
drm_dp_delayed_destroy_port(struct drm_dp_mst_port *port)
{
- if (port->connector)
- port->mgr->cbs->destroy_connector(port->mgr, port->connector);
+ drm_dp_destroy_connector(port);
drm_dp_port_set_pdt(port, DP_PEER_DEVICE_NONE, port->mcs);
drm_dp_mst_put_port_malloc(port);
@@ -5460,7 +5490,8 @@ struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port)
if (drm_dp_read_desc(port->mgr->aux, &desc, true))
return NULL;
- if (drm_dp_has_quirk(&desc, DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
+ if (drm_dp_has_quirk(&desc, 0,
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD) &&
port->mgr->dpcd[DP_DPCD_REV] >= DP_DPCD_REV_14 &&
port->parent == port->mgr->mst_primary) {
u8 downstreamport;
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 7c18a980cd4b..7b1a628d1f6e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -946,7 +946,8 @@ int drm_dev_register(struct drm_device *dev, unsigned long flags)
struct drm_driver *driver = dev->driver;
int ret;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
ret = drm_minor_register(dev, DRM_MINOR_RENDER);
if (ret)
@@ -986,7 +987,8 @@ err_minors:
drm_minor_unregister(dev, DRM_MINOR_PRIMARY);
drm_minor_unregister(dev, DRM_MINOR_RENDER);
out_unlock:
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
return ret;
}
EXPORT_SYMBOL(drm_dev_register);
@@ -1079,17 +1081,14 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
DRM_DEBUG("\n");
- mutex_lock(&drm_global_mutex);
minor = drm_minor_acquire(iminor(inode));
- if (IS_ERR(minor)) {
- err = PTR_ERR(minor);
- goto out_unlock;
- }
+ if (IS_ERR(minor))
+ return PTR_ERR(minor);
new_fops = fops_get(minor->dev->driver->fops);
if (!new_fops) {
err = -ENODEV;
- goto out_release;
+ goto out;
}
replace_fops(filp, new_fops);
@@ -1098,10 +1097,9 @@ static int drm_stub_open(struct inode *inode, struct file *filp)
else
err = 0;
-out_release:
+out:
drm_minor_release(minor);
-out_unlock:
- mutex_unlock(&drm_global_mutex);
+
return err;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 99769d6c9f84..116451101426 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -1590,11 +1590,22 @@ static int validate_displayid(u8 *displayid, int length, int idx);
static int drm_edid_block_checksum(const u8 *raw_edid)
{
int i;
- u8 csum = 0;
- for (i = 0; i < EDID_LENGTH; i++)
+ u8 csum = 0, crc = 0;
+
+ for (i = 0; i < EDID_LENGTH - 1; i++)
csum += raw_edid[i];
- return csum;
+ crc = 0x100 - csum;
+
+ return crc;
+}
+
+static bool drm_edid_block_checksum_diff(const u8 *raw_edid, u8 real_checksum)
+{
+	return raw_edid[EDID_LENGTH - 1] != real_checksum;
}
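A user-space sanity sketch of the invariant the two helpers above rely on: all 128 bytes of a valid EDID block sum to 0 mod 256, so the stored byte 127 must equal 0x100 minus the sum of bytes 0..126:

#include <stdbool.h>
#include <stdint.h>

static bool edid_block_ok(const uint8_t blk[128])
{
	uint8_t sum = 0;
	int i;

	for (i = 0; i < 127; i++)	/* every byte except the checksum */
		sum += blk[i];

	return blk[127] == (uint8_t)(0x100 - sum);
}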
static bool drm_edid_is_zero(const u8 *in_edid, int length)
@@ -1652,7 +1663,7 @@ bool drm_edid_block_valid(u8 *raw_edid, int block, bool print_bad_edid,
}
csum = drm_edid_block_checksum(raw_edid);
- if (csum) {
+ if (drm_edid_block_checksum_diff(raw_edid, csum)) {
if (edid_corrupt)
*edid_corrupt = true;
@@ -1793,6 +1804,11 @@ static void connector_bad_edid(struct drm_connector *connector,
u8 *edid, int num_blocks)
{
int i;
+ u8 num_of_ext = edid[0x7e];
+
+ /* Calculate real checksum for the last edid extension block data */
+ connector->real_edid_checksum =
+ drm_edid_block_checksum(edid + num_of_ext * EDID_LENGTH);
if (connector->bad_edid_counter++ && !drm_debug_enabled(DRM_UT_KMS))
return;
@@ -2196,15 +2212,29 @@ struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
}
EXPORT_SYMBOL(drm_mode_find_dmt);
+static bool is_display_descriptor(const u8 d[18], u8 tag)
+{
+ return d[0] == 0x00 && d[1] == 0x00 &&
+ d[2] == 0x00 && d[3] == tag;
+}
+
+static bool is_detailed_timing_descriptor(const u8 d[18])
+{
+ return d[0] != 0x00 || d[1] != 0x00;
+}
+
typedef void detailed_cb(struct detailed_timing *timing, void *closure);
static void
cea_for_each_detailed_block(u8 *ext, detailed_cb *cb, void *closure)
{
- int i, n = 0;
+ int i, n;
u8 d = ext[0x02];
u8 *det_base = ext + d;
+ if (d < 4 || d > 127)
+ return;
+
n = (127 - d) / 18;
for (i = 0; i < n; i++)
cb((struct detailed_timing *)(det_base + 18 * i), closure);
@@ -2254,9 +2284,12 @@ static void
is_rb(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE)
- if (r[15] & 0x10)
- *(bool *)data = true;
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[15] & 0x10)
+ *(bool *)data = true;
}
/* EDID 1.4 defines this explicitly. For EDID 1.3, we guess, badly. */
@@ -2276,7 +2309,11 @@ static void
find_gtf2(struct detailed_timing *t, void *data)
{
u8 *r = (u8 *)t;
- if (r[3] == EDID_DETAIL_MONITOR_RANGE && r[10] == 0x02)
+
+ if (!is_display_descriptor(r, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ if (r[10] == 0x02)
*(u8 **)data = r;
}
@@ -2815,13 +2852,13 @@ do_inferred_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct detailed_data_monitor_range *range = &data->data.range;
- if (data->type != EDID_DETAIL_MONITOR_RANGE)
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
return;
closure->modes += drm_dmt_modes_for_range(closure->connector,
closure->edid,
timing);
-
+
if (!version_greater(closure->edid, 1, 1))
return; /* GTF not defined yet */
@@ -2894,10 +2931,11 @@ static void
do_established_modes(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_EST_TIMINGS)
- closure->modes += drm_est3_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_EST_TIMINGS))
+ return;
+
+ closure->modes += drm_est3_modes(closure->connector, timing);
}
/**
@@ -2946,19 +2984,19 @@ do_standard_modes(struct detailed_timing *timing, void *c)
struct detailed_non_pixel *data = &timing->data.other_data;
struct drm_connector *connector = closure->connector;
struct edid *edid = closure->edid;
+ int i;
- if (data->type == EDID_DETAIL_STD_MODES) {
- int i;
- for (i = 0; i < 6; i++) {
- struct std_timing *std;
- struct drm_display_mode *newmode;
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_STD_MODES))
+ return;
- std = &data->data.timings[i];
- newmode = drm_mode_std(connector, edid, std);
- if (newmode) {
- drm_mode_probed_add(connector, newmode);
- closure->modes++;
- }
+ for (i = 0; i < 6; i++) {
+ struct std_timing *std = &data->data.timings[i];
+ struct drm_display_mode *newmode;
+
+ newmode = drm_mode_std(connector, edid, std);
+ if (newmode) {
+ drm_mode_probed_add(connector, newmode);
+ closure->modes++;
}
}
}
@@ -3053,15 +3091,16 @@ static void
do_cvt_mode(struct detailed_timing *timing, void *c)
{
struct detailed_mode_closure *closure = c;
- struct detailed_non_pixel *data = &timing->data.other_data;
- if (data->type == EDID_DETAIL_CVT_3BYTE)
- closure->modes += drm_cvt_modes(closure->connector, timing);
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_CVT_3BYTE))
+ return;
+
+ closure->modes += drm_cvt_modes(closure->connector, timing);
}
static int
add_cvt_modes(struct drm_connector *connector, struct edid *edid)
-{
+{
struct detailed_mode_closure closure = {
.connector = connector,
.edid = edid,
@@ -3083,27 +3122,28 @@ do_detailed_mode(struct detailed_timing *timing, void *c)
struct detailed_mode_closure *closure = c;
struct drm_display_mode *newmode;
- if (timing->pixel_clock) {
- newmode = drm_mode_detailed(closure->connector->dev,
- closure->edid, timing,
- closure->quirks);
- if (!newmode)
- return;
+ if (!is_detailed_timing_descriptor((const u8 *)timing))
+ return;
- if (closure->preferred)
- newmode->type |= DRM_MODE_TYPE_PREFERRED;
+ newmode = drm_mode_detailed(closure->connector->dev,
+ closure->edid, timing,
+ closure->quirks);
+ if (!newmode)
+ return;
- /*
- * Detailed modes are limited to 10kHz pixel clock resolution,
- * so fix up anything that looks like CEA/HDMI mode, but the clock
- * is just slightly off.
- */
- fixup_detailed_cea_mode_clock(newmode);
+ if (closure->preferred)
+ newmode->type |= DRM_MODE_TYPE_PREFERRED;
- drm_mode_probed_add(closure->connector, newmode);
- closure->modes++;
- closure->preferred = false;
- }
+ /*
+ * Detailed modes are limited to 10kHz pixel clock resolution,
+ * so fix up anything that looks like CEA/HDMI mode, but the clock
+ * is just slightly off.
+ */
+ fixup_detailed_cea_mode_clock(newmode);
+
+ drm_mode_probed_add(closure->connector, newmode);
+ closure->modes++;
+ closure->preferred = false;
}
/*
@@ -3211,7 +3251,7 @@ static u8 *drm_find_cea_extension(const struct edid *edid)
return cea;
}
-static const struct drm_display_mode *cea_mode_for_vic(u8 vic)
+static __always_inline const struct drm_display_mode *cea_mode_for_vic(u8 vic)
{
BUILD_BUG_ON(1 + ARRAY_SIZE(edid_cea_modes_1) - 1 != 127);
BUILD_BUG_ON(193 + ARRAY_SIZE(edid_cea_modes_193) - 1 != 219);
@@ -3953,6 +3993,13 @@ cea_db_tag(const u8 *db)
static int
cea_revision(const u8 *cea)
{
+ /*
+ * FIXME is this correct for the DispID variant?
+ * The DispID spec doesn't really specify whether
+ * this is the revision of the CEA extension or
+ * the DispID CEA data block. And the only value
+ * given as an example is 0.
+ */
return cea[1];
}
@@ -3977,6 +4024,10 @@ cea_db_offsets(const u8 *cea, int *start, int *end)
* no non-DTD data.
*/
if (cea[0] == DATA_BLOCK_CTA) {
+ /*
+ * for_each_displayid_db() has already verified
+ * that these stay within expected bounds.
+ */
*start = 3;
*end = *start + cea[2];
} else if (cea[0] == CEA_EXT) {
@@ -4282,8 +4333,10 @@ drm_parse_hdmi_vsdb_audio(struct drm_connector *connector, const u8 *db)
static void
monitor_name(struct detailed_timing *t, void *data)
{
- if (t->data.other_data.type == EDID_DETAIL_MONITOR_NAME)
- *(u8 **)data = t->data.other_data.data.str.str;
+ if (!is_display_descriptor((const u8 *)t, EDID_DETAIL_MONITOR_NAME))
+ return;
+
+ *(u8 **)data = t->data.other_data.data.str.str;
}
static int get_monitor_name(struct edid *edid, char name[13])
@@ -4316,7 +4369,7 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name, int bufsize)
{
int name_length;
char buf[13];
-
+
if (bufsize <= 0)
return;
@@ -4381,6 +4434,7 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
if (cea_revision(cea) >= 3) {
int i, start, end;
+ int sad_count;
if (cea_db_offsets(cea, &start, &end)) {
start = 0;
@@ -4392,8 +4446,6 @@ static void drm_edid_to_eld(struct drm_connector *connector, struct edid *edid)
dbl = cea_db_payload_len(db);
switch (cea_db_tag(db)) {
- int sad_count;
-
case AUDIO_BLOCK:
/* Audio Data Block, contains SADs */
sad_count = min(dbl / 3, 15 - total_sad_count);
@@ -4594,6 +4646,9 @@ EXPORT_SYMBOL(drm_av_sync_delay);
*
* Parse the CEA extension according to CEA-861-B.
*
+ * Drivers that have added the modes parsed from EDID to drm_display_info
+ * should use &drm_display_info.is_hdmi instead of calling this function.
+ *
* Return: True if the monitor is HDMI, false if not or unknown.
*/
bool drm_detect_hdmi_monitor(struct edid *edid)
@@ -4828,6 +4883,8 @@ drm_parse_hdmi_vsdb_video(struct drm_connector *connector, const u8 *db)
struct drm_display_info *info = &connector->display_info;
u8 len = cea_db_payload_len(db);
+ info->is_hdmi = true;
+
if (len >= 6)
info->dvi_dual = db[6] & 1;
if (len >= 7)
@@ -4880,6 +4937,47 @@ static void drm_parse_cea_ext(struct drm_connector *connector,
}
}
+static
+void get_monitor_range(struct detailed_timing *timing,
+ void *info_monitor_range)
+{
+ struct drm_monitor_range_info *monitor_range = info_monitor_range;
+ const struct detailed_non_pixel *data = &timing->data.other_data;
+ const struct detailed_data_monitor_range *range = &data->data.range;
+
+ if (!is_display_descriptor((const u8 *)timing, EDID_DETAIL_MONITOR_RANGE))
+ return;
+
+ /*
+ * Check for flag range limits only. If flag == 1 then
+ * no additional timing information provided.
+ * Default GTF, GTF Secondary curve and CVT are not
+ * supported
+ */
+ if (range->flags != DRM_EDID_RANGE_LIMITS_ONLY_FLAG)
+ return;
+
+ monitor_range->min_vfreq = range->min_vfreq;
+ monitor_range->max_vfreq = range->max_vfreq;
+}
+
+static
+void drm_get_monitor_range(struct drm_connector *connector,
+ const struct edid *edid)
+{
+ struct drm_display_info *info = &connector->display_info;
+
+ if (!version_greater(edid, 1, 1))
+ return;
+
+ drm_for_each_detailed_block((u8 *)edid, get_monitor_range,
+ &info->monitor_range);
+
+ DRM_DEBUG_KMS("Supported Monitor Refresh rate range is %d Hz - %d Hz\n",
+ info->monitor_range.min_vfreq,
+ info->monitor_range.max_vfreq);
+}
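A hedged example of consuming the parsed range, e.g. to decide whether a sink advertises a usable refresh window; mydrv_* is illustrative:

#include <drm/drm_connector.h>

static bool mydrv_sink_has_vrr_range(struct drm_connector *connector)
{
	const struct drm_monitor_range_info *range =
		&connector->display_info.monitor_range;

	/* zero means the descriptor was absent or not range-limits-only */
	return range->min_vfreq > 0 && range->max_vfreq > range->min_vfreq;
}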
+
/* A connector has no EDID information, so we've got no EDID to compute quirks from. Reset
* all of the values which would have been set from EDID
*/
@@ -4896,11 +4994,13 @@ drm_reset_display_info(struct drm_connector *connector)
info->cea_rev = 0;
info->max_tmds_clock = 0;
info->dvi_dual = false;
+ info->is_hdmi = false;
info->has_hdmi_infoframe = false;
info->rgb_quant_range_selectable = false;
memset(&info->hdmi, 0, sizeof(info->hdmi));
info->non_desktop = 0;
+ memset(&info->monitor_range, 0, sizeof(info->monitor_range));
}
u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edid)
@@ -4916,6 +5016,8 @@ u32 drm_add_display_info(struct drm_connector *connector, const struct edid *edi
info->non_desktop = !!(quirks & EDID_QUIRK_NON_DESKTOP);
+ drm_get_monitor_range(connector, edid);
+
DRM_DEBUG_KMS("non_desktop set to %d\n", info->non_desktop);
if (edid->revision < 3)
@@ -5396,14 +5498,11 @@ drm_hdmi_avi_infoframe_from_display_mode(struct hdmi_avi_infoframe *frame,
{
enum hdmi_picture_aspect picture_aspect;
u8 vic, hdmi_vic;
- int err;
if (!frame || !mode)
return -EINVAL;
- err = hdmi_avi_infoframe_init(frame);
- if (err < 0)
- return err;
+ hdmi_avi_infoframe_init(frame);
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
frame->pixel_repeat = 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 4c7cbce7bae7..a9771de4d17e 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -250,17 +250,7 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper)
return 0;
mutex_lock(&fb_helper->lock);
- /*
- * TODO:
- * We should bail out here if there is a master by dropping _force.
- * Currently these igt tests fail if we do that:
- * - kms_fbcon_fbt@psr
- * - kms_fbcon_fbt@psr-suspend
- *
- * So first these tests need to be fixed so they drop master or don't
- * have an fd open.
- */
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit(&fb_helper->client);
do_delayed = fb_helper->delayed_hotplug;
if (do_delayed)
@@ -294,7 +284,7 @@ static bool drm_fb_helper_force_kernel_mode(void)
continue;
mutex_lock(&helper->lock);
- ret = drm_client_modeset_commit_force(&helper->client);
+ ret = drm_client_modeset_commit_locked(&helper->client);
if (ret)
error = true;
mutex_unlock(&helper->lock);
@@ -460,7 +450,6 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* drm_fb_helper_init - initialize a &struct drm_fb_helper
* @dev: drm device
* @fb_helper: driver-allocated fbdev helper structure to initialize
- * @max_conn_count: max connector count (not used)
*
* This allocates the structures for the fbdev helper with the given limits.
* Note that this won't yet touch the hardware (through the driver interfaces)
@@ -473,8 +462,7 @@ EXPORT_SYMBOL(drm_fb_helper_prepare);
* Zero if everything went ok, nonzero otherwise.
*/
int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- int max_conn_count)
+ struct drm_fb_helper *fb_helper)
{
int ret;
@@ -1357,7 +1345,7 @@ static int pan_display_atomic(struct fb_var_screeninfo *var,
pan_set(fb_helper, var->xoffset, var->yoffset);
- ret = drm_client_modeset_commit_force(&fb_helper->client);
+ ret = drm_client_modeset_commit_locked(&fb_helper->client);
if (!ret) {
info->var.xoffset = var->xoffset;
info->var.yoffset = var->yoffset;
@@ -2135,7 +2123,7 @@ static int drm_fbdev_client_hotplug(struct drm_client_dev *client)
drm_fb_helper_prepare(dev, fb_helper, &drm_fb_helper_generic_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, 0);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto err;
diff --git a/drivers/gpu/drm/drm_file.c b/drivers/gpu/drm/drm_file.c
index 92d16724f949..c4c704e01961 100644
--- a/drivers/gpu/drm/drm_file.c
+++ b/drivers/gpu/drm/drm_file.c
@@ -51,6 +51,37 @@
/* from BKL pushdown */
DEFINE_MUTEX(drm_global_mutex);
+bool drm_dev_needs_global_mutex(struct drm_device *dev)
+{
+ /*
+	 * Legacy drivers rely on all kinds of BKL locking semantics, so don't
+ * bother. They also still need BKL locking for their ioctls, so better
+ * safe than sorry.
+ */
+ if (drm_core_check_feature(dev, DRIVER_LEGACY))
+ return true;
+
+ /*
+ * The deprecated ->load callback must be called after the driver is
+ * already registered. This means such drivers rely on the BKL to make
+ * sure an open can't proceed until the driver is actually fully set up.
+ * Similar hilarity holds for the unload callback.
+ */
+ if (dev->driver->load || dev->driver->unload)
+ return true;
+
+ /*
+ * Drivers with the lastclose callback assume that it's synchronized
+ * against concurrent opens, which again needs the BKL. The proper fix
+ * is to use the drm_client infrastructure with proper locking for each
+ * client.
+ */
+ if (dev->driver->lastclose)
+ return true;
+
+ return false;
+}
+
/**
* DOC: file operations
*
@@ -220,7 +251,7 @@ void drm_file_free(struct drm_file *file)
DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
task_pid_nr(current),
(long)old_encode_dev(file->minor->kdev->devt),
- dev->open_count);
+ atomic_read(&dev->open_count));
if (drm_core_check_feature(dev, DRIVER_LEGACY) &&
dev->driver->preclose)
@@ -379,7 +410,10 @@ int drm_open(struct inode *inode, struct file *filp)
return PTR_ERR(minor);
dev = minor->dev;
- if (!dev->open_count++)
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
+
+ if (!atomic_fetch_inc(&dev->open_count))
need_setup = 1;
/* share address_space across all char-devs of a single device */
@@ -395,10 +429,16 @@ int drm_open(struct inode *inode, struct file *filp)
goto err_undo;
}
}
+
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
+
return 0;
err_undo:
- dev->open_count--;
+ atomic_dec(&dev->open_count);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
return retcode;
}
@@ -438,16 +478,18 @@ int drm_release(struct inode *inode, struct file *filp)
struct drm_minor *minor = file_priv->minor;
struct drm_device *dev = minor->dev;
- mutex_lock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_lock(&drm_global_mutex);
- DRM_DEBUG("open_count = %d\n", dev->open_count);
+ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count));
drm_close_helper(filp);
- if (!--dev->open_count)
+ if (atomic_dec_and_test(&dev->open_count))
drm_lastclose(dev);
- mutex_unlock(&drm_global_mutex);
+ if (drm_dev_needs_global_mutex(dev))
+ mutex_unlock(&drm_global_mutex);
drm_minor_release(minor);
@@ -456,6 +498,40 @@ int drm_release(struct inode *inode, struct file *filp)
EXPORT_SYMBOL(drm_release);
/**
+ * drm_release_noglobal - release method for DRM file
+ * @inode: device inode
+ * @filp: file pointer.
+ *
+ * This function may be used by drivers as their &file_operations.release
+ * method. It frees any resources associated with the open file, invoking the
+ * &drm_driver.postclose driver callback, before taking the drm_global_mutex.
+ * If this was the last open file for the DRM device, it also calls the
+ * &drm_driver.lastclose driver callback.
+ *
+ * RETURNS:
+ *
+ * Always succeeds and returns 0.
+ */
+int drm_release_noglobal(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+
+ drm_close_helper(filp);
+
+ if (atomic_dec_and_mutex_lock(&dev->open_count, &drm_global_mutex)) {
+ drm_lastclose(dev);
+ mutex_unlock(&drm_global_mutex);
+ }
+
+ drm_minor_release(minor);
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_release_noglobal);
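A sketch of wiring the new release method into a driver's file_operations; "mydrv" is a placeholder, and GEM drivers would more typically rely on the DEFINE_DRM_GEM_FOPS() convenience macro:

#include <linux/fs.h>
#include <linux/module.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>

static const struct file_operations mydrv_fops = {
	.owner		= THIS_MODULE,
	.open		= drm_open,
	.release	= drm_release_noglobal,	/* avoids drm_global_mutex */
	.unlocked_ioctl	= drm_ioctl,
	.compat_ioctl	= drm_compat_ioctl,
	.poll		= drm_poll,
	.read		= drm_read,
	.llseek		= noop_llseek,
};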
+
+/**
* drm_read - read method for DRM file
* @filp: file pointer
* @buffer: userspace destination pointer for the read
diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c
index 0897cb9aeaff..3b818f2b2392 100644
--- a/drivers/gpu/drm/drm_format_helper.c
+++ b/drivers/gpu/drm/drm_format_helper.c
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 */
+// SPDX-License-Identifier: GPL-2.0 or MIT
/*
* Copyright (C) 2016 Noralf Trønnes
*
diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c
index 57564318ceea..57ac94ce9b9e 100644
--- a/drivers/gpu/drm/drm_framebuffer.c
+++ b/drivers/gpu/drm/drm_framebuffer.c
@@ -31,6 +31,7 @@
#include <drm/drm_file.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_gem.h>
#include <drm/drm_print.h>
#include <drm/drm_util.h>
@@ -548,7 +549,128 @@ int drm_mode_getfb(struct drm_device *dev,
out:
drm_framebuffer_put(fb);
+ return ret;
+}
+
+/**
+ * drm_mode_getfb2 - get extended FB info
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Lookup the FB given its ID and return info about it.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+int drm_mode_getfb2_ioctl(struct drm_device *dev,
+ void *data, struct drm_file *file_priv)
+{
+ struct drm_mode_fb_cmd2 *r = data;
+ struct drm_framebuffer *fb;
+ unsigned int i;
+ int ret;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, file_priv, r->fb_id);
+ if (!fb)
+ return -ENOENT;
+
+ /* For multi-plane framebuffers, we require the driver to place the
+ * GEM objects directly in the drm_framebuffer. For single-plane
+ * framebuffers, we can fall back to create_handle.
+ */
+ if (!fb->obj[0] &&
+ (fb->format->num_planes > 1 || !fb->funcs->create_handle)) {
+ ret = -ENODEV;
+ goto out;
+ }
+
+ r->height = fb->height;
+ r->width = fb->width;
+ r->pixel_format = fb->format->format;
+
+ r->flags = 0;
+ if (dev->mode_config.allow_fb_modifiers)
+ r->flags |= DRM_MODE_FB_MODIFIERS;
+
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ r->handles[i] = 0;
+ r->pitches[i] = 0;
+ r->offsets[i] = 0;
+ r->modifier[i] = 0;
+ }
+
+ for (i = 0; i < fb->format->num_planes; i++) {
+ r->pitches[i] = fb->pitches[i];
+ r->offsets[i] = fb->offsets[i];
+ if (dev->mode_config.allow_fb_modifiers)
+ r->modifier[i] = fb->modifier;
+ }
+
+	/* GET_FB2() is an unprivileged ioctl, so we must not return a
+	 * buffer handle to non-master/root processes! To match GET_FB(),
+	 * just return invalid handles (0) for non-masters/root
+	 * rather than making GET_FB2() privileged.
+ */
+ if (!drm_is_current_master(file_priv) && !capable(CAP_SYS_ADMIN)) {
+ ret = 0;
+ goto out;
+ }
+ for (i = 0; i < fb->format->num_planes; i++) {
+ int j;
+
+ /* If we reuse the same object for multiple planes, also
+ * return the same handle.
+ */
+ for (j = 0; j < i; j++) {
+ if (fb->obj[i] == fb->obj[j]) {
+ r->handles[i] = r->handles[j];
+ break;
+ }
+ }
+
+ if (r->handles[i])
+ continue;
+
+ if (fb->obj[i]) {
+ ret = drm_gem_handle_create(file_priv, fb->obj[i],
+ &r->handles[i]);
+ } else {
+ WARN_ON(i > 0);
+ ret = fb->funcs->create_handle(fb, file_priv,
+ &r->handles[i]);
+ }
+
+ if (ret != 0)
+ goto out;
+ }
+
+out:
+ if (ret != 0) {
+ /* Delete any previously-created handles on failure. */
+ for (i = 0; i < ARRAY_SIZE(r->handles); i++) {
+ int j;
+
+ if (r->handles[i])
+ drm_gem_handle_delete(file_priv, r->handles[i]);
+
+ /* Zero out any handles identical to the one we just
+ * deleted.
+ */
+ for (j = i + 1; j < ARRAY_SIZE(r->handles); j++) {
+ if (r->handles[j] == r->handles[i])
+ r->handles[j] = 0;
+ }
+ }
+ }
+
+ drm_framebuffer_put(fb);
return ret;
}
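A hedged user-space sketch exercising the new ioctl through libdrm's drmIoctl(); it assumes the matching DRM_IOCTL_MODE_GETFB2 request number added in the same series:

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/drm_mode.h>

static int dump_fb2(int fd, uint32_t fb_id)
{
	struct drm_mode_fb_cmd2 r;

	memset(&r, 0, sizeof(r));
	r.fb_id = fb_id;

	if (drmIoctl(fd, DRM_IOCTL_MODE_GETFB2, &r))
		return -1;

	/* handles[] stay 0 unless the caller is master or CAP_SYS_ADMIN */
	printf("fb %u: %ux%u fmt 0x%08x pitch[0] %u offset[0] %u\n",
	       r.fb_id, r.width, r.height, r.pixel_format,
	       r.pitches[0], r.offsets[0]);
	return 0;
}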
diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c
index a9e4a610445a..37627d06fb06 100644
--- a/drivers/gpu/drm/drm_gem.c
+++ b/drivers/gpu/drm/drm_gem.c
@@ -218,7 +218,7 @@ drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
struct drm_device *dev = obj->dev;
bool final = false;
- if (WARN_ON(obj->handle_count == 0))
+ if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
return;
/*
diff --git a/drivers/gpu/drm/drm_gem_shmem_helper.c b/drivers/gpu/drm/drm_gem_shmem_helper.c
index a421a2eed48a..df31e5782eed 100644
--- a/drivers/gpu/drm/drm_gem_shmem_helper.c
+++ b/drivers/gpu/drm/drm_gem_shmem_helper.c
@@ -254,11 +254,16 @@ static void *drm_gem_shmem_vmap_locked(struct drm_gem_shmem_object *shmem)
if (ret)
goto err_zero_use;
- if (obj->import_attach)
+ if (obj->import_attach) {
shmem->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
- else
+ } else {
+ pgprot_t prot = PAGE_KERNEL;
+
+ if (!shmem->map_cached)
+ prot = pgprot_writecombine(prot);
shmem->vaddr = vmap(shmem->pages, obj->size >> PAGE_SHIFT,
- VM_MAP, pgprot_writecombine(PAGE_KERNEL));
+ VM_MAP, prot);
+ }
if (!shmem->vaddr) {
DRM_DEBUG_KMS("Failed to vmap pages\n");
@@ -540,8 +545,9 @@ int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
}
vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
- vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
- vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
+ if (!shmem->map_cached)
+ vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
vma->vm_ops = &drm_gem_shmem_vm_ops;
return 0;
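A hypothetical driver hook opting in to cached mappings via the new map_cached flag (virtio-gpu does something similar in this series); mydrv_* is illustrative:

#include <linux/slab.h>
#include <drm/drm_device.h>
#include <drm/drm_gem_shmem_helper.h>

static struct drm_gem_object *
mydrv_gem_create_object(struct drm_device *dev, size_t size)
{
	struct drm_gem_shmem_object *shmem;

	shmem = kzalloc(sizeof(*shmem), GFP_KERNEL);
	if (!shmem)
		return NULL;

	/* take the cacheable vmap/mmap path instead of write-combine */
	shmem->map_cached = true;
	return &shmem->base;
}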
diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c
index a4863326061a..92a11bb42365 100644
--- a/drivers/gpu/drm/drm_gem_vram_helper.c
+++ b/drivers/gpu/drm/drm_gem_vram_helper.c
@@ -1141,3 +1141,64 @@ void drm_vram_helper_release_mm(struct drm_device *dev)
dev->vram_mm = NULL;
}
EXPORT_SYMBOL(drm_vram_helper_release_mm);
+
+/*
+ * Mode-config helpers
+ */
+
+static enum drm_mode_status
+drm_vram_helper_mode_valid_internal(struct drm_device *dev,
+ const struct drm_display_mode *mode,
+ unsigned long max_bpp)
+{
+ struct drm_vram_mm *vmm = dev->vram_mm;
+ unsigned long fbsize, fbpages, max_fbpages;
+
+ if (WARN_ON(!dev->vram_mm))
+ return MODE_BAD;
+
+ max_fbpages = (vmm->vram_size / 2) >> PAGE_SHIFT;
+
+ fbsize = mode->hdisplay * mode->vdisplay * max_bpp;
+ fbpages = DIV_ROUND_UP(fbsize, PAGE_SIZE);
+
+ if (fbpages > max_fbpages)
+ return MODE_MEM;
+
+ return MODE_OK;
+}
+
+/**
+ * drm_vram_helper_mode_valid - Tests if a display mode's
+ * framebuffer fits into the available video memory.
+ * @dev: the DRM device
+ * @mode: the mode to test
+ *
+ * This function tests if enough video memory is available for using the
+ * specified display mode. Atomic modesetting requires importing the
+ * designated framebuffer into video memory before evicting the active
+ * one. Hence, any framebuffer may consume at most half of the available
+ * VRAM. Display modes that require a larger framebuffer cannot be used,
+ * even if the CRTC does support them. Each framebuffer is assumed to
+ * have 32-bit color depth.
+ *
+ * Note:
+ * The function can only test if the display mode is supported in
+ * general. If there are too many framebuffers pinned to video memory,
+ * a display mode may still not be usable in practice. The color depth of
+ * 32 bit fits all current use cases. A more flexible test can be added
+ * when necessary.
+ *
+ * Returns:
+ * MODE_OK if the display mode is supported, or an error code of type
+ * enum drm_mode_status otherwise.
+ */
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode)
+{
+ static const unsigned long max_bpp = 4; /* DRM_FORMAT_XRGB8888 */
+
+ return drm_vram_helper_mode_valid_internal(dev, mode, max_bpp);
+}
+EXPORT_SYMBOL(drm_vram_helper_mode_valid);
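An illustrative hookup: the helper's signature matches &drm_mode_config_funcs.mode_valid, so VRAM-helper drivers can plug it in directly; mydrv_* is a placeholder:

#include <drm/drm_atomic_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_mode_config.h>

static const struct drm_mode_config_funcs mydrv_mode_config_funcs = {
	.fb_create	= drm_gem_fb_create,
	.mode_valid	= drm_vram_helper_mode_valid,
	.atomic_check	= drm_atomic_helper_check,
	.atomic_commit	= drm_atomic_helper_commit,
};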
diff --git a/drivers/gpu/drm/drm_hdcp.c b/drivers/gpu/drm/drm_hdcp.c
index 9191633a3c43..7f386adcf872 100644
--- a/drivers/gpu/drm/drm_hdcp.c
+++ b/drivers/gpu/drm/drm_hdcp.c
@@ -23,14 +23,6 @@
#include "drm_internal.h"
-static struct hdcp_srm {
- u32 revoked_ksv_cnt;
- u8 *revoked_ksv_list;
-
- /* Mutex to protect above struct member */
- struct mutex mutex;
-} *srm_data;
-
static inline void drm_hdcp_print_ksv(const u8 *ksv)
{
DRM_DEBUG("\t%#02x, %#02x, %#02x, %#02x, %#02x\n",
@@ -60,11 +52,11 @@ static u32 drm_hdcp_get_revoked_ksv_count(const u8 *buf, u32 vrls_length)
return ksv_count;
}
-static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
+static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 **revoked_ksv_list,
u32 vrls_length)
{
- u32 parsed_bytes = 0, ksv_count = 0;
u32 vrl_ksv_cnt, vrl_ksv_sz, vrl_idx = 0;
+ u32 parsed_bytes = 0, ksv_count = 0;
do {
vrl_ksv_cnt = *buf;
@@ -74,10 +66,10 @@ static u32 drm_hdcp_get_revoked_ksvs(const u8 *buf, u8 *revoked_ksv_list,
DRM_DEBUG("vrl: %d, Revoked KSVs: %d\n", vrl_idx++,
vrl_ksv_cnt);
- memcpy(revoked_ksv_list, buf, vrl_ksv_sz);
+ memcpy((*revoked_ksv_list) + (ksv_count * DRM_HDCP_KSV_LEN),
+ buf, vrl_ksv_sz);
ksv_count += vrl_ksv_cnt;
- revoked_ksv_list += vrl_ksv_sz;
buf += vrl_ksv_sz;
parsed_bytes += (vrl_ksv_sz + 1);
@@ -91,7 +83,8 @@ static inline u32 get_vrl_length(const u8 *buf)
return drm_hdcp_be24_to_cpu(buf);
}
-static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count;
@@ -131,29 +124,28 @@ static int drm_hdcp_parse_hdcp1_srm(const u8 *buf, size_t count)
ksv_count = drm_hdcp_get_revoked_ksv_count(buf, vrl_length);
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
- if (drm_hdcp_get_revoked_ksvs(buf, srm_data->revoked_ksv_list,
+ if (drm_hdcp_get_revoked_ksvs(buf, revoked_ksv_list,
vrl_length) != ksv_count) {
- srm_data->revoked_ksv_cnt = 0;
- kfree(srm_data->revoked_ksv_list);
+ *revoked_ksv_cnt = 0;
+ kfree(*revoked_ksv_list);
return -EINVAL;
}
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
-static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
+static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
struct hdcp_srm_header *header;
u32 vrl_length, ksv_count, ksv_sz;
@@ -195,13 +187,11 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
ksv_count = (*buf << 2) | DRM_HDCP_2_KSV_COUNT_2_LSBITS(*(buf + 1));
if (!ksv_count) {
DRM_DEBUG("Revoked KSV count is 0\n");
- return count;
+ return 0;
}
- kfree(srm_data->revoked_ksv_list);
- srm_data->revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN,
- GFP_KERNEL);
- if (!srm_data->revoked_ksv_list) {
+ *revoked_ksv_list = kcalloc(ksv_count, DRM_HDCP_KSV_LEN, GFP_KERNEL);
+ if (!*revoked_ksv_list) {
DRM_ERROR("Out of Memory\n");
return -ENOMEM;
}
@@ -210,10 +200,10 @@ static int drm_hdcp_parse_hdcp2_srm(const u8 *buf, size_t count)
buf += DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ;
DRM_DEBUG("Revoked KSVs: %d\n", ksv_count);
- memcpy(srm_data->revoked_ksv_list, buf, ksv_sz);
+ memcpy(*revoked_ksv_list, buf, ksv_sz);
- srm_data->revoked_ksv_cnt = ksv_count;
- return count;
+ *revoked_ksv_cnt = ksv_count;
+ return 0;
}
static inline bool is_srm_version_hdcp1(const u8 *buf)
@@ -226,22 +216,27 @@ static inline bool is_srm_version_hdcp2(const u8 *buf)
return *buf == (u8)(DRM_HDCP_2_SRM_ID << 4 | DRM_HDCP_2_INDICATOR);
}
-static void drm_hdcp_srm_update(const u8 *buf, size_t count)
+static int drm_hdcp_srm_update(const u8 *buf, size_t count,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
if (count < sizeof(struct hdcp_srm_header))
- return;
+ return -EINVAL;
if (is_srm_version_hdcp1(buf))
- drm_hdcp_parse_hdcp1_srm(buf, count);
+ return drm_hdcp_parse_hdcp1_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
else if (is_srm_version_hdcp2(buf))
- drm_hdcp_parse_hdcp2_srm(buf, count);
+ return drm_hdcp_parse_hdcp2_srm(buf, count, revoked_ksv_list,
+ revoked_ksv_cnt);
+ else
+ return -EINVAL;
}
-static void drm_hdcp_request_srm(struct drm_device *drm_dev)
+static int drm_hdcp_request_srm(struct drm_device *drm_dev,
+ u8 **revoked_ksv_list, u32 *revoked_ksv_cnt)
{
char fw_name[36] = "display_hdcp_srm.bin";
const struct firmware *fw;
-
int ret;
ret = request_firmware_direct(&fw, (const char *)fw_name,
@@ -250,10 +245,12 @@ static void drm_hdcp_request_srm(struct drm_device *drm_dev)
goto exit;
if (fw->size && fw->data)
- drm_hdcp_srm_update(fw->data, fw->size);
+ ret = drm_hdcp_srm_update(fw->data, fw->size, revoked_ksv_list,
+ revoked_ksv_cnt);
exit:
release_firmware(fw);
+ return ret;
}
/**
@@ -279,71 +276,34 @@ exit:
* https://www.digital-cp.com/sites/default/files/specifications/HDCP%20on%20HDMI%20Specification%20Rev2_2_Final1.pdf
*
* Returns:
- * TRUE on any of the KSV is revoked, else FALSE.
+ * Count of the revoked KSVs or a negative error code in case of failure.
*/
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
- u32 ksv_count)
+int drm_hdcp_check_ksvs_revoked(struct drm_device *drm_dev, u8 *ksvs,
+ u32 ksv_count)
{
- u32 rev_ksv_cnt, cnt, i, j;
- u8 *rev_ksv_list;
-
- if (!srm_data)
- return false;
-
- mutex_lock(&srm_data->mutex);
- drm_hdcp_request_srm(drm_dev);
-
- rev_ksv_cnt = srm_data->revoked_ksv_cnt;
- rev_ksv_list = srm_data->revoked_ksv_list;
-
- /* If the Revoked ksv list is empty */
- if (!rev_ksv_cnt || !rev_ksv_list) {
- mutex_unlock(&srm_data->mutex);
- return false;
- }
-
- for (cnt = 0; cnt < ksv_count; cnt++) {
- rev_ksv_list = srm_data->revoked_ksv_list;
- for (i = 0; i < rev_ksv_cnt; i++) {
- for (j = 0; j < DRM_HDCP_KSV_LEN; j++)
- if (ksvs[j] != rev_ksv_list[j]) {
- break;
- } else if (j == (DRM_HDCP_KSV_LEN - 1)) {
- DRM_DEBUG("Revoked KSV is ");
- drm_hdcp_print_ksv(ksvs);
- mutex_unlock(&srm_data->mutex);
- return true;
- }
- /* Move the offset to next KSV in the revoked list */
- rev_ksv_list += DRM_HDCP_KSV_LEN;
- }
-
- /* Iterate to next ksv_offset */
- ksvs += DRM_HDCP_KSV_LEN;
- }
- mutex_unlock(&srm_data->mutex);
- return false;
+ u32 revoked_ksv_cnt = 0, i, j;
+ u8 *revoked_ksv_list = NULL;
+ int ret = 0;
+
+ ret = drm_hdcp_request_srm(drm_dev, &revoked_ksv_list,
+ &revoked_ksv_cnt);
+
+ /* revoked_ksv_cnt will be zero when the above function fails */
+ for (i = 0; i < revoked_ksv_cnt; i++)
+ for (j = 0; j < ksv_count; j++)
+ if (!memcmp(&ksvs[j * DRM_HDCP_KSV_LEN],
+ &revoked_ksv_list[i * DRM_HDCP_KSV_LEN],
+ DRM_HDCP_KSV_LEN)) {
+ DRM_DEBUG("Revoked KSV is ");
+ drm_hdcp_print_ksv(&ksvs[j * DRM_HDCP_KSV_LEN]);
+ ret++;
+ }
+
+ kfree(revoked_ksv_list);
+ return ret;
}
EXPORT_SYMBOL_GPL(drm_hdcp_check_ksvs_revoked);
-int drm_setup_hdcp_srm(struct class *drm_class)
-{
- srm_data = kzalloc(sizeof(*srm_data), GFP_KERNEL);
- if (!srm_data)
- return -ENOMEM;
- mutex_init(&srm_data->mutex);
-
- return 0;
-}
-
-void drm_teardown_hdcp_srm(struct class *drm_class)
-{
- if (srm_data) {
- kfree(srm_data->revoked_ksv_list);
- kfree(srm_data);
- }
-}
-
static struct drm_prop_enum_list drm_cp_enum_list[] = {
{ DRM_MODE_CONTENT_PROTECTION_UNDESIRED, "Undesired" },
{ DRM_MODE_CONTENT_PROTECTION_DESIRED, "Desired" },
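
A minimal sketch of how a caller adapts to the new contract (the surrounding
driver function is hypothetical): drm_hdcp_check_ksvs_revoked() now reports
the number of revoked KSVs found, or a negative error code, instead of a bool.

static int example_validate_ksvs(struct drm_device *dev, u8 *ksvs, u32 count)
{
	int revoked = drm_hdcp_check_ksvs_revoked(dev, ksvs, count);

	if (revoked < 0)
		return revoked;		/* SRM request or parse failure */
	if (revoked > 0)
		return -EPERM;		/* at least one KSV is revoked */

	return 0;			/* none revoked; authentication may proceed */
}
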
diff --git a/drivers/gpu/drm/drm_internal.h b/drivers/gpu/drm/drm_internal.h
index 6937bf923f05..5714a78365ac 100644
--- a/drivers/gpu/drm/drm_internal.h
+++ b/drivers/gpu/drm/drm_internal.h
@@ -41,6 +41,7 @@ struct drm_printer;
/* drm_file.c */
extern struct mutex drm_global_mutex;
+bool drm_dev_needs_global_mutex(struct drm_device *dev);
struct drm_file *drm_file_alloc(struct drm_minor *minor);
void drm_file_free(struct drm_file *file);
void drm_lastclose(struct drm_device *dev);
@@ -235,7 +236,3 @@ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
void drm_framebuffer_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_framebuffer *fb);
int drm_framebuffer_debugfs_init(struct drm_minor *minor);
-
-/* drm_hdcp.c */
-int drm_setup_hdcp_srm(struct class *drm_class);
-void drm_teardown_hdcp_srm(struct class *drm_class);
diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
index 5afb39688b55..9e41972c4bbc 100644
--- a/drivers/gpu/drm/drm_ioctl.c
+++ b/drivers/gpu/drm/drm_ioctl.c
@@ -671,6 +671,7 @@ static const struct drm_ioctl_desc drm_ioctls[] = {
DRM_IOCTL_DEF(DRM_IOCTL_MODE_SETPROPERTY, drm_connector_property_set_ioctl, DRM_MASTER),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETPROPBLOB, drm_mode_getblob_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB, drm_mode_getfb, 0),
+ DRM_IOCTL_DEF(DRM_IOCTL_MODE_GETFB2, drm_mode_getfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB, drm_mode_addfb_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_ADDFB2, drm_mode_addfb2_ioctl, 0),
DRM_IOCTL_DEF(DRM_IOCTL_MODE_RMFB, drm_mode_rmfb_ioctl, 0),
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 03bce566a8c3..588be45abd7a 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -111,10 +111,6 @@ int drm_irq_install(struct drm_device *dev, int irq)
if (irq == 0)
return -EINVAL;
- /* Driver must have been initialized */
- if (!dev->dev_private)
- return -EINVAL;
-
if (dev->irq_enabled)
return -EBUSY;
dev->irq_enabled = true;
diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
index 2c79e8199e3c..f16eefbf2829 100644
--- a/drivers/gpu/drm/drm_lock.c
+++ b/drivers/gpu/drm/drm_lock.c
@@ -46,7 +46,7 @@
static int drm_lock_take(struct drm_lock_data *lock_data, unsigned int context);
-/**
+/*
* Take the heavyweight lock.
*
* \param lock lock pointer.
@@ -93,7 +93,7 @@ int drm_lock_take(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* This takes a lock forcibly and hands it to context. Should ONLY be used
* inside *_unlock to give lock to kernel before calling *_dma_schedule.
*
@@ -150,7 +150,7 @@ static int drm_legacy_lock_free(struct drm_lock_data *lock_data,
return 0;
}
-/**
+/*
* Lock ioctl.
*
* \param inode device inode.
@@ -243,7 +243,7 @@ int drm_legacy_lock(struct drm_device *dev, void *data,
return 0;
}
-/**
+/*
* Unlock ioctl.
*
* \param inode device inode.
@@ -275,7 +275,7 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
return 0;
}
-/**
+/*
* This function returns immediately and takes the hw lock
* with the kernel context if it is free, otherwise it gets the highest priority when and if
* it is eventually released.
@@ -287,7 +287,6 @@ int drm_legacy_unlock(struct drm_device *dev, void *data, struct drm_file *file_
* This should be sufficient to wait for GPU idle without
* having to worry about starvation.
*/
-
void drm_legacy_idlelock_take(struct drm_lock_data *lock_data)
{
int ret;
diff --git a/drivers/gpu/drm/drm_mipi_dbi.c b/drivers/gpu/drm/drm_mipi_dbi.c
index 16bff1be4b8a..558baf989f5a 100644
--- a/drivers/gpu/drm/drm_mipi_dbi.c
+++ b/drivers/gpu/drm/drm_mipi_dbi.c
@@ -24,7 +24,6 @@
#include <drm/drm_modes.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <video/mipi_display.h>
#define MIPI_DBI_MAX_SPI_READ_SPEED 2000000 /* 2MHz */
@@ -238,6 +237,23 @@ int mipi_dbi_buf_copy(void *dst, struct drm_framebuffer *fb,
}
EXPORT_SYMBOL(mipi_dbi_buf_copy);
+static void mipi_dbi_set_window_address(struct mipi_dbi_dev *dbidev,
+ unsigned int xs, unsigned int xe,
+ unsigned int ys, unsigned int ye)
+{
+ struct mipi_dbi *dbi = &dbidev->dbi;
+
+ xs += dbidev->left_offset;
+ xe += dbidev->left_offset;
+ ys += dbidev->top_offset;
+ ye += dbidev->top_offset;
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, (xs >> 8) & 0xff,
+ xs & 0xff, (xe >> 8) & 0xff, xe & 0xff);
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, (ys >> 8) & 0xff,
+ ys & 0xff, (ye >> 8) & 0xff, ye & 0xff);
+}
+
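
For context, a sketch of what the new offsets are for (values hypothetical):
some panel modules scan out from a non-zero origin inside the controller's
larger framebuffer RAM, so a driver sets the offsets once at probe time and
every window-address command issued by the helper above is shifted accordingly.

static void example_apply_panel_offsets(struct mipi_dbi_dev *dbidev)
{
	dbidev->left_offset = 2;	/* columns skipped by this module */
	dbidev->top_offset = 1;		/* rows skipped by this module */
}
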
static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
{
struct drm_gem_object *gem = drm_gem_fb_get_obj(fb, 0);
@@ -271,12 +287,8 @@ static void mipi_dbi_fb_dirty(struct drm_framebuffer *fb, struct drm_rect *rect)
tr = cma_obj->vaddr;
}
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS,
- (rect->x1 >> 8) & 0xff, rect->x1 & 0xff,
- ((rect->x2 - 1) >> 8) & 0xff, (rect->x2 - 1) & 0xff);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS,
- (rect->y1 >> 8) & 0xff, rect->y1 & 0xff,
- ((rect->y2 - 1) >> 8) & 0xff, (rect->y2 - 1) & 0xff);
+ mipi_dbi_set_window_address(dbidev, rect->x1, rect->x2 - 1, rect->y1,
+ rect->y2 - 1);
ret = mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START, tr,
width * height * 2);
@@ -299,18 +311,10 @@ void mipi_dbi_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
mipi_dbi_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
EXPORT_SYMBOL(mipi_dbi_pipe_update);
@@ -366,10 +370,7 @@ static void mipi_dbi_blank(struct mipi_dbi_dev *dbidev)
memset(dbidev->tx_buf, 0, len);
- mipi_dbi_command(dbi, MIPI_DCS_SET_COLUMN_ADDRESS, 0, 0,
- ((width - 1) >> 8) & 0xFF, (width - 1) & 0xFF);
- mipi_dbi_command(dbi, MIPI_DCS_SET_PAGE_ADDRESS, 0, 0,
- ((height - 1) >> 8) & 0xFF, (height - 1) & 0xFF);
+ mipi_dbi_set_window_address(dbidev, 0, width - 1, 0, height - 1);
mipi_dbi_command_buf(dbi, MIPI_DCS_WRITE_MEMORY_START,
(u8 *)dbidev->tx_buf, len);
diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c
index 2a6e34663146..bc6e208949e8 100644
--- a/drivers/gpu/drm/drm_mm.c
+++ b/drivers/gpu/drm/drm_mm.c
@@ -45,6 +45,7 @@
#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
+#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
@@ -366,6 +367,11 @@ next_hole(struct drm_mm *mm,
struct drm_mm_node *node,
enum drm_mm_insert_mode mode)
{
+ /* Searching is slow; check if we ran out of time/patience */
+ cond_resched();
+ if (fatal_signal_pending(current))
+ return NULL;
+
switch (mode) {
default:
case DRM_MM_INSERT_BEST:
@@ -399,10 +405,10 @@ next_hole(struct drm_mm *mm,
*/
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
- u64 end = node->start + node->size;
struct drm_mm_node *hole;
u64 hole_start, hole_end;
u64 adj_start, adj_end;
+ u64 end;
end = node->start + node->size;
if (unlikely(end <= node->start))
@@ -557,7 +563,7 @@ int drm_mm_insert_node_in_range(struct drm_mm * const mm,
return 0;
}
- return -ENOSPC;
+ return signal_pending(current) ? -ERESTARTSYS : -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
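
Since the search loop now reschedules and aborts on fatal signals,
drm_mm_insert_node_in_range() can return -ERESTARTSYS in addition to -ENOSPC,
and ioctl paths are expected to pass that back so the syscall can be
restarted. A hypothetical caller:

static int example_alloc_range(struct drm_mm *mm, struct drm_mm_node *node,
			       u64 size)
{
	int err;

	err = drm_mm_insert_node_in_range(mm, node, size, 0, 0,
					  0, U64_MAX, DRM_MM_INSERT_BEST);
	if (err == -ERESTARTSYS)
		return err;	/* interrupted by a signal; restartable */

	return err;		/* 0 on success, -ENOSPC when the range is full */
}
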
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 10336b144c72..d4d64518e11b 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -1698,6 +1698,13 @@ static int drm_mode_parse_cmdline_options(const char *str,
if (rotation && freestanding)
return -EINVAL;
+ if (!(rotation & DRM_MODE_ROTATE_MASK))
+ rotation |= DRM_MODE_ROTATE_0;
+
+ /* Make sure there is exactly one rotation defined */
+ if (!is_power_of_2(rotation & DRM_MODE_ROTATE_MASK))
+ return -EINVAL;
+
mode->rotation_reflection = rotation;
return 0;
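
The effect of the two new checks, restated as a standalone predicate (sketch
only): a missing angle defaults to DRM_MODE_ROTATE_0, and a mask with more
than one DRM_MODE_ROTATE_* bit set is rejected.

static bool example_rotation_is_valid(unsigned int rotation)
{
	if (!(rotation & DRM_MODE_ROTATE_MASK))
		rotation |= DRM_MODE_ROTATE_0;

	/* a power of two here means exactly one rotation angle is set */
	return is_power_of_2(rotation & DRM_MODE_ROTATE_MASK);
}
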
diff --git a/drivers/gpu/drm/drm_pci.c b/drivers/gpu/drm/drm_pci.c
index f2e43d341980..81aa21561982 100644
--- a/drivers/gpu/drm/drm_pci.c
+++ b/drivers/gpu/drm/drm_pci.c
@@ -51,8 +51,6 @@
drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t align)
{
drm_dma_handle_t *dmah;
- unsigned long addr;
- size_t sz;
/* pci_alloc_consistent only guarantees alignment to the smallest
* PAGE_SIZE order which is greater than or equal to the requested size.
@@ -68,47 +66,17 @@ drm_dma_handle_t *drm_pci_alloc(struct drm_device * dev, size_t size, size_t ali
dmah->size = size;
dmah->vaddr = dma_alloc_coherent(&dev->pdev->dev, size,
&dmah->busaddr,
- GFP_KERNEL | __GFP_COMP);
+ GFP_KERNEL);
if (dmah->vaddr == NULL) {
kfree(dmah);
return NULL;
}
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Reserve */
- for (addr = (unsigned long)dmah->vaddr, sz = size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- SetPageReserved(virt_to_page((void *)addr));
- }
-
return dmah;
}
-
EXPORT_SYMBOL(drm_pci_alloc);
-/*
- * Free a PCI consistent memory block without freeing its descriptor.
- *
- * This function is for internal use in the Linux-specific DRM core code.
- */
-void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
-{
- unsigned long addr;
- size_t sz;
-
- if (dmah->vaddr) {
- /* XXX - Is virt_to_page() legal for consistent mem? */
- /* Unreserve */
- for (addr = (unsigned long)dmah->vaddr, sz = dmah->size;
- sz > 0; addr += PAGE_SIZE, sz -= PAGE_SIZE) {
- ClearPageReserved(virt_to_page((void *)addr));
- }
- dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
- dmah->busaddr);
- }
-}
-
/**
* drm_pci_free - Free a PCI consistent memory block
* @dev: DRM device
@@ -119,7 +87,8 @@ void __drm_legacy_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
*/
void drm_pci_free(struct drm_device * dev, drm_dma_handle_t * dmah)
{
- __drm_legacy_pci_free(dev, dmah);
+ dma_free_coherent(&dev->pdev->dev, dmah->size, dmah->vaddr,
+ dmah->busaddr);
kfree(dmah);
}
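
The helpers keep their external pairing; only the PageReserved bookkeeping is
gone, which the DMA API does not require for coherent allocations. Usage is
unchanged (caller hypothetical):

static drm_dma_handle_t *example_get_dma_buf(struct drm_device *dev)
{
	/* one page-sized, page-aligned coherent buffer */
	return drm_pci_alloc(dev, PAGE_SIZE, PAGE_SIZE);
}

static void example_put_dma_buf(struct drm_device *dev, drm_dma_handle_t *dmah)
{
	drm_pci_free(dev, dmah);
}
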
@@ -197,6 +166,18 @@ int drm_irq_by_busid(struct drm_device *dev, void *data,
return drm_pci_irq_by_busid(dev, p);
}
+void drm_pci_agp_destroy(struct drm_device *dev)
+{
+ if (dev->agp) {
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ drm_legacy_agp_clear(dev);
+ kfree(dev->agp);
+ dev->agp = NULL;
+ }
+}
+
+#ifdef CONFIG_DRM_LEGACY
+
static void drm_pci_agp_init(struct drm_device *dev)
{
if (drm_core_check_feature(dev, DRIVER_USE_AGP)) {
@@ -211,33 +192,9 @@ static void drm_pci_agp_init(struct drm_device *dev)
}
}
-void drm_pci_agp_destroy(struct drm_device *dev)
-{
- if (dev->agp) {
- arch_phys_wc_del(dev->agp->agp_mtrr);
- drm_legacy_agp_clear(dev);
- kfree(dev->agp);
- dev->agp = NULL;
- }
-}
-
-/**
- * drm_get_pci_dev - Register a PCI device with the DRM subsystem
- * @pdev: PCI device
- * @ent: entry from the PCI ID table that matches @pdev
- * @driver: DRM device driver
- *
- * Attempt to gets inter module "drm" information. If we are first
- * then register the character device and inter module information.
- * Try and register, if we fail to register, backout previous work.
- *
- * NOTE: This function is deprecated, please use drm_dev_alloc() and
- * drm_dev_register() instead and remove your &drm_driver.load callback.
- *
- * Return: 0 on success or a negative error code on failure.
- */
-int drm_get_pci_dev(struct pci_dev *pdev, const struct pci_device_id *ent,
- struct drm_driver *driver)
+static int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
{
struct drm_device *dev;
int ret;
@@ -280,9 +237,6 @@ err_free:
drm_dev_put(dev);
return ret;
}
-EXPORT_SYMBOL(drm_get_pci_dev);
-
-#ifdef CONFIG_DRM_LEGACY
/**
* drm_legacy_pci_init - shadow-attach a legacy DRM PCI driver
diff --git a/drivers/gpu/drm/drm_scatter.c b/drivers/gpu/drm/drm_scatter.c
index d5c386154246..ca520028b2cb 100644
--- a/drivers/gpu/drm/drm_scatter.c
+++ b/drivers/gpu/drm/drm_scatter.c
@@ -99,6 +99,9 @@ int drm_legacy_sg_alloc(struct drm_device *dev, void *data,
if (!drm_core_check_feature(dev, DRIVER_SG))
return -EOPNOTSUPP;
+ if (request->size > SIZE_MAX - PAGE_SIZE)
+ return -EINVAL;
+
if (dev->sg)
return -EINVAL;
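
Why the new bound matters, in isolation: the allocation path rounds the
request up to whole pages, roughly (size + PAGE_SIZE - 1) / PAGE_SIZE, so a
size near SIZE_MAX would wrap around and yield a tiny page count. A sketch of
the guarded round-up:

static unsigned long example_request_to_pages(size_t size)
{
	if (size > SIZE_MAX - PAGE_SIZE)
		return 0;	/* would overflow the round-up below */

	return (size + PAGE_SIZE - 1) / PAGE_SIZE;
}
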
diff --git a/drivers/gpu/drm/drm_simple_kms_helper.c b/drivers/gpu/drm/drm_simple_kms_helper.c
index 15fb516ae2d8..74946690aba4 100644
--- a/drivers/gpu/drm/drm_simple_kms_helper.c
+++ b/drivers/gpu/drm/drm_simple_kms_helper.c
@@ -26,12 +26,51 @@
* entity. Some flexibility for code reuse is provided through a separately
* allocated &drm_connector object and supporting optional &drm_bridge
* encoder drivers.
+ *
+ * Many drivers require only a very simple encoder that fulfills the minimum
+ * requirements of the display pipeline and does not add additional
+ * functionality. The function drm_simple_encoder_init() provides an
+ * implementation of such an encoder.
*/
-static const struct drm_encoder_funcs drm_simple_kms_encoder_funcs = {
+static const struct drm_encoder_funcs drm_simple_encoder_funcs_cleanup = {
.destroy = drm_encoder_cleanup,
};
+/**
+ * drm_simple_encoder_init - Initialize a preallocated encoder with
+ * basic functionality.
+ * @dev: drm device
+ * @encoder: the encoder to initialize
+ * @encoder_type: user visible type of the encoder
+ *
+ * Initialises a preallocated encoder that has no further functionality.
+ * Settings for possible CRTC and clones are left to their initial values.
+ * The encoder will be cleaned up automatically as part of the mode-setting
+ * cleanup.
+ *
+ * The caller of drm_simple_encoder_init() is responsible for freeing
+ * the encoder's memory after the encoder has been cleaned up. At the
+ * moment this only works reliably if the encoder data structure is
+ * stored in the device structure. Free the encoder's memory as part of
+ * the device release function.
+ *
+ * FIXME: Later improvements to DRM's resource management may allow for
+ * an automated kfree() of the encoder's memory.
+ *
+ * Returns:
+ * Zero on success, error code on failure.
+ */
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type)
+{
+ return drm_encoder_init(dev, encoder,
+ &drm_simple_encoder_funcs_cleanup,
+ encoder_type, NULL);
+}
+EXPORT_SYMBOL(drm_simple_encoder_init);
+
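
A minimal sketch of the documented usage (driver structure hypothetical): the
encoder is embedded in device data that outlives the mode-setting cleanup, so
the drm_encoder_cleanup() installed by these funcs is all the teardown needed.

struct example_device {
	struct drm_device drm;
	struct drm_crtc crtc;
	struct drm_encoder encoder;
};

static int example_create_encoder(struct example_device *edev)
{
	int ret;

	ret = drm_simple_encoder_init(&edev->drm, &edev->encoder,
				      DRM_MODE_ENCODER_NONE);
	if (ret)
		return ret;

	/* settings such as possible CRTCs remain the driver's job */
	edev->encoder.possible_crtcs = drm_crtc_mask(&edev->crtc);

	return 0;
}
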
static enum drm_mode_status
drm_simple_kms_crtc_mode_valid(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -229,7 +268,7 @@ static const struct drm_plane_funcs drm_simple_kms_plane_funcs = {
int drm_simple_display_pipe_attach_bridge(struct drm_simple_display_pipe *pipe,
struct drm_bridge *bridge)
{
- return drm_bridge_attach(&pipe->encoder, bridge, NULL);
+ return drm_bridge_attach(&pipe->encoder, bridge, NULL, 0);
}
EXPORT_SYMBOL(drm_simple_display_pipe_attach_bridge);
@@ -288,8 +327,7 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
return ret;
encoder->possible_crtcs = drm_crtc_mask(crtc);
- ret = drm_encoder_init(dev, encoder, &drm_simple_kms_encoder_funcs,
- DRM_MODE_ENCODER_NONE, NULL);
+ ret = drm_simple_encoder_init(dev, encoder, DRM_MODE_ENCODER_NONE);
if (ret || !connector)
return ret;
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index 669c93fe2500..42d46414f767 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -43,27 +43,66 @@
* - Signal a syncobj (set a trivially signaled fence)
* - Wait for a syncobj's fence to appear and be signaled
*
+ * The syncobj userspace API also provides operations to manipulate a syncobj
+ * in terms of a timeline of struct &dma_fence_chain rather than a single
+ * struct &dma_fence, through the following operations:
+ *
+ * - Signal a given point on the timeline
+ * - Wait for a given point to appear and/or be signaled
+ * - Import and export from/to a given point of a timeline
+ *
* At its core, a syncobj is simply a wrapper around a pointer to a struct
* &dma_fence which may be NULL.
* When a syncobj is first created, its pointer is either NULL or a pointer
* to an already signaled fence depending on whether the
* &DRM_SYNCOBJ_CREATE_SIGNALED flag is passed to
* &DRM_IOCTL_SYNCOBJ_CREATE.
- * When GPU work which signals a syncobj is enqueued in a DRM driver,
- * the syncobj fence is replaced with a fence which will be signaled by the
- * completion of that work.
- * When GPU work which waits on a syncobj is enqueued in a DRM driver, the
- * driver retrieves syncobj's current fence at the time the work is enqueued
- * waits on that fence before submitting the work to hardware.
- * If the syncobj's fence is NULL, the enqueue operation is expected to fail.
- * All manipulation of the syncobjs's fence happens in terms of the current
- * fence at the time the ioctl is called by userspace regardless of whether
- * that operation is an immediate host-side operation (signal or reset) or
- * or an operation which is enqueued in some driver queue.
- * &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used to
- * manipulate a syncobj from the host by resetting its pointer to NULL or
+ *
+ * If the syncobj is considered as a binary (its state is either signaled or
+ * unsignaled) primitive, when GPU work is enqueued in a DRM driver to signal
+ * the syncobj, the syncobj's fence is replaced with a fence which will be
+ * signaled by the completion of that work.
+ * If the syncobj is considered as a timeline primitive, when GPU work is
+ * enqueued in a DRM driver to signal a given point on the syncobj, a new
+ * struct &dma_fence_chain is created, pointing to the DRM driver's fence and
+ * also pointing to the previous fence that was in the syncobj. The new struct
+ * &dma_fence_chain fence replaces the syncobj's fence and will be signaled by
+ * completion of the DRM driver's work and also any work associated with the
+ * fence previously in the syncobj.
+ *
+ * When GPU work which waits on a syncobj is enqueued in a DRM driver, at the
+ * time the work is enqueued, it waits on the syncobj's fence before
+ * submitting the work to hardware. That fence is either:
+ *
+ * - The syncobj's current fence if the syncobj is considered as a binary
+ * primitive.
+ * - The struct &dma_fence associated with a given point if the syncobj is
+ * considered as a timeline primitive.
+ *
+ * If the syncobj's fence is NULL or not present in the syncobj's timeline,
+ * the enqueue operation is expected to fail.
+ *
+ * With a binary syncobj, all manipulation of the syncobj's fence happens in
+ * terms of the current fence at the time the ioctl is called by userspace
+ * regardless of whether that operation is an immediate host-side operation
+ * (signal or reset) or an operation which is enqueued in some driver
+ * queue. &DRM_IOCTL_SYNCOBJ_RESET and &DRM_IOCTL_SYNCOBJ_SIGNAL can be used
+ * to manipulate a syncobj from the host by resetting its pointer to NULL or
* setting its pointer to a fence which is already signaled.
*
+ * With a timeline syncobj, all manipulation of the syncobj's fence happens in
+ * terms of a u64 value referring to a point in the timeline. See
+ * dma_fence_chain_find_seqno() to see how a given point is found in the
+ * timeline.
+ *
+ * Note that applications should be careful to always use the timeline set of
+ * ioctl() when dealing with a syncobj considered as a timeline. Using a
+ * binary set of ioctl() with a syncobj considered as a timeline could result
+ * in incorrect synchronization. The use of binary syncobjs is supported
+ * through the timeline set of ioctl() by using a point value of 0, which
+ * reproduces the behavior of the binary set of ioctl() (for example, replace
+ * the syncobj's fence when signaling).
+ *
*
* Host-side wait on syncobjs
* --------------------------
@@ -87,6 +126,16 @@
* synchronize between the two.
* This requirement is inherited from the Vulkan fence API.
*
+ * Similarly, &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT takes an array of syncobj
+ * handles as well as an array of u64 points and does a host-side wait on all
+ * of the syncobj fences at the given points simultaneously.
+ *
+ * &DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT also adds the ability to wait for a given
+ * fence to materialize on the timeline without waiting for the fence to be
+ * signaled by using the &DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE flag. This
+ * requirement is inherited from the wait-before-signal behavior required by
+ * the Vulkan timeline semaphore API.
+ *
*
* Import/export of syncobjs
* -------------------------
@@ -120,6 +169,18 @@
* Because sync files are immutable, resetting or signaling the syncobj
* will not affect any sync files whose fences have been imported into the
* syncobj.
+ *
+ *
+ * Import/export of timeline points in timeline syncobjs
+ * -----------------------------------------------------
+ *
+ * &DRM_IOCTL_SYNCOBJ_TRANSFER provides a mechanism to transfer a struct
+ * &dma_fence_chain of a syncobj at a given u64 point to another u64 point
+ * into another syncobj.
+ *
+ * Note that if you want to transfer a struct &dma_fence_chain at a given
+ * point of a timeline syncobj from/into a binary syncobj, you can use the
+ * point 0 to mean take/replace the fence in the syncobj.
*/
#include <linux/anon_inodes.h>
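
From the userspace side, the timeline wait described above looks roughly like
this, assuming libdrm's drmSyncobjTimelineWait() wrapper; the WAIT_AVAILABLE
flag makes the call return once a fence materializes at the point, without
waiting for it to signal.

#include <stdint.h>
#include <xf86drm.h>

static int example_wait_for_point(int fd, uint32_t syncobj, uint64_t point,
				  int64_t timeout_nsec)
{
	uint32_t first_signaled;

	return drmSyncobjTimelineWait(fd, &syncobj, &point, 1, timeout_nsec,
				      DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
				      &first_signaled);
}
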
diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
index dd2bc85f43cc..939f0032aab1 100644
--- a/drivers/gpu/drm/drm_sysfs.c
+++ b/drivers/gpu/drm/drm_sysfs.c
@@ -85,7 +85,6 @@ int drm_sysfs_init(void)
}
drm_class->devnode = drm_devnode;
- drm_setup_hdcp_srm(drm_class);
return 0;
}
@@ -98,7 +97,6 @@ void drm_sysfs_destroy(void)
{
if (IS_ERR_OR_NULL(drm_class))
return;
- drm_teardown_hdcp_srm(drm_class);
class_remove_file(drm_class, &class_attr_version.attr);
class_destroy(drm_class);
drm_class = NULL;
@@ -230,7 +228,7 @@ static ssize_t modes_show(struct device *device,
mutex_lock(&connector->dev->mode_config.mutex);
list_for_each_entry(mode, &connector->modes, head) {
- written += snprintf(buf + written, PAGE_SIZE - written, "%s\n",
+ written += scnprintf(buf + written, PAGE_SIZE - written, "%s\n",
mode->name);
}
mutex_unlock(&connector->dev->mode_config.mutex);
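
The difference the snprintf() to scnprintf() switch makes, in isolation:
snprintf() returns the length the output would have taken, which can exceed
the space left and push the accumulator past the end of the buffer, while
scnprintf() returns the bytes actually stored, so a loop like this can never
overrun PAGE_SIZE:

static ssize_t example_fill_names(char *buf, const char *const *names, int n)
{
	ssize_t written = 0;
	int i;

	for (i = 0; i < n; i++)
		written += scnprintf(buf + written, PAGE_SIZE - written,
				     "%s\n", names[i]);

	return written;	/* always < PAGE_SIZE */
}
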
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 1659b13b178c..da7b0b0c1090 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -30,6 +30,7 @@
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_framebuffer.h>
+#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_print.h>
#include <drm/drm_vblank.h>
@@ -69,6 +70,12 @@
* &drm_driver.max_vblank_count. In that case the vblank core only disables the
* vblanks after a timer has expired, which can be configured through the
* ``vblankoffdelay`` module parameter.
+ *
+ * Drivers for hardware without support for vertical-blanking interrupts
+ * must not call drm_vblank_init(). For such drivers, atomic helpers will
+ * automatically generate fake vblank events as part of the display update.
+ * This functionality can also be controlled by the driver by enabling and
+ * disabling struct drm_crtc_state.no_vblank.
*/
/* Retry timestamp calculation up to 3 times to satisfy
@@ -137,10 +144,9 @@ static u32 __get_vblank_counter(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->get_vblank_counter)
return crtc->funcs->get_vblank_counter(crtc);
- }
-
- if (dev->driver->get_vblank_counter)
+ } else if (dev->driver->get_vblank_counter) {
return dev->driver->get_vblank_counter(dev, pipe);
+ }
return drm_vblank_no_hw_counter(dev, pipe);
}
@@ -332,7 +338,8 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc)
u64 vblank;
unsigned long flags;
- WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) && !dev->driver->get_vblank_timestamp,
+ WARN_ONCE(drm_debug_enabled(DRM_UT_VBL) &&
+ !crtc->funcs->get_vblank_timestamp,
"This function requires support for accurate vblank timestamps.");
spin_lock_irqsave(&dev->vblank_time_lock, flags);
@@ -354,13 +361,11 @@ static void __disable_vblank(struct drm_device *dev, unsigned int pipe)
if (WARN_ON(!crtc))
return;
- if (crtc->funcs->disable_vblank) {
+ if (crtc->funcs->disable_vblank)
crtc->funcs->disable_vblank(crtc);
- return;
- }
+ } else {
+ dev->driver->disable_vblank(dev, pipe);
}
-
- dev->driver->disable_vblank(dev, pipe);
}
/*
@@ -480,19 +485,6 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
- /* Driver specific high-precision vblank timestamping supported? */
- if (dev->driver->get_vblank_timestamp)
- DRM_INFO("Driver supports precise vblank timestamp query.\n");
- else
- DRM_INFO("No driver support for vblank timestamp query.\n");
-
- /* Must have precise timestamping for reliable vblank instant disable */
- if (dev->vblank_disable_immediate && !dev->driver->get_vblank_timestamp) {
- dev->vblank_disable_immediate = false;
- DRM_INFO("Setting vblank_disable_immediate to false because "
- "get_vblank_timestamp == NULL\n");
- }
-
return 0;
err:
@@ -502,6 +494,28 @@ err:
EXPORT_SYMBOL(drm_vblank_init);
/**
+ * drm_dev_has_vblank - test if vblanking has been initialized for
+ * a device
+ * @dev: the device
+ *
+ * Drivers may call this function to test if vblank support is
+ * initialized for a device. For most hardware this means that vblanking
+ * can also be enabled.
+ *
+ * Atomic helpers use this function to initialize
+ * &drm_crtc_state.no_vblank. See also drm_atomic_helper_check_modeset().
+ *
+ * Returns:
+ * True if vblanking has been initialized for the given device, false
+ * otherwise.
+ */
+bool drm_dev_has_vblank(const struct drm_device *dev)
+{
+ return dev->num_crtcs != 0;
+}
+EXPORT_SYMBOL(drm_dev_has_vblank);
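
Restating the documented use as a sketch (helper hypothetical): drivers
without vblank interrupts simply never call drm_vblank_init(), and code such
as the atomic helpers can key fake-vblank generation off this test.

static void example_setup_vblank(struct drm_crtc_state *crtc_state)
{
	struct drm_device *dev = crtc_state->crtc->dev;

	/* no vblank machinery: let the helpers fake the completion events */
	crtc_state->no_vblank = !drm_dev_has_vblank(dev);
}
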
+
+/**
* drm_crtc_vblank_waitqueue - get vblank waitqueue for the CRTC
* @crtc: which CRTC's vblank waitqueue to retrieve
*
@@ -523,9 +537,9 @@ EXPORT_SYMBOL(drm_crtc_vblank_waitqueue);
*
* Calculate and store various constants which are later needed by vblank and
* swap-completion timestamping, e.g, by
- * drm_calc_vbltimestamp_from_scanoutpos(). They are derived from CRTC's true
- * scanout timing, so they take things like panel scaling or other adjustments
- * into account.
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). They are derived from
+ * CRTC's true scanout timing, so they take things like panel scaling or
+ * other adjustments into account.
*/
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
@@ -576,9 +590,9 @@ void drm_calc_timestamping_constants(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_calc_timestamping_constants);
/**
- * drm_calc_vbltimestamp_from_scanoutpos - precise vblank timestamp helper
- * @dev: DRM device
- * @pipe: index of CRTC whose vblank timestamp to retrieve
+ * drm_crtc_vblank_helper_get_vblank_timestamp_internal - precise vblank
+ * timestamp helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
* @max_error: Desired maximum allowable error in timestamps (nanosecs)
* On return contains true maximum error of timestamp
* @vblank_time: Pointer to time which should receive the timestamp
@@ -586,11 +600,12 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* True when called from drm_crtc_handle_vblank(). Some drivers
* need to apply some workarounds for gpu-specific vblank irq quirks
* if flag is set.
+ * @get_scanout_position:
+ * Callback function to retrieve the scanout position. See
+ * &struct drm_crtc_helper_funcs.get_scanout_position.
*
* Implements calculation of exact vblank timestamps from given drm_display_mode
- * timings and current video scanout position of a CRTC. This can be directly
- * used as the &drm_driver.get_vblank_timestamp implementation of a kms driver
- * if &drm_driver.get_scanout_position is implemented.
+ * timings and current video scanout position of a CRTC.
*
* The current implementation only handles standard video modes. For double scan
* and interlaced modes the driver is supposed to adjust the hardware mode
@@ -606,34 +621,30 @@ EXPORT_SYMBOL(drm_calc_timestamping_constants);
* Returns true on success, and false on failure, i.e. when no accurate
* timestamp could be acquired.
*/
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq)
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ struct drm_crtc *crtc, int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+ struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
struct timespec64 ts_etime, ts_vblank_time;
ktime_t stime, etime;
bool vbl_status;
- struct drm_crtc *crtc;
const struct drm_display_mode *mode;
- struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
int vpos, hpos, i;
int delta_ns, duration_ns;
- if (!drm_core_check_feature(dev, DRIVER_MODESET))
- return false;
-
- crtc = drm_crtc_from_index(dev, pipe);
-
- if (pipe >= dev->num_crtcs || !crtc) {
+ if (pipe >= dev->num_crtcs) {
DRM_ERROR("Invalid crtc %u\n", pipe);
return false;
}
/* Scanout position query not supported? Should not happen. */
- if (!dev->driver->get_scanout_position) {
- DRM_ERROR("Called from driver w/o get_scanout_position()!?\n");
+ if (!get_scanout_position) {
+ DRM_ERROR("Called from CRTC w/o get_scanout_position()!?\n");
return false;
}
@@ -648,7 +659,6 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
if (mode->crtc_clock == 0) {
DRM_DEBUG("crtc %u: Noop due to uninitialized mode.\n", pipe);
WARN_ON_ONCE(drm_drv_uses_atomic_modeset(dev));
-
return false;
}
@@ -664,11 +674,10 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
* Get vertical and horizontal scanout position vpos, hpos,
* and bounding timestamps stime, etime, pre/post query.
*/
- vbl_status = dev->driver->get_scanout_position(dev, pipe,
- in_vblank_irq,
- &vpos, &hpos,
- &stime, &etime,
- mode);
+ vbl_status = get_scanout_position(crtc, in_vblank_irq,
+ &vpos, &hpos,
+ &stime, &etime,
+ mode);
/* Return as no-op if scanout query unsupported or failed. */
if (!vbl_status) {
@@ -720,7 +729,49 @@ bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
return true;
}
-EXPORT_SYMBOL(drm_calc_vbltimestamp_from_scanoutpos);
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp_internal);
+
+/**
+ * drm_crtc_vblank_helper_get_vblank_timestamp - precise vblank timestamp
+ * helper
+ * @crtc: CRTC whose vblank timestamp to retrieve
+ * @max_error: Desired maximum allowable error in timestamps (nanosecs)
+ * On return contains true maximum error of timestamp
+ * @vblank_time: Pointer to time which should receive the timestamp
+ * @in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if flag is set.
+ *
+ * Implements calculation of exact vblank timestamps from given drm_display_mode
+ * timings and current video scanout position of a CRTC. This can be directly
+ * used as the &drm_crtc_funcs.get_vblank_timestamp implementation of a kms
+ * driver if &drm_crtc_helper_funcs.get_scanout_position is implemented.
+ *
+ * The current implementation only handles standard video modes. For double scan
+ * and interlaced modes the driver is supposed to adjust the hardware mode
+ * (taken from &drm_crtc_state.adjusted_mode for atomic modeset drivers) to
+ * match the scanout position reported.
+ *
+ * Note that atomic drivers must call drm_calc_timestamping_constants() before
+ * enabling a CRTC. The atomic helpers already take care of that in
+ * drm_atomic_helper_update_legacy_modeset_state().
+ *
+ * Returns:
+ *
+ * True on success, and false on failure, i.e. when no accurate
+ * timestamp could be acquired.
+ */
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ crtc->helper_private->get_scanout_position);
+}
+EXPORT_SYMBOL(drm_crtc_vblank_helper_get_vblank_timestamp);
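
How a driver is expected to wire this up, per the documentation above (names
hypothetical): the hardware-specific piece is the scanout-position query in
the helper funcs; the generic timestamping then plugs into the CRTC funcs.

static bool example_get_scanout_position(struct drm_crtc *crtc,
					 bool in_vblank_irq,
					 int *vpos, int *hpos,
					 ktime_t *stime, ktime_t *etime,
					 const struct drm_display_mode *mode);

static const struct drm_crtc_helper_funcs example_crtc_helper_funcs = {
	.get_scanout_position = example_get_scanout_position,
};

static const struct drm_crtc_funcs example_crtc_funcs = {
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
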
/**
* drm_get_last_vbltimestamp - retrieve raw timestamp for the most recent
@@ -747,15 +798,19 @@ static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
bool ret = false;
/* Define requested maximum error on timestamps (nanoseconds). */
int max_error = (int) drm_timestamp_precision * 1000;
/* Query driver if possible and precision timestamping enabled. */
- if (dev->driver->get_vblank_timestamp && (max_error > 0))
- ret = dev->driver->get_vblank_timestamp(dev, pipe, &max_error,
+ if (crtc && crtc->funcs->get_vblank_timestamp && max_error > 0) {
+ ret = crtc->funcs->get_vblank_timestamp(crtc, &max_error,
tvblank, in_vblank_irq);
+ }
/* GPU high precision timestamp query unsupported or failed.
* Return current monotonic/gettimeofday timestamp as best estimate.
@@ -977,9 +1032,11 @@ static int __enable_vblank(struct drm_device *dev, unsigned int pipe)
if (crtc->funcs->enable_vblank)
return crtc->funcs->enable_vblank(crtc);
+ } else if (dev->driver->enable_vblank) {
+ return dev->driver->enable_vblank(dev, pipe);
}
- return dev->driver->enable_vblank(dev, pipe);
+ return -EINVAL;
}
static int drm_vblank_enable(struct drm_device *dev, unsigned int pipe)
@@ -1738,6 +1795,8 @@ done:
static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
{
+ struct drm_crtc *crtc = drm_crtc_from_index(dev, pipe);
+ bool high_prec = false;
struct drm_pending_vblank_event *e, *t;
ktime_t now;
u64 seq;
@@ -1760,8 +1819,10 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
send_vblank_event(dev, e, seq, now);
}
- trace_drm_vblank_event(pipe, seq, now,
- dev->driver->get_vblank_timestamp != NULL);
+ if (crtc && crtc->funcs->get_vblank_timestamp)
+ high_prec = true;
+
+ trace_drm_vblank_event(pipe, seq, now, high_prec);
}
/**
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 52e87e4869a5..aa88911bbc06 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -102,7 +102,7 @@ static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
return tmp;
}
-/**
+/*
* \c fault method for AGP virtual memory.
*
* \param vma virtual memory area.
@@ -192,7 +192,7 @@ static vm_fault_t drm_vm_fault(struct vm_fault *vmf)
}
#endif
-/**
+/*
* \c nopage method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -225,7 +225,7 @@ static vm_fault_t drm_vm_shm_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c close method for shared virtual memory.
*
* \param vma virtual memory area.
@@ -269,8 +269,6 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
}
if (!found_maps) {
- drm_dma_handle_t dmah;
-
switch (map->type) {
case _DRM_REGISTERS:
case _DRM_FRAME_BUFFER:
@@ -284,10 +282,10 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
case _DRM_SCATTER_GATHER:
break;
case _DRM_CONSISTENT:
- dmah.vaddr = map->handle;
- dmah.busaddr = map->offset;
- dmah.size = map->size;
- __drm_legacy_pci_free(dev, &dmah);
+ dma_free_coherent(&dev->pdev->dev,
+ map->size,
+ map->handle,
+ map->offset);
break;
}
kfree(map);
@@ -296,7 +294,7 @@ static void drm_vm_shm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* \c fault method for DMA virtual memory.
*
* \param address access address.
@@ -331,7 +329,7 @@ static vm_fault_t drm_vm_dma_fault(struct vm_fault *vmf)
return 0;
}
-/**
+/*
* \c fault method for scatter-gather virtual memory.
*
* \param address access address.
@@ -437,7 +435,7 @@ static void drm_vm_close_locked(struct drm_device *dev,
}
}
-/**
+/*
* \c close method for all virtual memory types.
*
* \param vma virtual memory area.
@@ -455,7 +453,7 @@ static void drm_vm_close(struct vm_area_struct *vma)
mutex_unlock(&dev->struct_mutex);
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
@@ -515,7 +513,7 @@ static resource_size_t drm_core_get_reg_ofs(struct drm_device *dev)
#endif
}
-/**
+/*
* mmap DMA memory.
*
* \param file_priv DRM file private.
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index ff59c641fa80..e7b58097ccdc 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -139,7 +139,7 @@ static void decon_ctx_remove(struct decon_context *ctx)
static u32 decon_calc_clkdiv(struct decon_context *ctx,
const struct drm_display_mode *mode)
{
- unsigned long ideal_clk = mode->htotal * mode->vtotal * mode->vrefresh;
+ unsigned long ideal_clk = mode->clock;
u32 clkdiv;
/* Find the clock divider value that gets us closest to ideal_clk */
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index 4785885c0f4f..d23d3502ca91 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -106,7 +106,8 @@ static int exynos_dp_bridge_attach(struct analogix_dp_plat_data *plat_data,
/* Pre-empt DP connector creation if there's a bridge */
if (dp->ptn_bridge) {
- ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge);
+ ret = drm_bridge_attach(&dp->encoder, dp->ptn_bridge, bridge,
+ 0);
if (ret) {
DRM_DEV_ERROR(dp->dev,
"Failed to attach bridge to drm\n");
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index ba0f868b2477..57defeb44522 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -270,7 +270,7 @@ static int exynos_drm_bind(struct device *dev)
struct drm_encoder *encoder;
struct drm_device *drm;
unsigned int clone_mask;
- int cnt, ret;
+ int ret;
drm = drm_dev_alloc(&exynos_drm_driver, dev);
if (IS_ERR(drm))
@@ -293,10 +293,9 @@ static int exynos_drm_bind(struct device *dev)
exynos_drm_mode_config_init(drm);
/* setup possible_clones. */
- cnt = 0;
clone_mask = 0;
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
- clone_mask |= (1 << (cnt++));
+ clone_mask |= drm_encoder_mask(encoder);
list_for_each_entry(encoder, &drm->mode_config.encoder_list, head)
encoder->possible_clones = clone_mask;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 33628d85edad..e080aa92338c 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1514,7 +1514,6 @@ static int exynos_dsi_create_connector(struct drm_encoder *encoder)
return 0;
connector->funcs->reset(connector);
- drm_fb_helper_add_one_connector(drm->fb_helper, connector);
drm_connector_register(connector);
return 0;
}
@@ -1540,7 +1539,7 @@ static int exynos_dsi_host_attach(struct mipi_dsi_host *host,
out_bridge = of_drm_find_bridge(device->dev.of_node);
if (out_bridge) {
- drm_bridge_attach(encoder, out_bridge, NULL);
+ drm_bridge_attach(encoder, out_bridge, NULL, 0);
dsi->out_bridge = out_bridge;
list_splice_init(&encoder->bridge_chain, &dsi->bridge_chain);
} else {
@@ -1717,7 +1716,7 @@ static int exynos_dsi_bind(struct device *dev, struct device *master,
if (dsi->in_bridge_node) {
in_bridge = of_drm_find_bridge(dsi->in_bridge_node);
if (in_bridge)
- drm_bridge_attach(encoder, in_bridge, NULL);
+ drm_bridge_attach(encoder, in_bridge, NULL, 0);
}
return mipi_dsi_host_register(&dsi->dsi_host);
@@ -1773,8 +1772,9 @@ static int exynos_dsi_probe(struct platform_device *pdev)
ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(dsi->supplies),
dsi->supplies);
if (ret) {
- dev_info(dev, "failed to get regulators: %d\n", ret);
- return -EPROBE_DEFER;
+ if (ret != -EPROBE_DEFER)
+ dev_info(dev, "failed to get regulators: %d\n", ret);
+ return ret;
}
dsi->clks = devm_kcalloc(dev,
@@ -1787,9 +1787,10 @@ static int exynos_dsi_probe(struct platform_device *pdev)
dsi->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(dsi->clks[i])) {
if (strcmp(clk_names[i], "sclk_mipi") == 0) {
- strcpy(clk_names[i], OLD_SCLK_MIPI_CLK_NAME);
- i--;
- continue;
+ dsi->clks[i] = devm_clk_get(dev,
+ OLD_SCLK_MIPI_CLK_NAME);
+ if (!IS_ERR(dsi->clks[i]))
+ continue;
}
dev_info(dev, "failed to get the clock: %s\n",
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index 647a1fd1d815..e6ceaf36fb04 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -200,21 +200,13 @@ int exynos_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &exynos_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"failed to initialize drm fb helper.\n");
goto err_init;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "failed to register drm_fb_helper_connector.\n");
- goto err_setup;
-
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 9ff921f43a93..1a7c828fc41d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -960,7 +960,7 @@ static int hdmi_create_connector(struct drm_encoder *encoder)
drm_connector_attach_encoder(connector, encoder);
if (hdata->bridge) {
- ret = drm_bridge_attach(encoder, hdata->bridge, NULL);
+ ret = drm_bridge_attach(encoder, hdata->bridge, NULL, 0);
if (ret)
DRM_DEV_ERROR(hdata->dev, "Failed to attach bridge\n");
}
@@ -1805,18 +1805,10 @@ static int hdmi_resources_init(struct hdmi_context *hdata)
hdata->reg_hdmi_en = devm_regulator_get_optional(dev, "hdmi-en");
- if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV) {
+ if (PTR_ERR(hdata->reg_hdmi_en) != -ENODEV)
if (IS_ERR(hdata->reg_hdmi_en))
return PTR_ERR(hdata->reg_hdmi_en);
- ret = regulator_enable(hdata->reg_hdmi_en);
- if (ret) {
- DRM_DEV_ERROR(dev,
- "failed to enable hdmi-en regulator\n");
- return ret;
- }
- }
-
return hdmi_bridge_init(hdata);
}
@@ -2023,6 +2015,15 @@ static int hdmi_probe(struct platform_device *pdev)
}
}
+ if (!IS_ERR(hdata->reg_hdmi_en)) {
+ ret = regulator_enable(hdata->reg_hdmi_en);
+ if (ret) {
+ DRM_DEV_ERROR(dev,
+ "failed to enable hdmi-en regulator\n");
+ goto err_hdmiphy;
+ }
+ }
+
pm_runtime_enable(dev);
audio_infoframe = &hdata->audio.infoframe;
@@ -2047,7 +2048,8 @@ err_unregister_audio:
err_rpm_disable:
pm_runtime_disable(dev);
-
+ if (!IS_ERR(hdata->reg_hdmi_en))
+ regulator_disable(hdata->reg_hdmi_en);
err_hdmiphy:
if (hdata->hdmiphy_port)
put_device(&hdata->hdmiphy_port->dev);
diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
index 9598ee3cc4d2..cff344367f81 100644
--- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
+++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_rgb.c
@@ -151,5 +151,5 @@ int fsl_dcu_create_outputs(struct fsl_dcu_drm_device *fsl_dev)
return fsl_dcu_attach_panel(fsl_dev, panel);
}
- return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL);
+ return drm_bridge_attach(&fsl_dev->encoder, bridge, NULL, 0);
}
diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
index 1ed854f498b7..686385a66167 100644
--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
+++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
@@ -977,6 +977,9 @@ const struct drm_crtc_funcs cdv_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs cdv_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 1459076d1980..1d8f67e4795a 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -513,14 +513,10 @@ int psb_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, fb_helper, &psb_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, fb_helper, INTELFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(dev, fb_helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(fb_helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(dev);
diff --git a/drivers/gpu/drm/gma500/intel_bios.h b/drivers/gpu/drm/gma500/intel_bios.h
index a1f9ce9465a5..0e6facf21e33 100644
--- a/drivers/gpu/drm/gma500/intel_bios.h
+++ b/drivers/gpu/drm/gma500/intel_bios.h
@@ -227,7 +227,7 @@ struct bdb_general_definitions {
* number = (block_size - sizeof(bdb_general_definitions))/
* sizeof(child_device_config);
*/
- struct child_device_config devices[0];
+ struct child_device_config devices[];
};
struct bdb_lvds_options {
diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c
index 6956c8e7501c..2411eb9827b8 100644
--- a/drivers/gpu/drm/gma500/psb_drv.c
+++ b/drivers/gpu/drm/gma500/psb_drv.c
@@ -363,7 +363,6 @@ static int psb_driver_load(struct drm_device *dev, unsigned long flags)
drm_irq_install(dev, dev->pdev->irq);
dev->max_vblank_count = 0xffffff; /* only 24 bits of frame count */
- dev->driver->get_vblank_counter = psb_get_vblank_counter;
psb_modeset_init(dev);
psb_fbdev_init(dev);
@@ -507,9 +506,6 @@ static struct drm_driver driver = {
.irq_postinstall = psb_irq_postinstall,
.irq_uninstall = psb_irq_uninstall,
.irq_handler = psb_irq_handler,
- .enable_vblank = psb_enable_vblank,
- .disable_vblank = psb_disable_vblank,
- .get_vblank_counter = psb_get_vblank_counter,
.gem_free_object = psb_gem_free_object,
.gem_vm_ops = &psb_gem_vm_ops,
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index 3d4ef3071d45..956926341316 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -681,15 +681,15 @@ extern void psb_irq_turn_off_dpst(struct drm_device *dev);
extern void psb_irq_uninstall_islands(struct drm_device *dev, int hw_islands);
extern int psb_vblank_wait2(struct drm_device *dev, unsigned int *sequence);
extern int psb_vblank_wait(struct drm_device *dev, unsigned int *sequence);
-extern int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-extern void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
+extern int psb_enable_vblank(struct drm_crtc *crtc);
+extern void psb_disable_vblank(struct drm_crtc *crtc);
void
psb_enable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
void
psb_disable_pipestat(struct drm_psb_private *dev_priv, int pipe, u32 mask);
-extern u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+extern u32 psb_get_vblank_counter(struct drm_crtc *crtc);
/* framebuffer.c */
extern int psbfb_probed(struct drm_device *dev);
diff --git a/drivers/gpu/drm/gma500/psb_intel_display.c b/drivers/gpu/drm/gma500/psb_intel_display.c
index fed3b563e62e..531c5485be17 100644
--- a/drivers/gpu/drm/gma500/psb_intel_display.c
+++ b/drivers/gpu/drm/gma500/psb_intel_display.c
@@ -433,6 +433,9 @@ const struct drm_crtc_funcs psb_intel_crtc_funcs = {
.set_config = gma_crtc_set_config,
.destroy = gma_crtc_destroy,
.page_flip = gma_crtc_page_flip,
+ .enable_vblank = psb_enable_vblank,
+ .disable_vblank = psb_disable_vblank,
+ .get_vblank_counter = psb_get_vblank_counter,
};
const struct gma_clock_funcs psb_clock_funcs = {
diff --git a/drivers/gpu/drm/gma500/psb_irq.c b/drivers/gpu/drm/gma500/psb_irq.c
index 91f90016dba9..15eb3770d817 100644
--- a/drivers/gpu/drm/gma500/psb_irq.c
+++ b/drivers/gpu/drm/gma500/psb_irq.c
@@ -506,8 +506,10 @@ int psb_irq_disable_dpst(struct drm_device *dev)
/*
* It is used to enable VBLANK interrupt
*/
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int psb_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
uint32_t reg_val = 0;
@@ -545,8 +547,10 @@ int psb_enable_vblank(struct drm_device *dev, unsigned int pipe)
/*
* It is used to disable VBLANK interrupt
*/
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void psb_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct drm_psb_private *dev_priv = dev->dev_private;
unsigned long irqflags;
@@ -618,8 +622,10 @@ void mdfld_disable_te(struct drm_device *dev, int pipe)
/* Called from drm generic code, passed a 'crtc', which
* we use as a pipe index
*/
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 psb_get_vblank_counter(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
uint32_t high_frame = PIPEAFRAMEHIGH;
uint32_t low_frame = PIPEAFRAMEPIXEL;
uint32_t pipeconf_reg = PIPEACONF;
diff --git a/drivers/gpu/drm/gma500/psb_irq.h b/drivers/gpu/drm/gma500/psb_irq.h
index 58fd502e3b9d..4f73998848d1 100644
--- a/drivers/gpu/drm/gma500/psb_irq.h
+++ b/drivers/gpu/drm/gma500/psb_irq.h
@@ -12,6 +12,7 @@
#ifndef _PSB_IRQ_H_
#define _PSB_IRQ_H_
+struct drm_crtc;
struct drm_device;
bool sysirq_init(struct drm_device *dev);
@@ -26,9 +27,9 @@ int psb_irq_enable_dpst(struct drm_device *dev);
int psb_irq_disable_dpst(struct drm_device *dev);
void psb_irq_turn_on_dpst(struct drm_device *dev);
void psb_irq_turn_off_dpst(struct drm_device *dev);
-int psb_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void psb_disable_vblank(struct drm_device *dev, unsigned int pipe);
-u32 psb_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
+int psb_enable_vblank(struct drm_crtc *crtc);
+void psb_disable_vblank(struct drm_crtc *crtc);
+u32 psb_get_vblank_counter(struct drm_crtc *crtc);
int mdfld_enable_te(struct drm_device *dev, int pipe);
void mdfld_disable_te(struct drm_device *dev, int pipe);
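
The conversion pattern repeated across these gma500 files, condensed into a
sketch (the hardware helper is hypothetical): the vblank hooks move from
struct drm_driver to struct drm_crtc_funcs, and each callback recovers the
old (dev, pipe) pair from the CRTC it receives.

static int example_hw_unmask_vblank(struct drm_device *dev, unsigned int pipe);

static int example_enable_vblank(struct drm_crtc *crtc)
{
	struct drm_device *dev = crtc->dev;
	unsigned int pipe = crtc->index;

	return example_hw_unmask_vblank(dev, pipe);
}

static const struct drm_crtc_funcs example_pipe_crtc_funcs = {
	.enable_vblank = example_enable_vblank,
};
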
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
index 7fa7d4933f60..55b46a7150a5 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_de.c
@@ -40,6 +40,7 @@ struct hibmc_dislay_pll_config {
};
static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
+ {640, 480, CRT_PLL1_HS_25MHZ, CRT_PLL2_HS_25MHZ},
{800, 600, CRT_PLL1_HS_40MHZ, CRT_PLL2_HS_40MHZ},
{1024, 768, CRT_PLL1_HS_65MHZ, CRT_PLL2_HS_65MHZ},
{1152, 864, CRT_PLL1_HS_80MHZ_1152, CRT_PLL2_HS_80MHZ},
@@ -47,6 +48,8 @@ static const struct hibmc_dislay_pll_config hibmc_pll_table[] = {
{1280, 720, CRT_PLL1_HS_74MHZ, CRT_PLL2_HS_74MHZ},
{1280, 960, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1280, 1024, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
+ {1440, 900, CRT_PLL1_HS_106MHZ, CRT_PLL2_HS_106MHZ},
+ {1600, 900, CRT_PLL1_HS_108MHZ, CRT_PLL2_HS_108MHZ},
{1600, 1200, CRT_PLL1_HS_162MHZ, CRT_PLL2_HS_162MHZ},
{1920, 1080, CRT_PLL1_HS_148MHZ, CRT_PLL2_HS_148MHZ},
{1920, 1200, CRT_PLL1_HS_193MHZ, CRT_PLL2_HS_193MHZ},
@@ -80,6 +83,9 @@ static int hibmc_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
+ if (!crtc_state->enable)
+ return 0;
+
if (state->crtc_x + state->crtc_w >
crtc_state->adjusted_mode.hdisplay ||
state->crtc_y + state->crtc_h >
@@ -184,6 +190,20 @@ static struct drm_plane *hibmc_plane_init(struct hibmc_drm_private *priv)
return plane;
}
+static void hibmc_crtc_dpms(struct drm_crtc *crtc, int dpms)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ unsigned int reg;
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg &= ~HIBMC_CRT_DISP_CTL_DPMS_MASK;
+ reg |= HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, dpms);
+ reg &= ~HIBMC_CRT_DISP_CTL_TIMING_MASK;
+ if (dpms == HIBMC_CRT_DPMS_ON)
+ reg |= HIBMC_CRT_DISP_CTL_TIMING(1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -200,6 +220,7 @@ static void hibmc_crtc_atomic_enable(struct drm_crtc *crtc,
reg |= HIBMC_CURR_GATE_DISPLAY(1);
hibmc_set_current_gate(priv, reg);
drm_crtc_vblank_on(crtc);
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_ON);
}
static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -208,6 +229,7 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
unsigned int reg;
struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ hibmc_crtc_dpms(crtc, HIBMC_CRT_DPMS_OFF);
drm_crtc_vblank_off(crtc);
hibmc_set_power_mode(priv, HIBMC_PW_MODE_CTL_MODE_SLEEP);
@@ -221,6 +243,25 @@ static void hibmc_crtc_atomic_disable(struct drm_crtc *crtc,
hibmc_set_current_gate(priv, reg);
}
+static enum drm_mode_status
+hibmc_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ int i = 0;
+ int vrefresh = drm_mode_vrefresh(mode);
+
+ if (vrefresh < 59 || vrefresh > 61)
+ return MODE_NOCLOCK;
+
+ for (i = 0; i < ARRAY_SIZE(hibmc_pll_table); i++) {
+ if (hibmc_pll_table[i].hdisplay == mode->hdisplay &&
+ hibmc_pll_table[i].vdisplay == mode->vdisplay)
+ return MODE_OK;
+ }
+
+ return MODE_BAD;
+}
+
static unsigned int format_pll_reg(void)
{
unsigned int pllreg = 0;
@@ -435,6 +476,42 @@ static void hibmc_crtc_disable_vblank(struct drm_crtc *crtc)
priv->mmio + HIBMC_RAW_INTERRUPT_EN);
}
+static void hibmc_crtc_load_lut(struct drm_crtc *crtc)
+{
+ struct hibmc_drm_private *priv = crtc->dev->dev_private;
+ void __iomem *mmio = priv->mmio;
+ u16 *r, *g, *b;
+ unsigned int reg;
+ int i;
+
+ r = crtc->gamma_store;
+ g = r + crtc->gamma_size;
+ b = g + crtc->gamma_size;
+
+ for (i = 0; i < crtc->gamma_size; i++) {
+ unsigned int offset = i << 2;
+ u8 red = *r++ >> 8;
+ u8 green = *g++ >> 8;
+ u8 blue = *b++ >> 8;
+ u32 rgb = (red << 16) | (green << 8) | blue;
+
+ writel(rgb, mmio + HIBMC_CRT_PALETTE + offset);
+ }
+
+ reg = readl(priv->mmio + HIBMC_CRT_DISP_CTL);
+ reg |= HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1);
+ writel(reg, priv->mmio + HIBMC_CRT_DISP_CTL);
+}
+
+static int hibmc_crtc_gamma_set(struct drm_crtc *crtc, u16 *red, u16 *green,
+ u16 *blue, uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx)
+{
+ hibmc_crtc_load_lut(crtc);
+
+ return 0;
+}
+
static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.page_flip = drm_atomic_helper_page_flip,
.set_config = drm_atomic_helper_set_config,
@@ -444,6 +521,7 @@ static const struct drm_crtc_funcs hibmc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = hibmc_crtc_enable_vblank,
.disable_vblank = hibmc_crtc_disable_vblank,
+ .gamma_set = hibmc_crtc_gamma_set,
};
static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
@@ -452,6 +530,7 @@ static const struct drm_crtc_helper_funcs hibmc_crtc_helper_funcs = {
.atomic_flush = hibmc_crtc_atomic_flush,
.atomic_enable = hibmc_crtc_atomic_enable,
.atomic_disable = hibmc_crtc_atomic_disable,
+ .mode_valid = hibmc_crtc_mode_valid,
};
int hibmc_de_init(struct hibmc_drm_private *priv)
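hibmc_crtc_mode_valid() above exists because the device has no fractional PLL: only the fixed hdisplay/vdisplay pairs in hibmc_pll_table have known PLL settings, and those settings assume a roughly 60 Hz refresh. A self-contained restatement of the filter in plain C, with an abridged copy of the table drawn from the hunks above:

	#include <stddef.h>

	struct res { int h, v; };
	static const struct res pll_modes[] = {
		{640, 480}, {800, 600}, {1024, 768}, {1152, 864},
		{1280, 720}, {1280, 960}, {1280, 1024}, {1440, 900},
		{1600, 900}, {1600, 1200}, {1920, 1080}, {1920, 1200},
	};

	/* mirrors the driver: reject outside 59-61 Hz (MODE_NOCLOCK),
	 * reject geometries without a PLL entry (MODE_BAD), else MODE_OK */
	static int hibmc_mode_ok(int hdisplay, int vdisplay, int vrefresh)
	{
		size_t i;

		if (vrefresh < 59 || vrefresh > 61)
			return 0;
		for (i = 0; i < sizeof(pll_modes) / sizeof(pll_modes[0]); i++)
			if (pll_modes[i].h == hdisplay &&
			    pll_modes[i].v == vdisplay)
				return 1;
		return 0;
	}

So 1440x900@60 now validates thanks to the new table entry, while 1440x900@75 is rejected up front.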
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
index 4a8a4cfb4b75..222356a4f9a8 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_drv.c
@@ -91,11 +91,11 @@ static int hibmc_kms_init(struct hibmc_drm_private *priv)
priv->dev->mode_config.min_width = 0;
priv->dev->mode_config.min_height = 0;
priv->dev->mode_config.max_width = 1920;
- priv->dev->mode_config.max_height = 1440;
+ priv->dev->mode_config.max_height = 1200;
priv->dev->mode_config.fb_base = priv->fb_base;
priv->dev->mode_config.preferred_depth = 24;
- priv->dev->mode_config.prefer_shadow = 0;
+ priv->dev->mode_config.prefer_shadow = 1;
priv->dev->mode_config.funcs = (void *)&hibmc_mode_funcs;
@@ -327,6 +327,11 @@ static int hibmc_pci_probe(struct pci_dev *pdev,
struct drm_device *dev;
int ret;
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ "hibmcdrmfb");
+ if (ret)
+ return ret;
+
dev = drm_dev_alloc(&hibmc_driver, &pdev->dev);
if (IS_ERR(dev)) {
DRM_ERROR("failed to allocate drm_device\n");
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
index b63a1ee15ceb..17b30c393b10 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_regs.h
@@ -68,6 +68,12 @@
#define HIBMC_CRT_DISP_CTL 0x80200
+#define HIBMC_CRT_DISP_CTL_DPMS(x) ((x) << 30)
+#define HIBMC_CRT_DISP_CTL_DPMS_MASK 0xc0000000
+
+#define HIBMC_CRT_DPMS_ON 0
+#define HIBMC_CRT_DPMS_OFF 3
+
#define HIBMC_CRT_DISP_CTL_CRTSELECT(x) ((x) << 25)
#define HIBMC_CRT_DISP_CTL_CRTSELECT_MASK 0x2000000
@@ -85,6 +91,9 @@
#define HIBMC_CRT_DISP_CTL_TIMING(x) ((x) << 8)
#define HIBMC_CRT_DISP_CTL_TIMING_MASK 0x100
+#define HIBMC_CTL_DISP_CTL_GAMMA(x) ((x) << 3)
+#define HIBMC_CTL_DISP_CTL_GAMMA_MASK 0x08
+
#define HIBMC_CRT_DISP_CTL_PLANE(x) ((x) << 2)
#define HIBMC_CRT_DISP_CTL_PLANE_MASK 4
@@ -170,6 +179,7 @@
#define CRT_PLL1_HS_74MHZ 0x23941dc2
#define CRT_PLL1_HS_80MHZ 0x23941001
#define CRT_PLL1_HS_80MHZ_1152 0x23540fc2
+#define CRT_PLL1_HS_106MHZ 0x237C1641
#define CRT_PLL1_HS_108MHZ 0x23b41b01
#define CRT_PLL1_HS_162MHZ 0x23480681
#define CRT_PLL1_HS_148MHZ 0x23541dc2
@@ -182,10 +192,13 @@
#define CRT_PLL2_HS_78MHZ 0x50E147AE
#define CRT_PLL2_HS_74MHZ 0x602B6AE7
#define CRT_PLL2_HS_80MHZ 0x70000000
+#define CRT_PLL2_HS_106MHZ 0x0075c28f
#define CRT_PLL2_HS_108MHZ 0x80000000
#define CRT_PLL2_HS_162MHZ 0xA0000000
#define CRT_PLL2_HS_148MHZ 0xB0CCCCCD
#define CRT_PLL2_HS_193MHZ 0xC0872B02
+#define HIBMC_CRT_PALETTE 0x80C00
+
#define HIBMC_FIELD(field, value) (field(value) & field##_MASK)
#endif
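The HIBMC_FIELD() macro at the end of this header both shifts a value into its field and masks the result, so an out-of-range value cannot spill into neighbouring bits. Two worked expansions using the definitions added above:

	HIBMC_FIELD(HIBMC_CRT_DISP_CTL_DPMS, HIBMC_CRT_DPMS_OFF)
		= ((3 << 30) & 0xc0000000) = 0xc0000000

	HIBMC_FIELD(HIBMC_CTL_DISP_CTL_GAMMA, 1)
		= ((1 << 3) & 0x08) = 0x08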
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
index 6d98fdc06f6c..678ac2ef2a93 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_drm_vdac.c
@@ -11,8 +11,10 @@
* Jianhua Li <[email protected]>
*/
+#include <drm/drm_gem_vram_helper.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_crtc_helper.h>
#include <drm/drm_print.h>
#include "hibmc_drm_drv.h"
@@ -20,7 +22,14 @@
static int hibmc_connector_get_modes(struct drm_connector *connector)
{
- return drm_add_modes_noedid(connector, 800, 600);
+ int count;
+
+ count = drm_add_modes_noedid(connector,
+ connector->dev->mode_config.max_width,
+ connector->dev->mode_config.max_height);
+ drm_set_preferred_mode(connector, 1024, 768);
+
+ return count;
}
static enum drm_mode_status hibmc_connector_mode_valid(struct drm_connector *connector,
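With the connector now advertising every standard mode up to the mode_config limits, drm_set_preferred_mode() flags 1024x768 so userspace defaults to it. A hedged sketch of what that DRM core helper does — paraphrased from the core, not this driver's code:

	void drm_set_preferred_mode(struct drm_connector *connector,
				    int hpref, int vpref)
	{
		struct drm_display_mode *mode;

		/* tag the matching probed mode as the preferred one */
		list_for_each_entry(mode, &connector->probed_modes, head) {
			if (mode->hdisplay == hpref &&
			    mode->vdisplay == vpref)
				mode->type |= DRM_MODE_TYPE_PREFERRED;
		}
	}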
diff --git a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
index 50b988fdd5cc..99397ac3b363 100644
--- a/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
+++ b/drivers/gpu/drm/hisilicon/hibmc/hibmc_ttm.c
@@ -54,6 +54,7 @@ int hibmc_dumb_create(struct drm_file *file, struct drm_device *dev,
}
const struct drm_mode_config_funcs hibmc_mode_funcs = {
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
.fb_create = drm_gem_fb_create,
diff --git a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
index bdcf9c6ae9e9..f31068d74b18 100644
--- a/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
+++ b/drivers/gpu/drm/hisilicon/kirin/dw_drm_dsi.c
@@ -777,7 +777,7 @@ static int dsi_bridge_init(struct drm_device *dev, struct dw_dsi *dsi)
int ret;
/* associate the bridge to dsi encoder */
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
DRM_ERROR("failed to attach external bridge\n");
return ret;
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
index 0da860200410..e2ac09894a6d 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_ade_reg.h
@@ -83,7 +83,6 @@
#define VSIZE_OFST 20
#define LDI_INT_EN 0x741C
#define FRAME_END_INT_EN_OFST 1
-#define UNDERFLOW_INT_EN_OFST 2
#define LDI_CTRL 0x7420
#define BPP_OFST 3
#define DATA_GATE_EN BIT(2)
diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
index 73cd28a6ea07..86000127d4ee 100644
--- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
+++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_ade.c
@@ -46,7 +46,6 @@ struct ade_hw_ctx {
struct clk *media_noc_clk;
struct clk *ade_pix_clk;
struct reset_control *reset;
- struct work_struct display_reset_wq;
bool power_on;
int irq;
@@ -136,7 +135,6 @@ static void ade_init(struct ade_hw_ctx *ctx)
*/
ade_update_bits(base + ADE_CTRL, FRM_END_START_OFST,
FRM_END_START_MASK, REG_EFFECTIVE_IN_ADEEN_FRMEND);
- ade_update_bits(base + LDI_INT_EN, UNDERFLOW_INT_EN_OFST, MASK(1), 1);
}
static bool ade_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -304,17 +302,6 @@ static void ade_crtc_disable_vblank(struct drm_crtc *crtc)
MASK(1), 0);
}
-static void drm_underflow_wq(struct work_struct *work)
-{
- struct ade_hw_ctx *ctx = container_of(work, struct ade_hw_ctx,
- display_reset_wq);
- struct drm_device *drm_dev = ctx->crtc->dev;
- struct drm_atomic_state *state;
-
- state = drm_atomic_helper_suspend(drm_dev);
- drm_atomic_helper_resume(drm_dev, state);
-}
-
static irqreturn_t ade_irq_handler(int irq, void *data)
{
struct ade_hw_ctx *ctx = data;
@@ -331,12 +318,6 @@ static irqreturn_t ade_irq_handler(int irq, void *data)
MASK(1), 1);
drm_crtc_handle_vblank(crtc);
}
- if (status & BIT(UNDERFLOW_INT_EN_OFST)) {
- ade_update_bits(base + LDI_INT_CLR, UNDERFLOW_INT_EN_OFST,
- MASK(1), 1);
- DRM_ERROR("LDI underflow!");
- schedule_work(&ctx->display_reset_wq);
- }
return IRQ_HANDLED;
}
@@ -919,7 +900,6 @@ static void *ade_hw_ctx_alloc(struct platform_device *pdev,
if (ret)
return ERR_PTR(-EIO);
- INIT_WORK(&ctx->display_reset_wq, drm_underflow_wq);
ctx->crtc = crtc;
return ctx;
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index a63790d32d75..c3332209f27a 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -1356,10 +1356,16 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
/* DRM bridge functions */
-static int tda998x_bridge_attach(struct drm_bridge *bridge)
+static int tda998x_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
return tda998x_connector_init(priv, bridge->dev);
}
@@ -2022,7 +2028,7 @@ static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
if (ret)
goto err_encoder;
- ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL, 0);
if (ret)
goto err_bridge;
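The kirin and tda998x hunks above adapt to the extra flags argument that drm_bridge_attach() grew: 0 keeps the legacy behaviour where the bridge registers its own connector, while DRM_BRIDGE_ATTACH_NO_CONNECTOR asks the bridge to skip connector creation so the display driver can own it — which tda998x explicitly rejects until it is converted. A minimal sketch of the two call styles, with encoder and bridge as placeholders:

	/* legacy: the bridge creates and registers the connector */
	ret = drm_bridge_attach(encoder, bridge, NULL, 0);

	/* new style: the caller owns the connector; bridges that cannot
	 * cope yet return -EINVAL, as tda998x does above */
	ret = drm_bridge_attach(encoder, bridge, NULL,
				DRM_BRIDGE_ATTACH_NO_CONNECTOR);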
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index ba9595960bbe..9afa5c4a6bf0 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -42,16 +42,9 @@ config DRM_I915
If "M" is selected, the module will be called i915.
-config DRM_I915_ALPHA_SUPPORT
- bool "Enable alpha quality support for new Intel hardware by default"
- depends on DRM_I915
- help
- This option is deprecated. Use DRM_I915_FORCE_PROBE option instead.
-
config DRM_I915_FORCE_PROBE
string "Force probe driver for selected new Intel hardware"
depends on DRM_I915
- default "*" if DRM_I915_ALPHA_SUPPORT
help
This is the default value for the i915.force_probe module
parameter. Using the module parameter overrides this option.
@@ -75,9 +68,8 @@ config DRM_I915_CAPTURE_ERROR
help
This option enables capturing the GPU state when a hang is detected.
This information is vital for triaging hangs and assists in debugging.
- Please report any hang to
- https://bugs.freedesktop.org/enter_bug.cgi?product=DRI
- for triaging.
+ Please report any hang for triaging according to:
+ https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs
If in doubt, say "Y".
diff --git a/drivers/gpu/drm/i915/Kconfig.profile b/drivers/gpu/drm/i915/Kconfig.profile
index c280b6ae38eb..0bfd276c19fe 100644
--- a/drivers/gpu/drm/i915/Kconfig.profile
+++ b/drivers/gpu/drm/i915/Kconfig.profile
@@ -20,6 +20,9 @@ config DRM_I915_HEARTBEAT_INTERVAL
check the health of the GPU and undertake regular house-keeping of
internal driver state.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/heartbeat_interval_ms
+
May be 0 to disable heartbeats and therefore disable automatic GPU
hang detection.
@@ -33,11 +36,18 @@ config DRM_I915_PREEMPT_TIMEOUT
expires, the HW will be reset to allow the more important context
to execute.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/preempt_timeout_ms
+
May be 0 to disable the timeout.
-config DRM_I915_SPIN_REQUEST
- int "Busywait for request completion (us)"
- default 5 # microseconds
+ The compiled in default may get overridden at driver probe time on
+ certain platforms and certain engines which will be reflected in the
+ sysfs control.
+
+config DRM_I915_MAX_REQUEST_BUSYWAIT
+ int "Busywait for request completion limit (ns)"
+ default 8000 # nanoseconds
help
Before sleeping waiting for a request (GPU operation) to complete,
we may spend some time polling for its completion. As the IRQ may
@@ -45,6 +55,9 @@ config DRM_I915_SPIN_REQUEST
check if the request will complete in the time it would have taken
us to enable the interrupt.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/max_busywait_duration_ns
+
May be 0 to disable the initial spin. In practice, we estimate
the cost of enabling the interrupt (if currently disabled) to be
a few microseconds.
@@ -60,6 +73,9 @@ config DRM_I915_STOP_TIMEOUT
that the reset itself may take longer and so be more disruptive to
interactive or low latency workloads.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/stop_timeout_ms
+
config DRM_I915_TIMESLICE_DURATION
int "Scheduling quantum for userspace batches (ms, jiffy granularity)"
default 1 # milliseconds
@@ -73,4 +89,7 @@ config DRM_I915_TIMESLICE_DURATION
is scheduled for execution for the timeslice duration, before
switching to the next context.
+ This is adjustable via
+ /sys/class/drm/card?/engine/*/timeslice_duration_ms
+
May be 0 to disable timeslicing.
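Each compile-time default above is now only a starting value; the matching per-engine sysfs attribute can be inspected and tuned at run time. A hedged userspace sketch that reads one of them back — "card0" and "rcs0" are illustrative names, the real card and engine directories vary per system:

	#include <stdio.h>

	int main(void)
	{
		unsigned long ns;
		FILE *f = fopen("/sys/class/drm/card0/engine/rcs0/"
				"max_busywait_duration_ns", "r");

		if (!f)
			return 1;
		if (fscanf(f, "%lu", &ns) == 1)
			printf("busywait limit: %lu ns\n", ns);
		fclose(f);
		return 0;
	}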
diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile
index b8c5f8934dbd..9f887a86e555 100644
--- a/drivers/gpu/drm/i915/Makefile
+++ b/drivers/gpu/drm/i915/Makefile
@@ -46,15 +46,16 @@ i915-y += i915_drv.o \
i915_switcheroo.o \
i915_sysfs.o \
i915_utils.o \
- intel_csr.o \
intel_device_info.o \
+ intel_dram.o \
intel_memory_region.o \
intel_pch.o \
intel_pm.o \
intel_runtime_pm.o \
intel_sideband.o \
intel_uncore.o \
- intel_wakeref.o
+ intel_wakeref.o \
+ vlv_suspend.o
# core library code
i915-y += \
@@ -66,7 +67,11 @@ i915-y += \
i915_user_extensions.o
i915-$(CONFIG_COMPAT) += i915_ioc32.o
-i915-$(CONFIG_DEBUG_FS) += i915_debugfs.o display/intel_pipe_crc.o
+i915-$(CONFIG_DEBUG_FS) += \
+ i915_debugfs.o \
+ i915_debugfs_params.o \
+ display/intel_display_debugfs.o \
+ display/intel_pipe_crc.o
i915-$(CONFIG_PERF_EVENTS) += i915_pmu.o
# "Graphics Technology" (aka we talk to the gpu)
@@ -75,9 +80,12 @@ gt-y += \
gt/debugfs_gt.o \
gt/debugfs_gt_pm.o \
gt/gen6_ppgtt.o \
+ gt/gen7_renderclear.o \
gt/gen8_ppgtt.o \
gt/intel_breadcrumbs.o \
gt/intel_context.o \
+ gt/intel_context_param.o \
+ gt/intel_context_sseu.o \
gt/intel_engine_cs.o \
gt/intel_engine_heartbeat.o \
gt/intel_engine_pm.o \
@@ -102,7 +110,8 @@ gt-y += \
gt/intel_rps.o \
gt/intel_sseu.o \
gt/intel_timeline.o \
- gt/intel_workarounds.o
+ gt/intel_workarounds.o \
+ gt/sysfs_engines.o
# autogenerated null render state
gt-y += \
gt/gen6_renderstate.o \
@@ -179,6 +188,7 @@ i915-y += \
display/intel_color.o \
display/intel_combo_phy.o \
display/intel_connector.o \
+ display/intel_csr.o \
display/intel_display.o \
display/intel_display_power.o \
display/intel_dpio_phy.o \
@@ -187,6 +197,7 @@ i915-y += \
display/intel_fbc.o \
display/intel_fifo_underrun.o \
display/intel_frontbuffer.o \
+ display/intel_global_state.o \
display/intel_hdcp.o \
display/intel_hotplug.o \
display/intel_lpe_audio.o \
@@ -294,7 +305,7 @@ extra-$(CONFIG_DRM_I915_WERROR) += \
$(shell cd $(srctree)/$(src) && find * -name '*.h')))
quiet_cmd_hdrtest = HDRTEST $(patsubst %.hdrtest,%.h,$@)
- cmd_hdrtest = $(CC) $(c_flags) -S -o /dev/null -x c /dev/null -include $<; touch $@
+ cmd_hdrtest = $(CC) $(filter-out $(CFLAGS_GCOV), $(c_flags)) -S -o /dev/null -x c /dev/null -include $<; touch $@
$(obj)/%.hdrtest: $(src)/%.h FORCE
$(call if_changed_dep,hdrtest)
diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c
index f8e882101396..17cee6f80d8b 100644
--- a/drivers/gpu/drm/i915/display/icl_dsi.c
+++ b/drivers/gpu/drm/i915/display/icl_dsi.c
@@ -39,14 +39,14 @@
static inline int header_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
- return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_HEADER_CREDIT_MASK)
>> FREE_HEADER_CREDIT_SHIFT;
}
static inline int payload_credits_available(struct drm_i915_private *dev_priv,
enum transcoder dsi_trans)
{
- return (I915_READ(DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
+ return (intel_de_read(dev_priv, DSI_CMD_TXCTL(dsi_trans)) & FREE_PLOAD_CREDIT_MASK)
>> FREE_PLOAD_CREDIT_SHIFT;
}
@@ -55,7 +55,7 @@ static void wait_for_header_credits(struct drm_i915_private *dev_priv,
{
if (wait_for_us(header_credits_available(dev_priv, dsi_trans) >=
MAX_HEADER_CREDIT, 100))
- DRM_ERROR("DSI header credits not released\n");
+ drm_err(&dev_priv->drm, "DSI header credits not released\n");
}
static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
@@ -63,7 +63,7 @@ static void wait_for_payload_credits(struct drm_i915_private *dev_priv,
{
if (wait_for_us(payload_credits_available(dev_priv, dsi_trans) >=
MAX_PLOAD_CREDIT, 100))
- DRM_ERROR("DSI payload credits not released\n");
+ drm_err(&dev_priv->drm, "DSI payload credits not released\n");
}
static enum transcoder dsi_port_to_transcoder(enum port port)
@@ -97,7 +97,8 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
dsi->channel = 0;
ret = mipi_dsi_dcs_nop(dsi);
if (ret < 0)
- DRM_ERROR("error sending DCS NOP command\n");
+ drm_err(&dev_priv->drm,
+ "error sending DCS NOP command\n");
}
/* wait for header credits to be released */
@@ -109,9 +110,9 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder)
/* wait for LP TX in progress bit to be cleared */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us(!(I915_READ(DSI_LP_MSG(dsi_trans)) &
+ if (wait_for_us(!(intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
LPTX_IN_PROGRESS), 20))
- DRM_ERROR("LPTX bit not cleared\n");
+ drm_err(&dev_priv->drm, "LPTX bit not cleared\n");
}
}
@@ -129,14 +130,15 @@ static bool add_payld_to_queue(struct intel_dsi_host *host, const u8 *data,
free_credits = payload_credits_available(dev_priv, dsi_trans);
if (free_credits < 1) {
- DRM_ERROR("Payload credit not available\n");
+ drm_err(&dev_priv->drm,
+ "Payload credit not available\n");
return false;
}
for (j = 0; j < min_t(u32, len - i, 4); j++)
tmp |= *data++ << 8 * j;
- I915_WRITE(DSI_CMD_TXPYLD(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_CMD_TXPYLD(dsi_trans), tmp);
}
return true;
@@ -154,11 +156,12 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
/* check if header credit available */
free_credits = header_credits_available(dev_priv, dsi_trans);
if (free_credits < 1) {
- DRM_ERROR("send pkt header failed, not enough hdr credits\n");
+ drm_err(&dev_priv->drm,
+ "send pkt header failed, not enough hdr credits\n");
return -1;
}
- tmp = I915_READ(DSI_CMD_TXHDR(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_CMD_TXHDR(dsi_trans));
if (pkt.payload)
tmp |= PAYLOAD_PRESENT;
@@ -175,7 +178,7 @@ static int dsi_send_pkt_hdr(struct intel_dsi_host *host,
tmp |= ((pkt.header[0] & DT_MASK) << DT_SHIFT);
tmp |= (pkt.header[1] << PARAM_WC_LOWER_SHIFT);
tmp |= (pkt.header[2] << PARAM_WC_UPPER_SHIFT);
- I915_WRITE(DSI_CMD_TXHDR(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_CMD_TXHDR(dsi_trans), tmp);
return 0;
}
@@ -212,53 +215,55 @@ static void dsi_program_swing_and_deemphasis(struct intel_encoder *encoder)
* Program voltage swing and pre-emphasis level values as per
* table in BSPEC under DDI buffer programing
*/
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK);
tmp |= SCALING_MODE_SEL(0x2);
tmp |= TAP2_DISABLE | TAP3_DISABLE;
tmp |= RTERM_SELECT(0x6);
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
tmp |= SWING_SEL_UPPER(0x2);
tmp |= SWING_SEL_LOWER(0x2);
tmp |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
/* Bspec: must not use GRP register for write */
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
tmp |= POST_CURSOR_1(0x0);
tmp |= POST_CURSOR_2(0x0);
tmp |= CURSOR_COEFF(0x3f);
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
}
@@ -270,7 +275,7 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
u32 dss_ctl1;
- dss_ctl1 = I915_READ(DSS_CTL1);
+ dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
dss_ctl1 |= SPLITTER_ENABLE;
dss_ctl1 &= ~OVERLAP_PIXELS_MASK;
dss_ctl1 |= OVERLAP_PIXELS(intel_dsi->pixel_overlap);
@@ -286,20 +291,21 @@ static void configure_dual_link_mode(struct intel_encoder *encoder,
dl_buffer_depth = hactive / 2 + intel_dsi->pixel_overlap;
if (dl_buffer_depth > MAX_DL_BUFFER_TARGET_DEPTH)
- DRM_ERROR("DL buffer depth exceed max value\n");
+ drm_err(&dev_priv->drm,
+ "DL buffer depth exceed max value\n");
dss_ctl1 &= ~LEFT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl1 |= LEFT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
dss_ctl2 &= ~RIGHT_DL_BUF_TARGET_DEPTH_MASK;
dss_ctl2 |= RIGHT_DL_BUF_TARGET_DEPTH(dl_buffer_depth);
- I915_WRITE(DSS_CTL2, dss_ctl2);
+ intel_de_write(dev_priv, DSS_CTL2, dss_ctl2);
} else {
/* Interleave */
dss_ctl1 |= DUAL_LINK_MODE_INTERLEAVE;
}
- I915_WRITE(DSS_CTL1, dss_ctl1);
+ intel_de_write(dev_priv, DSS_CTL1, dss_ctl1);
}
/* aka DSI 8X clock */
@@ -330,15 +336,15 @@ static void gen11_dsi_program_esc_clk_div(struct intel_encoder *encoder,
esc_clk_div_m = DIV_ROUND_UP(afe_clk_khz, DSI_MAX_ESC_CLK);
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(ICL_DSI_ESC_CLK_DIV(port),
- esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- POSTING_READ(ICL_DSI_ESC_CLK_DIV(port));
+ intel_de_write(dev_priv, ICL_DSI_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DSI_ESC_CLK_DIV(port));
}
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(ICL_DPHY_ESC_CLK_DIV(port),
- esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
- POSTING_READ(ICL_DPHY_ESC_CLK_DIV(port));
+ intel_de_write(dev_priv, ICL_DPHY_ESC_CLK_DIV(port),
+ esc_clk_div_m & ICL_ESC_CLK_DIV_MASK);
+ intel_de_posting_read(dev_priv, ICL_DPHY_ESC_CLK_DIV(port));
}
}
@@ -348,7 +354,7 @@ static void get_dsi_io_power_domains(struct drm_i915_private *dev_priv,
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- WARN_ON(intel_dsi->io_wakeref[port]);
+ drm_WARN_ON(&dev_priv->drm, intel_dsi->io_wakeref[port]);
intel_dsi->io_wakeref[port] =
intel_display_power_get(dev_priv,
port == PORT_A ?
@@ -365,9 +371,9 @@ static void gen11_dsi_enable_io_power(struct intel_encoder *encoder)
u32 tmp;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
tmp |= COMBO_PHY_MODE_DSI;
- I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
}
get_dsi_io_power_domains(dev_priv, intel_dsi);
@@ -394,40 +400,46 @@ static void gen11_dsi_config_phy_lanes_sequence(struct intel_encoder *encoder)
/* Step 4b(i) set loadgen select for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW4_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW4_AUX(phy));
tmp &= ~LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_AUX(phy), tmp);
for (lane = 0; lane <= 3; lane++) {
- tmp = I915_READ(ICL_PORT_TX_DW4_LN(lane, phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy));
tmp &= ~LOADGEN_SELECT;
if (lane != 2)
tmp |= LOADGEN_SELECT;
- I915_WRITE(ICL_PORT_TX_DW4_LN(lane, phy), tmp);
+ intel_de_write(dev_priv,
+ ICL_PORT_TX_DW4_LN(lane, phy), tmp);
}
}
/* Step 4b(ii) set latency optimization for transmit and aux lanes */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW2_AUX(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_AUX(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_AUX(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_AUX(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
tmp &= ~FRC_LATENCY_OPTIM_MASK;
tmp |= FRC_LATENCY_OPTIM_VAL(0x5);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), tmp);
/* For EHL, TGL, set latency optimization for PCS_DW1 lanes */
if (IS_ELKHARTLAKE(dev_priv) || (INTEL_GEN(dev_priv) >= 12)) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0);
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy),
+ tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv,
+ ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~LATENCY_OPTIM_MASK;
tmp |= LATENCY_OPTIM_VAL(0x1);
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy),
+ tmp);
}
}
@@ -442,12 +454,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* clear common keeper enable bit */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_PCS_DW1_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_AUX(phy));
tmp &= ~COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_AUX(phy), tmp);
}
/*
@@ -456,19 +468,19 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
* as part of lane phy sequence configuration
*/
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_CL_DW5(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
tmp |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), tmp);
}
/* Clear training enable to change swing values */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
}
/* Program swing and de-emphasis */
@@ -476,12 +488,12 @@ static void gen11_dsi_voltage_swing_program_seq(struct intel_encoder *encoder)
/* Set training enable to trigger update */
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), tmp);
- tmp = I915_READ(ICL_PORT_TX_DW5_AUX(phy));
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), tmp);
+ tmp = intel_de_read(dev_priv, ICL_PORT_TX_DW5_AUX(phy));
tmp |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_AUX(phy), tmp);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_AUX(phy), tmp);
}
}
@@ -493,14 +505,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder)
enum port port;
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
tmp |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), tmp);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
- if (wait_for_us(!(I915_READ(DDI_BUF_CTL(port)) &
+ if (wait_for_us(!(intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
500))
- DRM_ERROR("DDI port:%c buffer idle\n", port_name(port));
+ drm_err(&dev_priv->drm, "DDI port:%c buffer idle\n",
+ port_name(port));
}
}
@@ -516,28 +529,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
/* Program T-INIT master registers */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_T_INIT_MASTER(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_T_INIT_MASTER(port));
tmp &= ~MASTER_INIT_TIMER_MASK;
tmp |= intel_dsi->init_count;
- I915_WRITE(ICL_DSI_T_INIT_MASTER(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_T_INIT_MASTER(port), tmp);
}
/* Program DPHY clock lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(DPHY_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, DPHY_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
/* shadow register inside display core */
- I915_WRITE(DSI_CLK_TIMING_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, DSI_CLK_TIMING_PARAM(port),
+ intel_dsi->dphy_reg);
}
/* Program DPHY data lanes timings */
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(DPHY_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
+ intel_de_write(dev_priv, DPHY_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
/* shadow register inside display core */
- I915_WRITE(DSI_DATA_TIMING_PARAM(port),
- intel_dsi->dphy_data_lane_reg);
+ intel_de_write(dev_priv, DSI_DATA_TIMING_PARAM(port),
+ intel_dsi->dphy_data_lane_reg);
}
/*
@@ -549,25 +564,30 @@ gen11_dsi_setup_dphy_timings(struct intel_encoder *encoder,
if (IS_GEN(dev_priv, 11)) {
if (afe_clk(encoder, crtc_state) <= 800000) {
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DPHY_TA_TIMING_PARAM(port));
+ tmp = intel_de_read(dev_priv,
+ DPHY_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DPHY_TA_TIMING_PARAM(port), tmp);
+ intel_de_write(dev_priv,
+ DPHY_TA_TIMING_PARAM(port),
+ tmp);
/* shadow register inside display core */
- tmp = I915_READ(DSI_TA_TIMING_PARAM(port));
+ tmp = intel_de_read(dev_priv,
+ DSI_TA_TIMING_PARAM(port));
tmp &= ~TA_SURE_MASK;
tmp |= TA_SURE_OVERRIDE | TA_SURE(0);
- I915_WRITE(DSI_TA_TIMING_PARAM(port), tmp);
+ intel_de_write(dev_priv,
+ DSI_TA_TIMING_PARAM(port), tmp);
}
}
}
if (IS_ELKHARTLAKE(dev_priv)) {
for_each_dsi_phy(phy, intel_dsi->phys) {
- tmp = I915_READ(ICL_DPHY_CHKN(phy));
+ tmp = intel_de_read(dev_priv, ICL_DPHY_CHKN(phy));
tmp |= ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP;
- I915_WRITE(ICL_DPHY_CHKN(phy), tmp);
+ intel_de_write(dev_priv, ICL_DPHY_CHKN(phy), tmp);
}
}
}
@@ -579,13 +599,13 @@ static void gen11_dsi_gate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp |= ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
@@ -595,13 +615,13 @@ static void gen11_dsi_ungate_clocks(struct intel_encoder *encoder)
u32 tmp;
enum phy phy;
- mutex_lock(&dev_priv->dpll_lock);
- tmp = I915_READ(ICL_DPCLKA_CFGCR0);
+ mutex_lock(&dev_priv->dpll.lock);
+ tmp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys)
tmp &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, tmp);
- mutex_unlock(&dev_priv->dpll_lock);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, tmp);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void gen11_dsi_map_pll(struct intel_encoder *encoder,
@@ -613,14 +633,14 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
enum phy phy;
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_dsi_phy(phy, intel_dsi->phys) {
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
}
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
for_each_dsi_phy(phy, intel_dsi->phys) {
if (INTEL_GEN(dev_priv) >= 12)
@@ -628,11 +648,11 @@ static void gen11_dsi_map_pll(struct intel_encoder *encoder,
else
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_OFF(phy);
}
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(ICL_DPCLKA_CFGCR0);
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void
@@ -649,7 +669,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans));
if (intel_dsi->eotp_pkt)
tmp &= ~EOTP_DISABLED;
@@ -726,16 +746,18 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
}
}
- I915_WRITE(DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans), tmp);
}
/* enable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp |= PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
/* configure stream splitting */
@@ -746,7 +768,7 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* select data lane width */
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~DDI_PORT_WIDTH_MASK;
tmp |= DDI_PORT_WIDTH(intel_dsi->lane_count);
@@ -772,15 +794,15 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder,
/* enable DDI buffer */
tmp |= TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* wait for link ready */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- if (wait_for_us((I915_READ(DSI_TRANS_FUNC_CONF(dsi_trans)) &
- LINK_READY), 2500))
- DRM_ERROR("DSI link not ready\n");
+ if (wait_for_us((intel_de_read(dev_priv, DSI_TRANS_FUNC_CONF(dsi_trans)) &
+ LINK_READY), 2500))
+ drm_err(&dev_priv->drm, "DSI link not ready\n");
}
}
@@ -836,17 +858,18 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
/* minimum hactive as per bspec: 256 pixels */
if (adjusted_mode->crtc_hdisplay < 256)
- DRM_ERROR("hactive is less then 256 pixels\n");
+ drm_err(&dev_priv->drm, "hactive is less then 256 pixels\n");
/* if RGB666 format, then hactive must be multiple of 4 pixels */
if (intel_dsi->pixel_format == MIPI_DSI_FMT_RGB666 && hactive % 4 != 0)
- DRM_ERROR("hactive pixels are not multiple of 4\n");
+ drm_err(&dev_priv->drm,
+ "hactive pixels are not multiple of 4\n");
/* program TRANS_HTOTAL register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(HTOTAL(dsi_trans),
- (hactive - 1) | ((htotal - 1) << 16));
+ intel_de_write(dev_priv, HTOTAL(dsi_trans),
+ (hactive - 1) | ((htotal - 1) << 16));
}
/* TRANS_HSYNC register to be programmed only for video mode */
@@ -855,11 +878,12 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE) {
/* BSPEC: hsync size should be atleast 16 pixels */
if (hsync_size < 16)
- DRM_ERROR("hsync size < 16 pixels\n");
+ drm_err(&dev_priv->drm,
+ "hsync size < 16 pixels\n");
}
if (hback_porch < 16)
- DRM_ERROR("hback porch < 16 pixels\n");
+ drm_err(&dev_priv->drm, "hback porch < 16 pixels\n");
if (intel_dsi->dual_link) {
hsync_start /= 2;
@@ -868,8 +892,8 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(HSYNC(dsi_trans),
- (hsync_start - 1) | ((hsync_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(dsi_trans),
+ (hsync_start - 1) | ((hsync_end - 1) << 16));
}
}
@@ -882,21 +906,21 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
* struct drm_display_mode.
* For interlace mode: program required pixel minus 2
*/
- I915_WRITE(VTOTAL(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
}
if (vsync_end < vsync_start || vsync_end > vtotal)
- DRM_ERROR("Invalid vsync_end value\n");
+ drm_err(&dev_priv->drm, "Invalid vsync_end value\n");
if (vsync_start < vactive)
- DRM_ERROR("vsync_start less than vactive\n");
+ drm_err(&dev_priv->drm, "vsync_start less than vactive\n");
/* program TRANS_VSYNC register */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VSYNC(dsi_trans),
- (vsync_start - 1) | ((vsync_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(dsi_trans),
+ (vsync_start - 1) | ((vsync_end - 1) << 16));
}
/*
@@ -907,15 +931,15 @@ gen11_dsi_set_transcoder_timings(struct intel_encoder *encoder,
*/
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VSYNCSHIFT(dsi_trans), vsync_shift);
+ intel_de_write(dev_priv, VSYNCSHIFT(dsi_trans), vsync_shift);
}
/* program TRANS_VBLANK register, should be same as vtotal programmed */
if (INTEL_GEN(dev_priv) >= 12) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- I915_WRITE(VBLANK(dsi_trans),
- (vactive - 1) | ((vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(dsi_trans),
+ (vactive - 1) | ((vtotal - 1) << 16));
}
}
}
@@ -930,14 +954,15 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
tmp |= PIPECONF_ENABLE;
- I915_WRITE(PIPECONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be enabled */
if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 10))
- DRM_ERROR("DSI transcoder not enabled\n");
+ drm_err(&dev_priv->drm,
+ "DSI transcoder not enabled\n");
}
}
@@ -968,26 +993,26 @@ static void gen11_dsi_setup_timeouts(struct intel_encoder *encoder,
dsi_trans = dsi_port_to_transcoder(port);
/* program hst_tx_timeout */
- tmp = I915_READ(DSI_HSTX_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_HSTX_TO(dsi_trans));
tmp &= ~HSTX_TIMEOUT_VALUE_MASK;
tmp |= HSTX_TIMEOUT_VALUE(hs_tx_timeout);
- I915_WRITE(DSI_HSTX_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_HSTX_TO(dsi_trans), tmp);
/* FIXME: DSI_CALIB_TO */
/* program lp_rx_host timeout */
- tmp = I915_READ(DSI_LPRX_HOST_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_LPRX_HOST_TO(dsi_trans));
tmp &= ~LPRX_TIMEOUT_VALUE_MASK;
tmp |= LPRX_TIMEOUT_VALUE(lp_rx_timeout);
- I915_WRITE(DSI_LPRX_HOST_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_LPRX_HOST_TO(dsi_trans), tmp);
/* FIXME: DSI_PWAIT_TO */
/* program turn around timeout */
- tmp = I915_READ(DSI_TA_TO(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_TA_TO(dsi_trans));
tmp &= ~TA_TIMEOUT_VALUE_MASK;
tmp |= TA_TIMEOUT_VALUE(ta_timeout);
- I915_WRITE(DSI_TA_TO(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_TA_TO(dsi_trans), tmp);
}
}
@@ -1041,14 +1066,15 @@ static void gen11_dsi_powerup_panel(struct intel_encoder *encoder)
* FIXME: This uses the number of DW's currently in the payload
* receive queue. This is probably not what we want here.
*/
- tmp = I915_READ(DSI_CMD_RXCTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_CMD_RXCTL(dsi_trans));
tmp &= NUMBER_RX_PLOAD_DW_MASK;
/* multiply "Number Rx Payload DW" by 4 to get max value */
tmp = tmp * 4;
dsi = intel_dsi->dsi_hosts[port]->device;
ret = mipi_dsi_set_maximum_return_packet_size(dsi, tmp);
if (ret < 0)
- DRM_ERROR("error setting max return pkt size%d\n", tmp);
+ drm_err(&dev_priv->drm,
+ "error setting max return pkt size%d\n", tmp);
}
/* panel power on related mipi dsi vbt sequences */
@@ -1077,8 +1103,6 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config,
const struct drm_connector_state *conn_state)
{
- struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
-
/* step3b */
gen11_dsi_map_pll(encoder, pipe_config);
@@ -1092,13 +1116,24 @@ static void gen11_dsi_pre_enable(struct intel_encoder *encoder,
/* step6c: configure transcoder timings */
gen11_dsi_set_transcoder_timings(encoder, pipe_config);
+}
+
+static void gen11_dsi_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
+
+ WARN_ON(crtc_state->has_pch_encoder);
/* step6d: enable dsi transcoder */
gen11_dsi_enable_transcoder(encoder);
/* step7: enable backlight */
- intel_panel_enable_backlight(pipe_config, conn_state);
+ intel_panel_enable_backlight(crtc_state, conn_state);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
+
+ intel_crtc_vblank_on(crtc_state);
}
static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
@@ -1113,14 +1148,15 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder)
dsi_trans = dsi_port_to_transcoder(port);
/* disable transcoder */
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
tmp &= ~PIPECONF_ENABLE;
- I915_WRITE(PIPECONF(dsi_trans), tmp);
+ intel_de_write(dev_priv, PIPECONF(dsi_trans), tmp);
/* wait for transcoder to be disabled */
if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans),
I965_PIPECONF_ACTIVE, 50))
- DRM_ERROR("DSI trancoder not disabled\n");
+ drm_err(&dev_priv->drm,
+ "DSI trancoder not disabled\n");
}
}
@@ -1147,32 +1183,34 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder)
/* put dsi link in ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(DSI_LP_MSG(dsi_trans));
+ tmp = intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans));
tmp |= LINK_ENTER_ULPS;
tmp &= ~LINK_ULPS_TYPE_LP11;
- I915_WRITE(DSI_LP_MSG(dsi_trans), tmp);
+ intel_de_write(dev_priv, DSI_LP_MSG(dsi_trans), tmp);
- if (wait_for_us((I915_READ(DSI_LP_MSG(dsi_trans)) &
- LINK_IN_ULPS),
+ if (wait_for_us((intel_de_read(dev_priv, DSI_LP_MSG(dsi_trans)) &
+ LINK_IN_ULPS),
10))
- DRM_ERROR("DSI link not in ULPS\n");
+ drm_err(&dev_priv->drm, "DSI link not in ULPS\n");
}
/* disable ddi function */
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
tmp &= ~TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans), tmp);
}
/* disable port sync mode if dual link */
if (intel_dsi->dual_link) {
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL2(dsi_trans));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans));
tmp &= ~PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(dsi_trans), tmp);
}
}
}
@@ -1186,15 +1224,16 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder)
gen11_dsi_ungate_clocks(encoder);
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
tmp &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), tmp);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), tmp);
- if (wait_for_us((I915_READ(DDI_BUF_CTL(port)) &
+ if (wait_for_us((intel_de_read(dev_priv, DDI_BUF_CTL(port)) &
DDI_BUF_IS_IDLE),
8))
- DRM_ERROR("DDI port:%c buffer not idle\n",
- port_name(port));
+ drm_err(&dev_priv->drm,
+ "DDI port:%c buffer not idle\n",
+ port_name(port));
}
gen11_dsi_gate_clocks(encoder);
}
@@ -1219,9 +1258,9 @@ static void gen11_dsi_disable_io_power(struct intel_encoder *encoder)
/* set mode to DDI */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(ICL_DSI_IO_MODECTL(port));
+ tmp = intel_de_read(dev_priv, ICL_DSI_IO_MODECTL(port));
tmp &= ~COMBO_PHY_MODE_DSI;
- I915_WRITE(ICL_DSI_IO_MODECTL(port), tmp);
+ intel_de_write(dev_priv, ICL_DSI_IO_MODECTL(port), tmp);
}
}
@@ -1311,15 +1350,15 @@ static void gen11_dsi_get_timings(struct intel_encoder *encoder,
static void gen11_dsi_get_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
intel_dsc_get_config(encoder, pipe_config);
/* FIXME: adapt icl_ddi_clock_get() for DSI and use that? */
- pipe_config->port_clock =
- cnl_calc_wrpll_link(dev_priv, &pipe_config->dpll_hw_state);
+ pipe_config->port_clock = intel_dpll_get_freq(i915,
+ pipe_config->shared_dpll);
pipe_config->hw.adjusted_mode.crtc_clock = intel_dsi->pclk;
if (intel_dsi->dual_link)
@@ -1357,11 +1396,13 @@ static int gen11_dsi_dsc_compute_config(struct intel_encoder *encoder,
return ret;
/* DSI specific sanity checks on the common code */
- WARN_ON(vdsc_cfg->vbr_enable);
- WARN_ON(vdsc_cfg->simple_422);
- WARN_ON(vdsc_cfg->pic_width % vdsc_cfg->slice_width);
- WARN_ON(vdsc_cfg->slice_height < 8);
- WARN_ON(vdsc_cfg->pic_height % vdsc_cfg->slice_height);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->vbr_enable);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->simple_422);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_width % vdsc_cfg->slice_width);
+ drm_WARN_ON(&dev_priv->drm, vdsc_cfg->slice_height < 8);
+ drm_WARN_ON(&dev_priv->drm,
+ vdsc_cfg->pic_height % vdsc_cfg->slice_height);
ret = drm_dsc_compute_rc_parameters(vdsc_cfg);
if (ret)
@@ -1443,7 +1484,7 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
dsi_trans = dsi_port_to_transcoder(port);
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(dsi_trans));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(dsi_trans));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
case TRANS_DDI_EDP_INPUT_A_ON:
*pipe = PIPE_A;
@@ -1458,11 +1499,11 @@ static bool gen11_dsi_get_hw_state(struct intel_encoder *encoder,
*pipe = PIPE_D;
break;
default:
- DRM_ERROR("Invalid PIPE input\n");
+ drm_err(&dev_priv->drm, "Invalid PIPE input\n");
goto out;
}
- tmp = I915_READ(PIPECONF(dsi_trans));
+ tmp = intel_de_read(dev_priv, PIPECONF(dsi_trans));
ret = tmp & PIPECONF_ENABLE;
}
out:
@@ -1582,7 +1623,8 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
*/
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * 4, tlpx_ns);
if (prepare_cnt > ICL_PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare_cnt out of range (%d)\n", prepare_cnt);
+ drm_dbg_kms(&dev_priv->drm, "prepare_cnt out of range (%d)\n",
+ prepare_cnt);
prepare_cnt = ICL_PREPARE_CNT_MAX;
}
@@ -1590,28 +1632,33 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
clk_zero_cnt = DIV_ROUND_UP(mipi_config->tclk_prepare_clkzero -
ths_prepare_ns, tlpx_ns);
if (clk_zero_cnt > ICL_CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "clk_zero_cnt out of range (%d)\n", clk_zero_cnt);
clk_zero_cnt = ICL_CLK_ZERO_CNT_MAX;
}
/* trail cnt in escape clocks*/
trail_cnt = DIV_ROUND_UP(tclk_trail_ns, tlpx_ns);
if (trail_cnt > ICL_TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail_cnt out of range (%d)\n", trail_cnt);
+ drm_dbg_kms(&dev_priv->drm, "trail_cnt out of range (%d)\n",
+ trail_cnt);
trail_cnt = ICL_TRAIL_CNT_MAX;
}
/* tclk pre count in escape clocks */
tclk_pre_cnt = DIV_ROUND_UP(mipi_config->tclk_pre, tlpx_ns);
if (tclk_pre_cnt > ICL_TCLK_PRE_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_pre_cnt out of range (%d)\n", tclk_pre_cnt);
tclk_pre_cnt = ICL_TCLK_PRE_CNT_MAX;
}
/* tclk post count in escape clocks */
tclk_post_cnt = DIV_ROUND_UP(mipi_config->tclk_post, tlpx_ns);
if (tclk_post_cnt > ICL_TCLK_POST_CNT_MAX) {
- DRM_DEBUG_KMS("tclk_post_cnt out of range (%d)\n", tclk_post_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "tclk_post_cnt out of range (%d)\n",
+ tclk_post_cnt);
tclk_post_cnt = ICL_TCLK_POST_CNT_MAX;
}
@@ -1619,14 +1666,17 @@ static void icl_dphy_param_init(struct intel_dsi *intel_dsi)
hs_zero_cnt = DIV_ROUND_UP(mipi_config->ths_prepare_hszero -
ths_prepare_ns, tlpx_ns);
if (hs_zero_cnt > ICL_HS_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("hs_zero_cnt out of range (%d)\n", hs_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "hs_zero_cnt out of range (%d)\n",
+ hs_zero_cnt);
hs_zero_cnt = ICL_HS_ZERO_CNT_MAX;
}
/* hs exit zero cnt in escape clocks */
exit_zero_cnt = DIV_ROUND_UP(mipi_config->ths_exit, tlpx_ns);
if (exit_zero_cnt > ICL_EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit_zero_cnt out of range (%d)\n", exit_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm,
+ "exit_zero_cnt out of range (%d)\n",
+ exit_zero_cnt);
exit_zero_cnt = ICL_EXIT_ZERO_CNT_MAX;
}
@@ -1668,9 +1718,8 @@ static void icl_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- intel_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(&connector->base,
+ drm_connector_set_panel_orientation_with_quirk(&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
@@ -1708,6 +1757,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
encoder->pre_pll_enable = gen11_dsi_pre_pll_enable;
encoder->pre_enable = gen11_dsi_pre_enable;
+ encoder->enable = gen11_dsi_enable;
encoder->disable = gen11_dsi_disable;
encoder->post_disable = gen11_dsi_post_disable;
encoder->port = port;
@@ -1738,7 +1788,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
- DRM_ERROR("DSI fixed mode info missing\n");
+ drm_err(&dev_priv->drm, "DSI fixed mode info missing\n");
goto err;
}
@@ -1764,7 +1814,7 @@ void icl_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- DRM_DEBUG_KMS("no device found\n");
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
goto err;
}
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.c b/drivers/gpu/drm/i915/display/intel_acpi.c
index 3456d33feb46..e21fb14d5e07 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.c
+++ b/drivers/gpu/drm/i915/display/intel_acpi.c
@@ -10,6 +10,7 @@
#include "i915_drv.h"
#include "intel_acpi.h"
+#include "intel_display_types.h"
#define INTEL_DSM_REVISION_ID 1 /* For Calpella anyway... */
#define INTEL_DSM_FN_PLATFORM_MUX_INFO 1 /* No args */
@@ -156,3 +157,91 @@ void intel_register_dsm_handler(void)
void intel_unregister_dsm_handler(void)
{
}
+
+/*
+ * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
+ * Attached to the Display Adapter).
+ */
+#define ACPI_DISPLAY_INDEX_SHIFT 0
+#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
+#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
+#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
+#define ACPI_DISPLAY_TYPE_SHIFT 8
+#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
+#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
+#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
+#define ACPI_DISPLAY_TYPE_TV (2 << 8)
+#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
+#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
+#define ACPI_VENDOR_SPECIFIC_SHIFT 12
+#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
+#define ACPI_BIOS_CAN_DETECT (1 << 16)
+#define ACPI_DEPENDS_ON_VGA (1 << 17)
+#define ACPI_PIPE_ID_SHIFT 18
+#define ACPI_PIPE_ID_MASK (7 << 18)
+#define ACPI_DEVICE_ID_SCHEME (1ULL << 31)
+
+static u32 acpi_display_type(struct intel_connector *connector)
+{
+ u32 display_type;
+
+ switch (connector->base.connector_type) {
+ case DRM_MODE_CONNECTOR_VGA:
+ case DRM_MODE_CONNECTOR_DVIA:
+ display_type = ACPI_DISPLAY_TYPE_VGA;
+ break;
+ case DRM_MODE_CONNECTOR_Composite:
+ case DRM_MODE_CONNECTOR_SVIDEO:
+ case DRM_MODE_CONNECTOR_Component:
+ case DRM_MODE_CONNECTOR_9PinDIN:
+ case DRM_MODE_CONNECTOR_TV:
+ display_type = ACPI_DISPLAY_TYPE_TV;
+ break;
+ case DRM_MODE_CONNECTOR_DVII:
+ case DRM_MODE_CONNECTOR_DVID:
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_HDMIA:
+ case DRM_MODE_CONNECTOR_HDMIB:
+ display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ case DRM_MODE_CONNECTOR_eDP:
+ case DRM_MODE_CONNECTOR_DSI:
+ display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
+ break;
+ case DRM_MODE_CONNECTOR_Unknown:
+ case DRM_MODE_CONNECTOR_VIRTUAL:
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ default:
+ MISSING_CASE(connector->base.connector_type);
+ display_type = ACPI_DISPLAY_TYPE_OTHER;
+ break;
+ }
+
+ return display_type;
+}
+
+void intel_acpi_device_id_update(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *drm_dev = &dev_priv->drm;
+ struct intel_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ u8 display_index[16] = {};
+
+ /* Populate the ACPI IDs for all connectors for a given drm_device */
+ drm_connector_list_iter_begin(drm_dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ u32 device_id, type;
+
+ device_id = acpi_display_type(connector);
+
+ /* Use display type specific display index. */
+ type = (device_id & ACPI_DISPLAY_TYPE_MASK)
+ >> ACPI_DISPLAY_TYPE_SHIFT;
+ device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
+
+ connector->acpi_device_id = device_id;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
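intel_acpi_device_id_update() composes each connector's _DOD id from a display type (bits 8-11) plus a per-type running index (bits 0-3). A worked example, assuming a hypothetical laptop with one eDP panel and two DisplayPort outputs enumerated in that order:

	eDP-1 -> ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL | 0 = 0x400
	DP-1  -> ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL | 0 = 0x300
	DP-2  -> ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL | 1 = 0x301

The index counter only advances within a type, matching the display_index[type]++ increment in the loop above.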
diff --git a/drivers/gpu/drm/i915/display/intel_acpi.h b/drivers/gpu/drm/i915/display/intel_acpi.h
index 1c576b3fb712..e8b068661d22 100644
--- a/drivers/gpu/drm/i915/display/intel_acpi.h
+++ b/drivers/gpu/drm/i915/display/intel_acpi.h
@@ -6,12 +6,17 @@
#ifndef __INTEL_ACPI_H__
#define __INTEL_ACPI_H__
+struct drm_i915_private;
+
#ifdef CONFIG_ACPI
void intel_register_dsm_handler(void);
void intel_unregister_dsm_handler(void);
+void intel_acpi_device_id_update(struct drm_i915_private *i915);
#else
static inline void intel_register_dsm_handler(void) { return; }
static inline void intel_unregister_dsm_handler(void) { return; }
+static inline
+void intel_acpi_device_id_update(struct drm_i915_private *i915) { return; }
#endif /* CONFIG_ACPI */
#endif /* __INTEL_ACPI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c
index c362eecdd414..d043057d2fa0 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic.c
@@ -35,7 +35,9 @@
#include <drm/drm_plane_helper.h>
#include "intel_atomic.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
+#include "intel_global_state.h"
#include "intel_hdcp.h"
#include "intel_psr.h"
#include "intel_sprite.h"
@@ -64,8 +66,9 @@ int intel_digital_connector_atomic_get_property(struct drm_connector *connector,
else if (property == dev_priv->broadcast_rgb_property)
*val = intel_conn_state->broadcast_rgb;
else {
- DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
+ drm_dbg_atomic(&dev_priv->drm,
+ "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -101,8 +104,8 @@ int intel_digital_connector_atomic_set_property(struct drm_connector *connector,
return 0;
}
- DRM_DEBUG_ATOMIC("Unknown property [PROP:%d:%s]\n",
- property->base.id, property->name);
+ drm_dbg_atomic(&dev_priv->drm, "Unknown property [PROP:%d:%s]\n",
+ property->base.id, property->name);
return -EINVAL;
}
@@ -178,6 +181,8 @@ intel_digital_connector_duplicate_state(struct drm_connector *connector)
/**
* intel_connector_needs_modeset - check if connector needs a modeset
+ * @state: the atomic state corresponding to this modeset
+ * @connector: the connector
*/
bool
intel_connector_needs_modeset(struct intel_atomic_state *state,
@@ -314,7 +319,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
}
}
- if (WARN(*scaler_id < 0, "Cannot find scaler for %s:%d\n", name, idx))
+ if (drm_WARN(&dev_priv->drm, *scaler_id < 0,
+ "Cannot find scaler for %s:%d\n", name, idx))
return;
/* set scaler mode */
@@ -357,8 +363,8 @@ static void intel_atomic_setup_scaler(struct intel_crtc_scaler_state *scaler_sta
mode = SKL_PS_SCALER_MODE_DYN;
}
- DRM_DEBUG_KMS("Attached scaler id %u.%u to %s:%d\n",
- intel_crtc->pipe, *scaler_id, name, idx);
+ drm_dbg_kms(&dev_priv->drm, "Attached scaler id %u.%u to %s:%d\n",
+ intel_crtc->pipe, *scaler_id, name, idx);
scaler_state->scalers[*scaler_id].mode = mode;
}
@@ -409,8 +415,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
/* fail if required scalers > available scalers */
if (num_scalers_need > intel_crtc->num_scalers){
- DRM_DEBUG_KMS("Too many scaling requests %d > %d\n",
- num_scalers_need, intel_crtc->num_scalers);
+ drm_dbg_kms(&dev_priv->drm,
+ "Too many scaling requests %d > %d\n",
+ num_scalers_need, intel_crtc->num_scalers);
return -EINVAL;
}
@@ -455,8 +462,9 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
plane = drm_plane_from_index(&dev_priv->drm, i);
state = drm_atomic_get_plane_state(drm_state, plane);
if (IS_ERR(state)) {
- DRM_DEBUG_KMS("Failed to add [PLANE:%d] to drm_state\n",
- plane->base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to add [PLANE:%d] to drm_state\n",
+ plane->base.id);
return PTR_ERR(state);
}
}
@@ -465,7 +473,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
idx = plane->base.id;
/* plane on different crtc cannot be a scaler user of this crtc */
- if (WARN_ON(intel_plane->pipe != intel_crtc->pipe))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_plane->pipe != intel_crtc->pipe))
continue;
plane_state = intel_atomic_get_new_plane_state(intel_state,
@@ -494,18 +503,28 @@ intel_atomic_state_alloc(struct drm_device *dev)
return &state->base;
}
+void intel_atomic_state_free(struct drm_atomic_state *_state)
+{
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+
+ drm_atomic_state_default_release(&state->base);
+ kfree(state->global_objs);
+
+ i915_sw_fence_fini(&state->commit_ready);
+
+ kfree(state);
+}
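The new free hook only takes effect once wired into the driver's drm_mode_config_funcs next to the existing alloc/clear hooks. A sketch of that wiring (the member names are real drm_mode_config_funcs fields; the table itself lives outside this hunk and is assumed here):

static const struct drm_mode_config_funcs example_mode_funcs = {
	/* .fb_create, .atomic_check, .atomic_commit etc. elided */
	.atomic_state_alloc = intel_atomic_state_alloc,
	.atomic_state_clear = intel_atomic_state_clear,
	.atomic_state_free = intel_atomic_state_free,
};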
+
void intel_atomic_state_clear(struct drm_atomic_state *s)
{
struct intel_atomic_state *state = to_intel_atomic_state(s);
+
drm_atomic_state_default_clear(&state->base);
+ intel_atomic_clear_global_state(state);
+
state->dpll_set = state->modeset = false;
state->global_state_changed = false;
state->active_pipes = 0;
- memset(&state->min_cdclk, 0, sizeof(state->min_cdclk));
- memset(&state->min_voltage_level, 0, sizeof(state->min_voltage_level));
- memset(&state->cdclk.logical, 0, sizeof(state->cdclk.logical));
- memset(&state->cdclk.actual, 0, sizeof(state->cdclk.actual));
- state->cdclk.pipe = INVALID_PIPE;
}
struct intel_crtc_state *
@@ -520,7 +539,7 @@ intel_atomic_get_crtc_state(struct drm_atomic_state *state,
return to_intel_crtc_state(crtc_state);
}
-int intel_atomic_lock_global_state(struct intel_atomic_state *state)
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
@@ -539,7 +558,7 @@ int intel_atomic_lock_global_state(struct intel_atomic_state *state)
return 0;
}
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state)
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic.h b/drivers/gpu/drm/i915/display/intel_atomic.h
index 74c749dbfb4f..11146292b06f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic.h
@@ -45,6 +45,7 @@ void intel_crtc_destroy_state(struct drm_crtc *crtc,
void intel_crtc_free_hw_state(struct intel_crtc_state *crtc_state);
void intel_crtc_copy_color_blobs(struct intel_crtc_state *crtc_state);
struct drm_atomic_state *intel_atomic_state_alloc(struct drm_device *dev);
+void intel_atomic_state_free(struct drm_atomic_state *state);
void intel_atomic_state_clear(struct drm_atomic_state *state);
struct intel_crtc_state *
@@ -55,8 +56,8 @@ int intel_atomic_setup_scalers(struct drm_i915_private *dev_priv,
struct intel_crtc *intel_crtc,
struct intel_crtc_state *crtc_state);
-int intel_atomic_lock_global_state(struct intel_atomic_state *state);
+int _intel_atomic_lock_global_state(struct intel_atomic_state *state);
-int intel_atomic_serialize_global_state(struct intel_atomic_state *state);
+int _intel_atomic_serialize_global_state(struct intel_atomic_state *state);
#endif /* __INTEL_ATOMIC_H__ */
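The leading underscore marks these as lower-level helpers: with the new intel_global_state machinery, callers lock a single global object's state rather than the whole atomic state, as the audio changes later in this patch do:

	ret = intel_atomic_lock_global_state(&cdclk_state->base);
	if (ret)
		return ret;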
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
index 3e97af682b1b..457b258683d3 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c
@@ -37,6 +37,7 @@
#include "i915_trace.h"
#include "intel_atomic_plane.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_pm.h"
#include "intel_sprite.h"
@@ -132,15 +133,37 @@ intel_plane_destroy_state(struct drm_plane *plane,
kfree(plane_state);
}
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state)
+{
+ unsigned int src_w, src_h, dst_w, dst_h;
+ unsigned int pixel_rate = crtc_state->pixel_rate;
+
+ src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
+ src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
+ dst_w = drm_rect_width(&plane_state->uapi.dst);
+ dst_h = drm_rect_height(&plane_state->uapi.dst);
+
+ /* Downscaling limits the maximum pixel rate */
+ dst_w = min(src_w, dst_w);
+ dst_h = min(src_h, dst_h);
+
+ return DIV_ROUND_UP_ULL(mul_u32_u32(pixel_rate, src_w * src_h),
+ dst_w * dst_h);
+}
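A worked example of the math above, with illustrative numbers: downscaling a 3840x2160 source to a 1920x1080 destination at crtc_state->pixel_rate = 148500 kHz yields 148500 * (3840*2160) / (1920*1080) = 594000 kHz, while upscaling leaves the rate unchanged because dst_w/dst_h are clamped to min(src, dst). The mul_u32_u32()/DIV_ROUND_UP_ULL() pair is what keeps 148500 * 8294400 (about 1.2e12) from overflowing 32 bits.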
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int cpp;
+ unsigned int pixel_rate;
if (!plane_state->uapi.visible)
return 0;
+ pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
+
cpp = fb->format->cpp[0];
/*
@@ -152,45 +175,67 @@ unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
if (fb->format->is_yuv && fb->format->num_planes > 1)
cpp *= 4;
- return cpp * crtc_state->pixel_rate;
+ return pixel_rate * cpp;
}
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane)
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
const struct intel_plane_state *plane_state =
intel_atomic_get_new_plane_state(state, plane);
struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
- struct intel_crtc_state *crtc_state;
+ const struct intel_cdclk_state *cdclk_state;
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc_state *new_crtc_state;
if (!plane_state->uapi.visible || !plane->min_cdclk)
- return false;
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+ new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+
+ new_crtc_state->min_cdclk[plane->id] =
+ plane->min_cdclk(new_crtc_state, plane_state);
- crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ /*
+ * No need to check against the cdclk state if
+ * the min cdclk for the plane doesn't increase.
+ *
+ * Ie. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
+ */
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ old_crtc_state->min_cdclk[plane->id])
+ return 0;
- crtc_state->min_cdclk[plane->id] =
- plane->min_cdclk(crtc_state, plane_state);
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
/*
- * Does the cdclk need to be bumbed up?
+ * No need to recalculate the cdclk state if
+ * the min cdclk for the pipe doesn't increase.
*
- * Note: we obviously need to be called before the new
- * cdclk frequency is calculated so state->cdclk.logical
- * hasn't been populated yet. Hence we look at the old
- * cdclk state under dev_priv->cdclk.logical. This is
- * safe as long we hold at least one crtc mutex (which
- * must be true since we have crtc_state).
+ * Ie. we only ever increase the cdclk due to plane
+ * requirements. This can reduce back and forth
+ * display blinking due to constant cdclk changes.
*/
- if (crtc_state->min_cdclk[plane->id] > dev_priv->cdclk.logical.cdclk) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk (%d kHz) > logical cdclk (%d kHz)\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id],
- dev_priv->cdclk.logical.cdclk);
- return true;
- }
+ if (new_crtc_state->min_cdclk[plane->id] <=
+ cdclk_state->min_cdclk[crtc->pipe])
+ return 0;
+
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min cdclk (%d kHz) > [CRTC:%d:%s] min cdclk (%d kHz)\n",
+ plane->base.base.id, plane->base.name,
+ new_crtc_state->min_cdclk[plane->id],
+ crtc->base.base.id, crtc->base.name,
+ cdclk_state->min_cdclk[crtc->pipe]);
+ *need_cdclk_calc = true;
- return false;
+ return 0;
}
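A sketch of how a caller would consume the new out-parameter (the real call site is outside this hunk; the loop macro is i915's, and intel_modeset_calc_cdclk() as the follow-up step is an assumption):

	bool need_cdclk_calc = false;
	struct intel_plane_state *plane_state;
	struct intel_plane *plane;
	int ret, i;

	for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
		ret = intel_plane_calc_min_cdclk(state, plane, &need_cdclk_calc);
		if (ret)
			return ret;
	}

	/* only pay for a full cdclk recomputation when a plane needs it */
	if (need_cdclk_calc)
		ret = intel_modeset_calc_cdclk(state);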
static void intel_plane_clear_hw_state(struct intel_plane_state *plane_state)
@@ -225,12 +270,9 @@ int intel_plane_atomic_check_with_state(const struct intel_crtc_state *old_crtc_
struct intel_plane_state *new_plane_state)
{
struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane);
- const struct drm_framebuffer *fb;
+ const struct drm_framebuffer *fb = new_plane_state->hw.fb;
int ret;
- intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
- fb = new_plane_state->hw.fb;
-
new_crtc_state->active_planes &= ~BIT(plane->id);
new_crtc_state->nv12_planes &= ~BIT(plane->id);
new_crtc_state->c8_planes &= ~BIT(plane->id);
@@ -292,6 +334,7 @@ int intel_plane_atomic_check(struct intel_atomic_state *state,
const struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
new_plane_state->uapi.visible = false;
if (!crtc)
return 0;
diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
index 5cedafdddb55..a6bbf42bae1f 100644
--- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h
+++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h
@@ -18,6 +18,9 @@ struct intel_plane_state;
extern const struct drm_plane_helper_funcs intel_plane_helper_funcs;
+unsigned int intel_plane_pixel_rate(const struct intel_crtc_state *crtc_state,
+ const struct intel_plane_state *plane_state);
+
unsigned int intel_plane_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state);
void intel_plane_copy_uapi_to_hw_state(struct intel_plane_state *plane_state,
@@ -46,7 +49,8 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *old_plane_state,
struct intel_plane_state *plane_state);
-bool intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
- struct intel_plane *plane);
+int intel_plane_calc_min_cdclk(struct intel_atomic_state *state,
+ struct intel_plane *plane,
+ bool *need_cdclk_calc);
#endif /* __INTEL_ATOMIC_PLANE_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_audio.c b/drivers/gpu/drm/i915/display/intel_audio.c
index b18040793d9e..62f234f641de 100644
--- a/drivers/gpu/drm/i915/display/intel_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_audio.c
@@ -30,6 +30,7 @@
#include "i915_drv.h"
#include "intel_atomic.h"
#include "intel_audio.h"
+#include "intel_cdclk.h"
#include "intel_display_types.h"
#include "intel_lpe_audio.h"
@@ -148,6 +149,10 @@ static const struct {
{ 74250, AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 },
{ 148352, AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 },
{ 148500, AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 },
+ { 296703, AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 },
+ { 297000, AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 },
+ { 593407, AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 },
+ { 594000, AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 },
};
/* HDMI N/CTS table */
@@ -233,6 +238,7 @@ static const struct hdmi_aud_ncts hdmi_aud_ncts_36bpp[] = {
/* get AUD_CONFIG_PIXEL_CLOCK_HDMI_* value for mode */
static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
int i;
@@ -242,6 +248,9 @@ static u32 audio_config_hdmi_pixel_clock(const struct intel_crtc_state *crtc_sta
break;
}
+ if (INTEL_GEN(dev_priv) < 12 && adjusted_mode->crtc_clock > 148500)
+ i = ARRAY_SIZE(hdmi_audio_clock);
+
if (i == ARRAY_SIZE(hdmi_audio_clock)) {
DRM_DEBUG_KMS("HDMI audio pixel clock setting for %d not found, falling back to defaults\n",
adjusted_mode->crtc_clock);
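Note on the gating above: the four table entries added earlier in this file (296703 through 594000) are only selectable on gen12+; forcing i to ARRAY_SIZE() on older platforms makes clocks above 148500 kHz take this "not found" fallback deliberately.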
@@ -291,18 +300,18 @@ static bool intel_eld_uptodate(struct drm_connector *connector,
u32 tmp;
int i;
- tmp = I915_READ(reg_eldv);
+ tmp = intel_de_read(dev_priv, reg_eldv);
tmp &= bits_eldv;
if (!tmp)
return false;
- tmp = I915_READ(reg_elda);
+ tmp = intel_de_read(dev_priv, reg_elda);
tmp &= ~bits_elda;
- I915_WRITE(reg_elda, tmp);
+ intel_de_write(dev_priv, reg_elda, tmp);
for (i = 0; i < drm_eld_size(eld) / 4; i++)
- if (I915_READ(reg_edid) != *((const u32 *)eld + i))
+ if (intel_de_read(dev_priv, reg_edid) != *((const u32 *)eld + i))
return false;
return true;
@@ -315,18 +324,18 @@ static void g4x_audio_codec_disable(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 eldv, tmp;
- DRM_DEBUG_KMS("Disable audio codec\n");
+ drm_dbg_kms(&dev_priv->drm, "Disable audio codec\n");
- tmp = I915_READ(G4X_AUD_VID_DID);
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
eldv = G4X_ELDV_DEVCTG;
/* Invalidate ELD */
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp &= ~eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
}
static void g4x_audio_codec_enable(struct intel_encoder *encoder,
@@ -340,9 +349,10 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec, %u bytes ELD\n", drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm, "Enable audio codec, %u bytes ELD\n",
+ drm_eld_size(eld));
- tmp = I915_READ(G4X_AUD_VID_DID);
+ tmp = intel_de_read(dev_priv, G4X_AUD_VID_DID);
if (tmp == INTEL_AUDIO_DEVBLC || tmp == INTEL_AUDIO_DEVCL)
eldv = G4X_ELDV_DEVCL_DEVBLC;
else
@@ -354,19 +364,20 @@ static void g4x_audio_codec_enable(struct intel_encoder *encoder,
G4X_HDMIW_HDMIEDID))
return;
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp &= ~(eldv | G4X_ELD_ADDR_MASK);
len = (tmp >> 9) & 0x1f; /* ELD buffer size */
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
len = min(drm_eld_size(eld) / 4, len);
- DRM_DEBUG_DRIVER("ELD size %d\n", len);
+ drm_dbg(&dev_priv->drm, "ELD size %d\n", len);
for (i = 0; i < len; i++)
- I915_WRITE(G4X_HDMIW_HDMIEDID, *((const u32 *)eld + i));
+ intel_de_write(dev_priv, G4X_HDMIW_HDMIEDID,
+ *((const u32 *)eld + i));
- tmp = I915_READ(G4X_AUD_CNTL_ST);
+ tmp = intel_de_read(dev_priv, G4X_AUD_CNTL_ST);
tmp |= eldv;
- I915_WRITE(G4X_AUD_CNTL_ST, tmp);
+ intel_de_write(dev_priv, G4X_AUD_CNTL_ST, tmp);
}
static void
@@ -384,11 +395,12 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
nm = audio_config_dp_get_n_m(crtc_state, rate);
if (nm)
- DRM_DEBUG_KMS("using Maud %u, Naud %u\n", nm->m, nm->n);
+ drm_dbg_kms(&dev_priv->drm, "using Maud %u, Naud %u\n", nm->m,
+ nm->n);
else
- DRM_DEBUG_KMS("using automatic Maud, Naud\n");
+ drm_dbg_kms(&dev_priv->drm, "using automatic Maud, Naud\n");
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -400,9 +412,9 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_CONFIG_M_MASK;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
@@ -413,7 +425,7 @@ hsw_dp_audio_config_update(struct intel_encoder *encoder,
tmp |= AUD_M_CTS_M_PROG_ENABLE;
}
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -429,7 +441,7 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
rate = acomp ? acomp->aud_sample_rate[port] : 0;
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
@@ -437,25 +449,25 @@ hsw_hdmi_audio_config_update(struct intel_encoder *encoder,
n = audio_config_hdmi_get_n(crtc_state, rate);
if (n != 0) {
- DRM_DEBUG_KMS("using N %d\n", n);
+ drm_dbg_kms(&dev_priv->drm, "using N %d\n", n);
tmp &= ~AUD_CONFIG_N_MASK;
tmp |= AUD_CONFIG_N(n);
tmp |= AUD_CONFIG_N_PROG_ENABLE;
} else {
- DRM_DEBUG_KMS("using automatic N\n");
+ drm_dbg_kms(&dev_priv->drm, "using automatic N\n");
}
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
/*
* Let's disable "Enable CTS or M Prog bit"
* and let HW calculate the value
*/
- tmp = I915_READ(HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder));
tmp &= ~AUD_M_CTS_M_PROG_ENABLE;
tmp &= ~AUD_M_CTS_M_VALUE_INDEX;
- I915_WRITE(HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_M_CTS_ENABLE(cpu_transcoder), tmp);
}
static void
@@ -476,26 +488,26 @@ static void hsw_audio_codec_disable(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 tmp;
- DRM_DEBUG_KMS("Disable audio codec on transcoder %s\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm, "Disable audio codec on transcoder %s\n",
+ transcoder_name(cpu_transcoder));
mutex_lock(&dev_priv->av_mutex);
/* Disable timestamps */
- tmp = I915_READ(HSW_AUD_CFG(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_CFG(cpu_transcoder));
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(HSW_AUD_CFG(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_CFG(cpu_transcoder), tmp);
/* Invalidate ELD */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
tmp &= ~AUDIO_OUTPUT_ENABLE(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
mutex_unlock(&dev_priv->av_mutex);
}
@@ -511,16 +523,17 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
u32 tmp;
int len, i;
- DRM_DEBUG_KMS("Enable audio codec on transcoder %s, %u bytes ELD\n",
- transcoder_name(cpu_transcoder), drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm,
+ "Enable audio codec on transcoder %s, %u bytes ELD\n",
+ transcoder_name(cpu_transcoder), drm_eld_size(eld));
mutex_lock(&dev_priv->av_mutex);
/* Enable audio presence detect, invalidate ELD */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_OUTPUT_ENABLE(cpu_transcoder);
tmp &= ~AUDIO_ELD_VALID(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
/*
* FIXME: We're supposed to wait for vblank here, but we have vblanks
@@ -530,19 +543,20 @@ static void hsw_audio_codec_enable(struct intel_encoder *encoder,
*/
/* Reset ELD write address */
- tmp = I915_READ(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder));
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
+ intel_de_write(dev_priv, HSW_AUD_DIP_ELD_CTRL(cpu_transcoder), tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(HSW_AUD_EDID_DATA(cpu_transcoder), *((const u32 *)eld + i));
+ intel_de_write(dev_priv, HSW_AUD_EDID_DATA(cpu_transcoder),
+ *((const u32 *)eld + i));
/* ELD valid */
- tmp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+ tmp = intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD);
tmp |= AUDIO_ELD_VALID(cpu_transcoder);
- I915_WRITE(HSW_AUD_PIN_ELD_CP_VLD, tmp);
+ intel_de_write(dev_priv, HSW_AUD_PIN_ELD_CP_VLD, tmp);
/* Enable timestamps */
hsw_audio_config_update(encoder, crtc_state);
@@ -561,11 +575,12 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
u32 tmp, eldv;
i915_reg_t aud_config, aud_cntrl_st2;
- DRM_DEBUG_KMS("Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "Disable audio codec on [ENCODER:%d:%s], pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe));
- if (WARN_ON(port == PORT_A))
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
if (HAS_PCH_IBX(dev_priv)) {
@@ -580,21 +595,21 @@ static void ilk_audio_codec_disable(struct intel_encoder *encoder,
}
/* Disable timestamps */
- tmp = I915_READ(aud_config);
+ tmp = intel_de_read(dev_priv, aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp |= AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_UPPER_N_MASK;
tmp &= ~AUD_CONFIG_LOWER_N_MASK;
if (intel_crtc_has_dp_encoder(old_crtc_state))
tmp |= AUD_CONFIG_N_VALUE_INDEX;
- I915_WRITE(aud_config, tmp);
+ intel_de_write(dev_priv, aud_config, tmp);
eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp &= ~eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
}
static void ilk_audio_codec_enable(struct intel_encoder *encoder,
@@ -611,11 +626,12 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
int len, i;
i915_reg_t hdmiw_hdmiedid, aud_config, aud_cntl_st, aud_cntrl_st2;
- DRM_DEBUG_KMS("Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
- encoder->base.base.id, encoder->base.name,
- pipe_name(pipe), drm_eld_size(eld));
+ drm_dbg_kms(&dev_priv->drm,
+ "Enable audio codec on [ENCODER:%d:%s], pipe %c, %u bytes ELD\n",
+ encoder->base.base.id, encoder->base.name,
+ pipe_name(pipe), drm_eld_size(eld));
- if (WARN_ON(port == PORT_A))
+ if (drm_WARN_ON(&dev_priv->drm, port == PORT_A))
return;
/*
@@ -646,27 +662,28 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
eldv = IBX_ELD_VALID(port);
/* Invalidate ELD */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp &= ~eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
/* Reset ELD write address */
- tmp = I915_READ(aud_cntl_st);
+ tmp = intel_de_read(dev_priv, aud_cntl_st);
tmp &= ~IBX_ELD_ADDRESS_MASK;
- I915_WRITE(aud_cntl_st, tmp);
+ intel_de_write(dev_priv, aud_cntl_st, tmp);
/* Up to 84 bytes of hw ELD buffer */
len = min(drm_eld_size(eld), 84);
for (i = 0; i < len / 4; i++)
- I915_WRITE(hdmiw_hdmiedid, *((const u32 *)eld + i));
+ intel_de_write(dev_priv, hdmiw_hdmiedid,
+ *((const u32 *)eld + i));
/* ELD valid */
- tmp = I915_READ(aud_cntrl_st2);
+ tmp = intel_de_read(dev_priv, aud_cntrl_st2);
tmp |= eldv;
- I915_WRITE(aud_cntrl_st2, tmp);
+ intel_de_write(dev_priv, aud_cntrl_st2, tmp);
/* Enable timestamps */
- tmp = I915_READ(aud_config);
+ tmp = intel_de_read(dev_priv, aud_config);
tmp &= ~AUD_CONFIG_N_VALUE_INDEX;
tmp &= ~AUD_CONFIG_N_PROG_ENABLE;
tmp &= ~AUD_CONFIG_PIXEL_CLOCK_HDMI_MASK;
@@ -674,7 +691,7 @@ static void ilk_audio_codec_enable(struct intel_encoder *encoder,
tmp |= AUD_CONFIG_N_VALUE_INDEX;
else
tmp |= audio_config_hdmi_pixel_clock(crtc_state);
- I915_WRITE(aud_config, tmp);
+ intel_de_write(dev_priv, aud_config, tmp);
}
/**
@@ -701,14 +718,15 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
/* FIXME precompute the ELD in .compute_config() */
if (!connector->eld[0])
- DRM_DEBUG_KMS("Bogus ELD on [CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bogus ELD on [CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
- DRM_DEBUG_DRIVER("ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id,
- connector->name,
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg(&dev_priv->drm, "ELD on [CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id,
+ connector->name,
+ encoder->base.base.id,
+ encoder->base.name);
connector->eld[6] = drm_av_sync_delay(connector, adjusted_mode) / 2;
@@ -800,37 +818,61 @@ void intel_init_audio_hooks(struct drm_i915_private *dev_priv)
}
}
+static int glk_force_audio_cdclk_commit(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ bool enable)
+{
+ struct intel_cdclk_state *cdclk_state;
+ int ret;
+
+ /* need to hold at least one crtc lock for the global state */
+ ret = drm_modeset_lock(&crtc->base.mutex, state->base.acquire_ctx);
+ if (ret)
+ return ret;
+
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ cdclk_state->force_min_cdclk_changed = true;
+ cdclk_state->force_min_cdclk = enable ? 2 * 96000 : 0;
+
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
+ if (ret)
+ return ret;
+
+ return drm_atomic_commit(&state->base);
+}
+
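(The 2 * 96000 encodes the rule that CDCLK must stay at or above twice the 96 MHz BCLK while audio is powered, which the existing "Force CDCLK to 2*BCLK" comment at the call site below spells out.)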
static void glk_force_audio_cdclk(struct drm_i915_private *dev_priv,
bool enable)
{
struct drm_modeset_acquire_ctx ctx;
struct drm_atomic_state *state;
+ struct intel_crtc *crtc;
int ret;
+ crtc = intel_get_first_crtc(dev_priv);
+ if (!crtc)
+ return;
+
drm_modeset_acquire_init(&ctx, 0);
state = drm_atomic_state_alloc(&dev_priv->drm);
- if (WARN_ON(!state))
+ if (drm_WARN_ON(&dev_priv->drm, !state))
return;
state->acquire_ctx = &ctx;
retry:
- to_intel_atomic_state(state)->cdclk.force_min_cdclk_changed = true;
- to_intel_atomic_state(state)->cdclk.force_min_cdclk =
- enable ? 2 * 96000 : 0;
-
- /* Protects dev_priv->cdclk.force_min_cdclk */
- ret = intel_atomic_lock_global_state(to_intel_atomic_state(state));
- if (!ret)
- ret = drm_atomic_commit(state);
-
+ ret = glk_force_audio_cdclk_commit(to_intel_atomic_state(state), crtc,
+ enable);
if (ret == -EDEADLK) {
drm_atomic_state_clear(state);
drm_modeset_backoff(&ctx);
goto retry;
}
- WARN_ON(ret);
+ drm_WARN_ON(&dev_priv->drm, ret);
drm_atomic_state_put(state);
@@ -850,9 +892,11 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
if (dev_priv->audio_power_refcount++ == 0) {
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
- I915_WRITE(AUD_FREQ_CNTRL, dev_priv->audio_freq_cntrl);
- DRM_DEBUG_KMS("restored AUD_FREQ_CNTRL to 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ intel_de_write(dev_priv, AUD_FREQ_CNTRL,
+ dev_priv->audio_freq_cntrl);
+ drm_dbg_kms(&dev_priv->drm,
+ "restored AUD_FREQ_CNTRL to 0x%x\n",
+ dev_priv->audio_freq_cntrl);
}
/* Force CDCLK to 2*BCLK as long as we need audio powered. */
@@ -860,9 +904,8 @@ static unsigned long i915_audio_component_get_power(struct device *kdev)
glk_force_audio_cdclk(dev_priv, true);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE(AUD_PIN_BUF_CTL,
- (I915_READ(AUD_PIN_BUF_CTL) |
- AUD_PIN_BUF_ENABLE));
+ intel_de_write(dev_priv, AUD_PIN_BUF_CTL,
+ (intel_de_read(dev_priv, AUD_PIN_BUF_CTL) | AUD_PIN_BUF_ENABLE));
}
return ret;
@@ -897,15 +940,15 @@ static void i915_audio_component_codec_wake_override(struct device *kdev,
* Enable/disable generating the codec wake signal, overriding the
* internal logic to generate the codec wake to controller.
*/
- tmp = I915_READ(HSW_AUD_CHICKENBIT);
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
tmp &= ~SKL_AUD_CODEC_WAKE_SIGNAL;
- I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
if (enable) {
- tmp = I915_READ(HSW_AUD_CHICKENBIT);
+ tmp = intel_de_read(dev_priv, HSW_AUD_CHICKENBIT);
tmp |= SKL_AUD_CODEC_WAKE_SIGNAL;
- I915_WRITE(HSW_AUD_CHICKENBIT, tmp);
+ intel_de_write(dev_priv, HSW_AUD_CHICKENBIT, tmp);
usleep_range(1000, 1500);
}
@@ -917,7 +960,7 @@ static int i915_audio_component_get_cdclk_freq(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
- if (WARN_ON_ONCE(!HAS_DDI(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_DDI(dev_priv)))
return -ENODEV;
return dev_priv->cdclk.hw.cdclk;
@@ -940,7 +983,8 @@ static struct intel_encoder *get_saved_enc(struct drm_i915_private *dev_priv,
/* MST */
if (pipe >= 0) {
- if (WARN_ON(pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe >= ARRAY_SIZE(dev_priv->av_enc_map)))
return NULL;
encoder = dev_priv->av_enc_map[pipe];
@@ -992,7 +1036,8 @@ static int i915_audio_component_sync_audio_rate(struct device *kdev, int port,
/* 1. get the pipe */
encoder = get_saved_enc(dev_priv, port, pipe);
if (!encoder || !encoder->base.crtc) {
- DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
err = -ENODEV;
goto unlock;
}
@@ -1023,7 +1068,8 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
intel_encoder = get_saved_enc(dev_priv, port, pipe);
if (!intel_encoder) {
- DRM_DEBUG_KMS("Not valid for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "Not valid for port %c\n",
+ port_name(port));
mutex_unlock(&dev_priv->av_mutex);
return ret;
}
@@ -1057,10 +1103,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->base.ops || acomp->base.dev))
+ if (drm_WARN_ON(&dev_priv->drm, acomp->base.ops || acomp->base.dev))
return -EEXIST;
- if (WARN_ON(!device_link_add(hda_kdev, i915_kdev, DL_FLAG_STATELESS)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !device_link_add(hda_kdev, i915_kdev,
+ DL_FLAG_STATELESS)))
return -ENOMEM;
drm_modeset_lock_all(&dev_priv->drm);
@@ -1119,15 +1167,18 @@ static void i915_audio_component_init(struct drm_i915_private *dev_priv)
&i915_audio_component_bind_ops,
I915_COMPONENT_AUDIO);
if (ret < 0) {
- DRM_ERROR("failed to add audio component (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "failed to add audio component (%d)\n", ret);
/* continue with reduced functionality */
return;
}
if (IS_TIGERLAKE(dev_priv) || IS_ICELAKE(dev_priv)) {
- dev_priv->audio_freq_cntrl = I915_READ(AUD_FREQ_CNTRL);
- DRM_DEBUG_KMS("init value of AUD_FREQ_CNTRL of 0x%x\n",
- dev_priv->audio_freq_cntrl);
+ dev_priv->audio_freq_cntrl = intel_de_read(dev_priv,
+ AUD_FREQ_CNTRL);
+ drm_dbg_kms(&dev_priv->drm,
+ "init value of AUD_FREQ_CNTRL of 0x%x\n",
+ dev_priv->audio_freq_cntrl);
}
dev_priv->audio_component_registered = true;
diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c
index 8beac06e3f10..839124647202 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.c
+++ b/drivers/gpu/drm/i915/display/intel_bios.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_dp_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_display.h"
#include "display/intel_display_types.h"
@@ -228,17 +227,20 @@ parse_panel_options(struct drm_i915_private *dev_priv,
ret = intel_opregion_get_panel_type(dev_priv);
if (ret >= 0) {
- WARN_ON(ret > 0xf);
+ drm_WARN_ON(&dev_priv->drm, ret > 0xf);
panel_type = ret;
- DRM_DEBUG_KMS("Panel type: %d (OpRegion)\n", panel_type);
+ drm_dbg_kms(&dev_priv->drm, "Panel type: %d (OpRegion)\n",
+ panel_type);
} else {
if (lvds_options->panel_type > 0xf) {
- DRM_DEBUG_KMS("Invalid VBT panel type 0x%x\n",
- lvds_options->panel_type);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT panel type 0x%x\n",
+ lvds_options->panel_type);
return;
}
panel_type = lvds_options->panel_type;
- DRM_DEBUG_KMS("Panel type: %d (VBT)\n", panel_type);
+ drm_dbg_kms(&dev_priv->drm, "Panel type: %d (VBT)\n",
+ panel_type);
}
dev_priv->vbt.panel_type = panel_type;
@@ -253,15 +255,17 @@ parse_panel_options(struct drm_i915_private *dev_priv,
switch (drrs_mode) {
case 0:
dev_priv->vbt.drrs_type = STATIC_DRRS_SUPPORT;
- DRM_DEBUG_KMS("DRRS supported mode is static\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS supported mode is static\n");
break;
case 2:
dev_priv->vbt.drrs_type = SEAMLESS_DRRS_SUPPORT;
- DRM_DEBUG_KMS("DRRS supported mode is seamless\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported mode is seamless\n");
break;
default:
dev_priv->vbt.drrs_type = DRRS_NOT_SUPPORTED;
- DRM_DEBUG_KMS("DRRS not supported (VBT input)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS not supported (VBT input)\n");
break;
}
}
@@ -298,7 +302,8 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT legacy lfp table:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found panel mode in BIOS VBT legacy lfp table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data,
@@ -309,8 +314,9 @@ parse_lfp_panel_dtd(struct drm_i915_private *dev_priv,
if (fp_timing->x_res == panel_fixed_mode->hdisplay &&
fp_timing->y_res == panel_fixed_mode->vdisplay) {
dev_priv->vbt.bios_lvds_val = fp_timing->lvds_reg_val;
- DRM_DEBUG_KMS("VBT initial LVDS value %x\n",
- dev_priv->vbt.bios_lvds_val);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT initial LVDS value %x\n",
+ dev_priv->vbt.bios_lvds_val);
}
}
}
@@ -329,20 +335,22 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
return;
if (generic_dtd->gdtd_size < sizeof(struct generic_dtd_entry)) {
- DRM_ERROR("GDTD size %u is too small.\n",
- generic_dtd->gdtd_size);
+ drm_err(&dev_priv->drm, "GDTD size %u is too small.\n",
+ generic_dtd->gdtd_size);
return;
} else if (generic_dtd->gdtd_size !=
sizeof(struct generic_dtd_entry)) {
- DRM_ERROR("Unexpected GDTD size %u\n", generic_dtd->gdtd_size);
+ drm_err(&dev_priv->drm, "Unexpected GDTD size %u\n",
+ generic_dtd->gdtd_size);
/* DTD has unknown fields, but keep going */
}
num_dtd = (get_blocksize(generic_dtd) -
sizeof(struct bdb_generic_dtd)) / generic_dtd->gdtd_size;
if (dev_priv->vbt.panel_type >= num_dtd) {
- DRM_ERROR("Panel type %d not found in table of %d DTD's\n",
- dev_priv->vbt.panel_type, num_dtd);
+ drm_err(&dev_priv->drm,
+ "Panel type %d not found in table of %d DTD's\n",
+ dev_priv->vbt.panel_type, num_dtd);
return;
}
@@ -357,14 +365,16 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
panel_fixed_mode->hdisplay + dtd->hfront_porch;
panel_fixed_mode->hsync_end =
panel_fixed_mode->hsync_start + dtd->hsync;
- panel_fixed_mode->htotal = panel_fixed_mode->hsync_end;
+ panel_fixed_mode->htotal =
+ panel_fixed_mode->hdisplay + dtd->hblank;
panel_fixed_mode->vdisplay = dtd->vactive;
panel_fixed_mode->vsync_start =
panel_fixed_mode->vdisplay + dtd->vfront_porch;
panel_fixed_mode->vsync_end =
panel_fixed_mode->vsync_start + dtd->vsync;
- panel_fixed_mode->vtotal = panel_fixed_mode->vsync_end;
+ panel_fixed_mode->vtotal =
+ panel_fixed_mode->vdisplay + dtd->vblank;
panel_fixed_mode->clock = dtd->pixel_clock;
panel_fixed_mode->width_mm = dtd->width_mm;
@@ -383,7 +393,8 @@ parse_generic_dtd(struct drm_i915_private *dev_priv,
else
panel_fixed_mode->flags |= DRM_MODE_FLAG_NVSYNC;
- DRM_DEBUG_KMS("Found panel mode in BIOS VBT generic dtd table:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found panel mode in BIOS VBT generic dtd table:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
dev_priv->vbt.lfp_lvds_vbt_mode = panel_fixed_mode;
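To see what the htotal/vtotal correction above changes, take illustrative DTD values hactive=1920, hfront_porch=48, hsync=32, hblank=160: the code now computes hsync_start=1968, hsync_end=2000 and htotal=1920+160=2080, whereas the old htotal = hsync_end (2000) silently dropped the back porch. The vertical totals get the same fix.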
@@ -420,8 +431,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
return;
if (backlight_data->entry_size != sizeof(backlight_data->data[0])) {
- DRM_DEBUG_KMS("Unsupported backlight data entry size %u\n",
- backlight_data->entry_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported backlight data entry size %u\n",
+ backlight_data->entry_size);
return;
}
@@ -429,8 +441,9 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.present = entry->type == BDB_BACKLIGHT_TYPE_PWM;
if (!dev_priv->vbt.backlight.present) {
- DRM_DEBUG_KMS("PWM backlight not present in VBT (type %u)\n",
- entry->type);
+ drm_dbg_kms(&dev_priv->drm,
+ "PWM backlight not present in VBT (type %u)\n",
+ entry->type);
return;
}
@@ -447,13 +460,14 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv,
dev_priv->vbt.backlight.pwm_freq_hz = entry->pwm_freq_hz;
dev_priv->vbt.backlight.active_low_pwm = entry->active_low_pwm;
dev_priv->vbt.backlight.min_brightness = entry->min_brightness;
- DRM_DEBUG_KMS("VBT backlight PWM modulation frequency %u Hz, "
- "active %s, min brightness %u, level %u, controller %u\n",
- dev_priv->vbt.backlight.pwm_freq_hz,
- dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
- dev_priv->vbt.backlight.min_brightness,
- backlight_data->level[panel_type],
- dev_priv->vbt.backlight.controller);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT backlight PWM modulation frequency %u Hz, "
+ "active %s, min brightness %u, level %u, controller %u\n",
+ dev_priv->vbt.backlight.pwm_freq_hz,
+ dev_priv->vbt.backlight.active_low_pwm ? "low" : "high",
+ dev_priv->vbt.backlight.min_brightness,
+ backlight_data->level[panel_type],
+ dev_priv->vbt.backlight.controller);
}
/* Try to find sdvo panel data */
@@ -467,7 +481,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
index = i915_modparams.vbt_sdvo_panel_type;
if (index == -2) {
- DRM_DEBUG_KMS("Ignore SDVO panel mode from BIOS VBT tables.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignore SDVO panel mode from BIOS VBT tables.\n");
return;
}
@@ -493,7 +508,8 @@ parse_sdvo_panel_data(struct drm_i915_private *dev_priv,
dev_priv->vbt.sdvo_lvds_vbt_mode = panel_fixed_mode;
- DRM_DEBUG_KMS("Found SDVO panel mode in BIOS VBT tables:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found SDVO panel mode in BIOS VBT tables:\n");
drm_mode_debug_printmodeline(panel_fixed_mode);
}
@@ -538,13 +554,14 @@ parse_general_features(struct drm_i915_private *dev_priv,
} else {
dev_priv->vbt.orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
}
- DRM_DEBUG_KMS("BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
- dev_priv->vbt.int_tv_support,
- dev_priv->vbt.int_crt_support,
- dev_priv->vbt.lvds_use_ssc,
- dev_priv->vbt.lvds_ssc_freq,
- dev_priv->vbt.display_clock_mode,
- dev_priv->vbt.fdi_rx_polarity_inverted);
+ drm_dbg_kms(&dev_priv->drm,
+ "BDB_GENERAL_FEATURES int_tv_support %d int_crt_support %d lvds_use_ssc %d lvds_ssc_freq %d display_clock_mode %d fdi_rx_polarity_inverted %d\n",
+ dev_priv->vbt.int_tv_support,
+ dev_priv->vbt.int_crt_support,
+ dev_priv->vbt.lvds_use_ssc,
+ dev_priv->vbt.lvds_ssc_freq,
+ dev_priv->vbt.display_clock_mode,
+ dev_priv->vbt.fdi_rx_polarity_inverted);
}
static const struct child_device_config *
@@ -566,7 +583,7 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
* accurate and doesn't have to be, as long as it's not too strict.
*/
if (!IS_GEN_RANGE(dev_priv, 3, 7)) {
- DRM_DEBUG_KMS("Skipping SDVO device mapping\n");
+ drm_dbg_kms(&dev_priv->drm, "Skipping SDVO device mapping\n");
return;
}
@@ -584,14 +601,16 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
if (child->dvo_port != DEVICE_PORT_DVOB &&
child->dvo_port != DEVICE_PORT_DVOC) {
/* skip the incorrect SDVO port */
- DRM_DEBUG_KMS("Incorrect SDVO port. Skip it\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Incorrect SDVO port. Skip it\n");
continue;
}
- DRM_DEBUG_KMS("the SDVO device with slave addr %2x is found on"
- " %s port\n",
- child->slave_addr,
- (child->dvo_port == DEVICE_PORT_DVOB) ?
- "SDVOB" : "SDVOC");
+ drm_dbg_kms(&dev_priv->drm,
+ "the SDVO device with slave addr %2x is found on"
+ " %s port\n",
+ child->slave_addr,
+ (child->dvo_port == DEVICE_PORT_DVOB) ?
+ "SDVOB" : "SDVOC");
mapping = &dev_priv->vbt.sdvo_mappings[child->dvo_port - 1];
if (!mapping->initialized) {
mapping->dvo_port = child->dvo_port;
@@ -600,28 +619,30 @@ parse_sdvo_device_mapping(struct drm_i915_private *dev_priv, u8 bdb_version)
mapping->ddc_pin = child->ddc_pin;
mapping->i2c_pin = child->i2c_pin;
mapping->initialized = 1;
- DRM_DEBUG_KMS("SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
- mapping->dvo_port,
- mapping->slave_addr,
- mapping->dvo_wiring,
- mapping->ddc_pin,
- mapping->i2c_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO device: dvo=%x, addr=%x, wiring=%d, ddc_pin=%d, i2c_pin=%d\n",
+ mapping->dvo_port, mapping->slave_addr,
+ mapping->dvo_wiring, mapping->ddc_pin,
+ mapping->i2c_pin);
} else {
- DRM_DEBUG_KMS("Maybe one SDVO port is shared by "
- "two SDVO device.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Maybe one SDVO port is shared by "
+ "two SDVO device.\n");
}
if (child->slave2_addr) {
/* Maybe this is a SDVO device with multiple inputs */
/* And the mapping info is not added */
- DRM_DEBUG_KMS("there exists the slave2_addr. Maybe this"
- " is a SDVO device with multiple inputs.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "there exists the slave2_addr. Maybe this"
+ " is a SDVO device with multiple inputs.\n");
}
count++;
}
if (!count) {
/* No SDVO device info is found */
- DRM_DEBUG_KMS("No SDVO device info is found in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device info is found in VBT\n");
}
}
@@ -662,7 +683,8 @@ parse_driver_features(struct drm_i915_private *dev_priv,
}
if (bdb->version < 228) {
- DRM_DEBUG_KMS("DRRS State Enabled:%d\n", driver->drrs_enabled);
+ drm_dbg_kms(&dev_priv->drm, "DRRS State Enabled:%d\n",
+ driver->drrs_enabled);
/*
* If DRRS is not supported, drrs_type has to be set to 0.
* This is because, VBT is configured in such a way that
@@ -686,7 +708,7 @@ parse_power_conservation_features(struct drm_i915_private *dev_priv,
if (bdb->version < 228)
return;
- power = find_section(bdb, BDB_LVDS_POWER);
+ power = find_section(bdb, BDB_LFP_POWER);
if (!power)
return;
@@ -740,8 +762,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.rate = DP_LINK_BW_2_7;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP link rate value %u\n",
- edp_link_params->rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP link rate value %u\n",
+ edp_link_params->rate);
break;
}
@@ -756,8 +779,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.lanes = 4;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP lane count value %u\n",
- edp_link_params->lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP lane count value %u\n",
+ edp_link_params->lanes);
break;
}
@@ -775,8 +799,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.preemphasis = DP_TRAIN_PRE_EMPH_LEVEL_3;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP pre-emphasis value %u\n",
- edp_link_params->preemphasis);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP pre-emphasis value %u\n",
+ edp_link_params->preemphasis);
break;
}
@@ -794,8 +819,9 @@ parse_edp(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.edp.vswing = DP_TRAIN_VOLTAGE_SWING_LEVEL_3;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown eDP voltage swing value %u\n",
- edp_link_params->vswing);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown eDP voltage swing value %u\n",
+ edp_link_params->vswing);
break;
}
@@ -822,7 +848,7 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
psr = find_section(bdb, BDB_PSR);
if (!psr) {
- DRM_DEBUG_KMS("No PSR BDB found.\n");
+ drm_dbg_kms(&dev_priv->drm, "No PSR BDB found.\n");
return;
}
@@ -849,8 +875,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT;
break;
default:
- DRM_DEBUG_KMS("VBT has unknown PSR lines to wait %u\n",
- psr_table->lines_to_wait);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unknown PSR lines to wait %u\n",
+ psr_table->lines_to_wait);
break;
}
@@ -872,8 +899,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp1_wakeup_time_us = 0;
break;
default:
- DRM_DEBUG_KMS("VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
- psr_table->tp1_wakeup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT tp1 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp1_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp1_wakeup_time_us = 2500;
@@ -891,8 +919,9 @@ parse_psr(struct drm_i915_private *dev_priv, const struct bdb_header *bdb)
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 0;
break;
default:
- DRM_DEBUG_KMS("VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
- psr_table->tp2_tp3_wakeup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT tp2_tp3 wakeup time value %d is outside range[0-3], defaulting to max value 2500us\n",
+ psr_table->tp2_tp3_wakeup_time);
/* fallthrough */
case 2:
dev_priv->vbt.psr.tp2_tp3_wakeup_time_us = 2500;
@@ -998,12 +1027,12 @@ parse_mipi_config(struct drm_i915_private *dev_priv,
*/
start = find_section(bdb, BDB_MIPI_CONFIG);
if (!start) {
- DRM_DEBUG_KMS("No MIPI config BDB found");
+ drm_dbg_kms(&dev_priv->drm, "No MIPI config BDB found");
return;
}
- DRM_DEBUG_DRIVER("Found MIPI Config block, panel index = %d\n",
- panel_type);
+ drm_dbg(&dev_priv->drm, "Found MIPI Config block, panel index = %d\n",
+ panel_type);
/*
* get hold of the correct configuration block and pps data as per
@@ -1218,7 +1247,8 @@ static int get_init_otp_deassert_fragment_len(struct drm_i915_private *dev_priv)
const u8 *data = dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
int index, len;
- if (WARN_ON(!data || dev_priv->vbt.dsi.seq_version != 1))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !data || dev_priv->vbt.dsi.seq_version != 1))
return 0;
/* index = 1 to skip sequence byte */
@@ -1271,7 +1301,8 @@ static void fixup_mipi_sequences(struct drm_i915_private *dev_priv)
if (!len)
return;
- DRM_DEBUG_KMS("Using init OTP fragment to deassert reset\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Using init OTP fragment to deassert reset\n");
/* Copy the fragment, update seq byte and terminate it */
init_otp = (u8 *)dev_priv->vbt.dsi.sequence[MIPI_SEQ_INIT_OTP];
@@ -1306,18 +1337,21 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
sequence = find_section(bdb, BDB_MIPI_SEQUENCE);
if (!sequence) {
- DRM_DEBUG_KMS("No MIPI Sequence found, parsing complete\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No MIPI Sequence found, parsing complete\n");
return;
}
/* Fail gracefully for forward incompatible sequence block. */
if (sequence->version >= 4) {
- DRM_ERROR("Unable to parse MIPI Sequence Block v%u\n",
- sequence->version);
+ drm_err(&dev_priv->drm,
+ "Unable to parse MIPI Sequence Block v%u\n",
+ sequence->version);
return;
}
- DRM_DEBUG_DRIVER("Found MIPI sequence block v%u\n", sequence->version);
+ drm_dbg(&dev_priv->drm, "Found MIPI sequence block v%u\n",
+ sequence->version);
seq_data = find_panel_sequence_block(sequence, panel_type, &seq_size);
if (!seq_data)
@@ -1334,13 +1368,15 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
break;
if (seq_id >= MIPI_SEQ_MAX) {
- DRM_ERROR("Unknown sequence %u\n", seq_id);
+ drm_err(&dev_priv->drm, "Unknown sequence %u\n",
+ seq_id);
goto err;
}
/* Log about presence of sequences we won't run. */
if (seq_id == MIPI_SEQ_TEAR_ON || seq_id == MIPI_SEQ_TEAR_OFF)
- DRM_DEBUG_KMS("Unsupported sequence %u\n", seq_id);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported sequence %u\n", seq_id);
dev_priv->vbt.dsi.sequence[seq_id] = data + index;
@@ -1349,7 +1385,8 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
else
index = goto_next_sequence(data, index, seq_size);
if (!index) {
- DRM_ERROR("Invalid sequence %u\n", seq_id);
+ drm_err(&dev_priv->drm, "Invalid sequence %u\n",
+ seq_id);
goto err;
}
}
@@ -1360,7 +1397,7 @@ parse_mipi_sequence(struct drm_i915_private *dev_priv,
fixup_mipi_sequences(dev_priv);
- DRM_DEBUG_DRIVER("MIPI related VBT parsing complete\n");
+ drm_dbg(&dev_priv->drm, "MIPI related VBT parsing complete\n");
return;
err:
@@ -1385,13 +1422,15 @@ parse_compression_parameters(struct drm_i915_private *i915,
if (params) {
/* Sanity checks */
if (params->entry_size != sizeof(params->data[0])) {
- DRM_DEBUG_KMS("VBT: unsupported compression param entry size\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: unsupported compression param entry size\n");
return;
}
block_size = get_blocksize(params);
if (block_size < sizeof(*params)) {
- DRM_DEBUG_KMS("VBT: expected 16 compression param entries\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: expected 16 compression param entries\n");
return;
}
}
@@ -1403,12 +1442,14 @@ parse_compression_parameters(struct drm_i915_private *i915,
continue;
if (!params) {
- DRM_DEBUG_KMS("VBT: compression params not available\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: compression params not available\n");
continue;
}
if (child->compression_method_cps) {
- DRM_DEBUG_KMS("VBT: CPS compression not supported\n");
+ drm_dbg_kms(&i915->drm,
+ "VBT: CPS compression not supported\n");
continue;
}
@@ -1456,10 +1497,11 @@ static void sanitize_ddc_pin(struct drm_i915_private *dev_priv,
p = get_port_by_ddc_pin(dev_priv, info->alternate_ddc_pin);
if (p != PORT_NONE) {
- DRM_DEBUG_KMS("port %c trying to use the same DDC pin (0x%x) as port %c, "
- "disabling port %c DVI/HDMI support\n",
- port_name(port), info->alternate_ddc_pin,
- port_name(p), port_name(p));
+ drm_dbg_kms(&dev_priv->drm,
+ "port %c trying to use the same DDC pin (0x%x) as port %c, "
+ "disabling port %c DVI/HDMI support\n",
+ port_name(port), info->alternate_ddc_pin,
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1507,10 +1549,11 @@ static void sanitize_aux_ch(struct drm_i915_private *dev_priv,
p = get_port_by_aux_ch(dev_priv, info->alternate_aux_channel);
if (p != PORT_NONE) {
- DRM_DEBUG_KMS("port %c trying to use the same AUX CH (0x%x) as port %c, "
- "disabling port %c DP support\n",
- port_name(port), info->alternate_aux_channel,
- port_name(p), port_name(p));
+ drm_dbg_kms(&dev_priv->drm,
+ "port %c trying to use the same AUX CH (0x%x) as port %c, "
+ "disabling port %c DP support\n",
+ port_name(port), info->alternate_aux_channel,
+ port_name(p), port_name(p));
/*
* If we have multiple ports supposedly sharing the
@@ -1570,8 +1613,9 @@ static u8 map_ddc_pin(struct drm_i915_private *dev_priv, u8 vbt_pin)
if (vbt_pin < n_entries && ddc_pin_map[vbt_pin] != 0)
return ddc_pin_map[vbt_pin];
- DRM_DEBUG_KMS("Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
- vbt_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring alternate pin: VBT claims DDC pin %d, which is not valid for this platform\n",
+ vbt_pin);
return 0;
}
@@ -1622,8 +1666,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info = &dev_priv->vbt.ddi_port_info[port];
if (info->child) {
- DRM_DEBUG_KMS("More than one child device for port %c in VBT, using the first.\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "More than one child device for port %c in VBT, using the first.\n",
+ port_name(port));
return;
}
@@ -1634,8 +1679,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
is_edp = is_dp && (child->device_type & DEVICE_TYPE_INTERNAL_CONNECTOR);
if (port == PORT_A && is_dvi && INTEL_GEN(dev_priv) < 12) {
- DRM_DEBUG_KMS("VBT claims port A supports DVI%s, ignoring\n",
- is_hdmi ? "/HDMI" : "");
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT claims port A supports DVI%s, ignoring\n",
+ is_hdmi ? "/HDMI" : "");
is_dvi = false;
is_hdmi = false;
}
@@ -1651,11 +1697,12 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 209)
info->supports_tbt = child->tbt;
- DRM_DEBUG_KMS("Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
- port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
- HAS_LSPCON(dev_priv) && child->lspcon,
- info->supports_typec_usb, info->supports_tbt,
- devdata->dsc != NULL);
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c VBT info: CRT:%d DVI:%d HDMI:%d DP:%d eDP:%d LSPCON:%d USB-Type-C:%d TBT:%d DSC:%d\n",
+ port_name(port), is_crt, is_dvi, is_hdmi, is_dp, is_edp,
+ HAS_LSPCON(dev_priv) && child->lspcon,
+ info->supports_typec_usb, info->supports_tbt,
+ devdata->dsc != NULL);
if (is_dvi) {
u8 ddc_pin;
@@ -1665,9 +1712,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info->alternate_ddc_pin = ddc_pin;
sanitize_ddc_pin(dev_priv, port);
} else {
- DRM_DEBUG_KMS("Port %c has invalid DDC pin %d, "
- "sticking to defaults\n",
- port_name(port), ddc_pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "Port %c has invalid DDC pin %d, "
+ "sticking to defaults\n",
+ port_name(port), ddc_pin);
}
}
@@ -1680,9 +1728,10 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
if (bdb_version >= 158) {
/* The VBT HDMI level shift values match the table we have. */
u8 hdmi_level_shift = child->hdmi_level_shifter_value;
- DRM_DEBUG_KMS("VBT HDMI level shift for port %c: %d\n",
- port_name(port),
- hdmi_level_shift);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI level shift for port %c: %d\n",
+ port_name(port),
+ hdmi_level_shift);
info->hdmi_level_shift = hdmi_level_shift;
info->hdmi_level_shift_set = true;
}
@@ -1706,19 +1755,22 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
}
if (max_tmds_clock)
- DRM_DEBUG_KMS("VBT HDMI max TMDS clock for port %c: %d kHz\n",
- port_name(port), max_tmds_clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI max TMDS clock for port %c: %d kHz\n",
+ port_name(port), max_tmds_clock);
info->max_tmds_clock = max_tmds_clock;
}
/* Parse the I_boost config for SKL and above */
if (bdb_version >= 196 && child->iboost) {
info->dp_boost_level = translate_iboost(child->dp_iboost_level);
- DRM_DEBUG_KMS("VBT (e)DP boost level for port %c: %d\n",
- port_name(port), info->dp_boost_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT (e)DP boost level for port %c: %d\n",
+ port_name(port), info->dp_boost_level);
info->hdmi_boost_level = translate_iboost(child->hdmi_iboost_level);
- DRM_DEBUG_KMS("VBT HDMI boost level for port %c: %d\n",
- port_name(port), info->hdmi_boost_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT HDMI boost level for port %c: %d\n",
+ port_name(port), info->hdmi_boost_level);
}
/* DP max link rate for CNL+ */
@@ -1738,8 +1790,9 @@ static void parse_ddi_port(struct drm_i915_private *dev_priv,
info->dp_max_link_rate = 162000;
break;
}
- DRM_DEBUG_KMS("VBT DP max link rate for port %c: %d\n",
- port_name(port), info->dp_max_link_rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT DP max link rate for port %c: %d\n",
+ port_name(port), info->dp_max_link_rate);
}
info->child = child;
@@ -1773,19 +1826,21 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
if (!defs) {
- DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No general definition block is found, no devices defined.\n");
return;
}
block_size = get_blocksize(defs);
if (block_size < sizeof(*defs)) {
- DRM_DEBUG_KMS("General definitions block too small (%u)\n",
- block_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "General definitions block too small (%u)\n",
+ block_size);
return;
}
bus_pin = defs->crt_ddc_gmbus_pin;
- DRM_DEBUG_KMS("crt_ddc_bus_pin: %d\n", bus_pin);
+ drm_dbg_kms(&dev_priv->drm, "crt_ddc_bus_pin: %d\n", bus_pin);
if (intel_gmbus_is_valid_pin(dev_priv, bus_pin))
dev_priv->vbt.crt_ddc_pin = bus_pin;
@@ -1804,19 +1859,22 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
} else {
expected_size = sizeof(*child);
BUILD_BUG_ON(sizeof(*child) < 39);
- DRM_DEBUG_DRIVER("Expected child device config size for VBT version %u not known; assuming %u\n",
- bdb->version, expected_size);
+ drm_dbg(&dev_priv->drm,
+ "Expected child device config size for VBT version %u not known; assuming %u\n",
+ bdb->version, expected_size);
}
/* Flag an error for unexpected size, but continue anyway. */
if (defs->child_dev_size != expected_size)
- DRM_ERROR("Unexpected child device config size %u (expected %u for VBT version %u)\n",
- defs->child_dev_size, expected_size, bdb->version);
+ drm_err(&dev_priv->drm,
+ "Unexpected child device config size %u (expected %u for VBT version %u)\n",
+ defs->child_dev_size, expected_size, bdb->version);
/* The legacy sized child device config is the minimum we need. */
if (defs->child_dev_size < LEGACY_CHILD_DEVICE_CONFIG_SIZE) {
- DRM_DEBUG_KMS("Child device config size %u is too small.\n",
- defs->child_dev_size);
+ drm_dbg_kms(&dev_priv->drm,
+ "Child device config size %u is too small.\n",
+ defs->child_dev_size);
return;
}
@@ -1828,8 +1886,9 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
if (!child->device_type)
continue;
- DRM_DEBUG_KMS("Found VBT child device with type 0x%x\n",
- child->device_type);
+ drm_dbg_kms(&dev_priv->drm,
+ "Found VBT child device with type 0x%x\n",
+ child->device_type);
devdata = kzalloc(sizeof(*devdata), GFP_KERNEL);
if (!devdata)
@@ -1847,7 +1906,8 @@ parse_general_definitions(struct drm_i915_private *dev_priv,
}
if (list_empty(&dev_priv->vbt.display_devices))
- DRM_DEBUG_KMS("no child dev is parsed from VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no child dev is parsed from VBT\n");
}
/* Common defaults which may be overridden by VBT. */
@@ -1880,7 +1940,8 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
*/
dev_priv->vbt.lvds_ssc_freq = intel_bios_ssc_frequency(dev_priv,
!HAS_PCH_SPLIT(dev_priv));
- DRM_DEBUG_KMS("Set default to SSC at %d kHz\n", dev_priv->vbt.lvds_ssc_freq);
+ drm_dbg_kms(&dev_priv->drm, "Set default to SSC at %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
}
/* Defaults to initialize only if there is no VBT. */
@@ -1990,13 +2051,14 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *dev_priv)
goto err_unmap_oprom;
if (sizeof(struct vbt_header) > size) {
- DRM_DEBUG_DRIVER("VBT header incomplete\n");
+ drm_dbg(&dev_priv->drm, "VBT header incomplete\n");
goto err_unmap_oprom;
}
vbt_size = ioread16(p + offsetof(struct vbt_header, vbt_size));
if (vbt_size > size) {
- DRM_DEBUG_DRIVER("VBT incomplete (vbt_size overflows)\n");
+ drm_dbg(&dev_priv->drm,
+ "VBT incomplete (vbt_size overflows)\n");
goto err_unmap_oprom;
}
@@ -2039,7 +2101,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
INIT_LIST_HEAD(&dev_priv->vbt.display_devices);
if (!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)) {
- DRM_DEBUG_KMS("Skipping VBT init due to disabled display.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Skipping VBT init due to disabled display.\n");
return;
}
@@ -2053,13 +2116,14 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
vbt = oprom_vbt;
- DRM_DEBUG_KMS("Found valid VBT in PCI ROM\n");
+ drm_dbg_kms(&dev_priv->drm, "Found valid VBT in PCI ROM\n");
}
bdb = get_bdb_header(vbt);
- DRM_DEBUG_KMS("VBT signature \"%.*s\", BDB version %d\n",
- (int)sizeof(vbt->signature), vbt->signature, bdb->version);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT signature \"%.*s\", BDB version %d\n",
+ (int)sizeof(vbt->signature), vbt->signature, bdb->version);
/* Grab useful general definitions */
parse_general_features(dev_priv, bdb);
@@ -2084,7 +2148,8 @@ void intel_bios_init(struct drm_i915_private *dev_priv)
out:
if (!vbt) {
- DRM_INFO("Failed to find VBIOS tables (VBT)\n");
+ drm_info(&dev_priv->drm,
+ "Failed to find VBIOS tables (VBT)\n");
init_vbt_missing_defaults(dev_priv);
}
@@ -2236,13 +2301,12 @@ bool intel_bios_is_port_present(struct drm_i915_private *dev_priv, enum port por
const struct ddi_vbt_port_info *port_info =
&dev_priv->vbt.ddi_port_info[port];
- return port_info->supports_dp ||
- port_info->supports_dvi ||
- port_info->supports_hdmi;
+ return port_info->child;
}
/* FIXME maybe deal with port A as well? */
- if (WARN_ON(port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port == PORT_A) || port >= ARRAY_SIZE(port_mapping))
return false;
list_for_each_entry(devdata, &dev_priv->vbt.display_devices, node) {
@@ -2371,8 +2435,9 @@ bool intel_bios_is_dsi_present(struct drm_i915_private *dev_priv,
} else if (dvo_port == DVO_PORT_MIPIB ||
dvo_port == DVO_PORT_MIPIC ||
dvo_port == DVO_PORT_MIPID) {
- DRM_DEBUG_KMS("VBT has unsupported DSI port %c\n",
- port_name(dvo_port - DVO_PORT_MIPIA));
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT has unsupported DSI port %c\n",
+ port_name(dvo_port - DVO_PORT_MIPIA));
}
}
@@ -2491,7 +2556,7 @@ intel_bios_is_port_hpd_inverted(const struct drm_i915_private *i915,
const struct child_device_config *child =
i915->vbt.ddi_port_info[port].child;
- if (WARN_ON_ONCE(!IS_GEN9_LP(i915)))
+ if (drm_WARN_ON_ONCE(&i915->drm, !IS_GEN9_LP(i915)))
return false;
return child && child->hpd_invert;
@@ -2524,8 +2589,9 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
if (!info->alternate_aux_channel) {
aux_ch = (enum aux_ch)port;
- DRM_DEBUG_KMS("using AUX %c for port %c (platform default)\n",
- aux_ch_name(aux_ch), port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "using AUX %c for port %c (platform default)\n",
+ aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
@@ -2557,8 +2623,78 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv,
break;
}
- DRM_DEBUG_KMS("using AUX %c for port %c (VBT)\n",
- aux_ch_name(aux_ch), port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "using AUX %c for port %c (VBT)\n",
+ aux_ch_name(aux_ch), port_name(port));
return aux_ch;
}
+
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].max_tmds_clock;
+}
+
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ const struct ddi_vbt_port_info *info =
+ &i915->vbt.ddi_port_info[encoder->port];
+
+ return info->hdmi_level_shift_set ? info->hdmi_level_shift : -1;
+}
+
+int intel_bios_dp_boost_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].dp_boost_level;
+}
+
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].hdmi_boost_level;
+}
+
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].dp_max_link_rate;
+}
+
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+
+ return i915->vbt.ddi_port_info[encoder->port].alternate_ddc_pin;
+}
+
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_dvi;
+}
+
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_hdmi;
+}
+
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_dp;
+}
+
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915,
+ enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_typec_usb;
+}
+
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port)
+{
+ return i915->vbt.ddi_port_info[port].supports_tbt;
+}
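
The accessors added above give encoder code one entry point into the VBT-derived per-port data instead of open-coded i915->vbt lookups. As an illustration only (the caller name is invented; intel_bios_max_tmds_clock() and the 0-means-no-limit convention come from this patch), a sketch of how such a helper might be consumed:

/*
 * Hypothetical caller: reject an HDMI pixel clock that exceeds the
 * VBT-provided TMDS limit for this encoder. A max_tmds_clock of 0
 * means the VBT specified no limit.
 */
static bool hdmi_clock_allowed_by_vbt(struct intel_encoder *encoder,
				      int clock_khz)
{
	int max_tmds_clock = intel_bios_max_tmds_clock(encoder);

	return max_tmds_clock == 0 || clock_khz <= max_tmds_clock;
}
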
diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h
index d6a0c29d37ac..e29e79faa01b 100644
--- a/drivers/gpu/drm/i915/display/intel_bios.h
+++ b/drivers/gpu/drm/i915/display/intel_bios.h
@@ -32,8 +32,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_crtc_state;
struct intel_encoder;
@@ -247,5 +245,16 @@ enum aux_ch intel_bios_port_aux_ch(struct drm_i915_private *dev_priv, enum port
bool intel_bios_get_dsc_params(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
int dsc_max_bpc);
+int intel_bios_max_tmds_clock(struct intel_encoder *encoder);
+int intel_bios_hdmi_level_shift(struct intel_encoder *encoder);
+int intel_bios_dp_boost_level(struct intel_encoder *encoder);
+int intel_bios_hdmi_boost_level(struct intel_encoder *encoder);
+int intel_bios_dp_max_link_rate(struct intel_encoder *encoder);
+int intel_bios_alternate_ddc_pin(struct intel_encoder *encoder);
+bool intel_bios_port_supports_dvi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_hdmi(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_dp(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_typec_usb(struct drm_i915_private *i915, enum port port);
+bool intel_bios_port_supports_tbt(struct drm_i915_private *i915, enum port port);
#endif /* _INTEL_BIOS_H_ */
diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c
index b228671d5a5d..58b264bc318d 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.c
+++ b/drivers/gpu/drm/i915/display/intel_bw.c
@@ -122,7 +122,8 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- if (WARN_ON(qi->num_points > ARRAY_SIZE(qi->points)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ qi->num_points > ARRAY_SIZE(qi->points)))
qi->num_points = ARRAY_SIZE(qi->points);
for (i = 0; i < qi->num_points; i++) {
@@ -132,9 +133,10 @@ static int icl_get_qgv_points(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- DRM_DEBUG_KMS("QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
- i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
- sp->t_rcd, sp->t_rc);
+ drm_dbg_kms(&dev_priv->drm,
+ "QGV %d: DCLK=%d tRP=%d tRDPRE=%d tRAS=%d tRCD=%d tRC=%d\n",
+ i, sp->dclk, sp->t_rp, sp->t_rdpre, sp->t_ras,
+ sp->t_rcd, sp->t_rc);
}
return 0;
@@ -187,7 +189,8 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
ret = icl_get_qgv_points(dev_priv, &qi);
if (ret) {
- DRM_DEBUG_KMS("Failed to get memory subsystem information, ignoring bandwidth limits");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get memory subsystem information, ignoring bandwidth limits");
return ret;
}
num_channels = qi.num_channels;
@@ -228,8 +231,9 @@ static int icl_get_bw_info(struct drm_i915_private *dev_priv, const struct intel
bi->deratedbw[j] = min(maxdebw,
bw * 9 / 10); /* 90% */
- DRM_DEBUG_KMS("BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
- i, j, bi->num_planes, bi->deratedbw[j]);
+ drm_dbg_kms(&dev_priv->drm,
+ "BW%d / QGV %d: num_planes=%d deratedbw=%u\n",
+ i, j, bi->num_planes, bi->deratedbw[j]);
}
if (bi->num_planes == 1)
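
To make the 90% derating concrete, a worked example with invented numbers (the real bw and maxdebw values come from the pcode/QGV readout above):

/*
 * Illustrative only: if a QGV point yields bw = 30000 MB/s while
 * maxdebw = 25000 MB/s, the stored value is
 *   deratedbw = min(25000, 30000 * 9 / 10) = min(25000, 27000) = 25000.
 */
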
@@ -374,10 +378,9 @@ static struct intel_bw_state *
intel_atomic_get_bw_state(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_private_state *bw_state;
+ struct intel_global_state *bw_state;
- bw_state = drm_atomic_get_private_obj_state(&state->base,
- &dev_priv->bw_obj);
+ bw_state = intel_atomic_get_global_obj_state(state, &dev_priv->bw_obj);
if (IS_ERR(bw_state))
return ERR_CAST(bw_state);
@@ -392,7 +395,7 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
unsigned int data_rate, max_data_rate;
unsigned int num_active_planes;
struct intel_crtc *crtc;
- int i;
+ int i, ret;
/* FIXME earlier gens need some checks too */
if (INTEL_GEN(dev_priv) < 11)
@@ -424,15 +427,20 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
bw_state->data_rate[crtc->pipe] = new_data_rate;
bw_state->num_active_planes[crtc->pipe] = new_active_planes;
- DRM_DEBUG_KMS("pipe %c data rate %u num active planes %u\n",
- pipe_name(crtc->pipe),
- bw_state->data_rate[crtc->pipe],
- bw_state->num_active_planes[crtc->pipe]);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe %c data rate %u num active planes %u\n",
+ pipe_name(crtc->pipe),
+ bw_state->data_rate[crtc->pipe],
+ bw_state->num_active_planes[crtc->pipe]);
}
if (!bw_state)
return 0;
+ ret = intel_atomic_lock_global_state(&bw_state->base);
+ if (ret)
+ return ret;
+
data_rate = intel_bw_data_rate(dev_priv, bw_state);
num_active_planes = intel_bw_num_active_planes(dev_priv, bw_state);
@@ -441,15 +449,17 @@ int intel_bw_atomic_check(struct intel_atomic_state *state)
data_rate = DIV_ROUND_UP(data_rate, 1000);
if (data_rate > max_data_rate) {
- DRM_DEBUG_KMS("Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
- data_rate, max_data_rate, num_active_planes);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bandwidth %u MB/s exceeds max available %d MB/s (%d active planes)\n",
+ data_rate, max_data_rate, num_active_planes);
return -EINVAL;
}
return 0;
}
-static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj *obj)
+static struct intel_global_state *
+intel_bw_duplicate_state(struct intel_global_obj *obj)
{
struct intel_bw_state *state;
@@ -457,18 +467,16 @@ static struct drm_private_state *intel_bw_duplicate_state(struct drm_private_obj
if (!state)
return NULL;
- __drm_atomic_helper_private_obj_duplicate_state(obj, &state->base);
-
return &state->base;
}
-static void intel_bw_destroy_state(struct drm_private_obj *obj,
- struct drm_private_state *state)
+static void intel_bw_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
{
kfree(state);
}
-static const struct drm_private_state_funcs intel_bw_funcs = {
+static const struct intel_global_state_funcs intel_bw_funcs = {
.atomic_duplicate_state = intel_bw_duplicate_state,
.atomic_destroy_state = intel_bw_destroy_state,
};
@@ -481,13 +489,8 @@ int intel_bw_init(struct drm_i915_private *dev_priv)
if (!state)
return -ENOMEM;
- drm_atomic_private_obj_init(&dev_priv->drm, &dev_priv->bw_obj,
- &state->base, &intel_bw_funcs);
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->bw_obj,
+ &state->base, &intel_bw_funcs);
return 0;
}
-
-void intel_bw_cleanup(struct drm_i915_private *dev_priv)
-{
- drm_atomic_private_obj_fini(&dev_priv->bw_obj);
-}
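
Any further user of the intel_global_state mechanism would follow the same shape as the converted bw code above. A minimal sketch, with all "foo" names invented and assuming the vtable signatures used in this patch:

struct intel_foo_state {
	struct intel_global_state base;
	unsigned int value;
};

static struct intel_global_state *
intel_foo_duplicate_state(struct intel_global_obj *obj)
{
	struct intel_foo_state *state;

	/* A real duplicate would copy the current state here. */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;

	return &state->base;
}

static void intel_foo_destroy_state(struct intel_global_obj *obj,
				    struct intel_global_state *state)
{
	kfree(state);
}

static const struct intel_global_state_funcs intel_foo_funcs = {
	.atomic_duplicate_state = intel_foo_duplicate_state,
	.atomic_destroy_state = intel_foo_destroy_state,
};
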
diff --git a/drivers/gpu/drm/i915/display/intel_bw.h b/drivers/gpu/drm/i915/display/intel_bw.h
index 20b9ad241802..a8aa7624c5aa 100644
--- a/drivers/gpu/drm/i915/display/intel_bw.h
+++ b/drivers/gpu/drm/i915/display/intel_bw.h
@@ -9,13 +9,14 @@
#include <drm/drm_atomic.h>
#include "intel_display.h"
+#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
struct intel_crtc_state;
struct intel_bw_state {
- struct drm_private_state base;
+ struct intel_global_state base;
unsigned int data_rate[I915_MAX_PIPES];
u8 num_active_planes[I915_MAX_PIPES];
@@ -25,7 +26,6 @@ struct intel_bw_state {
void intel_bw_init_hw(struct drm_i915_private *dev_priv);
int intel_bw_init(struct drm_i915_private *dev_priv);
-void intel_bw_cleanup(struct drm_i915_private *dev_priv);
int intel_bw_atomic_check(struct intel_atomic_state *state);
void intel_bw_crtc_update(struct intel_bw_state *bw_state,
const struct intel_crtc_state *crtc_state);
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c
index 0ce5926006ca..979a0241fdcb 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.c
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.c
@@ -55,43 +55,43 @@
*/
static void fixed_133mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
}
static void fixed_200mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
}
static void fixed_266mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
}
static void fixed_333mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
}
static void fixed_400mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 400000;
+ cdclk_config->cdclk = 400000;
}
static void fixed_450mhz_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
}
static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 hpllcc = 0;
@@ -102,7 +102,7 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
* FIXME is this the right way to detect 852GM/852GMV?
*/
if (pdev->revision == 0x1) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
@@ -116,24 +116,24 @@ static void i85x_get_cdclk(struct drm_i915_private *dev_priv,
case GC_CLOCK_133_200:
case GC_CLOCK_133_200_2:
case GC_CLOCK_100_200:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
case GC_CLOCK_166_250:
- cdclk_state->cdclk = 250000;
+ cdclk_config->cdclk = 250000;
break;
case GC_CLOCK_100_133:
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
break;
case GC_CLOCK_133_266:
case GC_CLOCK_133_266_2:
case GC_CLOCK_166_266:
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
break;
}
}
static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -141,23 +141,23 @@ static void i915gm_get_cdclk(struct drm_i915_private *dev_priv,
pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_320_MHZ:
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
break;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
- cdclk_state->cdclk = 190000;
+ cdclk_config->cdclk = 190000;
break;
}
}
static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -165,17 +165,17 @@ static void i945gm_get_cdclk(struct drm_i915_private *dev_priv,
pci_read_config_word(pdev, GCFGC, &gcfgc);
if (gcfgc & GC_LOW_FREQUENCY_ENABLE) {
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
return;
}
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_333_320_MHZ:
- cdclk_state->cdclk = 320000;
+ cdclk_config->cdclk = 320000;
break;
default:
case GC_DISPLAY_CLOCK_190_200_MHZ:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
}
}
@@ -237,20 +237,21 @@ static unsigned int intel_hpll_vco(struct drm_i915_private *dev_priv)
else
return 0;
- tmp = I915_READ(IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ?
- HPLLVCO_MOBILE : HPLLVCO);
+ tmp = intel_de_read(dev_priv,
+ IS_PINEVIEW(dev_priv) || IS_MOBILE(dev_priv) ? HPLLVCO_MOBILE : HPLLVCO);
vco = vco_table[tmp & 0x7];
if (vco == 0)
- DRM_ERROR("Bad HPLL VCO (HPLLVCO=0x%02x)\n", tmp);
+ drm_err(&dev_priv->drm, "Bad HPLL VCO (HPLLVCO=0x%02x)\n",
+ tmp);
else
- DRM_DEBUG_KMS("HPLL VCO %u kHz\n", vco);
+ drm_dbg_kms(&dev_priv->drm, "HPLL VCO %u kHz\n", vco);
return vco;
}
static void g33_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const u8 div_3200[] = { 12, 10, 8, 7, 5, 16 };
@@ -261,7 +262,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -270,7 +271,7 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 3200000:
div_table = div_3200;
break;
@@ -287,18 +288,19 @@ static void g33_get_cdclk(struct drm_i915_private *dev_priv,
goto fail;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
- div_table[cdclk_sel]);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
return;
fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 190476;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%08x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 190476;
}
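
A worked example of the lookup above, using the div_3200[] values from this hunk:

/*
 * Example: with an HPLL VCO of 3200000 kHz and cdclk_sel == 0,
 * div_3200[0] == 12, so
 *   cdclk = DIV_ROUND_CLOSEST(3200000, 12) = 266667 kHz.
 */
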
static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u16 gcfgc = 0;
@@ -307,31 +309,32 @@ static void pnv_get_cdclk(struct drm_i915_private *dev_priv,
switch (gcfgc & GC_DISPLAY_CLOCK_MASK) {
case GC_DISPLAY_CLOCK_267_MHZ_PNV:
- cdclk_state->cdclk = 266667;
+ cdclk_config->cdclk = 266667;
break;
case GC_DISPLAY_CLOCK_333_MHZ_PNV:
- cdclk_state->cdclk = 333333;
+ cdclk_config->cdclk = 333333;
break;
case GC_DISPLAY_CLOCK_444_MHZ_PNV:
- cdclk_state->cdclk = 444444;
+ cdclk_config->cdclk = 444444;
break;
case GC_DISPLAY_CLOCK_200_MHZ_PNV:
- cdclk_state->cdclk = 200000;
+ cdclk_config->cdclk = 200000;
break;
default:
- DRM_ERROR("Unknown pnv display core clock 0x%04x\n", gcfgc);
+ drm_err(&dev_priv->drm,
+ "Unknown pnv display core clock 0x%04x\n", gcfgc);
/* fall through */
case GC_DISPLAY_CLOCK_133_MHZ_PNV:
- cdclk_state->cdclk = 133333;
+ cdclk_config->cdclk = 133333;
break;
case GC_DISPLAY_CLOCK_167_MHZ_PNV:
- cdclk_state->cdclk = 166667;
+ cdclk_config->cdclk = 166667;
break;
}
}
static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
static const u8 div_3200[] = { 16, 10, 8 };
@@ -341,7 +344,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
@@ -350,7 +353,7 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
if (cdclk_sel >= ARRAY_SIZE(div_3200))
goto fail;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 3200000:
div_table = div_3200;
break;
@@ -364,62 +367,64 @@ static void i965gm_get_cdclk(struct drm_i915_private *dev_priv,
goto fail;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco,
- div_table[cdclk_sel]);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco,
+ div_table[cdclk_sel]);
return;
fail:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 200000;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u kHz, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 200000;
}
static void gm45_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
unsigned int cdclk_sel;
u16 tmp = 0;
- cdclk_state->vco = intel_hpll_vco(dev_priv);
+ cdclk_config->vco = intel_hpll_vco(dev_priv);
pci_read_config_word(pdev, GCFGC, &tmp);
cdclk_sel = (tmp >> 12) & 0x1;
- switch (cdclk_state->vco) {
+ switch (cdclk_config->vco) {
case 2666667:
case 4000000:
case 5333333:
- cdclk_state->cdclk = cdclk_sel ? 333333 : 222222;
+ cdclk_config->cdclk = cdclk_sel ? 333333 : 222222;
break;
case 3200000:
- cdclk_state->cdclk = cdclk_sel ? 320000 : 228571;
+ cdclk_config->cdclk = cdclk_sel ? 320000 : 228571;
break;
default:
- DRM_ERROR("Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
- cdclk_state->vco, tmp);
- cdclk_state->cdclk = 222222;
+ drm_err(&dev_priv->drm,
+ "Unable to determine CDCLK. HPLL VCO=%u, CFGC=0x%04x\n",
+ cdclk_config->vco, tmp);
+ cdclk_config->cdclk = 222222;
break;
}
}
static void hsw_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
- cdclk_state->cdclk = 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
else if (IS_HSW_ULT(dev_priv))
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
else
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
}
static int vlv_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
@@ -462,17 +467,17 @@ static u8 vlv_calc_voltage_level(struct drm_i915_private *dev_priv, int cdclk)
}
static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val;
vlv_iosf_sb_get(dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
- cdclk_state->vco = vlv_get_hpll_vco(dev_priv);
- cdclk_state->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
- CCK_DISPLAY_CLOCK_CONTROL,
- cdclk_state->vco);
+ cdclk_config->vco = vlv_get_hpll_vco(dev_priv);
+ cdclk_config->cdclk = vlv_get_cck_clock(dev_priv, "cdclk",
+ CCK_DISPLAY_CLOCK_CONTROL,
+ cdclk_config->vco);
val = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
@@ -480,10 +485,10 @@ static void vlv_get_cdclk(struct drm_i915_private *dev_priv,
BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
if (IS_VALLEYVIEW(dev_priv))
- cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK) >>
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK) >>
DSPFREQGUAR_SHIFT;
else
- cdclk_state->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
+ cdclk_config->voltage_level = (val & DSPFREQGUAR_MASK_CHV) >>
DSPFREQGUAR_SHIFT_CHV;
}
@@ -510,25 +515,26 @@ static void vlv_program_pfi_credits(struct drm_i915_private *dev_priv)
* WA - write default credits before re-programming
* FIXME: should we also set the resend bit here?
*/
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- default_credits);
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | default_credits);
- I915_WRITE(GCI_CONTROL, VGA_FAST_MODE_DISABLE |
- credits | PFI_CREDIT_RESEND);
+ intel_de_write(dev_priv, GCI_CONTROL,
+ VGA_FAST_MODE_DISABLE | credits | PFI_CREDIT_RESEND);
/*
* FIXME is this guaranteed to clear
* immediately or should we poll for it?
*/
- WARN_ON(I915_READ(GCI_CONTROL) & PFI_CREDIT_RESEND);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, GCI_CONTROL) & PFI_CREDIT_RESEND);
}
static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- u32 val, cmd = cdclk_state->voltage_level;
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
@@ -563,7 +569,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT),
50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
if (cdclk == 400000) {
@@ -581,7 +588,8 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_cck_read(dev_priv, CCK_DISPLAY_CLOCK_CONTROL) &
CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT),
50))
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
/* adjust self-refresh exit latency value */
@@ -611,11 +619,11 @@ static void vlv_set_cdclk(struct drm_i915_private *dev_priv,
}
static void chv_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- u32 val, cmd = cdclk_state->voltage_level;
+ int cdclk = cdclk_config->cdclk;
+ u32 val, cmd = cdclk_config->voltage_level;
intel_wakeref_t wakeref;
switch (cdclk) {
@@ -645,7 +653,8 @@ static void chv_set_cdclk(struct drm_i915_private *dev_priv,
if (wait_for((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) &
DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV),
50)) {
- DRM_ERROR("timed out waiting for CDclk change\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for CDclk change\n");
}
vlv_punit_put(dev_priv);
@@ -685,68 +694,70 @@ static u8 bdw_calc_voltage_level(int cdclk)
}
static void bdw_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 lcpll = I915_READ(LCPLL_CTL);
+ u32 lcpll = intel_de_read(dev_priv, LCPLL_CTL);
u32 freq = lcpll & LCPLL_CLK_FREQ_MASK;
if (lcpll & LCPLL_CD_SOURCE_FCLK)
- cdclk_state->cdclk = 800000;
- else if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 800000;
+ else if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_450)
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
else if (freq == LCPLL_CLK_FREQ_54O_BDW)
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
else if (freq == LCPLL_CLK_FREQ_337_5_BDW)
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
else
- cdclk_state->cdclk = 675000;
+ cdclk_config->cdclk = 675000;
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- bdw_calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ bdw_calc_voltage_level(cdclk_config->cdclk);
}
static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
+ int cdclk = cdclk_config->cdclk;
u32 val;
int ret;
- if (WARN((I915_READ(LCPLL_CTL) &
- (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
- LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
- LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
- LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
- "trying to change cdclk frequency with cdclk not enabled\n"))
+ if (drm_WARN(&dev_priv->drm,
+ (intel_de_read(dev_priv, LCPLL_CTL) &
+ (LCPLL_PLL_DISABLE | LCPLL_PLL_LOCK |
+ LCPLL_CD_CLOCK_DISABLE | LCPLL_ROOT_CD_CLOCK_DISABLE |
+ LCPLL_CD2X_CLOCK_DISABLE | LCPLL_POWER_DOWN_ALLOW |
+ LCPLL_CD_SOURCE_FCLK)) != LCPLL_PLL_LOCK,
+ "trying to change cdclk frequency with cdclk not enabled\n"))
return;
ret = sandybridge_pcode_write(dev_priv,
BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0);
if (ret) {
- DRM_ERROR("failed to inform pcode about cdclk change\n");
+ drm_err(&dev_priv->drm,
+ "failed to inform pcode about cdclk change\n");
return;
}
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
/*
* According to the spec, it should be enough to poll for this 1 us.
* However, extensive testing shows that this can take longer.
*/
- if (wait_for_us(I915_READ(LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 100))
- DRM_ERROR("Switching to FCLK failed\n");
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CLK_FREQ_MASK;
switch (cdclk) {
@@ -767,20 +778,21 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv,
break;
}
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us((I915_READ(LCPLL_CTL) &
- LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
+ LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
+ drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n");
sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
- I915_WRITE(CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
+ intel_de_write(dev_priv, CDCLK_FREQ,
+ DIV_ROUND_CLOSEST(cdclk, 1000) - 1);
intel_update_cdclk(dev_priv);
}
@@ -821,26 +833,27 @@ static u8 skl_calc_voltage_level(int cdclk)
}
static void skl_dpll0_update(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val;
- cdclk_state->ref = 24000;
- cdclk_state->vco = 0;
+ cdclk_config->ref = 24000;
+ cdclk_config->vco = 0;
- val = I915_READ(LCPLL1_CTL);
+ val = intel_de_read(dev_priv, LCPLL1_CTL);
if ((val & LCPLL_PLL_ENABLE) == 0)
return;
- if (WARN_ON((val & LCPLL_PLL_LOCK) == 0))
+ if (drm_WARN_ON(&dev_priv->drm, (val & LCPLL_PLL_LOCK) == 0))
return;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
- if (WARN_ON((val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
- DPLL_CTRL1_SSC(SKL_DPLL0) |
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
- DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (val & (DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) |
+ DPLL_CTRL1_SSC(SKL_DPLL0) |
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0))) !=
+ DPLL_CTRL1_OVERRIDE(SKL_DPLL0)))
return;
switch (val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0)) {
@@ -848,11 +861,11 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1350, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1620, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2700, SKL_DPLL0):
- cdclk_state->vco = 8100000;
+ cdclk_config->vco = 8100000;
break;
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_1080, SKL_DPLL0):
case DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_2160, SKL_DPLL0):
- cdclk_state->vco = 8640000;
+ cdclk_config->vco = 8640000;
break;
default:
MISSING_CASE(val & DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -861,32 +874,32 @@ static void skl_dpll0_update(struct drm_i915_private *dev_priv,
}
static void skl_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 cdctl;
- skl_dpll0_update(dev_priv, cdclk_state);
+ skl_dpll0_update(dev_priv, cdclk_config);
- cdclk_state->cdclk = cdclk_state->bypass = cdclk_state->ref;
+ cdclk_config->cdclk = cdclk_config->bypass = cdclk_config->ref;
- if (cdclk_state->vco == 0)
+ if (cdclk_config->vco == 0)
goto out;
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
- if (cdclk_state->vco == 8640000) {
+ if (cdclk_config->vco == 8640000) {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
- cdclk_state->cdclk = 432000;
+ cdclk_config->cdclk = 432000;
break;
case CDCLK_FREQ_337_308:
- cdclk_state->cdclk = 308571;
+ cdclk_config->cdclk = 308571;
break;
case CDCLK_FREQ_540:
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
break;
case CDCLK_FREQ_675_617:
- cdclk_state->cdclk = 617143;
+ cdclk_config->cdclk = 617143;
break;
default:
MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -895,16 +908,16 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
} else {
switch (cdctl & CDCLK_FREQ_SEL_MASK) {
case CDCLK_FREQ_450_432:
- cdclk_state->cdclk = 450000;
+ cdclk_config->cdclk = 450000;
break;
case CDCLK_FREQ_337_308:
- cdclk_state->cdclk = 337500;
+ cdclk_config->cdclk = 337500;
break;
case CDCLK_FREQ_540:
- cdclk_state->cdclk = 540000;
+ cdclk_config->cdclk = 540000;
break;
case CDCLK_FREQ_675_617:
- cdclk_state->cdclk = 675000;
+ cdclk_config->cdclk = 675000;
break;
default:
MISSING_CASE(cdctl & CDCLK_FREQ_SEL_MASK);
@@ -917,8 +930,8 @@ static void skl_get_cdclk(struct drm_i915_private *dev_priv,
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- skl_calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ skl_calc_voltage_level(cdclk_config->cdclk);
}
/* convert from kHz to .1 fixpoint MHz with -1MHz offset */
@@ -942,7 +955,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
{
u32 val;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* We always enable DPLL0 with the lowest link rate possible, but still
@@ -953,7 +966,7 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
* rate later on, with the constraint of choosing a frequency that
* works with vco.
*/
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(SKL_DPLL0) | DPLL_CTRL1_SSC(SKL_DPLL0) |
DPLL_CTRL1_LINK_RATE_MASK(SKL_DPLL0));
@@ -965,13 +978,14 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
val |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810,
SKL_DPLL0);
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) | LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, LCPLL1_CTL,
+ intel_de_read(dev_priv, LCPLL1_CTL) | LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("DPLL0 not locked\n");
+ drm_err(&dev_priv->drm, "DPLL0 not locked\n");
dev_priv->cdclk.hw.vco = vco;
@@ -981,19 +995,20 @@ static void skl_dpll0_enable(struct drm_i915_private *dev_priv, int vco)
static void skl_dpll0_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(LCPLL1_CTL, I915_READ(LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, LCPLL1_CTL,
+ intel_de_read(dev_priv, LCPLL1_CTL) & ~LCPLL_PLL_ENABLE);
if (intel_de_wait_for_clear(dev_priv, LCPLL1_CTL, LCPLL_PLL_LOCK, 1))
- DRM_ERROR("Couldn't disable DPLL0\n");
+ drm_err(&dev_priv->drm, "Couldn't disable DPLL0\n");
dev_priv->cdclk.hw.vco = 0;
}
static void skl_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
u32 freq_select, cdclk_ctl;
int ret;
@@ -1005,23 +1020,25 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
* use the corresponding VCO freq as that always leads to using the
* minimum 308MHz CDCLK.
*/
- WARN_ON_ONCE(IS_SKYLAKE(dev_priv) && vco == 8640000);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ IS_SKYLAKE(dev_priv) && vco == 8640000);
ret = skl_pcode_request(dev_priv, SKL_PCODE_CDCLK_CONTROL,
SKL_CDCLK_PREPARE_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE,
SKL_CDCLK_READY_FOR_CHANGE, 3);
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (%d)\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (%d)\n", ret);
return;
}
/* Choose frequency for this cdclk */
switch (cdclk) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 308571:
case 337500:
@@ -1044,38 +1061,38 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv,
dev_priv->cdclk.hw.vco != vco)
skl_dpll0_disable(dev_priv);
- cdclk_ctl = I915_READ(CDCLK_CTL);
+ cdclk_ctl = intel_de_read(dev_priv, CDCLK_CTL);
if (dev_priv->cdclk.hw.vco != vco) {
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
}
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl |= CDCLK_DIVMUX_CD_OVERRIDE;
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
- POSTING_READ(CDCLK_CTL);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
if (dev_priv->cdclk.hw.vco != vco)
skl_dpll0_enable(dev_priv, vco);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~(CDCLK_FREQ_SEL_MASK | CDCLK_FREQ_DECIMAL_MASK);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
cdclk_ctl |= freq_select | skl_cdclk_decimal(cdclk);
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
/* Wa Display #1183: skl,kbl,cfl */
cdclk_ctl &= ~CDCLK_DIVMUX_CD_OVERRIDE;
- I915_WRITE(CDCLK_CTL, cdclk_ctl);
- POSTING_READ(CDCLK_CTL);
+ intel_de_write(dev_priv, CDCLK_CTL, cdclk_ctl);
+ intel_de_posting_read(dev_priv, CDCLK_CTL);
/* inform PCU of the change */
sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
intel_update_cdclk(dev_priv);
}
@@ -1089,11 +1106,11 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* There is SWF18 scratchpad register defined which is set by the
* pre-os which can be used by the OS drivers to check the status
*/
- if ((I915_READ(SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
+ if ((intel_de_read(dev_priv, SWF_ILK(0x18)) & 0x00FFFFFF) == 0)
goto sanitize;
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
/* Is PLL enabled and locked ? */
if (dev_priv->cdclk.hw.vco == 0 ||
@@ -1106,7 +1123,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
* decimal part is programmed wrong from BIOS where pre-os does not
* enable display. Verify the same as well.
*/
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
expected = (cdctl & CDCLK_FREQ_SEL_MASK) |
skl_cdclk_decimal(dev_priv->cdclk.hw.cdclk);
if (cdctl == expected)
@@ -1114,7 +1131,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv)
return;
sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
@@ -1122,9 +1139,9 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-static void skl_init_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state;
+ struct intel_cdclk_config cdclk_config;
skl_sanitize_cdclk(dev_priv);
@@ -1140,26 +1157,26 @@ static void skl_init_cdclk(struct drm_i915_private *dev_priv)
return;
}
- cdclk_state = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.vco = dev_priv->skl_preferred_vco_freq;
- if (cdclk_state.vco == 0)
- cdclk_state.vco = 8100000;
- cdclk_state.cdclk = skl_calc_cdclk(0, cdclk_state.vco);
- cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.vco = dev_priv->skl_preferred_vco_freq;
+ if (cdclk_config.vco == 0)
+ cdclk_config.vco = 8100000;
+ cdclk_config.cdclk = skl_calc_cdclk(0, cdclk_config.vco);
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static void skl_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void skl_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level = skl_calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level = skl_calc_voltage_level(cdclk_config.cdclk);
- skl_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ skl_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
static const struct intel_cdclk_vals bxt_cdclk_table[] = {
@@ -1223,8 +1240,9 @@ static int bxt_calc_cdclk(struct drm_i915_private *dev_priv, int min_cdclk)
table[i].cdclk >= min_cdclk)
return table[i].cdclk;
- WARN(1, "Cannot satisfy minimum cdclk %d with refclk %u\n",
- min_cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1,
+ "Cannot satisfy minimum cdclk %d with refclk %u\n",
+ min_cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1241,8 +1259,8 @@ static int bxt_calc_cdclk_pll_vco(struct drm_i915_private *dev_priv, int cdclk)
table[i].cdclk == cdclk)
return dev_priv->cdclk.hw.ref * table[i].ratio;
- WARN(1, "cdclk %d not valid for refclk %u\n",
- cdclk, dev_priv->cdclk.hw.ref);
+ drm_WARN(&dev_priv->drm, 1, "cdclk %d not valid for refclk %u\n",
+ cdclk, dev_priv->cdclk.hw.ref);
return 0;
}
@@ -1283,56 +1301,68 @@ static u8 ehl_calc_voltage_level(int cdclk)
return 0;
}
+static u8 tgl_calc_voltage_level(int cdclk)
+{
+ if (cdclk > 556800)
+ return 3;
+ else if (cdclk > 326400)
+ return 2;
+ else if (cdclk > 312000)
+ return 1;
+ else
+ return 0;
+}
+
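
Spelling out the thresholds of the new helper (these are just the boundary cases of the function above; 652800 is an arbitrary value beyond the top threshold):

/*
 * tgl_calc_voltage_level(312000) == 0
 * tgl_calc_voltage_level(326400) == 1
 * tgl_calc_voltage_level(556800) == 2
 * tgl_calc_voltage_level(652800) == 3
 */
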
static void cnl_readout_refclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- if (I915_READ(SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
- cdclk_state->ref = 24000;
+ if (intel_de_read(dev_priv, SKL_DSSM) & CNL_DSSM_CDCLK_PLL_REFCLK_24MHz)
+ cdclk_config->ref = 24000;
else
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
}
static void icl_readout_refclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
- u32 dssm = I915_READ(SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
+ u32 dssm = intel_de_read(dev_priv, SKL_DSSM) & ICL_DSSM_CDCLK_PLL_REFCLK_MASK;
switch (dssm) {
default:
MISSING_CASE(dssm);
/* fall through */
case ICL_DSSM_CDCLK_PLL_REFCLK_24MHz:
- cdclk_state->ref = 24000;
+ cdclk_config->ref = 24000;
break;
case ICL_DSSM_CDCLK_PLL_REFCLK_19_2MHz:
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
break;
case ICL_DSSM_CDCLK_PLL_REFCLK_38_4MHz:
- cdclk_state->ref = 38400;
+ cdclk_config->ref = 38400;
break;
}
}
static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 val, ratio;
if (INTEL_GEN(dev_priv) >= 11)
- icl_readout_refclk(dev_priv, cdclk_state);
+ icl_readout_refclk(dev_priv, cdclk_config);
else if (IS_CANNONLAKE(dev_priv))
- cnl_readout_refclk(dev_priv, cdclk_state);
+ cnl_readout_refclk(dev_priv, cdclk_config);
else
- cdclk_state->ref = 19200;
+ cdclk_config->ref = 19200;
- val = I915_READ(BXT_DE_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
if ((val & BXT_DE_PLL_PLL_ENABLE) == 0 ||
(val & BXT_DE_PLL_LOCK) == 0) {
/*
* CDCLK PLL is disabled, the VCO/ratio doesn't matter, but
* setting it to zero is a way to signal that.
*/
- cdclk_state->vco = 0;
+ cdclk_config->vco = 0;
return;
}
@@ -1343,47 +1373,49 @@ static void bxt_de_pll_readout(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) >= 10)
ratio = val & CNL_CDCLK_PLL_RATIO_MASK;
else
- ratio = I915_READ(BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
+ ratio = intel_de_read(dev_priv, BXT_DE_PLL_CTL) & BXT_DE_PLL_RATIO_MASK;
- cdclk_state->vco = ratio * cdclk_state->ref;
+ cdclk_config->vco = ratio * cdclk_config->ref;
}
static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state)
+ struct intel_cdclk_config *cdclk_config)
{
u32 divider;
int div;
- bxt_de_pll_readout(dev_priv, cdclk_state);
+ bxt_de_pll_readout(dev_priv, cdclk_config);
if (INTEL_GEN(dev_priv) >= 12)
- cdclk_state->bypass = cdclk_state->ref / 2;
+ cdclk_config->bypass = cdclk_config->ref / 2;
else if (INTEL_GEN(dev_priv) >= 11)
- cdclk_state->bypass = 50000;
+ cdclk_config->bypass = 50000;
else
- cdclk_state->bypass = cdclk_state->ref;
+ cdclk_config->bypass = cdclk_config->ref;
- if (cdclk_state->vco == 0) {
- cdclk_state->cdclk = cdclk_state->bypass;
+ if (cdclk_config->vco == 0) {
+ cdclk_config->cdclk = cdclk_config->bypass;
goto out;
}
- divider = I915_READ(CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
+ divider = intel_de_read(dev_priv, CDCLK_CTL) & BXT_CDCLK_CD2X_DIV_SEL_MASK;
switch (divider) {
case BXT_CDCLK_CD2X_DIV_SEL_1:
div = 2;
break;
case BXT_CDCLK_CD2X_DIV_SEL_1_5:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 3;
break;
case BXT_CDCLK_CD2X_DIV_SEL_2:
div = 4;
break;
case BXT_CDCLK_CD2X_DIV_SEL_4:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
div = 8;
break;
default:
@@ -1391,25 +1423,25 @@ static void bxt_get_cdclk(struct drm_i915_private *dev_priv,
return;
}
- cdclk_state->cdclk = DIV_ROUND_CLOSEST(cdclk_state->vco, div);
+ cdclk_config->cdclk = DIV_ROUND_CLOSEST(cdclk_config->vco, div);
out:
/*
* Can't read this out :( Let's assume it's
* at least what the CDCLK frequency requires.
*/
- cdclk_state->voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state->cdclk);
+ cdclk_config->voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config->cdclk);
}
static void bxt_de_pll_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(BXT_DE_PLL_ENABLE, 0);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, 0);
/* Timeout 200us */
if (intel_de_wait_for_clear(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL unlock\n");
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1419,17 +1451,17 @@ static void bxt_de_pll_enable(struct drm_i915_private *dev_priv, int vco)
int ratio = DIV_ROUND_CLOSEST(vco, dev_priv->cdclk.hw.ref);
u32 val;
- val = I915_READ(BXT_DE_PLL_CTL);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_CTL);
val &= ~BXT_DE_PLL_RATIO_MASK;
val |= BXT_DE_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_CTL, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_CTL, val);
- I915_WRITE(BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_PLL_ENABLE);
/* Timeout 200us */
if (intel_de_wait_for_set(dev_priv,
BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK, 1))
- DRM_ERROR("timeout waiting for DE PLL lock\n");
+ drm_err(&dev_priv->drm, "timeout waiting for DE PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
@@ -1438,13 +1470,14 @@ static void cnl_cdclk_pll_disable(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(BXT_DE_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DE_PLL_ENABLE);
val &= ~BXT_DE_PLL_PLL_ENABLE;
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
- DRM_ERROR("timeout waiting for CDCLK PLL unlock\n");
+ if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) == 0, 1))
+ drm_err(&dev_priv->drm,
+ "timeout waiting for CDCLK PLL unlock\n");
dev_priv->cdclk.hw.vco = 0;
}
@@ -1455,14 +1488,15 @@ static void cnl_cdclk_pll_enable(struct drm_i915_private *dev_priv, int vco)
u32 val;
val = CNL_CDCLK_PLL_RATIO(ratio);
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
val |= BXT_DE_PLL_PLL_ENABLE;
- I915_WRITE(BXT_DE_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val);
/* Timeout 200us */
- if (wait_for((I915_READ(BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
- DRM_ERROR("timeout waiting for CDCLK PLL lock\n");
+ if (wait_for((intel_de_read(dev_priv, BXT_DE_PLL_ENABLE) & BXT_DE_PLL_LOCK) != 0, 1))
+ drm_err(&dev_priv->drm,
+ "timeout waiting for CDCLK PLL lock\n");
dev_priv->cdclk.hw.vco = vco;
}
@@ -1488,11 +1522,11 @@ static u32 bxt_cdclk_cd2x_pipe(struct drm_i915_private *dev_priv, enum pipe pipe
}
static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- int cdclk = cdclk_state->cdclk;
- int vco = cdclk_state->vco;
+ int cdclk = cdclk_config->cdclk;
+ int vco = cdclk_config->vco;
u32 val, divider;
int ret;
@@ -1512,30 +1546,34 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
0x80000000, 150, 2);
if (ret) {
- DRM_ERROR("Failed to inform PCU about cdclk change (err %d, freq %d)\n",
- ret, cdclk);
+ drm_err(&dev_priv->drm,
+ "Failed to inform PCU about cdclk change (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
/* cdclk = vco / 2 / div{1,1.5,2,4} */
switch (DIV_ROUND_CLOSEST(vco, cdclk)) {
default:
- WARN_ON(cdclk != dev_priv->cdclk.hw.bypass);
- WARN_ON(vco != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ cdclk != dev_priv->cdclk.hw.bypass);
+ drm_WARN_ON(&dev_priv->drm, vco != 0);
/* fall through */
case 2:
divider = BXT_CDCLK_CD2X_DIV_SEL_1;
break;
case 3:
- WARN(IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
- "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm,
+ IS_GEMINILAKE(dev_priv) || INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_1_5;
break;
case 4:
divider = BXT_CDCLK_CD2X_DIV_SEL_2;
break;
case 8:
- WARN(INTEL_GEN(dev_priv) >= 10, "Unsupported divider\n");
+ drm_WARN(&dev_priv->drm, INTEL_GEN(dev_priv) >= 10,
+ "Unsupported divider\n");
divider = BXT_CDCLK_CD2X_DIV_SEL_4;
break;
}
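
A concrete instance of the divider selection above (numbers invented for illustration):

/*
 * vco = 1267200 kHz and cdclk = 316800 kHz gives
 * DIV_ROUND_CLOSEST(vco, cdclk) == 4, so the switch selects
 * BXT_CDCLK_CD2X_DIV_SEL_2, matching cdclk = vco / 2 / 2.
 */
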
@@ -1566,14 +1604,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
*/
if (IS_GEN9_LP(dev_priv) && cdclk >= 500000)
val |= BXT_CDCLK_SSA_PRECHARGE_ENABLE;
- I915_WRITE(CDCLK_CTL, val);
+ intel_de_write(dev_priv, CDCLK_CTL, val);
if (pipe != INVALID_PIPE)
intel_wait_for_vblank(dev_priv, pipe);
if (INTEL_GEN(dev_priv) >= 10) {
ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL,
- cdclk_state->voltage_level);
+ cdclk_config->voltage_level);
} else {
/*
* The timeout isn't specified, the 2ms used here is based on
@@ -1583,13 +1621,14 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
*/
ret = sandybridge_pcode_write_timeout(dev_priv,
HSW_PCODE_DE_WRITE_FREQ_REQ,
- cdclk_state->voltage_level,
+ cdclk_config->voltage_level,
150, 2);
}
if (ret) {
- DRM_ERROR("PCode CDCLK freq set failed, (err %d, freq %d)\n",
- ret, cdclk);
+ drm_err(&dev_priv->drm,
+ "PCode CDCLK freq set failed, (err %d, freq %d)\n",
+ ret, cdclk);
return;
}
@@ -1600,7 +1639,7 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv,
* Can't read out the voltage level :(
* Let's just assume everything is as expected.
*/
- dev_priv->cdclk.hw.voltage_level = cdclk_state->voltage_level;
+ dev_priv->cdclk.hw.voltage_level = cdclk_config->voltage_level;
}
static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
@@ -1609,7 +1648,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
int cdclk, vco;
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
if (dev_priv->cdclk.hw.vco == 0 ||
dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass)
@@ -1621,7 +1660,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
* set reserved MBZ bits in CDCLK_CTL at least during exiting from S4,
* so sanitize this register.
*/
- cdctl = I915_READ(CDCLK_CTL);
+ cdctl = intel_de_read(dev_priv, CDCLK_CTL);
/*
* Let's ignore the pipe field, since BIOS could have configured the
* dividers both synching to an active pipe, or asynchronously
@@ -1672,7 +1711,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv)
return;
sanitize:
- DRM_DEBUG_KMS("Sanitizing cdclk programmed by pre-os\n");
+ drm_dbg_kms(&dev_priv->drm, "Sanitizing cdclk programmed by pre-os\n");
/* force cdclk programming */
dev_priv->cdclk.hw.cdclk = 0;
@@ -1681,9 +1720,9 @@ sanitize:
dev_priv->cdclk.hw.vco = -1;
}
-static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_init_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state;
+ struct intel_cdclk_config cdclk_config;
bxt_sanitize_cdclk(dev_priv);
@@ -1691,35 +1730,35 @@ static void bxt_init_cdclk(struct drm_i915_private *dev_priv)
dev_priv->cdclk.hw.vco != 0)
return;
- cdclk_state = dev_priv->cdclk.hw;
+ cdclk_config = dev_priv->cdclk.hw;
/*
* FIXME:
* - The initial CDCLK needs to be read from VBT.
* Need to make this change after VBT has changes for BXT.
*/
- cdclk_state.cdclk = bxt_calc_cdclk(dev_priv, 0);
- cdclk_state.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_state.cdclk);
- cdclk_state.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = bxt_calc_cdclk(dev_priv, 0);
+ cdclk_config.vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk_config.cdclk);
+ cdclk_config.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
-static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
+static void bxt_cdclk_uninit_hw(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = dev_priv->cdclk.hw;
+ struct intel_cdclk_config cdclk_config = dev_priv->cdclk.hw;
- cdclk_state.cdclk = cdclk_state.bypass;
- cdclk_state.vco = 0;
- cdclk_state.voltage_level =
- dev_priv->display.calc_voltage_level(cdclk_state.cdclk);
+ cdclk_config.cdclk = cdclk_config.bypass;
+ cdclk_config.vco = 0;
+ cdclk_config.voltage_level =
+ dev_priv->display.calc_voltage_level(cdclk_config.cdclk);
- bxt_set_cdclk(dev_priv, &cdclk_state, INVALID_PIPE);
+ bxt_set_cdclk(dev_priv, &cdclk_config, INVALID_PIPE);
}
/**
- * intel_cdclk_init - Initialize CDCLK
+ * intel_cdclk_init_hw - Initialize CDCLK hardware
* @i915: i915 device
*
* Initialize CDCLK. This consists mainly of initializing dev_priv->cdclk.hw and
@@ -1727,39 +1766,41 @@ static void bxt_uninit_cdclk(struct drm_i915_private *dev_priv)
* during the display core initialization sequence, after which the DMC will
* take care of turning CDCLK off/on as needed.
*/
-void intel_cdclk_init(struct drm_i915_private *i915)
+void intel_cdclk_init_hw(struct drm_i915_private *i915)
{
if (IS_GEN9_LP(i915) || INTEL_GEN(i915) >= 10)
- bxt_init_cdclk(i915);
+ bxt_cdclk_init_hw(i915);
else if (IS_GEN9_BC(i915))
- skl_init_cdclk(i915);
+ skl_cdclk_init_hw(i915);
}
/**
- * intel_cdclk_uninit - Uninitialize CDCLK
+ * intel_cdclk_uninit_hw - Uninitialize CDCLK hardware
* @i915: i915 device
*
* Uninitialize CDCLK. This is done only during the display core
* uninitialization sequence.
*/
-void intel_cdclk_uninit(struct drm_i915_private *i915)
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915)
{
if (INTEL_GEN(i915) >= 10 || IS_GEN9_LP(i915))
- bxt_uninit_cdclk(i915);
+ bxt_cdclk_uninit_hw(i915);
else if (IS_GEN9_BC(i915))
- skl_uninit_cdclk(i915);
+ skl_cdclk_uninit_hw(i915);
}
/**
- * intel_cdclk_needs_modeset - Determine if two CDCLK states require a modeset on all pipes
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_needs_modeset - Determine if changing between the CDCLK
+ * configurations requires a modeset on all pipes
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states require pipes to be off during reprogramming, false if not.
+ * True if changing between the two CDCLK configurations
+ * requires all pipes to be off, false if not.
*/
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
return a->cdclk != b->cdclk ||
a->vco != b->vco ||
@@ -1767,17 +1808,19 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
}
/**
- * intel_cdclk_needs_cd2x_update - Determine if two CDCLK states require a cd2x divider update
- * @dev_priv: Not a CDCLK state, it's the drm_i915_private!
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_can_cd2x_update - Determine if changing between the two CDCLK
+ * configurations requires only a cd2x divider update
+ * @dev_priv: i915 device
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states require just a cd2x divider update, false if not.
+ * True if changing between the two CDCLK configurations
+ * can be done with just a cd2x divider update, false if not.
*/
-static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_can_cd2x_update(struct drm_i915_private *dev_priv,
+ const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
/* Older hw doesn't have the capability */
if (INTEL_GEN(dev_priv) < 10 && !IS_GEN9_LP(dev_priv))
@@ -1789,117 +1832,138 @@ static bool intel_cdclk_needs_cd2x_update(struct drm_i915_private *dev_priv,
}
/**
- * intel_cdclk_changed - Determine if two CDCLK states are different
- * @a: first CDCLK state
- * @b: second CDCLK state
+ * intel_cdclk_changed - Determine if two CDCLK configurations are different
+ * @a: first CDCLK configuration
+ * @b: second CDCLK configuration
*
* Returns:
- * True if the CDCLK states don't match, false if they do.
+ * True if the CDCLK configurations don't match, false if they do.
*/
-static bool intel_cdclk_changed(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b)
+static bool intel_cdclk_changed(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b)
{
return intel_cdclk_needs_modeset(a, b) ||
a->voltage_level != b->voltage_level;
}
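
The two predicates above differ only by the voltage level: a change that merely moves the voltage level counts as "changed" (so the hardware gets poked) without forcing a modeset. A reduced standalone sketch of that relationship; the excerpt cuts intel_cdclk_needs_modeset() off after the vco comparison, so the sketch keeps only the fields visible here, and the example values are hypothetical:

#include <stdbool.h>
#include <stdio.h>

struct cfg { int cdclk, vco; unsigned char voltage_level; };

static bool needs_modeset(const struct cfg *a, const struct cfg *b)
{
	return a->cdclk != b->cdclk || a->vco != b->vco;
}

static bool changed(const struct cfg *a, const struct cfg *b)
{
	/* a pure voltage-level move is "changed" but needs no modeset */
	return needs_modeset(a, b) || a->voltage_level != b->voltage_level;
}

int main(void)
{
	struct cfg a = { 540000, 8100000, 1 };
	struct cfg b = { 540000, 8100000, 2 };	/* only voltage differs */

	printf("modeset=%d changed=%d\n",
	       needs_modeset(&a, &b), changed(&a, &b)); /* modeset=0 changed=1 */
	return 0;
}
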
-/**
- * intel_cdclk_swap_state - make atomic CDCLK configuration effective
- * @state: atomic state
- *
- * This is the CDCLK version of drm_atomic_helper_swap_state() since the
- * helper does not handle driver-specific global state.
- *
- * Similarly to the atomic helpers this function does a complete swap,
- * i.e. it also puts the old state into @state. This is used by the commit
- * code to determine how CDCLK has changed (for instance did it increase or
- * decrease).
- */
-void intel_cdclk_swap_state(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
-
- swap(state->cdclk.logical, dev_priv->cdclk.logical);
- swap(state->cdclk.actual, dev_priv->cdclk.actual);
-}
-
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context)
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+ const char *context)
{
DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n",
- context, cdclk_state->cdclk, cdclk_state->vco,
- cdclk_state->ref, cdclk_state->bypass,
- cdclk_state->voltage_level);
+ context, cdclk_config->cdclk, cdclk_config->vco,
+ cdclk_config->ref, cdclk_config->bypass,
+ cdclk_config->voltage_level);
}
/**
- * intel_set_cdclk - Push the CDCLK state to the hardware
+ * intel_set_cdclk - Push the CDCLK configuration to the hardware
* @dev_priv: i915 device
- * @cdclk_state: new CDCLK state
+ * @cdclk_config: new CDCLK configuration
* @pipe: pipe with which to synchronize the update
*
* Program the hardware based on the passed in CDCLK state,
* if necessary.
*/
static void intel_set_cdclk(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe)
{
- if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state))
+ struct intel_encoder *encoder;
+
+ if (!intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config))
return;
- if (WARN_ON_ONCE(!dev_priv->display.set_cdclk))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->display.set_cdclk))
return;
- intel_dump_cdclk_state(cdclk_state, "Changing CDCLK to");
+ intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to");
- dev_priv->display.set_cdclk(dev_priv, cdclk_state, pipe);
+ /*
+ * Lock aux/gmbus while we change cdclk in case those
+ * functions use cdclk. Not all platforms/ports do,
+ * but we'll lock them all for simplicity.
+ */
+ mutex_lock(&dev_priv->gmbus_mutex);
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_lock_nest_lock(&intel_dp->aux.hw_mutex,
+ &dev_priv->gmbus_mutex);
+ }
+
+ dev_priv->display.set_cdclk(dev_priv, cdclk_config, pipe);
- if (WARN(intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_state),
- "cdclk state doesn't match!\n")) {
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "[hw state]");
- intel_dump_cdclk_state(cdclk_state, "[sw state]");
+ for_each_intel_dp(&dev_priv->drm, encoder) {
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ mutex_unlock(&intel_dp->aux.hw_mutex);
+ }
+ mutex_unlock(&dev_priv->gmbus_mutex);
+
+ if (drm_WARN(&dev_priv->drm,
+ intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config),
+ "cdclk state doesn't match!\n")) {
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]");
+ intel_dump_cdclk_config(cdclk_config, "[sw state]");
}
}
/**
* intel_set_cdclk_pre_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
*
- * Program the hardware before updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware before updating the HW plane state based on the
+ * new CDCLK state, if necessary.
*/
void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe)
+intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state)
{
- if (pipe == INVALID_PIPE || old_state->cdclk <= new_state->cdclk)
- intel_set_cdclk(dev_priv, new_state, pipe);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe == INVALID_PIPE ||
+ old_cdclk_state->actual.cdclk <= new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
}
/**
* intel_set_cdclk_post_plane_update - Push the CDCLK state to the hardware
- * @dev_priv: i915 device
- * @old_state: old CDCLK state
- * @new_state: new CDCLK state
- * @pipe: pipe with which to synchronize the update
+ * @state: intel atomic state
*
- * Program the hardware after updating the HW plane state based on the passed
- * in CDCLK state, if necessary.
+ * Program the hardware after updating the HW plane state based on the
+ * new CDCLK state, if necessary.
*/
void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe)
+intel_set_cdclk_post_plane_update(struct intel_atomic_state *state)
{
- if (pipe != INVALID_PIPE && old_state->cdclk > new_state->cdclk)
- intel_set_cdclk(dev_priv, new_state, pipe);
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state =
+ intel_atomic_get_old_cdclk_state(state);
+ const struct intel_cdclk_state *new_cdclk_state =
+ intel_atomic_get_new_cdclk_state(state);
+ enum pipe pipe = new_cdclk_state->pipe;
+
+ if (!intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual))
+ return;
+
+ if (pipe != INVALID_PIPE &&
+ old_cdclk_state->actual.cdclk > new_cdclk_state->actual.cdclk) {
+ drm_WARN_ON(&dev_priv->drm, !new_cdclk_state->base.changed);
+
+ intel_set_cdclk(dev_priv, &new_cdclk_state->actual, pipe);
+ }
}
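
Taken together, the two helpers above encode a simple ordering rule: cdclk goes up before the planes are reprogrammed and comes down only afterwards, so no pipe ever runs against a cdclk lower than what either the old or the new plane state requires. A minimal sketch of just that decision (values hypothetical):

#include <stdio.h>

enum phase { PRE_PLANE_UPDATE, POST_PLANE_UPDATE };

/* mirrors the guards above: increases commit early, decreases late */
static enum phase cdclk_commit_phase(int old_cdclk, int new_cdclk)
{
	return new_cdclk >= old_cdclk ? PRE_PLANE_UPDATE : POST_PLANE_UPDATE;
}

int main(void)
{
	printf("%d %d\n",
	       cdclk_commit_phase(307200, 652800),	/* 0: pre-plane */
	       cdclk_commit_phase(652800, 307200));	/* 1: post-plane */
	return 0;
}
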
static int intel_pixel_rate_to_cdclk(const struct intel_crtc_state *crtc_state)
@@ -2017,25 +2081,24 @@ int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state)
min_cdclk = max(min_cdclk, (int)crtc_state->pixel_rate);
if (min_cdclk > dev_priv->max_cdclk_freq) {
- DRM_DEBUG_KMS("required cdclk (%d kHz) exceeds max (%d kHz)\n",
- min_cdclk, dev_priv->max_cdclk_freq);
+ drm_dbg_kms(&dev_priv->drm,
+ "required cdclk (%d kHz) exceeds max (%d kHz)\n",
+ min_cdclk, dev_priv->max_cdclk_freq);
return -EINVAL;
}
return min_cdclk;
}
-static int intel_compute_min_cdclk(struct intel_atomic_state *state)
+static int intel_compute_min_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int min_cdclk, i;
enum pipe pipe;
- memcpy(state->min_cdclk, dev_priv->min_cdclk,
- sizeof(state->min_cdclk));
-
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
@@ -2043,19 +2106,19 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
if (min_cdclk < 0)
return min_cdclk;
- if (state->min_cdclk[i] == min_cdclk)
+ if (cdclk_state->min_cdclk[i] == min_cdclk)
continue;
- state->min_cdclk[i] = min_cdclk;
+ cdclk_state->min_cdclk[i] = min_cdclk;
- ret = intel_atomic_lock_global_state(state);
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
return ret;
}
- min_cdclk = state->cdclk.force_min_cdclk;
+ min_cdclk = cdclk_state->force_min_cdclk;
for_each_pipe(dev_priv, pipe)
- min_cdclk = max(state->min_cdclk[pipe], min_cdclk);
+ min_cdclk = max(cdclk_state->min_cdclk[pipe], min_cdclk);
return min_cdclk;
}
@@ -2073,8 +2136,9 @@ static int intel_compute_min_cdclk(struct intel_atomic_state *state)
* future platforms this code will need to be
* adjusted.
*/
-static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
+static int bxt_compute_min_voltage_level(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
@@ -2082,9 +2146,6 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
int i;
enum pipe pipe;
- memcpy(state->min_voltage_level, dev_priv->min_voltage_level,
- sizeof(state->min_voltage_level));
-
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret;
@@ -2093,57 +2154,58 @@ static int bxt_compute_min_voltage_level(struct intel_atomic_state *state)
else
min_voltage_level = 0;
- if (state->min_voltage_level[i] == min_voltage_level)
+ if (cdclk_state->min_voltage_level[i] == min_voltage_level)
continue;
- state->min_voltage_level[i] = min_voltage_level;
+ cdclk_state->min_voltage_level[i] = min_voltage_level;
- ret = intel_atomic_lock_global_state(state);
+ ret = intel_atomic_lock_global_state(&cdclk_state->base);
if (ret)
return ret;
}
min_voltage_level = 0;
for_each_pipe(dev_priv, pipe)
- min_voltage_level = max(state->min_voltage_level[pipe],
+ min_voltage_level = max(cdclk_state->min_voltage_level[pipe],
min_voltage_level);
return min_voltage_level;
}
-static int vlv_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int vlv_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, cdclk;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
cdclk = vlv_calc_cdclk(dev_priv, min_cdclk);
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
- if (!state->active_pipes) {
- cdclk = vlv_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = vlv_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
vlv_calc_voltage_level(dev_priv, cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bdw_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk, cdclk;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
@@ -2153,31 +2215,32 @@ static int bdw_modeset_calc_cdclk(struct intel_atomic_state *state)
*/
cdclk = bdw_calc_cdclk(min_cdclk);
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
bdw_calc_voltage_level(cdclk);
- if (!state->active_pipes) {
- cdclk = bdw_calc_cdclk(state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = bdw_calc_cdclk(cdclk_state->force_min_cdclk);
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
bdw_calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int skl_dpll0_vco(struct intel_atomic_state *state)
+static int skl_dpll0_vco(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
int vco, i;
- vco = state->cdclk.logical.vco;
+ vco = cdclk_state->logical.vco;
if (!vco)
vco = dev_priv->skl_preferred_vco_freq;
@@ -2206,15 +2269,15 @@ static int skl_dpll0_vco(struct intel_atomic_state *state)
return vco;
}
-static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int skl_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk, cdclk, vco;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
- vco = skl_dpll0_vco(state);
+ vco = skl_dpll0_vco(cdclk_state);
/*
* FIXME should also account for plane ratio
@@ -2222,57 +2285,58 @@ static int skl_modeset_calc_cdclk(struct intel_atomic_state *state)
*/
cdclk = skl_calc_cdclk(min_cdclk, vco);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
skl_calc_voltage_level(cdclk);
- if (!state->active_pipes) {
- cdclk = skl_calc_cdclk(state->cdclk.force_min_cdclk, vco);
+ if (!cdclk_state->active_pipes) {
+ cdclk = skl_calc_cdclk(cdclk_state->force_min_cdclk, vco);
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
skl_calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
}
-static int bxt_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int bxt_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
+ struct intel_atomic_state *state = cdclk_state->base.state;
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
int min_cdclk, min_voltage_level, cdclk, vco;
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
- min_voltage_level = bxt_compute_min_voltage_level(state);
+ min_voltage_level = bxt_compute_min_voltage_level(cdclk_state);
if (min_voltage_level < 0)
return min_voltage_level;
cdclk = bxt_calc_cdclk(dev_priv, min_cdclk);
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- state->cdclk.logical.vco = vco;
- state->cdclk.logical.cdclk = cdclk;
- state->cdclk.logical.voltage_level =
+ cdclk_state->logical.vco = vco;
+ cdclk_state->logical.cdclk = cdclk;
+ cdclk_state->logical.voltage_level =
max_t(int, min_voltage_level,
dev_priv->display.calc_voltage_level(cdclk));
- if (!state->active_pipes) {
- cdclk = bxt_calc_cdclk(dev_priv, state->cdclk.force_min_cdclk);
+ if (!cdclk_state->active_pipes) {
+ cdclk = bxt_calc_cdclk(dev_priv, cdclk_state->force_min_cdclk);
vco = bxt_calc_cdclk_pll_vco(dev_priv, cdclk);
- state->cdclk.actual.vco = vco;
- state->cdclk.actual.cdclk = cdclk;
- state->cdclk.actual.voltage_level =
+ cdclk_state->actual.vco = vco;
+ cdclk_state->actual.cdclk = cdclk;
+ cdclk_state->actual.voltage_level =
dev_priv->display.calc_voltage_level(cdclk);
} else {
- state->cdclk.actual = state->cdclk.logical;
+ cdclk_state->actual = cdclk_state->logical;
}
return 0;
@@ -2317,7 +2381,7 @@ static int intel_modeset_all_pipes(struct intel_atomic_state *state)
return 0;
}
-static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
+static int fixed_modeset_calc_cdclk(struct intel_cdclk_state *cdclk_state)
{
int min_cdclk;
@@ -2326,54 +2390,113 @@ static int fixed_modeset_calc_cdclk(struct intel_atomic_state *state)
* check that the required minimum frequency doesn't exceed
* the actual cdclk frequency.
*/
- min_cdclk = intel_compute_min_cdclk(state);
+ min_cdclk = intel_compute_min_cdclk(cdclk_state);
if (min_cdclk < 0)
return min_cdclk;
return 0;
}
+static struct intel_global_state *intel_cdclk_duplicate_state(struct intel_global_obj *obj)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kmemdup(obj->state, sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return NULL;
+
+ cdclk_state->force_min_cdclk_changed = false;
+ cdclk_state->pipe = INVALID_PIPE;
+
+ return &cdclk_state->base;
+}
+
+static void intel_cdclk_destroy_state(struct intel_global_obj *obj,
+ struct intel_global_state *state)
+{
+ kfree(state);
+}
+
+static const struct intel_global_state_funcs intel_cdclk_funcs = {
+ .atomic_duplicate_state = intel_cdclk_duplicate_state,
+ .atomic_destroy_state = intel_cdclk_destroy_state,
+};
+
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *cdclk_state;
+
+ cdclk_state = intel_atomic_get_global_obj_state(state, &dev_priv->cdclk.obj);
+ if (IS_ERR(cdclk_state))
+ return ERR_CAST(cdclk_state);
+
+ return to_intel_cdclk_state(cdclk_state);
+}
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_cdclk_state *cdclk_state;
+
+ cdclk_state = kzalloc(sizeof(*cdclk_state), GFP_KERNEL);
+ if (!cdclk_state)
+ return -ENOMEM;
+
+ intel_atomic_global_obj_init(dev_priv, &dev_priv->cdclk.obj,
+ &cdclk_state->base, &intel_cdclk_funcs);
+
+ return 0;
+}
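
intel_atomic_get_cdclk_state() is the accessor everything else is presumably expected to go through. A hedged, kernel-context usage sketch (example_check is a hypothetical function; the rest is as declared in this patch):

static int example_check(struct intel_atomic_state *state)
{
	struct intel_cdclk_state *cdclk_state;

	cdclk_state = intel_atomic_get_cdclk_state(state);
	if (IS_ERR(cdclk_state))
		return PTR_ERR(cdclk_state);

	/* ->logical and ->actual can now be read or updated under the
	 * global-state tracking the accessor presumably set up */
	return 0;
}
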
+
int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ const struct intel_cdclk_state *old_cdclk_state;
+ struct intel_cdclk_state *new_cdclk_state;
enum pipe pipe;
int ret;
- ret = dev_priv->display.modeset_calc_cdclk(state);
+ new_cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(new_cdclk_state))
+ return PTR_ERR(new_cdclk_state);
+
+ old_cdclk_state = intel_atomic_get_old_cdclk_state(state);
+
+ new_cdclk_state->active_pipes =
+ intel_calc_active_pipes(state, old_cdclk_state->active_pipes);
+
+ ret = dev_priv->display.modeset_calc_cdclk(new_cdclk_state);
if (ret)
return ret;
- /*
- * Writes to dev_priv->cdclk.{actual,logical} must protected
- * by holding all the crtc mutexes even if we don't end up
- * touching the hardware
- */
- if (intel_cdclk_changed(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ if (intel_cdclk_changed(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
/*
* Also serialize commits across all crtcs
* if the actual hw needs to be poked.
*/
- ret = intel_atomic_serialize_global_state(state);
+ ret = intel_atomic_serialize_global_state(&new_cdclk_state->base);
if (ret)
return ret;
- } else if (intel_cdclk_changed(&dev_priv->cdclk.logical,
- &state->cdclk.logical)) {
- ret = intel_atomic_lock_global_state(state);
+ } else if (old_cdclk_state->active_pipes != new_cdclk_state->active_pipes ||
+ intel_cdclk_changed(&old_cdclk_state->logical,
+ &new_cdclk_state->logical)) {
+ ret = intel_atomic_lock_global_state(&new_cdclk_state->base);
if (ret)
return ret;
} else {
return 0;
}
- if (is_power_of_2(state->active_pipes) &&
- intel_cdclk_needs_cd2x_update(dev_priv,
- &dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ if (is_power_of_2(new_cdclk_state->active_pipes) &&
+ intel_cdclk_can_cd2x_update(dev_priv,
+ &old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- pipe = ilog2(state->active_pipes);
+ pipe = ilog2(new_cdclk_state->active_pipes);
crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
@@ -2387,28 +2510,32 @@ int intel_modeset_calc_cdclk(struct intel_atomic_state *state)
}
if (pipe != INVALID_PIPE) {
- state->cdclk.pipe = pipe;
+ new_cdclk_state->pipe = pipe;
- DRM_DEBUG_KMS("Can change cdclk with pipe %c active\n",
- pipe_name(pipe));
- } else if (intel_cdclk_needs_modeset(&dev_priv->cdclk.actual,
- &state->cdclk.actual)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Can change cdclk with pipe %c active\n",
+ pipe_name(pipe));
+ } else if (intel_cdclk_needs_modeset(&old_cdclk_state->actual,
+ &new_cdclk_state->actual)) {
/* All pipes must be switched off while we change the cdclk. */
ret = intel_modeset_all_pipes(state);
if (ret)
return ret;
- state->cdclk.pipe = INVALID_PIPE;
+ new_cdclk_state->pipe = INVALID_PIPE;
- DRM_DEBUG_KMS("Modeset required for cdclk change\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset required for cdclk change\n");
}
- DRM_DEBUG_KMS("New cdclk calculated to be logical %u kHz, actual %u kHz\n",
- state->cdclk.logical.cdclk,
- state->cdclk.actual.cdclk);
- DRM_DEBUG_KMS("New voltage level calculated to be logical %u, actual %u\n",
- state->cdclk.logical.voltage_level,
- state->cdclk.actual.voltage_level);
+ drm_dbg_kms(&dev_priv->drm,
+ "New cdclk calculated to be logical %u kHz, actual %u kHz\n",
+ new_cdclk_state->logical.cdclk,
+ new_cdclk_state->actual.cdclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "New voltage level calculated to be logical %u, actual %u\n",
+ new_cdclk_state->logical.voltage_level,
+ new_cdclk_state->actual.voltage_level);
return 0;
}
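
is_power_of_2() on the active-pipe bitmask is how the code above detects "exactly one pipe active", and ilog2() then recovers that pipe's index so the cd2x update can synchronize to it. A standalone sketch with open-coded stand-ins for the two kernel helpers (bitmask value hypothetical):

#include <stdio.h>

static int is_power_of_2(unsigned int n) { return n && !(n & (n - 1)); }

static int ilog2_u(unsigned int n)
{
	int i = -1;

	while (n) {	/* position of the highest set bit */
		n >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned int active_pipes = 0x4;	/* only bit 2 (pipe C) set */

	if (is_power_of_2(active_pipes))
		printf("cd2x update can sync to pipe index %d\n",
		       ilog2_u(active_pipes));	/* prints 2 */
	return 0;
}
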
@@ -2453,11 +2580,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
} else if (IS_CANNONLAKE(dev_priv)) {
dev_priv->max_cdclk_freq = 528000;
} else if (IS_GEN9_BC(dev_priv)) {
- u32 limit = I915_READ(SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
+ u32 limit = intel_de_read(dev_priv, SKL_DFSM) & SKL_DFSM_CDCLK_LIMIT_MASK;
int max_cdclk, vco;
vco = dev_priv->skl_preferred_vco_freq;
- WARN_ON(vco != 8100000 && vco != 8640000);
+ drm_WARN_ON(&dev_priv->drm, vco != 8100000 && vco != 8640000);
/*
* Use the lower (vco 8640) cdclk values as a
@@ -2485,7 +2612,7 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
* How can we know if extra cooling is
* available? PCI ID, VTB, something else?
*/
- if (I915_READ(FUSE_STRAP) & HSW_CDCLK_LIMIT)
+ if (intel_de_read(dev_priv, FUSE_STRAP) & HSW_CDCLK_LIMIT)
dev_priv->max_cdclk_freq = 450000;
else if (IS_BDW_ULX(dev_priv))
dev_priv->max_cdclk_freq = 450000;
@@ -2504,11 +2631,11 @@ void intel_update_max_cdclk(struct drm_i915_private *dev_priv)
dev_priv->max_dotclk_freq = intel_compute_max_dotclk(dev_priv);
- DRM_DEBUG_DRIVER("Max CD clock rate: %d kHz\n",
- dev_priv->max_cdclk_freq);
+ drm_dbg(&dev_priv->drm, "Max CD clock rate: %d kHz\n",
+ dev_priv->max_cdclk_freq);
- DRM_DEBUG_DRIVER("Max dotclock rate: %d kHz\n",
- dev_priv->max_dotclk_freq);
+ drm_dbg(&dev_priv->drm, "Max dotclock rate: %d kHz\n",
+ dev_priv->max_dotclk_freq);
}
/**
@@ -2528,8 +2655,8 @@ void intel_update_cdclk(struct drm_i915_private *dev_priv)
* generate GMBus clock. This will vary with the cdclk freq.
*/
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- I915_WRITE(GMBUSFREQ_VLV,
- DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
+ intel_de_write(dev_priv, GMBUSFREQ_VLV,
+ DIV_ROUND_UP(dev_priv->cdclk.hw.cdclk, 1000));
}
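
The GMBUSFREQ_VLV value written above is simply cdclk rounded up from kHz to MHz. A standalone check of the arithmetic, with DIV_ROUND_UP open-coded to the kernel's definition (cdclk value hypothetical):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int cdclk = 333333;	/* kHz, hypothetical */

	printf("%d\n", DIV_ROUND_UP(cdclk, 1000));	/* 334 MHz */
	return 0;
}
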
static int cnp_rawclk(struct drm_i915_private *dev_priv)
@@ -2537,7 +2664,7 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
u32 rawclk;
int divider, fraction;
- if (I915_READ(SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
+ if (intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_RAW_FREQUENCY) {
/* 24 MHz */
divider = 24000;
fraction = 0;
@@ -2557,13 +2684,13 @@ static int cnp_rawclk(struct drm_i915_private *dev_priv)
rawclk |= ICP_RAWCLK_NUM(numerator);
}
- I915_WRITE(PCH_RAWCLK_FREQ, rawclk);
+ intel_de_write(dev_priv, PCH_RAWCLK_FREQ, rawclk);
return divider + fraction;
}
static int pch_rawclk(struct drm_i915_private *dev_priv)
{
- return (I915_READ(PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
+ return (intel_de_read(dev_priv, PCH_RAWCLK_FREQ) & RAWCLK_FREQ_MASK) * 1000;
}
static int vlv_hrawclk(struct drm_i915_private *dev_priv)
@@ -2578,7 +2705,7 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
u32 clkcfg;
/* hrawclock is 1/4 the FSB frequency */
- clkcfg = I915_READ(CLKCFG);
+ clkcfg = intel_de_read(dev_priv, CLKCFG);
switch (clkcfg & CLKCFG_FSB_MASK) {
case CLKCFG_FSB_400:
return 100000;
@@ -2600,27 +2727,29 @@ static int g4x_hrawclk(struct drm_i915_private *dev_priv)
}
/**
- * intel_update_rawclk - Determine the current RAWCLK frequency
+ * intel_read_rawclk - Determine the current RAWCLK frequency
* @dev_priv: i915 device
*
* Determine the current RAWCLK frequency. RAWCLK is a fixed
* frequency clock so this needs to done only once.
*/
-void intel_update_rawclk(struct drm_i915_private *dev_priv)
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv)
{
+ u32 freq;
+
if (INTEL_PCH_TYPE(dev_priv) >= PCH_CNP)
- dev_priv->rawclk_freq = cnp_rawclk(dev_priv);
+ freq = cnp_rawclk(dev_priv);
else if (HAS_PCH_SPLIT(dev_priv))
- dev_priv->rawclk_freq = pch_rawclk(dev_priv);
+ freq = pch_rawclk(dev_priv);
else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- dev_priv->rawclk_freq = vlv_hrawclk(dev_priv);
+ freq = vlv_hrawclk(dev_priv);
else if (IS_G4X(dev_priv) || IS_PINEVIEW(dev_priv))
- dev_priv->rawclk_freq = g4x_hrawclk(dev_priv);
+ freq = g4x_hrawclk(dev_priv);
else
/* no rawclk on other platforms, or no need to know it */
- return;
+ return 0;
- DRM_DEBUG_DRIVER("rawclk rate: %d kHz\n", dev_priv->rawclk_freq);
+ return freq;
}
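
With intel_read_rawclk() now returning the frequency instead of caching it, the single caller presumably stores it itself, along the lines of:

	dev_priv->rawclk_freq = intel_read_rawclk(dev_priv);

(dev_priv->rawclk_freq is the field the removed lines used to assign; the actual call site is outside this excerpt.)
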
/**
@@ -2629,7 +2758,12 @@ void intel_update_rawclk(struct drm_i915_private *dev_priv)
*/
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
{
- if (IS_ELKHARTLAKE(dev_priv)) {
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dev_priv->display.set_cdclk = bxt_set_cdclk;
+ dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
+ dev_priv->display.calc_voltage_level = tgl_calc_voltage_level;
+ dev_priv->cdclk.table = icl_cdclk_table;
+ } else if (IS_ELKHARTLAKE(dev_priv)) {
dev_priv->display.set_cdclk = bxt_set_cdclk;
dev_priv->display.modeset_calc_cdclk = bxt_modeset_calc_cdclk;
dev_priv->display.calc_voltage_level = ehl_calc_voltage_level;
@@ -2709,8 +2843,8 @@ void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv)
else if (IS_I845G(dev_priv))
dev_priv->display.get_cdclk = fixed_200mhz_get_cdclk;
else { /* 830 */
- WARN(!IS_I830(dev_priv),
- "Unknown platform. Assuming 133 MHz CDCLK\n");
+ drm_WARN(&dev_priv->drm, !IS_I830(dev_priv),
+ "Unknown platform. Assuming 133 MHz CDCLK\n");
dev_priv->display.get_cdclk = fixed_133mhz_get_cdclk;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h
index cf71394cc79c..5731806e4cee 100644
--- a/drivers/gpu/drm/i915/display/intel_cdclk.h
+++ b/drivers/gpu/drm/i915/display/intel_cdclk.h
@@ -8,11 +8,12 @@
#include <linux/types.h>
+#include "i915_drv.h"
#include "intel_display.h"
+#include "intel_global_state.h"
struct drm_i915_private;
struct intel_atomic_state;
-struct intel_cdclk_state;
struct intel_crtc_state;
struct intel_cdclk_vals {
@@ -22,28 +23,62 @@ struct intel_cdclk_vals {
u8 ratio;
};
+struct intel_cdclk_state {
+ struct intel_global_state base;
+
+ /*
+ * Logical configuration of cdclk (used for all scaling,
+ * watermark, etc. calculations and checks). This is
+ * computed as if all enabled crtcs were active.
+ */
+ struct intel_cdclk_config logical;
+
+ /*
+	 * Actual configuration of cdclk, which can differ from the
+	 * logical configuration only when all crtcs are DPMS off.
+ */
+ struct intel_cdclk_config actual;
+
+ /* minimum acceptable cdclk for each pipe */
+ int min_cdclk[I915_MAX_PIPES];
+ /* minimum acceptable voltage level for each pipe */
+ u8 min_voltage_level[I915_MAX_PIPES];
+
+ /* pipe to which cd2x update is synchronized */
+ enum pipe pipe;
+
+ /* forced minimum cdclk for glk+ audio w/a */
+ int force_min_cdclk;
+ bool force_min_cdclk_changed;
+
+ /* bitmask of active pipes */
+ u8 active_pipes;
+};
+
int intel_crtc_compute_min_cdclk(const struct intel_crtc_state *crtc_state);
-void intel_cdclk_init(struct drm_i915_private *i915);
-void intel_cdclk_uninit(struct drm_i915_private *i915);
+void intel_cdclk_init_hw(struct drm_i915_private *i915);
+void intel_cdclk_uninit_hw(struct drm_i915_private *i915);
void intel_init_cdclk_hooks(struct drm_i915_private *dev_priv);
void intel_update_max_cdclk(struct drm_i915_private *dev_priv);
void intel_update_cdclk(struct drm_i915_private *dev_priv);
-void intel_update_rawclk(struct drm_i915_private *dev_priv);
-bool intel_cdclk_needs_modeset(const struct intel_cdclk_state *a,
- const struct intel_cdclk_state *b);
-void intel_cdclk_swap_state(struct intel_atomic_state *state);
-void
-intel_set_cdclk_pre_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe);
-void
-intel_set_cdclk_post_plane_update(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *old_state,
- const struct intel_cdclk_state *new_state,
- enum pipe pipe);
-void intel_dump_cdclk_state(const struct intel_cdclk_state *cdclk_state,
- const char *context);
+u32 intel_read_rawclk(struct drm_i915_private *dev_priv);
+bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a,
+ const struct intel_cdclk_config *b);
+void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state);
+void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state);
+void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config,
+ const char *context);
int intel_modeset_calc_cdclk(struct intel_atomic_state *state);
+struct intel_cdclk_state *
+intel_atomic_get_cdclk_state(struct intel_atomic_state *state);
+
+#define to_intel_cdclk_state(x) container_of((x), struct intel_cdclk_state, base)
+#define intel_atomic_get_old_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_old_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+#define intel_atomic_get_new_cdclk_state(state) \
+ to_intel_cdclk_state(intel_atomic_get_new_global_obj_state(state, &to_i915(state->base.dev)->cdclk.obj))
+
+int intel_cdclk_init(struct drm_i915_private *dev_priv);
+
#endif /* __INTEL_CDCLK_H__ */
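
The to_intel_cdclk_state() macro above is the usual container_of() downcast from the embedded intel_global_state base back to the wrapping intel_cdclk_state. A standalone sketch of the pattern with reduced, hypothetical structs:

#include <stddef.h>
#include <stdio.h>

struct base { int dummy; };
struct derived { int x; struct base base; };

/* same arithmetic as the kernel macro: back up from the member
 * pointer by the member's offset within the containing type */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

int main(void)
{
	struct derived d = { .x = 42 };
	struct base *b = &d.base;

	printf("%d\n", container_of(b, struct derived, base)->x); /* 42 */
	return 0;
}
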
diff --git a/drivers/gpu/drm/i915/display/intel_color.c b/drivers/gpu/drm/i915/display/intel_color.c
index 3980e8b50c28..c1cce93a1c25 100644
--- a/drivers/gpu/drm/i915/display/intel_color.c
+++ b/drivers/gpu/drm/i915/display/intel_color.c
@@ -157,23 +157,29 @@ static void ilk_update_pipe_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
- I915_WRITE(PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
- I915_WRITE(PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
- I915_WRITE(PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BY(pipe), coeff[2] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
- I915_WRITE(PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BU(pipe), coeff[5] << 16);
- I915_WRITE(PIPE_CSC_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
- I915_WRITE(PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write(dev_priv, PIPE_CSC_COEFF_BV(pipe), coeff[8] << 16);
if (INTEL_GEN(dev_priv) >= 7) {
- I915_WRITE(PIPE_CSC_POSTOFF_HI(pipe), postoff[0]);
- I915_WRITE(PIPE_CSC_POSTOFF_ME(pipe), postoff[1]);
- I915_WRITE(PIPE_CSC_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_HI(pipe),
+ postoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_ME(pipe),
+ postoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_POSTOFF_LO(pipe),
+ postoff[2]);
}
}
@@ -185,22 +191,28 @@ static void icl_update_output_csc(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_HI(pipe), preoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_ME(pipe), preoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_PREOFF_LO(pipe), preoff[2]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe), coeff[0] << 16 | coeff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BY(pipe), coeff[2] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RY_GY(pipe),
+ coeff[0] << 16 | coeff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BY(pipe),
+ coeff[2] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe), coeff[3] << 16 | coeff[4]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BU(pipe), coeff[5] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RU_GU(pipe),
+ coeff[3] << 16 | coeff[4]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BU(pipe),
+ coeff[5] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe), coeff[6] << 16 | coeff[7]);
- I915_WRITE(PIPE_CSC_OUTPUT_COEFF_BV(pipe), coeff[8] << 16);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_RV_GV(pipe),
+ coeff[6] << 16 | coeff[7]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_COEFF_BV(pipe),
+ coeff[8] << 16);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
- I915_WRITE(PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_HI(pipe), postoff[0]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_ME(pipe), postoff[1]);
+ intel_de_write(dev_priv, PIPE_CSC_OUTPUT_POSTOFF_LO(pipe), postoff[2]);
}
static bool ilk_csc_limited_range(const struct intel_crtc_state *crtc_state)
@@ -297,14 +309,16 @@ static void ilk_load_csc_matrix(const struct intel_crtc_state *crtc_state)
* LUT is needed but CSC is not we need to load an
* identity matrix.
*/
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_GEMINILAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_GEMINILAKE(dev_priv));
ilk_update_pipe_csc(crtc, ilk_csc_off_zero,
ilk_csc_coeff_identity,
ilk_csc_off_zero);
}
- I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+ intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
@@ -330,51 +344,74 @@ static void icl_load_csc_matrix(const struct intel_crtc_state *crtc_state)
ilk_csc_postoff_limited_range);
}
- I915_WRITE(PIPE_CSC_MODE(crtc->pipe), crtc_state->csc_mode);
+ intel_de_write(dev_priv, PIPE_CSC_MODE(crtc->pipe),
+ crtc_state->csc_mode);
}
-/*
- * Set up the pipe CSC unit on CherryView.
- */
-static void cherryview_load_csc_matrix(const struct intel_crtc_state *crtc_state)
+static void chv_load_cgm_csc(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_ctm *ctm = blob->data;
enum pipe pipe = crtc->pipe;
+ u16 coeffs[9];
+ int i;
- if (crtc_state->hw.ctm) {
- const struct drm_color_ctm *ctm = crtc_state->hw.ctm->data;
- u16 coeffs[9] = {};
- int i;
-
- for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
- u64 abs_coeff =
- ((1ULL << 63) - 1) & ctm->matrix[i];
-
- /* Round coefficient. */
- abs_coeff += 1 << (32 - 13);
- /* Clamp to hardware limits. */
- abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
-
- /* Write coefficients in S3.12 format. */
- if (ctm->matrix[i] & (1ULL << 63))
- coeffs[i] = 1 << 15;
- coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
- coeffs[i] |= (abs_coeff >> 20) & 0xfff;
- }
+ for (i = 0; i < ARRAY_SIZE(coeffs); i++) {
+ u64 abs_coeff = ((1ULL << 63) - 1) & ctm->matrix[i];
+
+ /* Round coefficient. */
+ abs_coeff += 1 << (32 - 13);
+ /* Clamp to hardware limits. */
+ abs_coeff = clamp_val(abs_coeff, 0, CTM_COEFF_8_0 - 1);
+
+ coeffs[i] = 0;
+
+ /* Write coefficients in S3.12 format. */
+ if (ctm->matrix[i] & (1ULL << 63))
+ coeffs[i] |= 1 << 15;
- I915_WRITE(CGM_PIPE_CSC_COEFF01(pipe),
- coeffs[1] << 16 | coeffs[0]);
- I915_WRITE(CGM_PIPE_CSC_COEFF23(pipe),
- coeffs[3] << 16 | coeffs[2]);
- I915_WRITE(CGM_PIPE_CSC_COEFF45(pipe),
- coeffs[5] << 16 | coeffs[4]);
- I915_WRITE(CGM_PIPE_CSC_COEFF67(pipe),
- coeffs[7] << 16 | coeffs[6]);
- I915_WRITE(CGM_PIPE_CSC_COEFF8(pipe), coeffs[8]);
+ coeffs[i] |= ((abs_coeff >> 32) & 7) << 12;
+ coeffs[i] |= (abs_coeff >> 20) & 0xfff;
}
- I915_WRITE(CGM_PIPE_MODE(pipe), crtc_state->cgm_mode);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF01(pipe),
+ coeffs[1] << 16 | coeffs[0]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF23(pipe),
+ coeffs[3] << 16 | coeffs[2]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF45(pipe),
+ coeffs[5] << 16 | coeffs[4]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF67(pipe),
+ coeffs[7] << 16 | coeffs[6]);
+ intel_de_write(dev_priv, CGM_PIPE_CSC_COEFF8(pipe),
+ coeffs[8]);
+}
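
The coefficient conversion above turns the DRM CTM's S31.32 fixed-point entries into the hardware's S3.12 format: round at the first discarded fraction bit, clamp below 8.0, then pack sign, 3 integer bits and 12 fraction bits. A standalone walk-through, assuming the DRM convention that 1.0 in S31.32 is 1ULL << 32 (clamping omitted since 1.0 is in range):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t matrix = 1ULL << 32;			/* +1.0 in S31.32 */
	uint64_t abs_coeff = ((1ULL << 63) - 1) & matrix;
	uint16_t coeff = 0;

	abs_coeff += 1 << (32 - 13);	/* round at the bit below the 12 kept */

	if (matrix & (1ULL << 63))
		coeff |= 1 << 15;			/* sign */
	coeff |= ((abs_coeff >> 32) & 7) << 12;		/* integer, 3 bits */
	coeff |= (abs_coeff >> 20) & 0xfff;		/* fraction, 12 bits */

	printf("0x%04x\n", coeff);	/* 0x1000 == 1.0 in S3.12 */
	return 0;
}
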
+
+/* convert hw value with given bit_precision to lut property val */
+static u32 intel_color_lut_pack(u32 val, int bit_precision)
+{
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ val = clamp_val(val, 0, max);
+
+ if (bit_precision < 16)
+ val <<= 16 - bit_precision;
+
+ return val;
+}
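
intel_color_lut_pack() rescales a hardware value of the given bit width back to the 16-bit range used by the LUT properties, clamping first. A standalone restatement of the same arithmetic (lut_pack is a local name for the sketch):

#include <stdint.h>
#include <stdio.h>

static uint32_t lut_pack(uint32_t val, int bits)
{
	uint32_t max = 0xffff >> (16 - bits);

	if (val > max)		/* clamp_val() equivalent */
		val = max;
	if (bits < 16)
		val <<= 16 - bits;
	return val;
}

int main(void)
{
	printf("0x%x\n", lut_pack(0x3ff, 10));	/* 0xffc0: full scale, 10 bit */
	printf("0x%x\n", lut_pack(0x80, 8));	/* 0x8000: mid scale, 8 bit */
	return 0;
}
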
+
+static u32 i9xx_lut_8(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 8) << 16 |
+ drm_color_lut_extract(color->green, 8) << 8 |
+ drm_color_lut_extract(color->blue, 8);
+}
+
+static void i9xx_lut_8_pack(struct drm_color_lut *entry, u32 val)
+{
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_RED_MASK, val), 8);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_GREEN_MASK, val), 8);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(LGC_PALETTE_BLUE_MASK, val), 8);
}
/* i965+ "10.6" bit interpolated format "even DW" (low 8 bits) */
@@ -393,49 +430,34 @@ static u32 i965_lut_10p6_udw(const struct drm_color_lut *color)
(color->blue >> 8);
}
-static u32 ilk_lut_10(const struct drm_color_lut *color)
+static void i965_lut_10p6_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
{
- return drm_color_lut_extract(color->red, 10) << 20 |
- drm_color_lut_extract(color->green, 10) << 10 |
- drm_color_lut_extract(color->blue, 10);
+ entry->red = REG_FIELD_GET(PALETTE_RED_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_RED_MASK, ldw);
+ entry->green = REG_FIELD_GET(PALETTE_GREEN_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_GREEN_MASK, ldw);
+ entry->blue = REG_FIELD_GET(PALETTE_BLUE_MASK, udw) << 8 |
+ REG_FIELD_GET(PALETTE_BLUE_MASK, ldw);
}
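
The pack function above is the inverse of the ldw/udw split used on the load side: each 16-bit channel travels as two 8-bit register fields. A standalone round-trip check (value hypothetical, register masks elided; the high/low split is an assumption matching the >> 8 in i965_lut_10p6_udw() above):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t red = 0xabcd;		/* 16-bit LUT property value */
	uint8_t udw = red >> 8;		/* high byte, "udw" field */
	uint8_t ldw = red & 0xff;	/* low byte, "ldw" field */

	printf("0x%04x\n", (uint16_t)(udw << 8 | ldw)); /* 0xabcd again */
	return 0;
}
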
-/* Loads the legacy palette/gamma unit for the CRTC. */
-static void i9xx_load_luts_internal(const struct intel_crtc_state *crtc_state,
- const struct drm_property_blob *blob)
+static u16 i965_lut_11p6_max_pack(u32 val)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum pipe pipe = crtc->pipe;
- int i;
-
- if (HAS_GMCH(dev_priv)) {
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- assert_dsi_pll_enabled(dev_priv);
- else
- assert_pll_enabled(dev_priv, pipe);
- }
-
- if (blob) {
- const struct drm_color_lut *lut = blob->data;
-
- for (i = 0; i < 256; i++) {
- u32 word =
- (drm_color_lut_extract(lut[i].red, 8) << 16) |
- (drm_color_lut_extract(lut[i].green, 8) << 8) |
- drm_color_lut_extract(lut[i].blue, 8);
+ /* PIPEGCMAX is 11.6, clamp to 10.6 */
+ return clamp_val(val, 0, 0xffff);
+}
- if (HAS_GMCH(dev_priv))
- I915_WRITE(PALETTE(pipe, i), word);
- else
- I915_WRITE(LGC_PALETTE(pipe, i), word);
- }
- }
+static u32 ilk_lut_10(const struct drm_color_lut *color)
+{
+ return drm_color_lut_extract(color->red, 10) << 20 |
+ drm_color_lut_extract(color->green, 10) << 10 |
+ drm_color_lut_extract(color->blue, 10);
}
-static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+static void ilk_lut_10_pack(struct drm_color_lut *entry, u32 val)
{
- i9xx_load_luts_internal(crtc_state, crtc_state->hw.gamma_lut);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_RED_MASK, val), 10);
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_GREEN_MASK, val), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(PREC_PALETTE_BLUE_MASK, val), 10);
}
static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
@@ -445,10 +467,10 @@ static void i9xx_color_commit(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val;
- val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_I9XX;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- I915_WRITE(PIPECONF(pipe), val);
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
}
static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
@@ -458,10 +480,10 @@ static void ilk_color_commit(const struct intel_crtc_state *crtc_state)
enum pipe pipe = crtc->pipe;
u32 val;
- val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, PIPECONF(pipe));
val &= ~PIPECONF_GAMMA_MODE_MASK_ILK;
val |= PIPECONF_GAMMA_MODE(crtc_state->gamma_mode);
- I915_WRITE(PIPECONF(pipe), val);
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
ilk_load_csc_matrix(crtc_state);
}
@@ -471,7 +493,8 @@ static void hsw_color_commit(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
ilk_load_csc_matrix(crtc_state);
}
@@ -492,9 +515,10 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
val |= SKL_BOTTOM_COLOR_GAMMA_ENABLE;
if (crtc_state->csc_enable)
val |= SKL_BOTTOM_COLOR_CSC_ENABLE;
- I915_WRITE(SKL_BOTTOM_COLOR(pipe), val);
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(pipe), val);
- I915_WRITE(GAMMA_MODE(crtc->pipe), crtc_state->gamma_mode);
+ intel_de_write(dev_priv, GAMMA_MODE(crtc->pipe),
+ crtc_state->gamma_mode);
if (INTEL_GEN(dev_priv) >= 11)
icl_load_csc_matrix(crtc_state);
@@ -502,6 +526,35 @@ static void skl_color_commit(const struct intel_crtc_state *crtc_state)
ilk_load_csc_matrix(crtc_state);
}
+static void i9xx_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
+static void i9xx_load_luts(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
+ i9xx_load_lut_8(crtc, gamma_lut);
+}
+
static void i965_load_lut_10p6(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -511,28 +564,52 @@ static void i965_load_lut_10p6(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size - 1; i++) {
- I915_WRITE(PALETTE(pipe, 2 * i + 0),
- i965_lut_10p6_ldw(&lut[i]));
- I915_WRITE(PALETTE(pipe, 2 * i + 1),
- i965_lut_10p6_udw(&lut[i]));
+ intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 0),
+ i965_lut_10p6_ldw(&lut[i]));
+ intel_de_write(dev_priv, PALETTE(pipe, 2 * i + 1),
+ i965_lut_10p6_udw(&lut[i]));
}
- I915_WRITE(PIPEGCMAX(pipe, 0), lut[i].red);
- I915_WRITE(PIPEGCMAX(pipe, 1), lut[i].green);
- I915_WRITE(PIPEGCMAX(pipe, 2), lut[i].blue);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 0), lut[i].red);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 1), lut[i].green);
+ intel_de_write(dev_priv, PIPEGCMAX(pipe, 2), lut[i].blue);
}
static void i965_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
+ assert_dsi_pll_enabled(dev_priv);
+ else
+ assert_pll_enabled(dev_priv, crtc->pipe);
+
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ i9xx_load_lut_8(crtc, gamma_lut);
else
i965_load_lut_10p6(crtc, gamma_lut);
}
+static void ilk_load_lut_8(struct intel_crtc *crtc,
+ const struct drm_property_blob *blob)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_color_lut *lut;
+ enum pipe pipe = crtc->pipe;
+ int i;
+
+ if (!blob)
+ return;
+
+ lut = blob->data;
+
+ for (i = 0; i < 256; i++)
+ intel_de_write(dev_priv, LGC_PALETTE(pipe, i),
+ i9xx_lut_8(&lut[i]));
+}
+
static void ilk_load_lut_10(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -542,7 +619,8 @@ static void ilk_load_lut_10(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++)
- I915_WRITE(PREC_PALETTE(pipe, i), ilk_lut_10(&lut[i]));
+ intel_de_write(dev_priv, PREC_PALETTE(pipe, i),
+ ilk_lut_10(&lut[i]));
}
static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -551,7 +629,7 @@ static void ilk_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
else
ilk_load_lut_10(crtc, gamma_lut);
}
@@ -584,15 +662,16 @@ static void ivb_load_lut_10(struct intel_crtc *crtc,
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index++);
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), prec_index++);
+ intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
	 * Reset the index, otherwise it prevents the legacy palette from
	 * being written properly.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
/* On BDW+ the index auto increment mode actually works */
@@ -606,22 +685,23 @@ static void bdw_load_lut_10(struct intel_crtc *crtc,
int i, lut_size = drm_color_lut_size(blob);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
/* We discard half the user entries in split gamma mode */
const struct drm_color_lut *entry =
&lut[i * (lut_size - 1) / (hw_lut_size - 1)];
- I915_WRITE(PREC_PAL_DATA(pipe), ilk_lut_10(entry));
+ intel_de_write(dev_priv, PREC_PAL_DATA(pipe),
+ ilk_lut_10(entry));
}
/*
	 * Reset the index, otherwise it prevents the legacy palette from
	 * being written properly.
*/
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
}
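
The i * (lut_size - 1) / (hw_lut_size - 1) indexing used by ivb_load_lut_10() and bdw_load_lut_10() above resamples the user LUT onto the smaller hardware LUT so that both endpoints map exactly. A standalone check of the endpoints (sizes hypothetical):

#include <stdio.h>

int main(void)
{
	int lut_size = 1024, hw_lut_size = 512;
	int i;

	for (i = 0; i < hw_lut_size; i += hw_lut_size - 1)	/* 0 and 511 */
		printf("hw[%d] <- user[%d]\n", i,
		       i * (lut_size - 1) / (hw_lut_size - 1));
	/* prints hw[0] <- user[0] and hw[511] <- user[1023] */
	return 0;
}
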
static void ivb_load_lut_ext_max(struct intel_crtc *crtc)
@@ -659,7 +739,7 @@ static void ivb_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
ivb_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -682,7 +762,7 @@ static void bdw_load_luts(const struct intel_crtc_state *crtc_state)
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else if (crtc_state->gamma_mode == GAMMA_MODE_MODE_SPLIT) {
bdw_load_lut_10(crtc, degamma_lut, PAL_PREC_SPLIT_MODE |
PAL_PREC_INDEX_VALUE(0));
@@ -703,17 +783,17 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
const struct drm_color_lut *lut = crtc_state->hw.degamma_lut->data;
- u32 i;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
/*
@@ -729,12 +809,13 @@ static void glk_load_degamma_lut(const struct intel_crtc_state *crtc_state)
* ToDo: Extend to max 7.0. Enable 32 bit input value
* as compared to just 16 to achieve this.
*/
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), lut[i].green);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe),
+ lut[i].green);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_state)
@@ -742,26 +823,26 @@ static void glk_load_degamma_lut_linear(const struct intel_crtc_state *crtc_stat
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- const u32 lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
- u32 i;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.degamma_lut_size;
/*
* When setting the auto-increment bit, the hardware seems to
* ignore the index bits, so we need to reset it to index 0
* separately.
*/
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), 0);
- I915_WRITE(PRE_CSC_GAMC_INDEX(pipe), PRE_CSC_GAMC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_INDEX(pipe),
+ PRE_CSC_GAMC_AUTO_INCREMENT);
for (i = 0; i < lut_size; i++) {
u32 v = (i << 16) / (lut_size - 1);
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), v);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), v);
}
/* Clamp values > 1.0. */
while (i++ < 35)
- I915_WRITE(PRE_CSC_GAMC_DATA(pipe), 1 << 16);
+ intel_de_write(dev_priv, PRE_CSC_GAMC_DATA(pipe), 1 << 16);
}
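A standalone sketch (assumed values, not driver code) of the 0.16 fixed-point ramp built above: (i << 16) / (lut_size - 1) walks linearly from 0 to 1 << 16 (i.e. 1.0), and the trailing while loop pads the remaining hardware entries up to 35 with the 1.0 clamp value.

#include <stdio.h>

int main(void)
{
	int lut_size = 33; /* assumed hardware degamma LUT size */
	unsigned int v;
	int i;

	for (i = 0; i < lut_size; i++) {
		v = ((unsigned int)i << 16) / (lut_size - 1);
		/* sample the ramp: 0x00000, 0x08000 (0.5), 0x10000 (1.0) */
		if (i == 0 || i == lut_size / 2 || i == lut_size - 1)
			printf("entry %2d = 0x%05x\n", i, v);
	}
	/* entries lut_size..34 are padded with 1.0 (1 << 16) */
	while (i++ < 35)
		printf("pad      = 0x%05x\n", 1u << 16);
	return 0;
}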
static void glk_load_luts(const struct intel_crtc_state *crtc_state)
@@ -783,7 +864,7 @@ static void glk_load_luts(const struct intel_crtc_state *crtc_state)
glk_load_degamma_lut_linear(crtc_state);
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT) {
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
} else {
bdw_load_lut_10(crtc, gamma_lut, PAL_PREC_INDEX_VALUE(0));
ivb_load_lut_ext_max(crtc);
@@ -827,7 +908,7 @@ icl_program_gamma_superfine_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *lut = blob->data;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Super Fine segment (let's call it seg1)...
@@ -860,7 +941,7 @@ icl_program_gamma_multi_segment(const struct intel_crtc_state *crtc_state)
const struct drm_color_lut *entry;
struct intel_dsb *dsb = intel_dsb_get(crtc);
enum pipe pipe = crtc->pipe;
- u32 i;
+ int i;
/*
* Program Fine segment (let's call it seg2)...
@@ -919,7 +1000,7 @@ static void icl_load_luts(const struct intel_crtc_state *crtc_state)
switch (crtc_state->gamma_mode & GAMMA_MODE_MODE_MASK) {
case GAMMA_MODE_MODE_8BIT:
- i9xx_load_luts(crtc_state);
+ ilk_load_lut_8(crtc, gamma_lut);
break;
case GAMMA_MODE_MODE_12BIT_MULTI_SEGMENTED:
icl_program_gamma_superfine_segment(crtc_state);
@@ -945,6 +1026,13 @@ static u32 chv_cgm_degamma_udw(const struct drm_color_lut *color)
return drm_color_lut_extract(color->red, 14);
}
+static void chv_cgm_gamma_pack(struct drm_color_lut *entry, u32 ldw, u32 udw)
+{
+ entry->green = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_GREEN_MASK, ldw), 10);
+ entry->blue = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_BLUE_MASK, ldw), 10);
+ entry->red = intel_color_lut_pack(REG_FIELD_GET(CGM_PIPE_GAMMA_RED_MASK, udw), 10);
+}
+
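The new chv_cgm_gamma_pack() above relies on the intel_color_lut_pack() helper whose old local definition is deleted further down in this diff. A standalone sketch of that widening, reconstructed from the removed body; the sample values are assumptions:

#include <stdint.h>
#include <stdio.h>

/* Widen an N-bit hardware LUT sample to the 16-bit drm_color_lut range
 * by clamping and left-shifting, mirroring the removed helper. */
static uint32_t lut_pack(uint32_t val, uint32_t bit_precision)
{
	uint32_t max = 0xffff >> (16 - bit_precision);

	if (val > max)
		val = max;
	if (bit_precision < 16)
		val <<= 16 - bit_precision;
	return val;
}

int main(void)
{
	printf("0x%04x\n", lut_pack(0x3ff, 10)); /* 10-bit max -> 0xffc0 */
	printf("0x%04x\n", lut_pack(0x200, 10)); /* mid-scale  -> 0x8000 */
	return 0;
}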
static void chv_load_cgm_degamma(struct intel_crtc *crtc,
const struct drm_property_blob *blob)
{
@@ -954,10 +1042,10 @@ static void chv_load_cgm_degamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 0),
- chv_cgm_degamma_ldw(&lut[i]));
- I915_WRITE(CGM_PIPE_DEGAMMA(pipe, i, 1),
- chv_cgm_degamma_udw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 0),
+ chv_cgm_degamma_ldw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_DEGAMMA(pipe, i, 1),
+ chv_cgm_degamma_udw(&lut[i]));
}
}
@@ -981,31 +1069,34 @@ static void chv_load_cgm_gamma(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
for (i = 0; i < lut_size; i++) {
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 0),
- chv_cgm_gamma_ldw(&lut[i]));
- I915_WRITE(CGM_PIPE_GAMMA(pipe, i, 1),
- chv_cgm_gamma_udw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0),
+ chv_cgm_gamma_ldw(&lut[i]));
+ intel_de_write(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1),
+ chv_cgm_gamma_udw(&lut[i]));
}
}
static void chv_load_luts(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
- const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
const struct drm_property_blob *degamma_lut = crtc_state->hw.degamma_lut;
+ const struct drm_property_blob *gamma_lut = crtc_state->hw.gamma_lut;
+ const struct drm_property_blob *ctm = crtc_state->hw.ctm;
- cherryview_load_csc_matrix(crtc_state);
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_CSC)
+ chv_load_cgm_csc(crtc, ctm);
- if (crtc_state_is_legacy_gamma(crtc_state)) {
- i9xx_load_luts(crtc_state);
- return;
- }
-
- if (degamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_DEGAMMA)
chv_load_cgm_degamma(crtc, degamma_lut);
- if (gamma_lut)
+ if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
chv_load_cgm_gamma(crtc, gamma_lut);
+ else
+ i965_load_luts(crtc_state);
+
+ intel_de_write(dev_priv, CGM_PIPE_MODE(crtc->pipe),
+ crtc_state->cgm_mode);
}
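The reworked flow above keys every CGM stage off crtc_state->cgm_mode bits instead of blob presence, and writes CGM_PIPE_MODE last. A standalone sketch of that gating; the bit values are assumptions, not the real register layout:

#include <stdio.h>

/* assumed stand-ins for the CGM_PIPE_MODE_* bits */
#define MODE_CSC     (1u << 0)
#define MODE_DEGAMMA (1u << 1)
#define MODE_GAMMA   (1u << 2)

static void load_luts(unsigned int cgm_mode)
{
	if (cgm_mode & MODE_CSC)
		printf("load CGM CSC\n");
	if (cgm_mode & MODE_DEGAMMA)
		printf("load CGM degamma\n");
	if (cgm_mode & MODE_GAMMA)
		printf("load CGM gamma\n");
	else
		printf("load legacy/i965 LUT\n");
	/* the mode register is programmed after the tables, as above */
	printf("write CGM_PIPE_MODE = 0x%x last\n", cgm_mode);
}

int main(void)
{
	load_luts(MODE_CSC | MODE_GAMMA);
	load_luts(0); /* everything via the legacy path */
	return 0;
}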
void intel_color_load_luts(const struct intel_crtc_state *crtc_state)
@@ -1167,7 +1258,8 @@ static int check_luts(const struct intel_crtc_state *crtc_state)
/* C8 relies on its palette being stored in the legacy LUT */
if (crtc_state->c8_planes) {
- DRM_DEBUG_KMS("C8 pixelformat requires the legacy LUT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "C8 pixelformat requires the legacy LUT\n");
return -EINVAL;
}
@@ -1630,28 +1722,13 @@ bool intel_color_lut_equal(struct drm_property_blob *blob1,
return true;
}
-/* convert hw value with given bit_precision to lut property val */
-static u32 intel_color_lut_pack(u32 val, u32 bit_precision)
+static struct drm_property_blob *i9xx_read_lut_8(struct intel_crtc *crtc)
{
- u32 max = 0xffff >> (16 - bit_precision);
-
- val = clamp_val(val, 0, max);
-
- if (bit_precision < 16)
- val <<= 16 - bit_precision;
-
- return val;
-}
-
-static struct drm_property_blob *
-i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
-{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
+ int i;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
@@ -1659,20 +1736,12 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
- if (HAS_GMCH(dev_priv))
- val = I915_READ(PALETTE(pipe, i));
- else
- val = I915_READ(LGC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_RED_MASK, val), 8);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_GREEN_MASK, val), 8);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- LGC_PALETTE_BLUE_MASK, val), 8);
+ u32 val = intel_de_read(dev_priv, PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
}
return blob;
@@ -1680,22 +1749,21 @@ i9xx_read_lut_8(const struct intel_crtc_state *crtc_state)
static void i9xx_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
}
-static struct drm_property_blob *
-i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *i965_read_lut_10p6(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val1, val2;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1703,51 +1771,42 @@ i965_read_lut_10p6(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size - 1; i++) {
- val1 = I915_READ(PALETTE(pipe, 2 * i + 0));
- val2 = I915_READ(PALETTE(pipe, 2 * i + 1));
-
- blob_data[i].red = REG_FIELD_GET(PALETTE_RED_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_RED_MASK, val1);
- blob_data[i].green = REG_FIELD_GET(PALETTE_GREEN_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_GREEN_MASK, val1);
- blob_data[i].blue = REG_FIELD_GET(PALETTE_BLUE_MASK, val2) << 8 |
- REG_FIELD_GET(PALETTE_BLUE_MASK, val1);
+ u32 ldw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 0));
+ u32 udw = intel_de_read(dev_priv, PALETTE(pipe, 2 * i + 1));
+
+ i965_lut_10p6_pack(&lut[i], ldw, udw);
}
- blob_data[i].red = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 0)));
- blob_data[i].green = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 1)));
- blob_data[i].blue = REG_FIELD_GET(PIPEGCMAX_RGB_MASK,
- I915_READ(PIPEGCMAX(pipe, 2)));
+ lut[i].red = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 0)));
+ lut[i].green = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 1)));
+ lut[i].blue = i965_lut_11p6_max_pack(intel_de_read(dev_priv, PIPEGCMAX(pipe, 2)));
return blob;
}
static void i965_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc_state);
+ crtc_state->hw.gamma_lut = i965_read_lut_10p6(crtc);
}
-static struct drm_property_blob *
-chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *chv_read_cgm_gamma(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1755,18 +1814,13 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 0));
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_BLUE_MASK, val), 10);
-
- val = I915_READ(CGM_PIPE_GAMMA(pipe, i, 1));
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- CGM_PIPE_GAMMA_RED_MASK, val), 10);
+ u32 ldw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 0));
+ u32 udw = intel_de_read(dev_priv, CGM_PIPE_GAMMA(pipe, i, 1));
+
+ chv_cgm_gamma_pack(&lut[i], ldw, udw);
}
return blob;
@@ -1774,22 +1828,46 @@ chv_read_cgm_lut(const struct intel_crtc_state *crtc_state)
static void chv_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (crtc_state->cgm_mode & CGM_PIPE_MODE_GAMMA)
- crtc_state->hw.gamma_lut = chv_read_cgm_lut(crtc_state);
+ crtc_state->hw.gamma_lut = chv_read_cgm_gamma(crtc);
else
i965_read_luts(crtc_state);
}
-static struct drm_property_blob *
-ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
+static struct drm_property_blob *ilk_read_lut_8(struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+ struct drm_property_blob *blob;
+ struct drm_color_lut *lut;
+ int i;
+
+ blob = drm_property_create_blob(&dev_priv->drm,
+ sizeof(struct drm_color_lut) * LEGACY_LUT_LENGTH,
+ NULL);
+ if (IS_ERR(blob))
+ return NULL;
+
+ lut = blob->data;
+
+ for (i = 0; i < LEGACY_LUT_LENGTH; i++) {
+ u32 val = intel_de_read(dev_priv, LGC_PALETTE(pipe, i));
+
+ i9xx_lut_8_pack(&lut[i], val);
+ }
+
+ return blob;
+}
+
+static struct drm_property_blob *ilk_read_lut_10(struct intel_crtc *crtc)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- u32 lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
+ int i, lut_size = INTEL_INFO(dev_priv)->color.gamma_lut_size;
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * lut_size,
@@ -1797,17 +1875,12 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
for (i = 0; i < lut_size; i++) {
- val = I915_READ(PREC_PALETTE(pipe, i));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PALETTE_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PALETTE(pipe, i));
+
+ ilk_lut_10_pack(&lut[i], val);
}
return blob;
@@ -1815,6 +1888,8 @@ ilk_read_lut_10(const struct intel_crtc_state *crtc_state)
static void ilk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
@@ -1822,21 +1897,19 @@ static void ilk_read_luts(struct intel_crtc_state *crtc_state)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_10(crtc);
}
-static struct drm_property_blob *
-glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
+static struct drm_property_blob *glk_read_lut_10(struct intel_crtc *crtc,
+ u32 prec_index)
{
- struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- int hw_lut_size = ivb_lut_10_size(prec_index);
+ int i, hw_lut_size = ivb_lut_10_size(prec_index);
enum pipe pipe = crtc->pipe;
struct drm_property_blob *blob;
- struct drm_color_lut *blob_data;
- u32 i, val;
+ struct drm_color_lut *lut;
blob = drm_property_create_blob(&dev_priv->drm,
sizeof(struct drm_color_lut) * hw_lut_size,
@@ -1844,36 +1917,33 @@ glk_read_lut_10(const struct intel_crtc_state *crtc_state, u32 prec_index)
if (IS_ERR(blob))
return NULL;
- blob_data = blob->data;
+ lut = blob->data;
- I915_WRITE(PREC_PAL_INDEX(pipe), prec_index |
- PAL_PREC_AUTO_INCREMENT);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe),
+ prec_index | PAL_PREC_AUTO_INCREMENT);
for (i = 0; i < hw_lut_size; i++) {
- val = I915_READ(PREC_PAL_DATA(pipe));
-
- blob_data[i].red = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_RED_MASK, val), 10);
- blob_data[i].green = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_GREEN_MASK, val), 10);
- blob_data[i].blue = intel_color_lut_pack(REG_FIELD_GET(
- PREC_PAL_DATA_BLUE_MASK, val), 10);
+ u32 val = intel_de_read(dev_priv, PREC_PAL_DATA(pipe));
+
+ ilk_lut_10_pack(&lut[i], val);
}
- I915_WRITE(PREC_PAL_INDEX(pipe), 0);
+ intel_de_write(dev_priv, PREC_PAL_INDEX(pipe), 0);
return blob;
}
static void glk_read_luts(struct intel_crtc_state *crtc_state)
{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+
if (!crtc_state->gamma_enable)
return;
if (crtc_state->gamma_mode == GAMMA_MODE_MODE_8BIT)
- crtc_state->hw.gamma_lut = i9xx_read_lut_8(crtc_state);
+ crtc_state->hw.gamma_lut = ilk_read_lut_8(crtc);
else
- crtc_state->hw.gamma_lut = glk_read_lut_10(crtc_state, PAL_PREC_INDEX_VALUE(0));
+ crtc_state->hw.gamma_lut = glk_read_lut_10(crtc, PAL_PREC_INDEX_VALUE(0));
}
void intel_color_init(struct intel_crtc *crtc)
diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c
index 44bbc7e74fc3..9ff05ec12115 100644
--- a/drivers/gpu/drm/i915/display/intel_combo_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c
@@ -48,7 +48,7 @@ cnl_get_procmon_ref_values(struct drm_i915_private *dev_priv, enum phy phy)
const struct cnl_procmon *procmon;
u32 val;
- val = I915_READ(ICL_PORT_COMP_DW3(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW3(phy));
switch (val & (PROCESS_INFO_MASK | VOLTAGE_INFO_MASK)) {
default:
MISSING_CASE(val);
@@ -81,26 +81,27 @@ static void cnl_set_procmon_ref_values(struct drm_i915_private *dev_priv,
procmon = cnl_get_procmon_ref_values(dev_priv, phy);
- val = I915_READ(ICL_PORT_COMP_DW1(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW1(phy));
val &= ~((0xff << 16) | 0xff);
val |= procmon->dw1;
- I915_WRITE(ICL_PORT_COMP_DW1(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW1(phy), val);
- I915_WRITE(ICL_PORT_COMP_DW9(phy), procmon->dw9);
- I915_WRITE(ICL_PORT_COMP_DW10(phy), procmon->dw10);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW9(phy), procmon->dw9);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW10(phy), procmon->dw10);
}
static bool check_phy_reg(struct drm_i915_private *dev_priv,
enum phy phy, i915_reg_t reg, u32 mask,
u32 expected_val)
{
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
if ((val & mask) != expected_val) {
- DRM_DEBUG_DRIVER("Combo PHY %c reg %08x state mismatch: "
- "current %08x mask %08x expected %08x\n",
- phy_name(phy),
- reg.reg, val, mask, expected_val);
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c reg %08x state mismatch: "
+ "current %08x mask %08x expected %08x\n",
+ phy_name(phy),
+ reg.reg, val, mask, expected_val);
return false;
}
@@ -127,8 +128,8 @@ static bool cnl_verify_procmon_ref_values(struct drm_i915_private *dev_priv,
static bool cnl_combo_phy_enabled(struct drm_i915_private *dev_priv)
{
- return !(I915_READ(CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
- (I915_READ(CNL_PORT_COMP_DW0) & COMP_INIT);
+ return !(intel_de_read(dev_priv, CHICKEN_MISC_2) & CNL_COMP_PWR_DOWN) &&
+ (intel_de_read(dev_priv, CNL_PORT_COMP_DW0) & COMP_INIT);
}
static bool cnl_combo_phy_verify_state(struct drm_i915_private *dev_priv)
@@ -151,20 +152,20 @@ static void cnl_combo_phys_init(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(CHICKEN_MISC_2);
+ val = intel_de_read(dev_priv, CHICKEN_MISC_2);
val &= ~CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_2, val);
/* Dummy PORT_A to get the correct CNL register from the ICL macro */
cnl_set_procmon_ref_values(dev_priv, PHY_A);
- val = I915_READ(CNL_PORT_COMP_DW0);
+ val = intel_de_read(dev_priv, CNL_PORT_COMP_DW0);
val |= COMP_INIT;
- I915_WRITE(CNL_PORT_COMP_DW0, val);
+ intel_de_write(dev_priv, CNL_PORT_COMP_DW0, val);
- val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
}
static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
@@ -172,11 +173,12 @@ static void cnl_combo_phys_uninit(struct drm_i915_private *dev_priv)
u32 val;
if (!cnl_combo_phy_verify_state(dev_priv))
- DRM_WARN("Combo PHY HW state changed unexpectedly.\n");
+ drm_warn(&dev_priv->drm,
+ "Combo PHY HW state changed unexpectedly.\n");
- val = I915_READ(CHICKEN_MISC_2);
+ val = intel_de_read(dev_priv, CHICKEN_MISC_2);
val |= CNL_COMP_PWR_DOWN;
- I915_WRITE(CHICKEN_MISC_2, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_2, val);
}
static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
@@ -184,27 +186,65 @@ static bool icl_combo_phy_enabled(struct drm_i915_private *dev_priv,
{
/* The PHY C added by EHL has no PHY_MISC register */
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
- return I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
+ return intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT;
else
- return !(I915_READ(ICL_PHY_MISC(phy)) &
+ return !(intel_de_read(dev_priv, ICL_PHY_MISC(phy)) &
ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN) &&
- (I915_READ(ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+ (intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy)) & COMP_INIT);
+}
+
+static bool ehl_vbt_ddi_d_present(struct drm_i915_private *i915)
+{
+ bool ddi_a_present = intel_bios_is_port_present(i915, PORT_A);
+ bool ddi_d_present = intel_bios_is_port_present(i915, PORT_D);
+ bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
+
+ /*
+ * VBT's 'dvo port' field for child devices references the DDI, not
+ * the PHY. So if combo PHY A is wired up to drive an external
+ * display, we should see a child device present on PORT_D and
+ * nothing on PORT_A and no DSI.
+ */
+ if (ddi_d_present && !ddi_a_present && !dsi_present)
+ return true;
+
+ /*
+ * If we encounter a VBT that claims to have an external display on
+ * DDI-D _and_ an internal display on DDI-A/DSI, leave an error message
+ * in the log and let the internal display win.
+ */
+ if (ddi_d_present)
+ drm_err(&i915->drm,
+ "VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
+
+ return false;
}
static bool icl_combo_phy_verify_state(struct drm_i915_private *dev_priv,
enum phy phy)
{
bool ret;
+ u32 expected_val = 0;
if (!icl_combo_phy_enabled(dev_priv, phy))
return false;
ret = cnl_verify_procmon_ref_values(dev_priv, phy);
- if (phy == PHY_A)
+ if (phy == PHY_A) {
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_COMP_DW8(phy),
IREFGEN, IREFGEN);
+ if (IS_ELKHARTLAKE(dev_priv)) {
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ expected_val = ICL_PHY_MISC_MUX_DDID;
+
+ ret &= check_phy_reg(dev_priv, phy, ICL_PHY_MISC(phy),
+ ICL_PHY_MISC_MUX_DDID,
+ expected_val);
+ }
+ }
+
ret &= check_phy_reg(dev_priv, phy, ICL_PORT_CL_DW5(phy),
CL_POWER_DOWN_ENABLE, CL_POWER_DOWN_ENABLE);
@@ -219,7 +259,7 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
u32 val;
if (is_dsi) {
- WARN_ON(lane_reversal);
+ drm_WARN_ON(&dev_priv->drm, lane_reversal);
switch (lane_count) {
case 1:
@@ -257,36 +297,10 @@ void intel_combo_phy_power_up_lanes(struct drm_i915_private *dev_priv,
}
}
- val = I915_READ(ICL_PORT_CL_DW10(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW10(phy));
val &= ~PWR_DOWN_LN_MASK;
val |= lane_mask << PWR_DOWN_LN_SHIFT;
- I915_WRITE(ICL_PORT_CL_DW10(phy), val);
-}
-
-static u32 ehl_combo_phy_a_mux(struct drm_i915_private *i915, u32 val)
-{
- bool ddi_a_present = i915->vbt.ddi_port_info[PORT_A].child != NULL;
- bool ddi_d_present = i915->vbt.ddi_port_info[PORT_D].child != NULL;
- bool dsi_present = intel_bios_is_dsi_present(i915, NULL);
-
- /*
- * VBT's 'dvo port' field for child devices references the DDI, not
- * the PHY. So if combo PHY A is wired up to drive an external
- * display, we should see a child device present on PORT_D and
- * nothing on PORT_A and no DSI.
- */
- if (ddi_d_present && !ddi_a_present && !dsi_present)
- return val | ICL_PHY_MISC_MUX_DDID;
-
- /*
- * If we encounter a VBT that claims to have an external display on
- * DDI-D _and_ an internal display on DDI-A/DSI leave an error message
- * in the log and let the internal display win.
- */
- if (ddi_d_present)
- DRM_ERROR("VBT claims to have both internal and external displays on PHY A. Configuring for internal.\n");
-
- return val & ~ICL_PHY_MISC_MUX_DDID;
+ intel_de_write(dev_priv, ICL_PORT_CL_DW10(phy), val);
}
static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
@@ -297,8 +311,9 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
u32 val;
if (icl_combo_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("Combo PHY %c already enabled, won't reprogram it.\n",
- phy_name(phy));
+ drm_dbg(&dev_priv->drm,
+ "Combo PHY %c already enabled, won't reprogram it.\n",
+ phy_name(phy));
continue;
}
@@ -318,28 +333,33 @@ static void icl_combo_phys_init(struct drm_i915_private *dev_priv)
* based on whether our VBT indicates the presence of any
* "internal" child devices.
*/
- val = I915_READ(ICL_PHY_MISC(phy));
- if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A)
- val = ehl_combo_phy_a_mux(dev_priv, val);
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
+ if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_A) {
+ val &= ~ICL_PHY_MISC_MUX_DDID;
+
+ if (ehl_vbt_ddi_d_present(dev_priv))
+ val |= ICL_PHY_MISC_MUX_DDID;
+ }
+
val &= ~ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(phy), val);
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
cnl_set_procmon_ref_values(dev_priv, phy);
if (phy == PHY_A) {
- val = I915_READ(ICL_PORT_COMP_DW8(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW8(phy));
val |= IREFGEN;
- I915_WRITE(ICL_PORT_COMP_DW8(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW8(phy), val);
}
- val = I915_READ(ICL_PORT_COMP_DW0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
val |= COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
- val = I915_READ(ICL_PORT_CL_DW5(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= CL_POWER_DOWN_ENABLE;
- I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
}
}
@@ -352,7 +372,8 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (phy == PHY_A &&
!icl_combo_phy_verify_state(dev_priv, phy))
- DRM_WARN("Combo PHY %c HW state changed unexpectedly\n",
+ drm_warn(&dev_priv->drm,
+ "Combo PHY %c HW state changed unexpectedly\n",
phy_name(phy));
/*
@@ -363,14 +384,14 @@ static void icl_combo_phys_uninit(struct drm_i915_private *dev_priv)
if (IS_ELKHARTLAKE(dev_priv) && phy == PHY_C)
goto skip_phy_misc;
- val = I915_READ(ICL_PHY_MISC(phy));
+ val = intel_de_read(dev_priv, ICL_PHY_MISC(phy));
val |= ICL_PHY_MISC_DE_IO_COMP_PWR_DOWN;
- I915_WRITE(ICL_PHY_MISC(phy), val);
+ intel_de_write(dev_priv, ICL_PHY_MISC(phy), val);
skip_phy_misc:
- val = I915_READ(ICL_PORT_COMP_DW0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_COMP_DW0(phy));
val &= ~COMP_INIT;
- I915_WRITE(ICL_PORT_COMP_DW0(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_COMP_DW0(phy), val);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c
index 1133c4e97bb4..903e49659f56 100644
--- a/drivers/gpu/drm/i915/display/intel_connector.c
+++ b/drivers/gpu/drm/i915/display/intel_connector.c
@@ -153,7 +153,7 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
bool intel_connector_get_hw_state(struct intel_connector *connector)
{
enum pipe pipe = 0;
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
return encoder->get_hw_state(encoder, &pipe);
}
@@ -162,7 +162,8 @@ enum pipe intel_connector_get_pipe(struct intel_connector *connector)
{
struct drm_device *dev = connector->base.dev;
- WARN_ON(!drm_modeset_is_locked(&dev->mode_config.connection_mutex));
+ drm_WARN_ON(dev,
+ !drm_modeset_is_locked(&dev->mode_config.connection_mutex));
if (!connector->base.state->crtc)
return INVALID_PIPE;
diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c
index f976b800b245..78f9b6cde810 100644
--- a/drivers/gpu/drm/i915/display/intel_crt.c
+++ b/drivers/gpu/drm/i915/display/intel_crt.c
@@ -32,7 +32,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -75,7 +74,7 @@ bool intel_crt_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(adpa_reg);
+ val = intel_de_read(dev_priv, adpa_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -112,7 +111,7 @@ static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
struct intel_crt *crt = intel_encoder_to_crt(encoder);
u32 tmp, flags = 0;
- tmp = I915_READ(crt->adpa_reg);
+ tmp = intel_de_read(dev_priv, crt->adpa_reg);
if (tmp & ADPA_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -184,7 +183,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
adpa |= ADPA_PIPE_SEL(crtc->pipe);
if (!HAS_PCH_SPLIT(dev_priv))
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
switch (mode) {
case DRM_MODE_DPMS_ON:
@@ -201,7 +200,7 @@ static void intel_crt_set_dpms(struct intel_encoder *encoder,
break;
}
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
}
static void intel_disable_crt(struct intel_encoder *encoder,
@@ -230,7 +229,7 @@ static void hsw_disable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -258,7 +257,7 @@ static void hsw_post_disable_crt(struct intel_encoder *encoder,
intel_ddi_fdi_post_disable(encoder, old_crtc_state, old_conn_state);
- WARN_ON(!old_crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !old_crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, true);
}
@@ -269,7 +268,7 @@ static void hsw_pre_pll_enable_crt(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_set_pch_fifo_underrun_reporting(dev_priv, PIPE_A, false);
}
@@ -282,7 +281,7 @@ static void hsw_pre_enable_crt(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false);
@@ -299,7 +298,13 @@ static void hsw_enable_crt(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
enum pipe pipe = crtc->pipe;
- WARN_ON(!crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, !crtc_state->has_pch_encoder);
+
+ intel_enable_pipe(crtc_state);
+
+ lpt_pch_enable(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
intel_crt_set_dpms(encoder, crtc_state, DRM_MODE_DPMS_ON);
@@ -414,7 +419,8 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder,
/* LPT FDI RX only supports 8bpc. */
if (HAS_PCH_LPT(dev_priv)) {
if (pipe_config->bw_constrained && pipe_config->pipe_bpp < 24) {
- DRM_DEBUG_KMS("LPT only supports 24bpp\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LPT only supports 24bpp\n");
return -EINVAL;
}
@@ -442,34 +448,37 @@ static bool ilk_crt_detect_hotplug(struct drm_connector *connector)
crt->force_hotplug_required = false;
- save_adpa = adpa = I915_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
if (turn_off_dac)
adpa &= ~ADPA_DAC_ENABLE;
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
if (intel_de_wait_for_clear(dev_priv,
crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER,
1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
if (turn_off_dac) {
- I915_WRITE(crt->adpa_reg, save_adpa);
- POSTING_READ(crt->adpa_reg);
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
}
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- DRM_DEBUG_KMS("ironlake hotplug adpa=0x%x, result %d\n", adpa, ret);
+ drm_dbg_kms(&dev_priv->drm, "ironlake hotplug adpa=0x%x, result %d\n",
+ adpa, ret);
return ret;
}
@@ -498,27 +507,30 @@ static bool valleyview_crt_detect_hotplug(struct drm_connector *connector)
*/
reenable_hpd = intel_hpd_disable(dev_priv, crt->base.hpd_pin);
- save_adpa = adpa = I915_READ(crt->adpa_reg);
- DRM_DEBUG_KMS("trigger hotplug detect cycle: adpa=0x%x\n", adpa);
+ save_adpa = adpa = intel_de_read(dev_priv, crt->adpa_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "trigger hotplug detect cycle: adpa=0x%x\n", adpa);
adpa |= ADPA_CRT_HOTPLUG_FORCE_TRIGGER;
- I915_WRITE(crt->adpa_reg, adpa);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
if (intel_de_wait_for_clear(dev_priv, crt->adpa_reg,
ADPA_CRT_HOTPLUG_FORCE_TRIGGER, 1000)) {
- DRM_DEBUG_KMS("timed out waiting for FORCE_TRIGGER");
- I915_WRITE(crt->adpa_reg, save_adpa);
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_TRIGGER");
+ intel_de_write(dev_priv, crt->adpa_reg, save_adpa);
}
/* Check the status to see if both blue and green are on now */
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
if ((adpa & ADPA_CRT_HOTPLUG_MONITOR_MASK) != 0)
ret = true;
else
ret = false;
- DRM_DEBUG_KMS("valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "valleyview hotplug adpa=0x%x, result %d\n", adpa, ret);
if (reenable_hpd)
intel_hpd_enable(dev_priv, crt->base.hpd_pin);
@@ -558,15 +570,16 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
/* wait for FORCE_DETECT to go off */
if (intel_de_wait_for_clear(dev_priv, PORT_HOTPLUG_EN,
CRT_HOTPLUG_FORCE_DETECT, 1000))
- DRM_DEBUG_KMS("timed out waiting for FORCE_DETECT to go off");
+ drm_dbg_kms(&dev_priv->drm,
+ "timed out waiting for FORCE_DETECT to go off");
}
- stat = I915_READ(PORT_HOTPLUG_STAT);
+ stat = intel_de_read(dev_priv, PORT_HOTPLUG_STAT);
if ((stat & CRT_HOTPLUG_MONITOR_MASK) != CRT_HOTPLUG_MONITOR_NONE)
ret = true;
/* clear the interrupt we just generated, if any */
- I915_WRITE(PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
+ intel_de_write(dev_priv, PORT_HOTPLUG_STAT, CRT_HOTPLUG_INT_STATUS);
i915_hotplug_interrupt_update(dev_priv, CRT_HOTPLUG_FORCE_DETECT, 0);
@@ -629,13 +642,16 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
* have to check the EDID input spec of the attached device.
*/
if (!is_digital) {
- DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via DDC:0x50 [EDID]\n");
ret = true;
} else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
}
} else {
- DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via DDC:0x50 [no valid EDID found]\n");
}
kfree(edid);
@@ -660,7 +676,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
u8 st00;
enum drm_connector_status status;
- DRM_DEBUG_KMS("starting load-detect on CRT\n");
+ drm_dbg_kms(&dev_priv->drm, "starting load-detect on CRT\n");
bclrpat_reg = BCLRPAT(pipe);
vtotal_reg = VTOTAL(pipe);
@@ -706,7 +722,7 @@ intel_crt_load_detect(struct intel_crt *crt, u32 pipe)
* Yes, this will flicker
*/
if (vblank_start <= vactive && vblank_end >= vtotal) {
- u32 vsync = I915_READ(vsync_reg);
+ u32 vsync = intel_de_read(dev_priv, vsync_reg);
u32 vsync_start = (vsync & 0xffff) + 1;
vblank_start = vsync_start;
@@ -801,9 +817,9 @@ intel_crt_detect(struct drm_connector *connector,
int status, ret;
struct intel_load_detect_pipe tmp;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] force=%d\n",
- connector->base.id, connector->name,
- force);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] force=%d\n",
+ connector->base.id, connector->name,
+ force);
if (i915_modparams.load_detect_test) {
wakeref = intel_display_power_get(dev_priv,
@@ -824,11 +840,13 @@ intel_crt_detect(struct drm_connector *connector,
* only trust an assertion that the monitor is connected.
*/
if (intel_crt_detect_hotplug(connector)) {
- DRM_DEBUG_KMS("CRT detected via hotplug\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT detected via hotplug\n");
status = connector_status_connected;
goto out;
} else
- DRM_DEBUG_KMS("CRT not detected via hotplug\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CRT not detected via hotplug\n");
}
if (intel_crt_detect_ddc(connector)) {
@@ -918,13 +936,13 @@ void intel_crt_reset(struct drm_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 5) {
u32 adpa;
- adpa = I915_READ(crt->adpa_reg);
+ adpa = intel_de_read(dev_priv, crt->adpa_reg);
adpa &= ~ADPA_CRT_HOTPLUG_MASK;
adpa |= ADPA_HOTPLUG_BITS;
- I915_WRITE(crt->adpa_reg, adpa);
- POSTING_READ(crt->adpa_reg);
+ intel_de_write(dev_priv, crt->adpa_reg, adpa);
+ intel_de_posting_read(dev_priv, crt->adpa_reg);
- DRM_DEBUG_KMS("crt adpa set to 0x%x\n", adpa);
+ drm_dbg_kms(&dev_priv->drm, "crt adpa set to 0x%x\n", adpa);
crt->force_hotplug_required = true;
}
@@ -969,7 +987,7 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
else
adpa_reg = ADPA;
- adpa = I915_READ(adpa_reg);
+ adpa = intel_de_read(dev_priv, adpa_reg);
if ((adpa & ADPA_DAC_ENABLE) == 0) {
/*
* On some machines (some IVB at least) CRT can be
@@ -979,11 +997,11 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
* take. So the only way to tell is to attempt to enable
* it and see what happens.
*/
- I915_WRITE(adpa_reg, adpa | ADPA_DAC_ENABLE |
- ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
- if ((I915_READ(adpa_reg) & ADPA_DAC_ENABLE) == 0)
+ intel_de_write(dev_priv, adpa_reg,
+ adpa | ADPA_DAC_ENABLE | ADPA_HSYNC_CNTL_DISABLE | ADPA_VSYNC_CNTL_DISABLE);
+ if ((intel_de_read(dev_priv, adpa_reg) & ADPA_DAC_ENABLE) == 0)
return;
- I915_WRITE(adpa_reg, adpa);
+ intel_de_write(dev_priv, adpa_reg, adpa);
}
crt = kzalloc(sizeof(struct intel_crt), GFP_KERNEL);
@@ -1027,6 +1045,9 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
!dmi_check_system(intel_spurious_crt_detect)) {
crt->base.hpd_pin = HPD_CRT;
crt->base.hotplug = intel_encoder_hotplug;
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
+ } else {
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
}
if (HAS_DDI(dev_priv)) {
@@ -1057,14 +1078,6 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
drm_connector_helper_add(connector, &intel_crt_connector_helper_funcs);
- if (!I915_HAS_HOTPLUG(dev_priv))
- intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
- /*
- * Configure the automatic hotplug detection stuff
- */
- crt->force_hotplug_required = false;
-
/*
* TODO: find a proper way to discover whether we need to set the the
* polarity and link reversal bits or not, instead of relying on the
@@ -1074,7 +1087,8 @@ void intel_crt_init(struct drm_i915_private *dev_priv)
u32 fdi_config = FDI_RX_POLARITY_REVERSED_LPT |
FDI_RX_LINK_REVERSAL_OVERRIDE;
- dev_priv->fdi_rx_config = I915_READ(FDI_RX_CTL(PIPE_A)) & fdi_config;
+ dev_priv->fdi_rx_config = intel_de_read(dev_priv,
+ FDI_RX_CTL(PIPE_A)) & fdi_config;
}
intel_crt_reset(&crt->base.base);
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/display/intel_csr.c
index 09870a31b4f0..3112572cfb7d 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/display/intel_csr.c
@@ -27,6 +27,7 @@
#include "i915_drv.h"
#include "i915_reg.h"
#include "intel_csr.h"
+#include "intel_de.h"
/**
* DOC: csr support for dmc
@@ -39,8 +40,8 @@
#define GEN12_CSR_MAX_FW_SIZE ICL_CSR_MAX_FW_SIZE
-#define TGL_CSR_PATH "i915/tgl_dmc_ver2_04.bin"
-#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 4)
+#define TGL_CSR_PATH "i915/tgl_dmc_ver2_06.bin"
+#define TGL_CSR_VERSION_REQUIRED CSR_VERSION(2, 6)
#define TGL_CSR_MAX_FW_SIZE 0x6000
MODULE_FIRMWARE(TGL_CSR_PATH);
@@ -276,11 +277,11 @@ static void gen9_set_dc_state_debugmask(struct drm_i915_private *dev_priv)
mask |= DC_STATE_DEBUG_MASK_CORES;
/* The bit below never needs to be cleared afterwards */
- val = I915_READ(DC_STATE_DEBUG);
+ val = intel_de_read(dev_priv, DC_STATE_DEBUG);
if ((val & mask) != mask) {
val |= mask;
- I915_WRITE(DC_STATE_DEBUG, val);
- POSTING_READ(DC_STATE_DEBUG);
+ intel_de_write(dev_priv, DC_STATE_DEBUG, val);
+ intel_de_posting_read(dev_priv, DC_STATE_DEBUG);
}
}
@@ -298,12 +299,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
u32 i, fw_size;
if (!HAS_CSR(dev_priv)) {
- DRM_ERROR("No CSR support available for this platform\n");
+ drm_err(&dev_priv->drm,
+ "No CSR support available for this platform\n");
return;
}
if (!dev_priv->csr.dmc_payload) {
- DRM_ERROR("Tried to program CSR with empty payload\n");
+ drm_err(&dev_priv->drm,
+ "Tried to program CSR with empty payload\n");
return;
}
@@ -313,13 +316,14 @@ void intel_csr_load_program(struct drm_i915_private *dev_priv)
preempt_disable();
for (i = 0; i < fw_size; i++)
- I915_WRITE_FW(CSR_PROGRAM(i), payload[i]);
+ intel_uncore_write_fw(&dev_priv->uncore, CSR_PROGRAM(i),
+ payload[i]);
preempt_enable();
for (i = 0; i < dev_priv->csr.mmio_count; i++) {
- I915_WRITE(dev_priv->csr.mmioaddr[i],
- dev_priv->csr.mmiodata[i]);
+ intel_de_write(dev_priv, dev_priv->csr.mmioaddr[i],
+ dev_priv->csr.mmiodata[i]);
}
dev_priv->csr.dc_state = 0;
@@ -607,7 +611,7 @@ static void parse_csr_fw(struct drm_i915_private *dev_priv,
static void intel_csr_runtime_pm_get(struct drm_i915_private *dev_priv)
{
- WARN_ON(dev_priv->csr.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
dev_priv->csr.wakeref =
intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
}
@@ -636,16 +640,16 @@ static void csr_load_work_fn(struct work_struct *work)
intel_csr_load_program(dev_priv);
intel_csr_runtime_pm_put(dev_priv);
- DRM_INFO("Finished loading DMC firmware %s (v%u.%u)\n",
- dev_priv->csr.fw_path,
- CSR_VERSION_MAJOR(csr->version),
+ drm_info(&dev_priv->drm,
+ "Finished loading DMC firmware %s (v%u.%u)\n",
+ dev_priv->csr.fw_path, CSR_VERSION_MAJOR(csr->version),
CSR_VERSION_MINOR(csr->version));
} else {
- dev_notice(dev_priv->drm.dev,
+ drm_notice(&dev_priv->drm,
"Failed to load DMC firmware %s."
" Disabling runtime power management.\n",
csr->fw_path);
- dev_notice(dev_priv->drm.dev, "DMC firmware homepage: %s",
+ drm_notice(&dev_priv->drm, "DMC firmware homepage: %s",
INTEL_UC_FIRMWARE_URL);
}
@@ -712,7 +716,8 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
if (i915_modparams.dmc_firmware_path) {
if (strlen(i915_modparams.dmc_firmware_path) == 0) {
csr->fw_path = NULL;
- DRM_INFO("Disabling CSR firmware and runtime PM\n");
+ drm_info(&dev_priv->drm,
+ "Disabling CSR firmware and runtime PM\n");
return;
}
@@ -722,11 +727,12 @@ void intel_csr_ucode_init(struct drm_i915_private *dev_priv)
}
if (csr->fw_path == NULL) {
- DRM_DEBUG_KMS("No known CSR firmware for platform, disabling runtime PM\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No known CSR firmware for platform, disabling runtime PM\n");
return;
}
- DRM_DEBUG_KMS("Loading %s\n", csr->fw_path);
+ drm_dbg_kms(&dev_priv->drm, "Loading %s\n", csr->fw_path);
schedule_work(&dev_priv->csr.work);
}
@@ -783,7 +789,7 @@ void intel_csr_ucode_fini(struct drm_i915_private *dev_priv)
return;
intel_csr_ucode_suspend(dev_priv);
- WARN_ON(dev_priv->csr.wakeref);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->csr.wakeref);
kfree(dev_priv->csr.dmc_payload);
}
diff --git a/drivers/gpu/drm/i915/intel_csr.h b/drivers/gpu/drm/i915/display/intel_csr.h
index 03c64f8af7ab..03c64f8af7ab 100644
--- a/drivers/gpu/drm/i915/intel_csr.h
+++ b/drivers/gpu/drm/i915/display/intel_csr.h
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c
index 33f1dc3d7c1a..73d0f4648c06 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.c
+++ b/drivers/gpu/drm/i915/display/intel_ddi.c
@@ -568,6 +568,20 @@ static const struct cnl_ddi_buf_trans icl_combo_phy_ddi_translations_hdmi[] = {
{ 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 850 3.0 */
};
+static const struct cnl_ddi_buf_trans ehl_combo_phy_ddi_translations_hbr2_hbr3[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x33, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x47, 0x36, 0x00, 0x09 }, /* 350 500 3.1 */
+ { 0xC, 0x64, 0x30, 0x00, 0x0F }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2C, 0x00, 0x13 }, /* 350 900 8.2 */
+ { 0xA, 0x46, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x64, 0x36, 0x00, 0x09 }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x30, 0x00, 0x0F }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3F, 0x00, 0x00 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x37, 0x00, 0x08 }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
struct icl_mg_phy_ddi_buf_trans {
u32 cri_txdeemph_override_5_0;
u32 cri_txdeemph_override_11_6;
@@ -622,6 +636,34 @@ static const struct tgl_dkl_phy_ddi_buf_trans tgl_dkl_phy_hdmi_ddi_trans[] = {
{ 0x0, 0x0, 0xA }, /* 10 Full -3 dB */
};
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x32, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x71, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7D, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x4C, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x73, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x6C, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7F, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
+static const struct cnl_ddi_buf_trans tgl_combo_phy_ddi_translations_dp_hbr2[] = {
+ /* NT mV Trans mV db */
+ { 0xA, 0x35, 0x3F, 0x00, 0x00 }, /* 350 350 0.0 */
+ { 0xA, 0x4F, 0x37, 0x00, 0x08 }, /* 350 500 3.1 */
+ { 0xC, 0x63, 0x2F, 0x00, 0x10 }, /* 350 700 6.0 */
+ { 0x6, 0x7F, 0x2B, 0x00, 0x14 }, /* 350 900 8.2 */
+ { 0xA, 0x47, 0x3F, 0x00, 0x00 }, /* 500 500 0.0 */
+ { 0xC, 0x63, 0x34, 0x00, 0x0B }, /* 500 700 2.9 */
+ { 0x6, 0x7F, 0x2F, 0x00, 0x10 }, /* 500 900 5.1 */
+ { 0xC, 0x61, 0x3C, 0x00, 0x03 }, /* 650 700 0.6 */
+ { 0x6, 0x7B, 0x35, 0x00, 0x0A }, /* 600 900 3.5 */
+ { 0x6, 0x7F, 0x3F, 0x00, 0x00 }, /* 900 900 0.0 */
+};
+
static const struct ddi_buf_trans *
bdw_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
@@ -818,7 +860,7 @@ bxt_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_hdmi_0_85V);
@@ -839,7 +881,7 @@ cnl_get_buf_trans_hdmi(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (voltage == VOLTAGE_INFO_0_85V) {
*n_entries = ARRAY_SIZE(cnl_ddi_translations_dp_0_85V);
@@ -860,7 +902,7 @@ cnl_get_buf_trans_dp(struct drm_i915_private *dev_priv, int *n_entries)
static const struct cnl_ddi_buf_trans *
cnl_get_buf_trans_edp(struct drm_i915_private *dev_priv, int *n_entries)
{
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
if (dev_priv->vbt.edp.low_vswing) {
if (voltage == VOLTAGE_INFO_0_85V) {
@@ -901,15 +943,42 @@ icl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
return icl_combo_phy_ddi_translations_dp_hbr2;
}
-static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port port)
+static const struct cnl_ddi_buf_trans *
+ehl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
{
- struct ddi_vbt_port_info *port_info = &dev_priv->vbt.ddi_port_info[port];
+ if (type == INTEL_OUTPUT_DP && rate > 270000) {
+ *n_entries = ARRAY_SIZE(ehl_combo_phy_ddi_translations_hbr2_hbr3);
+ return ehl_combo_phy_ddi_translations_hbr2_hbr3;
+ }
+
+ return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+}
+
+static const struct cnl_ddi_buf_trans *
+tgl_get_combo_buf_trans(struct drm_i915_private *dev_priv, int type, int rate,
+ int *n_entries)
+{
+ if (type != INTEL_OUTPUT_DP) {
+ return icl_get_combo_buf_trans(dev_priv, type, rate, n_entries);
+ } else if (rate > 270000) {
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr2);
+ return tgl_combo_phy_ddi_translations_dp_hbr2;
+ }
+
+ *n_entries = ARRAY_SIZE(tgl_combo_phy_ddi_translations_dp_hbr);
+ return tgl_combo_phy_ddi_translations_dp_hbr;
+}
+
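A compact standalone sketch of the rate-based selection the new TGL helper above implements (the 270000 kHz threshold is taken from the code; the table names in the output are placeholders):

#include <stdio.h>

/* DP links faster than HBR (270000 kHz) get the HBR2 table;
 * non-DP outputs fall back to the ICL tables. */
static const char *pick_table(int is_dp, int rate_khz)
{
	if (!is_dp)
		return "icl fallback table";
	return rate_khz > 270000 ? "hbr2 table" : "hbr table";
}

int main(void)
{
	printf("%s\n", pick_table(1, 540000)); /* HBR2 link  */
	printf("%s\n", pick_table(1, 270000)); /* HBR link   */
	printf("%s\n", pick_table(0, 0));      /* HDMI / eDP */
	return 0;
}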
+static int intel_ddi_hdmi_level(struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int n_entries, level, default_entry;
- enum phy phy = intel_port_to_phy(dev_priv, port);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
+ tgl_get_combo_buf_trans(dev_priv, INTEL_OUTPUT_HDMI,
0, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_hdmi_ddi_trans);
@@ -937,19 +1006,18 @@ static int intel_ddi_hdmi_level(struct drm_i915_private *dev_priv, enum port por
intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
default_entry = 6;
} else {
- WARN(1, "ddi translation table missing\n");
+ drm_WARN(&dev_priv->drm, 1, "ddi translation table missing\n");
return 0;
}
- if (WARN_ON_ONCE(n_entries == 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, n_entries == 0))
return 0;
- if (port_info->hdmi_level_shift_set)
- level = port_info->hdmi_level_shift;
- else
+ level = intel_bios_hdmi_level_shift(encoder);
+ if (level < 0)
level = default_entry;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
return level;
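A minimal sketch of the fallback logic above (standalone, with assumed table sizes): prefer the VBT-provided level shift when intel_bios_hdmi_level_shift() returns one, otherwise use the platform default, then clamp to the translation table.

#include <stdio.h>

static int pick_level(int vbt_level, int default_entry, int n_entries)
{
	/* a negative VBT value means "no override", as in the code above */
	int level = vbt_level >= 0 ? vbt_level : default_entry;

	if (level >= n_entries)
		level = n_entries - 1;
	return level;
}

int main(void)
{
	printf("%d\n", pick_level(-1, 6, 10)); /* no VBT override -> default 6 */
	printf("%d\n", pick_level(11, 6, 10)); /* out of range    -> clamp to 9 */
	return 0;
}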
@@ -980,15 +1048,14 @@ static void intel_prepare_dp_ddi_buffers(struct intel_encoder *encoder,
&n_entries);
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) &&
- dev_priv->vbt.ddi_port_info[port].dp_boost_level)
+ if (IS_GEN9_BC(dev_priv) && intel_bios_dp_boost_level(encoder))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
for (i = 0; i < n_entries; i++) {
- I915_WRITE(DDI_BUF_TRANS_LO(port, i),
- ddi_translations[i].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, i),
- ddi_translations[i].trans2);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, i),
+ ddi_translations[i].trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, i),
+ ddi_translations[i].trans2);
}
}
@@ -1008,21 +1075,20 @@ static void intel_prepare_hdmi_ddi_buffers(struct intel_encoder *encoder,
ddi_translations = intel_ddi_get_buf_trans_hdmi(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* If we're boosting the current, set bit 31 of trans1 */
- if (IS_GEN9_BC(dev_priv) &&
- dev_priv->vbt.ddi_port_info[port].hdmi_boost_level)
+ if (IS_GEN9_BC(dev_priv) && intel_bios_hdmi_boost_level(encoder))
iboost_bit = DDI_BUF_BALANCE_LEG_ENABLE;
/* Entry 9 is for HDMI: */
- I915_WRITE(DDI_BUF_TRANS_LO(port, 9),
- ddi_translations[level].trans1 | iboost_bit);
- I915_WRITE(DDI_BUF_TRANS_HI(port, 9),
- ddi_translations[level].trans2);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_LO(port, 9),
+ ddi_translations[level].trans1 | iboost_bit);
+ intel_de_write(dev_priv, DDI_BUF_TRANS_HI(port, 9),
+ ddi_translations[level].trans2);
}
static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
@@ -1033,7 +1099,7 @@ static void intel_wait_ddi_buf_idle(struct drm_i915_private *dev_priv,
for (i = 0; i < 16; i++) {
udelay(1);
- if (I915_READ(reg) & DDI_BUF_IS_IDLE)
+ if (intel_de_read(dev_priv, reg) & DDI_BUF_IS_IDLE)
return;
}
DRM_ERROR("Timeout waiting for DDI BUF %c idle bit\n", port_name(port));
@@ -1124,70 +1190,64 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
*
* WaFDIAutoLinkSetTimingOverrride:hsw
*/
- I915_WRITE(FDI_RX_MISC(PIPE_A), FDI_RX_PWRDN_LANE1_VAL(2) |
- FDI_RX_PWRDN_LANE0_VAL(2) |
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A),
+ FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2) | FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
/* Enable the PCH Receiver FDI PLL */
rx_ctl_val = dev_priv->fdi_rx_config | FDI_RX_ENHANCE_FRAME_ENABLE |
FDI_RX_PLL_ENABLE |
FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
udelay(220);
/* Switch from Rawclk to PCDclk */
rx_ctl_val |= FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
/* Configure Port Clock Select */
ddi_pll_sel = hsw_pll_to_ddi_pll_sel(crtc_state->shared_dpll);
- I915_WRITE(PORT_CLK_SEL(PORT_E), ddi_pll_sel);
- WARN_ON(ddi_pll_sel != PORT_CLK_SEL_SPLL);
+ intel_de_write(dev_priv, PORT_CLK_SEL(PORT_E), ddi_pll_sel);
+ drm_WARN_ON(&dev_priv->drm, ddi_pll_sel != PORT_CLK_SEL_SPLL);
/* Start the training, iterating through available voltages and emphasis,
* testing each value twice. */
for (i = 0; i < ARRAY_SIZE(hsw_ddi_translations_fdi) * 2; i++) {
/* Configure DP_TP_CTL with auto-training */
- I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_LINK_TRAIN_PAT1 |
- DP_TP_CTL_ENABLE);
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_LINK_TRAIN_PAT1 | DP_TP_CTL_ENABLE);
/* Configure and enable DDI_BUF_CTL for DDI E with next voltage.
* DDI E does not support port reversal; the functionality is
* achieved on the PCH side in FDI_RX_CTL, so no need to set the
* port reversal bit */
- I915_WRITE(DDI_BUF_CTL(PORT_E),
- DDI_BUF_CTL_ENABLE |
- ((crtc_state->fdi_lanes - 1) << 1) |
- DDI_BUF_TRANS_SELECT(i / 2));
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E),
+ DDI_BUF_CTL_ENABLE | ((crtc_state->fdi_lanes - 1) << 1) | DDI_BUF_TRANS_SELECT(i / 2));
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
udelay(600);
/* Program PCH FDI Receiver TU */
- I915_WRITE(FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(PIPE_A), TU_SIZE(64));
/* Enable PCH FDI Receiver with auto-training */
rx_ctl_val |= FDI_RX_ENABLE | FDI_LINK_TRAIN_AUTO;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
/* Wait for FDI receiver lane calibration */
udelay(30);
/* Unset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(FDI_RX_MISC(PIPE_A));
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
- I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
- POSTING_READ(FDI_RX_MISC(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
/* Wait for FDI auto training time */
udelay(5);
- temp = I915_READ(DP_TP_STATUS(PORT_E));
+ temp = intel_de_read(dev_priv, DP_TP_STATUS(PORT_E));
if (temp & DP_TP_STATUS_AUTOTRAIN_DONE) {
DRM_DEBUG_KMS("FDI link training done on step %d\n", i);
break;
@@ -1203,37 +1263,34 @@ void hsw_fdi_link_train(struct intel_encoder *encoder,
}
rx_ctl_val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), rx_ctl_val);
- POSTING_READ(FDI_RX_CTL(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), rx_ctl_val);
+ intel_de_posting_read(dev_priv, FDI_RX_CTL(PIPE_A));
- temp = I915_READ(DDI_BUF_CTL(PORT_E));
+ temp = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_E));
temp &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(PORT_E), temp);
- POSTING_READ(DDI_BUF_CTL(PORT_E));
+ intel_de_write(dev_priv, DDI_BUF_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(PORT_E));
/* Disable DP_TP_CTL and FDI_RX_CTL and retry */
- temp = I915_READ(DP_TP_CTL(PORT_E));
+ temp = intel_de_read(dev_priv, DP_TP_CTL(PORT_E));
temp &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
temp |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(DP_TP_CTL(PORT_E), temp);
- POSTING_READ(DP_TP_CTL(PORT_E));
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E), temp);
+ intel_de_posting_read(dev_priv, DP_TP_CTL(PORT_E));
intel_wait_ddi_buf_idle(dev_priv, PORT_E);
/* Reset FDI_RX_MISC pwrdn lanes */
- temp = I915_READ(FDI_RX_MISC(PIPE_A));
+ temp = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
temp &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
temp |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), temp);
- POSTING_READ(FDI_RX_MISC(PIPE_A));
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), temp);
+ intel_de_posting_read(dev_priv, FDI_RX_MISC(PIPE_A));
}
/* Enable normal pixel sending for FDI */
- I915_WRITE(DP_TP_CTL(PORT_E),
- DP_TP_CTL_FDI_AUTOTRAIN |
- DP_TP_CTL_LINK_TRAIN_NORMAL |
- DP_TP_CTL_ENHANCED_FRAME_ENABLE |
- DP_TP_CTL_ENABLE);
+ intel_de_write(dev_priv, DP_TP_CTL(PORT_E),
+ DP_TP_CTL_FDI_AUTOTRAIN | DP_TP_CTL_LINK_TRAIN_NORMAL | DP_TP_CTL_ENHANCED_FRAME_ENABLE | DP_TP_CTL_ENABLE);
}
static void intel_ddi_init_dp_buf_reg(struct intel_encoder *encoder)
@@ -1260,175 +1317,18 @@ intel_ddi_get_crtc_encoder(struct intel_crtc *crtc)
}
if (num_encoders != 1)
- WARN(1, "%d encoders on crtc for pipe %c\n", num_encoders,
- pipe_name(crtc->pipe));
+ drm_WARN(dev, 1, "%d encoders on crtc for pipe %c\n",
+ num_encoders,
+ pipe_name(crtc->pipe));
BUG_ON(ret == NULL);
return ret;
}
-static int hsw_ddi_calc_wrpll_link(struct drm_i915_private *dev_priv,
- i915_reg_t reg)
-{
- int refclk;
- int n, p, r;
- u32 wrpll;
-
- wrpll = I915_READ(reg);
- switch (wrpll & WRPLL_REF_MASK) {
- case WRPLL_REF_SPECIAL_HSW:
- /*
- * muxed-SSC for BDW.
- * non-SSC for non-ULT HSW. Check FUSE_STRAP3
- * for the non-SSC reference frequency.
- */
- if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
- if (I915_READ(FUSE_STRAP3) & HSW_REF_CLK_SELECT)
- refclk = 24;
- else
- refclk = 135;
- break;
- }
- /* fall through */
- case WRPLL_REF_PCH_SSC:
- /*
- * We could calculate spread here, but our checking
- * code only cares about 5% accuracy, and spread is a max of
- * 0.5% downspread.
- */
- refclk = 135;
- break;
- case WRPLL_REF_LCPLL:
- refclk = 2700;
- break;
- default:
- MISSING_CASE(wrpll);
- return 0;
- }
-
- r = wrpll & WRPLL_DIVIDER_REF_MASK;
- p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
- n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
-
- /* Convert to KHz, p & r have a fixed point portion */
- return (refclk * n * 100) / (p * r);
-}
-
-static int skl_calc_wrpll_link(const struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq;
-
- p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
- p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
-
- if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
- p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR2_PDIV_1:
- p0 = 1;
- break;
- case DPLL_CFGCR2_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR2_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR2_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR2_KDIV_5:
- p2 = 5;
- break;
- case DPLL_CFGCR2_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR2_KDIV_3:
- p2 = 3;
- break;
- case DPLL_CFGCR2_KDIV_1:
- p2 = 1;
- break;
- }
-
- dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK)
- * 24 * 1000;
-
- dco_freq += (((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9)
- * 24 * 1000) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
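/*
 * (Sketch of what the deleted skl_calc_wrpll_link() computed, in kHz:
 *
 *	dco_freq   = (dco_int + dco_frac / 2^15) * 24000
 *	link_clock = dco_freq / (p0 * p1 * p2 * 5)
 *
 * with a 24 MHz reference clock. Illustrative numbers: dco_int = 360,
 * dco_frac = 0 and p0 * p1 * p2 = 16 give dco_freq = 8640000 kHz and
 * link_clock = 108000 kHz.)
 */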
-
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *pll_state)
-{
- u32 p0, p1, p2, dco_freq, ref_clock;
-
- p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
- p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
-
- if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
- p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
- DPLL_CFGCR1_QDIV_RATIO_SHIFT;
- else
- p1 = 1;
-
-
- switch (p0) {
- case DPLL_CFGCR1_PDIV_2:
- p0 = 2;
- break;
- case DPLL_CFGCR1_PDIV_3:
- p0 = 3;
- break;
- case DPLL_CFGCR1_PDIV_5:
- p0 = 5;
- break;
- case DPLL_CFGCR1_PDIV_7:
- p0 = 7;
- break;
- }
-
- switch (p2) {
- case DPLL_CFGCR1_KDIV_1:
- p2 = 1;
- break;
- case DPLL_CFGCR1_KDIV_2:
- p2 = 2;
- break;
- case DPLL_CFGCR1_KDIV_3:
- p2 = 3;
- break;
- }
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
- dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK)
- * ref_clock;
-
- dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
- DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
-
- if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
- return 0;
-
- return dco_freq / (p0 * p1 * p2 * 5);
-}
-
static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
enum port port)
{
- u32 val = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 val = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
switch (val) {
case DDI_CLK_SEL_NONE:
@@ -1447,77 +1347,6 @@ static int icl_calc_tbt_pll_link(struct drm_i915_private *dev_priv,
}
}
-static int icl_calc_mg_pll_link(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *pll_state)
-{
- u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
- u64 tmp;
-
- ref_clock = dev_priv->cdclk.hw.ref;
-
- if (INTEL_GEN(dev_priv) >= 12) {
- m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
- m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
- m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
- m2_frac = pll_state->mg_pll_bias &
- DKL_PLL_BIAS_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
- } else {
- m2_frac = 0;
- }
- } else {
- m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
- m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
-
- if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
- m2_frac = pll_state->mg_pll_div0 &
- MG_PLL_DIV0_FBDIV_FRAC_MASK;
- m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
- } else {
- m2_frac = 0;
- }
- }
-
- switch (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
- div1 = 2;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
- div1 = 3;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
- div1 = 5;
- break;
- case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
- div1 = 7;
- break;
- default:
- MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
- return 0;
- }
-
- div2 = (pll_state->mg_clktop2_hsclkctl &
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
- MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
-
- /* div2 value of 0 is the same as 1, i.e. no division */
- if (div2 == 0)
- div2 = 1;
-
- /*
- * Adjust the original formula to delay the division by 2^22 in order to
- * minimize possible rounding errors.
- */
- tmp = (u64)m1 * m2_int * ref_clock +
- (((u64)m1 * m2_frac * ref_clock) >> 22);
- tmp = div_u64(tmp, 5 * div1 * div2);
-
- return tmp;
-}
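/*
 * (Sketch of what the deleted icl_calc_mg_pll_link() computed, in kHz:
 *
 *	link_clock = m1 * (m2_int + m2_frac / 2^22) * ref_clock
 *	             / (5 * div1 * div2)
 *
 * keeping the m2 fractional part scaled by 2^22 until the final
 * division to limit integer rounding error. Illustrative numbers:
 * ref_clock = 38400, m1 = 4, m2_int = 14, m2_frac = 0, div1 = 2 and
 * div2 = 1 give 38400 * 4 * 14 / 10 = 215040 kHz.)
 */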
-
static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
{
int dotclock;
@@ -1543,214 +1372,22 @@ static void ddi_dotclock_get(struct intel_crtc_state *pipe_config)
pipe_config->hw.adjusted_mode.crtc_clock = dotclock;
}
-static void icl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- enum port port = encoder->port;
- enum phy phy = intel_port_to_phy(dev_priv, port);
- int link_clock;
-
- if (intel_phy_is_combo(dev_priv, phy)) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- enum intel_dpll_id pll_id = intel_get_shared_dpll_id(dev_priv,
- pipe_config->shared_dpll);
-
- if (pll_id == DPLL_ID_ICL_TBTPLL)
- link_clock = icl_calc_tbt_pll_link(dev_priv, port);
- else
- link_clock = icl_calc_mg_pll_link(dev_priv, pll_state);
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void cnl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- if (pll_state->cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
- link_clock = cnl_calc_wrpll_link(dev_priv, pll_state);
- } else {
- link_clock = pll_state->cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK;
-
- switch (link_clock) {
- case DPLL_CFGCR0_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CFGCR0_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CFGCR0_LINK_RATE_2700:
- link_clock = 270000;
- break;
- case DPLL_CFGCR0_LINK_RATE_3240:
- link_clock = 324000;
- break;
- case DPLL_CFGCR0_LINK_RATE_4050:
- link_clock = 405000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void skl_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct intel_dpll_hw_state *pll_state = &pipe_config->dpll_hw_state;
- int link_clock;
-
- /*
- * ctrl1 register is already shifted for each pll, just use 0 to get
- * the internal shift for each field
- */
- if (pll_state->ctrl1 & DPLL_CTRL1_HDMI_MODE(0)) {
- link_clock = skl_calc_wrpll_link(pll_state);
- } else {
- link_clock = pll_state->ctrl1 & DPLL_CTRL1_LINK_RATE_MASK(0);
- link_clock >>= DPLL_CTRL1_LINK_RATE_SHIFT(0);
-
- switch (link_clock) {
- case DPLL_CTRL1_LINK_RATE_810:
- link_clock = 81000;
- break;
- case DPLL_CTRL1_LINK_RATE_1080:
- link_clock = 108000;
- break;
- case DPLL_CTRL1_LINK_RATE_1350:
- link_clock = 135000;
- break;
- case DPLL_CTRL1_LINK_RATE_1620:
- link_clock = 162000;
- break;
- case DPLL_CTRL1_LINK_RATE_2160:
- link_clock = 216000;
- break;
- case DPLL_CTRL1_LINK_RATE_2700:
- link_clock = 270000;
- break;
- default:
- WARN(1, "Unsupported link rate\n");
- break;
- }
- link_clock *= 2;
- }
-
- pipe_config->port_clock = link_clock;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static void hsw_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- int link_clock = 0;
- u32 val, pll;
-
- val = hsw_pll_to_ddi_pll_sel(pipe_config->shared_dpll);
- switch (val & PORT_CLK_SEL_MASK) {
- case PORT_CLK_SEL_LCPLL_810:
- link_clock = 81000;
- break;
- case PORT_CLK_SEL_LCPLL_1350:
- link_clock = 135000;
- break;
- case PORT_CLK_SEL_LCPLL_2700:
- link_clock = 270000;
- break;
- case PORT_CLK_SEL_WRPLL1:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(0));
- break;
- case PORT_CLK_SEL_WRPLL2:
- link_clock = hsw_ddi_calc_wrpll_link(dev_priv, WRPLL_CTL(1));
- break;
- case PORT_CLK_SEL_SPLL:
- pll = I915_READ(SPLL_CTL) & SPLL_FREQ_MASK;
- if (pll == SPLL_FREQ_810MHz)
- link_clock = 81000;
- else if (pll == SPLL_FREQ_1350MHz)
- link_clock = 135000;
- else if (pll == SPLL_FREQ_2700MHz)
- link_clock = 270000;
- else {
- WARN(1, "bad spll freq\n");
- return;
- }
- break;
- default:
- WARN(1, "bad port clock sel\n");
- return;
- }
-
- pipe_config->port_clock = link_clock * 2;
-
- ddi_dotclock_get(pipe_config);
-}
-
-static int bxt_calc_pll_link(const struct intel_dpll_hw_state *pll_state)
-{
- struct dpll clock;
-
- clock.m1 = 2;
- clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
- if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
- clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
- clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
- clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
- clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
-
- return chv_calc_dpll_params(100000, &clock);
-}
-
-static void bxt_ddi_clock_get(struct intel_encoder *encoder,
- struct intel_crtc_state *pipe_config)
-{
- pipe_config->port_clock =
- bxt_calc_pll_link(&pipe_config->dpll_hw_state);
-
- ddi_dotclock_get(pipe_config);
-}
-
static void intel_ddi_clock_get(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
- if (INTEL_GEN(dev_priv) >= 11)
- icl_ddi_clock_get(encoder, pipe_config);
- else if (IS_CANNONLAKE(dev_priv))
- cnl_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_LP(dev_priv))
- bxt_ddi_clock_get(encoder, pipe_config);
- else if (IS_GEN9_BC(dev_priv))
- skl_ddi_clock_get(encoder, pipe_config);
- else if (INTEL_GEN(dev_priv) <= 8)
- hsw_ddi_clock_get(encoder, pipe_config);
+ if (intel_phy_is_tc(dev_priv, phy) &&
+ intel_get_shared_dpll_id(dev_priv, pipe_config->shared_dpll) ==
+ DPLL_ID_ICL_TBTPLL)
+ pipe_config->port_clock = icl_calc_tbt_pll_link(dev_priv,
+ encoder->port);
+ else
+ pipe_config->port_clock =
+ intel_dpll_get_freq(dev_priv, pipe_config->shared_dpll);
+
+ ddi_dotclock_get(pipe_config);
}
void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
@@ -1764,7 +1401,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (!intel_crtc_has_dp_encoder(crtc_state))
return;
- WARN_ON(transcoder_is_dsi(cpu_transcoder));
+ drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder));
temp = DP_MSA_MISC_SYNC_CLOCK;
@@ -1787,8 +1424,8 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
}
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
temp |= DP_MSA_MISC_COLOR_CEA_RGB;
@@ -1810,7 +1447,7 @@ void intel_ddi_set_dp_msa(const struct intel_crtc_state *crtc_state,
if (intel_dp_needs_vsc_sdp(crtc_state, conn_state))
temp |= DP_MSA_MISC_COLOR_VSC_SDP;
- I915_WRITE(TRANS_MSA_MISC(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_MSA_MISC(cpu_transcoder), temp);
}
/*
@@ -1904,7 +1541,8 @@ intel_ddi_transcoder_func_reg_val_get(const struct intel_crtc_state *crtc_state)
enum transcoder master;
master = crtc_state->mst_master_transcoder;
- WARN_ON(master == INVALID_TRANSCODER);
+ drm_WARN_ON(&dev_priv->drm,
+ master == INVALID_TRANSCODER);
temp |= TRANS_DDI_MST_TRANSPORT_SELECT(master);
}
} else {
@@ -1925,7 +1563,7 @@ void intel_ddi_enable_transcoder_func(const struct intel_crtc_state *crtc_state)
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
temp |= TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
/*
@@ -1942,7 +1580,7 @@ intel_ddi_config_transcoder_func(const struct intel_crtc_state *crtc_state)
temp = intel_ddi_transcoder_func_reg_val_get(crtc_state);
temp &= ~TRANS_DDI_FUNC_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), temp);
}
void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state)
@@ -1952,16 +1590,18 @@ void intel_ddi_disable_transcoder_func(const struct intel_crtc_state *crtc_state
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 val;
- val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
val &= ~TRANS_DDI_FUNC_ENABLE;
if (INTEL_GEN(dev_priv) >= 12) {
- if (!intel_dp_mst_is_master_trans(crtc_state))
- val &= ~TGL_TRANS_DDI_PORT_MASK;
+ if (!intel_dp_mst_is_master_trans(crtc_state)) {
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ }
} else {
- val &= ~TRANS_DDI_PORT_MASK;
+ val &= ~(TRANS_DDI_PORT_MASK | TRANS_DDI_MODE_SELECT_MASK);
}
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
if (dev_priv->quirks & QUIRK_INCREASE_DDI_DISABLED_TIME &&
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
@@ -1983,20 +1623,21 @@ int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
wakeref = intel_display_power_get_if_enabled(dev_priv,
intel_encoder->power_domain);
- if (WARN_ON(!wakeref))
+ if (drm_WARN_ON(dev, !wakeref))
return -ENXIO;
- if (WARN_ON(!intel_encoder->get_hw_state(intel_encoder, &pipe))) {
+ if (drm_WARN_ON(dev,
+ !intel_encoder->get_hw_state(intel_encoder, &pipe))) {
ret = -EIO;
goto out;
}
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(pipe));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(pipe));
if (enable)
tmp |= TRANS_DDI_HDCP_SIGNALLING;
else
tmp &= ~TRANS_DDI_HDCP_SIGNALLING;
- I915_WRITE(TRANS_DDI_FUNC_CTL(pipe), tmp);
+ intel_de_write(dev_priv, TRANS_DDI_FUNC_CTL(pipe), tmp);
out:
intel_display_power_put(dev_priv, intel_encoder->power_domain, wakeref);
return ret;
@@ -2006,7 +1647,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
int type = intel_connector->base.connector_type;
enum port port = encoder->port;
enum transcoder cpu_transcoder;
@@ -2030,7 +1671,7 @@ bool intel_ddi_connector_get_hw_state(struct intel_connector *intel_connector)
else
cpu_transcoder = (enum transcoder) pipe;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
@@ -2083,12 +1724,13 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
if (!wakeref)
return;
- tmp = I915_READ(DDI_BUF_CTL(port));
+ tmp = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
goto out;
if (HAS_TRANSCODER_EDP(dev_priv) && port == PORT_A) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
@@ -2128,7 +1770,8 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
ddi_select = TRANS_DDI_SELECT_PORT(port);
}
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
intel_display_power_put(dev_priv, POWER_DOMAIN_TRANSCODER(cpu_transcoder),
trans_wakeref);
@@ -2162,7 +1805,7 @@ static void intel_ddi_get_encoder_pipes(struct intel_encoder *encoder,
out:
if (*pipe_mask && IS_GEN9_LP(dev_priv)) {
- tmp = I915_READ(BXT_PHY_CTL(port));
+ tmp = intel_de_read(dev_priv, BXT_PHY_CTL(port));
if ((tmp & (BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK |
BXT_PHY_LANE_ENABLED)) != BXT_PHY_LANE_ENABLED)
@@ -2221,7 +1864,8 @@ static void intel_ddi_get_power_domains(struct intel_encoder *encoder,
* happen since fake-MST encoders don't set their get_power_domains()
* hook.
*/
- if (WARN_ON(intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)))
return;
dig_port = enc_to_dig_port(encoder);
@@ -2254,11 +1898,13 @@ void intel_ddi_enable_pipe_clock(const struct intel_crtc_state *crtc_state)
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_PORT(port));
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_PORT(port));
else
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_PORT(port));
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_PORT(port));
}
}
@@ -2269,11 +1915,13 @@ void intel_ddi_disable_pipe_clock(const struct intel_crtc_state *crtc_state)
if (cpu_transcoder != TRANSCODER_EDP) {
if (INTEL_GEN(dev_priv) >= 12)
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TGL_TRANS_CLK_SEL_DISABLED);
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TGL_TRANS_CLK_SEL_DISABLED);
else
- I915_WRITE(TRANS_CLK_SEL(cpu_transcoder),
- TRANS_CLK_SEL_DISABLED);
+ intel_de_write(dev_priv,
+ TRANS_CLK_SEL(cpu_transcoder),
+ TRANS_CLK_SEL_DISABLED);
}
}
@@ -2282,13 +1930,13 @@ static void _skl_ddi_set_iboost(struct drm_i915_private *dev_priv,
{
u32 tmp;
- tmp = I915_READ(DISPIO_CR_TX_BMU_CR0);
+ tmp = intel_de_read(dev_priv, DISPIO_CR_TX_BMU_CR0);
tmp &= ~(BALANCE_LEG_MASK(port) | BALANCE_LEG_DISABLE(port));
if (iboost)
tmp |= iboost << BALANCE_LEG_SHIFT(port);
else
tmp |= BALANCE_LEG_DISABLE(port);
- I915_WRITE(DISPIO_CR_TX_BMU_CR0, tmp);
+ intel_de_write(dev_priv, DISPIO_CR_TX_BMU_CR0, tmp);
}
static void skl_ddi_set_iboost(struct intel_encoder *encoder,
@@ -2300,9 +1948,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
u8 iboost;
if (type == INTEL_OUTPUT_HDMI)
- iboost = dev_priv->vbt.ddi_port_info[port].hdmi_boost_level;
+ iboost = intel_bios_hdmi_boost_level(encoder);
else
- iboost = dev_priv->vbt.ddi_port_info[port].dp_boost_level;
+ iboost = intel_bios_dp_boost_level(encoder);
if (iboost == 0) {
const struct ddi_buf_trans *ddi_translations;
@@ -2315,9 +1963,9 @@ static void skl_ddi_set_iboost(struct intel_encoder *encoder,
else
ddi_translations = intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
iboost = ddi_translations[level].i_boost;
@@ -2350,9 +1998,9 @@ static void bxt_ddi_vswing_sequence(struct intel_encoder *encoder,
else
ddi_translations = bxt_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
bxt_ddi_phy_set_signal_level(dev_priv, port,
@@ -2372,12 +2020,15 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 12) {
if (intel_phy_is_combo(dev_priv, phy))
- icl_get_combo_buf_trans(dev_priv, encoder->type,
+ tgl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
n_entries = ARRAY_SIZE(tgl_dkl_phy_dp_ddi_trans);
} else if (INTEL_GEN(dev_priv) == 11) {
- if (intel_phy_is_combo(dev_priv, phy))
+ if (IS_ELKHARTLAKE(dev_priv))
+ ehl_get_combo_buf_trans(dev_priv, encoder->type,
+ intel_dp->link_rate, &n_entries);
+ else if (intel_phy_is_combo(dev_priv, phy))
icl_get_combo_buf_trans(dev_priv, encoder->type,
intel_dp->link_rate, &n_entries);
else
@@ -2399,9 +2050,10 @@ u8 intel_ddi_dp_voltage_max(struct intel_encoder *encoder)
intel_ddi_get_buf_trans_dp(dev_priv, port, &n_entries);
}
- if (WARN_ON(n_entries < 1))
+ if (drm_WARN_ON(&dev_priv->drm, n_entries < 1))
n_entries = 1;
- if (WARN_ON(n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ n_entries > ARRAY_SIZE(index_to_dp_signal_levels)))
n_entries = ARRAY_SIZE(index_to_dp_signal_levels);
return index_to_dp_signal_levels[n_entries - 1] &
@@ -2444,52 +2096,52 @@ static void cnl_ddi_vswing_program(struct intel_encoder *encoder,
else
ddi_translations = cnl_get_buf_trans_dp(dev_priv, &n_entries);
- if (WARN_ON_ONCE(!ddi_translations))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !ddi_translations))
return;
- if (WARN_ON_ONCE(level >= n_entries))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, level >= n_entries))
level = n_entries - 1;
/* Set PORT_TX_DW5 Scaling Mode Sel to 010b. */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~SCALING_MODE_SEL_MASK;
val |= SCALING_MODE_SEL(2);
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(CNL_PORT_TX_DW2_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW2_LN0(port));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Rcomp scalar is fixed as 0x98 for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(CNL_PORT_TX_DW2_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW2_GRP(port), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen */
for (ln = 0; ln < 4; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* Program PORT_TX_DW5 */
/* All DW5 values are fixed for every table entry */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~RTERM_SELECT_MASK;
val |= RTERM_SELECT(6);
val |= TAP3_DISABLE;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* Program PORT_TX_DW7 */
- val = I915_READ(CNL_PORT_TX_DW7_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW7_LN0(port));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(CNL_PORT_TX_DW7_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW7_GRP(port), val);
}
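/*
 * (A note on the LN0/GRP access pattern above, inferred from the code
 * rather than from the spec: reads go through the per-lane _LN0 offset,
 * while writes to the _GRP offset broadcast the value to all lanes of
 * the port; PORT_TX_DW4 is the exception, written per lane via _LN(ln)
 * because its loadgen select bit differs from lane to lane.)
 */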
static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2515,12 +2167,12 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(CNL_PORT_PCS_DW1_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_PCS_DW1_LN0(port));
if (type != INTEL_OUTPUT_HDMI)
val |= COMMON_KEEPER_EN;
else
val &= ~COMMON_KEEPER_EN;
- I915_WRITE(CNL_PORT_PCS_DW1_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_PCS_DW1_GRP(port), val);
/* 2. Program loadgen select */
/*
@@ -2530,33 +2182,33 @@ static void cnl_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(CNL_PORT_TX_DW4_LN(ln, port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW4_LN(ln, port));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(CNL_PORT_TX_DW4_LN(ln, port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW4_LN(ln, port), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(CNL_PORT_CL1CM_DW5);
+ val = intel_de_read(dev_priv, CNL_PORT_CL1CM_DW5);
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(CNL_PORT_CL1CM_DW5, val);
+ intel_de_write(dev_priv, CNL_PORT_CL1CM_DW5, val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val &= ~TX_TRAINING_EN;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
/* 5. Program swing and de-emphasis */
cnl_ddi_vswing_program(encoder, level, type);
/* 6. Set training enable to trigger update */
- val = I915_READ(CNL_PORT_TX_DW5_LN0(port));
+ val = intel_de_read(dev_priv, CNL_PORT_TX_DW5_LN0(port));
val |= TX_TRAINING_EN;
- I915_WRITE(CNL_PORT_TX_DW5_GRP(port), val);
+ intel_de_write(dev_priv, CNL_PORT_TX_DW5_GRP(port), val);
}
static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
@@ -2567,8 +2219,15 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
u32 n_entries, val;
int ln;
- ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
- &n_entries);
+ if (INTEL_GEN(dev_priv) >= 12)
+ ddi_translations = tgl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
+ else if (IS_ELKHARTLAKE(dev_priv))
+ ddi_translations = ehl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
+ else
+ ddi_translations = icl_get_combo_buf_trans(dev_priv, type, rate,
+ &n_entries);
if (!ddi_translations)
return;
@@ -2578,41 +2237,41 @@ static void icl_ddi_combo_vswing_program(struct drm_i915_private *dev_priv,
}
/* Set PORT_TX_DW5 */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~(SCALING_MODE_SEL_MASK | RTERM_SELECT_MASK |
TAP2_DISABLE | TAP3_DISABLE);
val |= SCALING_MODE_SEL(0x2);
val |= RTERM_SELECT(0x6);
val |= TAP3_DISABLE;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* Program PORT_TX_DW2 */
- val = I915_READ(ICL_PORT_TX_DW2_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW2_LN0(phy));
val &= ~(SWING_SEL_LOWER_MASK | SWING_SEL_UPPER_MASK |
RCOMP_SCALAR_MASK);
val |= SWING_SEL_UPPER(ddi_translations[level].dw2_swing_sel);
val |= SWING_SEL_LOWER(ddi_translations[level].dw2_swing_sel);
/* Program Rcomp scalar for every table entry */
val |= RCOMP_SCALAR(0x98);
- I915_WRITE(ICL_PORT_TX_DW2_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW2_GRP(phy), val);
/* Program PORT_TX_DW4 */
/* We cannot write to GRP. It would overwrite individual loadgen. */
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~(POST_CURSOR_1_MASK | POST_CURSOR_2_MASK |
CURSOR_COEFF_MASK);
val |= POST_CURSOR_1(ddi_translations[level].dw4_post_cursor_1);
val |= POST_CURSOR_2(ddi_translations[level].dw4_post_cursor_2);
val |= CURSOR_COEFF(ddi_translations[level].dw4_cursor_coeff);
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* Program PORT_TX_DW7 */
- val = I915_READ(ICL_PORT_TX_DW7_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW7_LN0(phy));
val &= ~N_SCALAR_MASK;
val |= N_SCALAR(ddi_translations[level].dw7_n_scalar);
- I915_WRITE(ICL_PORT_TX_DW7_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW7_GRP(phy), val);
}
static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2641,12 +2300,12 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* set PORT_PCS_DW1 cmnkeeper_enable to 1b,
* else clear to 0b.
*/
- val = I915_READ(ICL_PORT_PCS_DW1_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_PCS_DW1_LN0(phy));
if (type == INTEL_OUTPUT_HDMI)
val &= ~COMMON_KEEPER_EN;
else
val |= COMMON_KEEPER_EN;
- I915_WRITE(ICL_PORT_PCS_DW1_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_PCS_DW1_GRP(phy), val);
/* 2. Program loadgen select */
/*
@@ -2656,33 +2315,33 @@ static void icl_combo_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* > 6 GHz (LN0=0, LN1=0, LN2=0, LN3=0)
*/
for (ln = 0; ln <= 3; ln++) {
- val = I915_READ(ICL_PORT_TX_DW4_LN(ln, phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy));
val &= ~LOADGEN_SELECT;
if ((rate <= 600000 && width == 4 && ln >= 1) ||
(rate <= 600000 && width < 4 && (ln == 1 || ln == 2))) {
val |= LOADGEN_SELECT;
}
- I915_WRITE(ICL_PORT_TX_DW4_LN(ln, phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW4_LN(ln, phy), val);
}
/* 3. Set PORT_CL_DW5 SUS Clock Config to 11b */
- val = I915_READ(ICL_PORT_CL_DW5(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW5(phy));
val |= SUS_CLOCK_CONFIG;
- I915_WRITE(ICL_PORT_CL_DW5(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_CL_DW5(phy), val);
/* 4. Clear training enable to change swing values */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val &= ~TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
/* 5. Program swing and de-emphasis */
icl_ddi_combo_vswing_program(dev_priv, level, phy, type, rate);
/* 6. Set training enable to trigger update */
- val = I915_READ(ICL_PORT_TX_DW5_LN0(phy));
+ val = intel_de_read(dev_priv, ICL_PORT_TX_DW5_LN0(phy));
val |= TX_TRAINING_EN;
- I915_WRITE(ICL_PORT_TX_DW5_GRP(phy), val);
+ intel_de_write(dev_priv, ICL_PORT_TX_DW5_GRP(phy), val);
}
static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
@@ -2706,33 +2365,33 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
/* Set MG_TX_LINK_PARAMS cri_use_fs32 to 0. */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_LINK_PARAMS(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX1_LINK_PARAMS(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_LINK_PARAMS(ln, tc_port), val);
- val = I915_READ(MG_TX2_LINK_PARAMS(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port));
val &= ~CRI_USE_FS32;
- I915_WRITE(MG_TX2_LINK_PARAMS(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_LINK_PARAMS(ln, tc_port), val);
}
/* Program MG_TX_SWINGCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_SWINGCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX1_SWINGCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_SWINGCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_SWINGCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port));
val &= ~CRI_TXDEEMPH_OVERRIDE_17_12_MASK;
val |= CRI_TXDEEMPH_OVERRIDE_17_12(
ddi_translations[level].cri_txdeemph_override_17_12);
- I915_WRITE(MG_TX2_SWINGCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_SWINGCTRL(ln, tc_port), val);
}
/* Program MG_TX_DRVCTRL with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DRVCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2740,9 +2399,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX1_DRVCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_DRVCTRL(ln, tc_port), val);
- val = I915_READ(MG_TX2_DRVCTRL(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_DRVCTRL(ln, tc_port));
val &= ~(CRI_TXDEEMPH_OVERRIDE_11_6_MASK |
CRI_TXDEEMPH_OVERRIDE_5_0_MASK);
val |= CRI_TXDEEMPH_OVERRIDE_5_0(
@@ -2750,7 +2409,7 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
CRI_TXDEEMPH_OVERRIDE_11_6(
ddi_translations[level].cri_txdeemph_override_11_6) |
CRI_TXDEEMPH_OVERRIDE_EN;
- I915_WRITE(MG_TX2_DRVCTRL(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_DRVCTRL(ln, tc_port), val);
/* FIXME: Program CRI_LOADGEN_SEL after the spec is updated */
}
@@ -2761,17 +2420,17 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
* values from table for which TX1 and TX2 enabled.
*/
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_CLKHUB(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_CLKHUB(ln, tc_port));
if (link_clock < 300000)
val |= CFG_LOW_RATE_LKREN_EN;
else
val &= ~CFG_LOW_RATE_LKREN_EN;
- I915_WRITE(MG_CLKHUB(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_CLKHUB(ln, tc_port), val);
}
/* Program the MG_TX_DCC<LN, port being used> based on the link frequency */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_DCC(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX1_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2779,9 +2438,9 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX1_DCC(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_DCC(ln, tc_port), val);
- val = I915_READ(MG_TX2_DCC(ln, tc_port));
+ val = intel_de_read(dev_priv, MG_TX2_DCC(ln, tc_port));
val &= ~CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK;
if (link_clock <= 500000) {
val &= ~CFG_AMI_CK_DIV_OVERRIDE_EN;
@@ -2789,18 +2448,22 @@ static void icl_mg_phy_ddi_vswing_sequence(struct intel_encoder *encoder,
val |= CFG_AMI_CK_DIV_OVERRIDE_EN |
CFG_AMI_CK_DIV_OVERRIDE_VAL(1);
}
- I915_WRITE(MG_TX2_DCC(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_DCC(ln, tc_port), val);
}
/* Program MG_TX_PISO_READLOAD with values from vswing table */
for (ln = 0; ln < 2; ln++) {
- val = I915_READ(MG_TX1_PISO_READLOAD(ln, tc_port));
+ val = intel_de_read(dev_priv,
+ MG_TX1_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX1_PISO_READLOAD(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX1_PISO_READLOAD(ln, tc_port),
+ val);
- val = I915_READ(MG_TX2_PISO_READLOAD(ln, tc_port));
+ val = intel_de_read(dev_priv,
+ MG_TX2_PISO_READLOAD(ln, tc_port));
val |= CRI_CALCINIT;
- I915_WRITE(MG_TX2_PISO_READLOAD(ln, tc_port), val);
+ intel_de_write(dev_priv, MG_TX2_PISO_READLOAD(ln, tc_port),
+ val);
}
}
@@ -2846,24 +2509,25 @@ tgl_dkl_phy_ddi_vswing_sequence(struct intel_encoder *encoder, int link_clock,
dpcnt_val |= DKL_TX_PRESHOOT_COEFF(ddi_translations[level].dkl_preshoot_control);
for (ln = 0; ln < 2; ln++) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, ln));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, ln));
- I915_WRITE(DKL_TX_PMD_LANE_SUS(tc_port), 0);
+ intel_de_write(dev_priv, DKL_TX_PMD_LANE_SUS(tc_port), 0);
/* All the registers are RMW */
- val = I915_READ(DKL_TX_DPCNTL0(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL0(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
- I915_WRITE(DKL_TX_DPCNTL0(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL0(tc_port), val);
- val = I915_READ(DKL_TX_DPCNTL1(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL1(tc_port));
val &= ~dpcnt_mask;
val |= dpcnt_val;
- I915_WRITE(DKL_TX_DPCNTL1(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL1(tc_port), val);
- val = I915_READ(DKL_TX_DPCNTL2(tc_port));
+ val = intel_de_read(dev_priv, DKL_TX_DPCNTL2(tc_port));
val &= ~DKL_TX_DP20BITMODE;
- I915_WRITE(DKL_TX_DPCNTL2(tc_port), val);
+ intel_de_write(dev_priv, DKL_TX_DPCNTL2(tc_port), val);
}
}
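/*
 * (Context for the Dekel PHY accesses above, as the code suggests: the
 * DKL_* registers are banked per lane behind a single set of offsets,
 * and the bank is selected out of band by writing HIP_INDEX_REG first;
 * hence each loop iteration writes HIP_INDEX_VAL(tc_port, ln) before
 * touching the DKL_TX_* registers for that lane.)
 */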
@@ -2963,10 +2627,11 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
- WARN_ON((val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
+ drm_WARN_ON(&dev_priv->drm,
+ (val & icl_dpclka_cfgcr0_clk_off(dev_priv, phy)) == 0);
if (intel_phy_is_combo(dev_priv, phy)) {
/*
@@ -2981,14 +2646,14 @@ static void icl_map_plls_to_ports(struct intel_encoder *encoder,
*/
val &= ~ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
val |= ICL_DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
- POSTING_READ(ICL_DPCLKA_CFGCR0);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
+ intel_de_posting_read(dev_priv, ICL_DPCLKA_CFGCR0);
}
val &= ~icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
@@ -2997,13 +2662,13 @@ static void icl_unmap_plls_to_ports(struct intel_encoder *encoder)
enum phy phy = intel_port_to_phy(dev_priv, encoder->port);
u32 val;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
@@ -3012,7 +2677,7 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
enum port port;
u32 val;
- val = I915_READ(ICL_DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0);
for_each_port_masked(port, port_mask) {
enum phy phy = intel_port_to_phy(dev_priv, port);
bool ddi_clk_off = val & icl_dpclka_cfgcr0_clk_off(dev_priv,
@@ -3025,13 +2690,13 @@ static void icl_sanitize_port_clk_off(struct drm_i915_private *dev_priv,
* For now, punt on the case where the clock is gated but would
* be needed by the port; something else is really broken then.
*/
- if (WARN_ON(ddi_clk_needed))
+ if (drm_WARN_ON(&dev_priv->drm, ddi_clk_needed))
continue;
DRM_NOTE("PHY %c is disabled/in DSI mode with an ungated DDI clock, gate it\n",
phy_name(phy));
val |= icl_dpclka_cfgcr0_clk_off(dev_priv, phy);
- I915_WRITE(ICL_DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, ICL_DPCLKA_CFGCR0, val);
}
}
@@ -3057,7 +2722,7 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
* In the unlikely case that BIOS enables DP in MST mode, just
* warn since our MST HW readout is incomplete.
*/
- if (WARN_ON(is_mst))
+ if (drm_WARN_ON(&dev_priv->drm, is_mst))
return;
}
@@ -3076,7 +2741,8 @@ void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder)
if (other_encoder == encoder)
continue;
- if (WARN_ON(port_mask & BIT(other_encoder->port)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ port_mask & BIT(other_encoder->port)))
return;
}
/*
@@ -3098,52 +2764,54 @@ static void intel_ddi_clk_select(struct intel_encoder *encoder,
u32 val;
const struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(!pll))
+ if (drm_WARN_ON(&dev_priv->drm, !pll))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy))
- I915_WRITE(DDI_CLK_SEL(port),
- icl_pll_to_ddi_clk_sel(encoder, crtc_state));
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ icl_pll_to_ddi_clk_sel(encoder, crtc_state));
else if (IS_ELKHARTLAKE(dev_priv) && port >= PORT_C)
/*
* MG does not exist but the programming is required
* to ungate DDIC and DDID
*/
- I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_MG);
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ DDI_CLK_SEL_MG);
} else if (IS_CANNONLAKE(dev_priv)) {
/* Configure DPCLKA_CFGCR0 to map the DPLL to the DDI. */
- val = I915_READ(DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
val |= DPCLKA_CFGCR0_DDI_CLK_SEL(pll->info->id, port);
- I915_WRITE(DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
/*
* Configure DPCLKA_CFGCR0 to turn on the clock for the DDI.
* This step and the step before must be done with separate
* register writes.
*/
- val = I915_READ(DPCLKA_CFGCR0);
+ val = intel_de_read(dev_priv, DPCLKA_CFGCR0);
val &= ~DPCLKA_CFGCR0_DDI_CLK_OFF(port);
- I915_WRITE(DPCLKA_CFGCR0, val);
+ intel_de_write(dev_priv, DPCLKA_CFGCR0, val);
} else if (IS_GEN9_BC(dev_priv)) {
/* DDI -> PLL mapping */
- val = I915_READ(DPLL_CTRL2);
+ val = intel_de_read(dev_priv, DPLL_CTRL2);
val &= ~(DPLL_CTRL2_DDI_CLK_OFF(port) |
DPLL_CTRL2_DDI_CLK_SEL_MASK(port));
val |= (DPLL_CTRL2_DDI_CLK_SEL(pll->info->id, port) |
DPLL_CTRL2_DDI_SEL_OVERRIDE(port));
- I915_WRITE(DPLL_CTRL2, val);
+ intel_de_write(dev_priv, DPLL_CTRL2, val);
} else if (INTEL_GEN(dev_priv) < 9) {
- I915_WRITE(PORT_CLK_SEL(port), hsw_pll_to_ddi_pll_sel(pll));
+ intel_de_write(dev_priv, PORT_CLK_SEL(port),
+ hsw_pll_to_ddi_pll_sel(pll));
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static void intel_ddi_clk_disable(struct intel_encoder *encoder)
@@ -3155,15 +2823,17 @@ static void intel_ddi_clk_disable(struct intel_encoder *encoder)
if (INTEL_GEN(dev_priv) >= 11) {
if (!intel_phy_is_combo(dev_priv, phy) ||
(IS_ELKHARTLAKE(dev_priv) && port >= PORT_C))
- I915_WRITE(DDI_CLK_SEL(port), DDI_CLK_SEL_NONE);
+ intel_de_write(dev_priv, DDI_CLK_SEL(port),
+ DDI_CLK_SEL_NONE);
} else if (IS_CANNONLAKE(dev_priv)) {
- I915_WRITE(DPCLKA_CFGCR0, I915_READ(DPCLKA_CFGCR0) |
- DPCLKA_CFGCR0_DDI_CLK_OFF(port));
+ intel_de_write(dev_priv, DPCLKA_CFGCR0,
+ intel_de_read(dev_priv, DPCLKA_CFGCR0) | DPCLKA_CFGCR0_DDI_CLK_OFF(port));
} else if (IS_GEN9_BC(dev_priv)) {
- I915_WRITE(DPLL_CTRL2, I915_READ(DPLL_CTRL2) |
- DPLL_CTRL2_DDI_CLK_OFF(port));
+ intel_de_write(dev_priv, DPLL_CTRL2,
+ intel_de_read(dev_priv, DPLL_CTRL2) | DPLL_CTRL2_DDI_CLK_OFF(port));
} else if (INTEL_GEN(dev_priv) < 9) {
- I915_WRITE(PORT_CLK_SEL(port), PORT_CLK_SEL_NONE);
+ intel_de_write(dev_priv, PORT_CLK_SEL(port),
+ PORT_CLK_SEL_NONE);
}
}
@@ -3180,13 +2850,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
return;
if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
- ln0 = I915_READ(DKL_DP_MODE(tc_port));
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
- ln1 = I915_READ(DKL_DP_MODE(tc_port));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x0));
+ ln0 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x1));
+ ln1 = intel_de_read(dev_priv, DKL_DP_MODE(tc_port));
} else {
- ln0 = I915_READ(MG_DP_MODE(0, tc_port));
- ln1 = I915_READ(MG_DP_MODE(1, tc_port));
+ ln0 = intel_de_read(dev_priv, MG_DP_MODE(0, tc_port));
+ ln1 = intel_de_read(dev_priv, MG_DP_MODE(1, tc_port));
}
ln0 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE);
@@ -3198,7 +2870,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
switch (pin_assignment) {
case 0x0:
- WARN_ON(intel_dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dig_port->tc_mode != TC_PORT_LEGACY);
if (width == 1) {
ln1 |= MG_DP_MODE_CFG_DP_X1_MODE;
} else {
@@ -3243,13 +2916,15 @@ icl_program_mg_dp_mode(struct intel_digital_port *intel_dig_port,
}
if (INTEL_GEN(dev_priv) >= 12) {
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x0));
- I915_WRITE(DKL_DP_MODE(tc_port), ln0);
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x1));
- I915_WRITE(DKL_DP_MODE(tc_port), ln1);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x0));
+ intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln0);
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x1));
+ intel_de_write(dev_priv, DKL_DP_MODE(tc_port), ln1);
} else {
- I915_WRITE(MG_DP_MODE(0, tc_port), ln0);
- I915_WRITE(MG_DP_MODE(1, tc_port), ln1);
+ intel_de_write(dev_priv, MG_DP_MODE(0, tc_port), ln0);
+ intel_de_write(dev_priv, MG_DP_MODE(1, tc_port), ln1);
}
}
@@ -3274,9 +2949,9 @@ static void intel_ddi_enable_fec(struct intel_encoder *encoder,
return;
intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val |= DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_FEC_ENABLE_LIVE, 1))
@@ -3294,90 +2969,10 @@ static void intel_ddi_disable_fec_state(struct intel_encoder *encoder,
return;
intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_FEC_ENABLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
-}
-
-static void
-tgl_clear_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
- u32 val;
-
- if (!cstate->dc3co_exitline)
- return;
-
- val = I915_READ(EXITLINE(cstate->cpu_transcoder));
- val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
- I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void
-tgl_set_psr2_transcoder_exitline(const struct intel_crtc_state *cstate)
-{
- u32 val, exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
-
- if (!cstate->dc3co_exitline)
- return;
-
- exit_scanlines = cstate->dc3co_exitline;
- exit_scanlines <<= EXITLINE_SHIFT;
- val = I915_READ(EXITLINE(cstate->cpu_transcoder));
- val &= ~(EXITLINE_MASK | EXITLINE_ENABLE);
- val |= exit_scanlines;
- val |= EXITLINE_ENABLE;
- I915_WRITE(EXITLINE(cstate->cpu_transcoder), val);
-}
-
-static void tgl_dc3co_exitline_compute_config(struct intel_encoder *encoder,
- struct intel_crtc_state *cstate)
-{
- u32 exit_scanlines;
- struct drm_i915_private *dev_priv = to_i915(cstate->uapi.crtc->dev);
- u32 crtc_vdisplay = cstate->hw.adjusted_mode.crtc_vdisplay;
-
- cstate->dc3co_exitline = 0;
-
- if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
- return;
-
- /* B.Specs:49196 DC3CO only works with pipe A and DDI A. */
- if (to_intel_crtc(cstate->uapi.crtc)->pipe != PIPE_A ||
- encoder->port != PORT_A)
- return;
-
- if (!cstate->has_psr2 || !cstate->hw.active)
- return;
-
- /*
- * DC3CO Exit time 200us B.Spec 49196
- * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
- */
- exit_scanlines =
- intel_usecs_to_scanlines(&cstate->hw.adjusted_mode, 200) + 1;
-
- if (WARN_ON(exit_scanlines > crtc_vdisplay))
- return;
-
- cstate->dc3co_exitline = crtc_vdisplay - exit_scanlines;
- DRM_DEBUG_KMS("DC3CO exit scanlines %d\n", cstate->dc3co_exitline);
-}
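/*
 * (Worked example of the exit-line math above, with illustrative
 * numbers: a 1920x1080@60 CEA mode has a line time of about 14.8 us,
 * so exit_scanlines = ROUNDUP(200 / 14.8) + 1 = 15 and, with
 * crtc_vdisplay = 1080, dc3co_exitline = 1080 - 15 = 1065.)
 */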
-
-static void tgl_dc3co_exitline_get_config(struct intel_crtc_state *crtc_state)
-{
- u32 val;
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
-
- if (INTEL_GEN(dev_priv) < 12)
- return;
-
- val = I915_READ(EXITLINE(crtc_state->cpu_transcoder));
-
- if (val & EXITLINE_ENABLE)
- crtc_state->dc3co_exitline = val & EXITLINE_MASK;
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
}
static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
@@ -3392,7 +2987,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
enum transcoder transcoder = crtc_state->cpu_transcoder;
- tgl_set_psr2_transcoder_exitline(crtc_state);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3534,9 +3128,10 @@ static void hsw_ddi_pre_enable_dp(struct intel_encoder *encoder,
int level = intel_ddi_dp_level(intel_dp);
if (INTEL_GEN(dev_priv) < 11)
- WARN_ON(is_mst && (port == PORT_A || port == PORT_E));
+ drm_WARN_ON(&dev_priv->drm,
+ is_mst && (port == PORT_A || port == PORT_E));
else
- WARN_ON(is_mst && port == PORT_A);
+ drm_WARN_ON(&dev_priv->drm, is_mst && port == PORT_A);
intel_dp_set_link_params(intel_dp, crtc_state->port_clock,
crtc_state->lane_count, is_mst);
@@ -3607,8 +3202,11 @@ static void intel_ddi_pre_enable_dp(struct intel_encoder *encoder,
/* In MST mode, the MSA is set after the Virtual Channel has been
* allocated, from the MST encoder's pre_enable callback.
*/
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
+ if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) {
intel_ddi_set_dp_msa(crtc_state, conn_state);
+
+ intel_dp_set_m_n(crtc_state, M1_N1);
+ }
}
static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
@@ -3618,8 +3216,7 @@ static void intel_ddi_pre_enable_hdmi(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- enum port port = encoder->port;
- int level = intel_ddi_hdmi_level(dev_priv, port);
+ int level = intel_ddi_hdmi_level(encoder);
struct intel_digital_port *dig_port = enc_to_dig_port(encoder);
intel_dp_dual_mode_set_tmds_output(intel_hdmi, true);
@@ -3673,7 +3270,7 @@ static void intel_ddi_pre_enable(struct intel_encoder *encoder,
* the DP link parameters
*/
- WARN_ON(crtc_state->has_pch_encoder);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->has_pch_encoder);
if (INTEL_GEN(dev_priv) >= 11)
icl_map_plls_to_ports(encoder, crtc_state);
@@ -3706,20 +3303,20 @@ static void intel_disable_ddi_buf(struct intel_encoder *encoder,
bool wait = false;
u32 val;
- val = I915_READ(DDI_BUF_CTL(port));
+ val = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (val & DDI_BUF_CTL_ENABLE) {
val &= ~DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), val);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), val);
wait = true;
}
if (intel_crtc_has_dp_encoder(crtc_state)) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
val |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
}
/* Disable FEC in DP Sink */
@@ -3751,9 +3348,13 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder;
u32 val;
- val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
- val &= ~TGL_TRANS_DDI_PORT_MASK;
- I915_WRITE(TRANS_DDI_FUNC_CTL(cpu_transcoder), val);
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ val &= ~(TGL_TRANS_DDI_PORT_MASK |
+ TRANS_DDI_MODE_SELECT_MASK);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder),
+ val);
}
} else {
if (!is_mst)
@@ -3779,7 +3380,6 @@ static void intel_ddi_post_disable_dp(struct intel_encoder *encoder,
dig_port->ddi_io_power_domain);
intel_ddi_clk_disable(encoder);
- tgl_clear_psr2_transcoder_exitline(old_crtc_state);
}
static void intel_ddi_post_disable_hdmi(struct intel_encoder *encoder,
@@ -3816,7 +3416,8 @@ static void icl_disable_transcoder_port_sync(const struct intel_crtc_state *old_
DRM_DEBUG_KMS("Disabling Transcoder Port Sync on Slave Transcoder %s\n",
transcoder_name(old_crtc_state->cpu_transcoder));
- I915_WRITE(TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(old_crtc_state->cpu_transcoder), 0);
}
static void intel_ddi_post_disable(struct intel_encoder *encoder,
@@ -3890,25 +3491,25 @@ void intel_ddi_fdi_post_disable(struct intel_encoder *encoder,
* step 13 is the correct place for it. Step 18 is where it was
* originally before the BUN.
*/
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
intel_disable_ddi_buf(encoder, old_crtc_state);
intel_ddi_clk_disable(encoder);
- val = I915_READ(FDI_RX_MISC(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_MISC(PIPE_A));
val &= ~(FDI_RX_PWRDN_LANE1_MASK | FDI_RX_PWRDN_LANE0_MASK);
val |= FDI_RX_PWRDN_LANE1_VAL(2) | FDI_RX_PWRDN_LANE0_VAL(2);
- I915_WRITE(FDI_RX_MISC(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_MISC(PIPE_A), val);
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_PCDCLK;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
- val = I915_READ(FDI_RX_CTL(PIPE_A));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
val &= ~FDI_RX_PLL_ENABLE;
- I915_WRITE(FDI_RX_CTL(PIPE_A), val);
+ intel_de_write(dev_priv, FDI_RX_CTL(PIPE_A), val);
}
static void intel_enable_ddi_dp(struct intel_encoder *encoder,
@@ -3944,9 +3545,9 @@ gen9_chicken_trans_reg_by_port(struct drm_i915_private *dev_priv,
[PORT_E] = TRANSCODER_A,
};
- WARN_ON(INTEL_GEN(dev_priv) < 9);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) < 9);
- if (WARN_ON(port < PORT_A || port > PORT_E))
+ if (drm_WARN_ON(&dev_priv->drm, port < PORT_A || port > PORT_E))
port = PORT_A;
return CHICKEN_TRANS(trans[port]);
@@ -3964,8 +3565,9 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
if (!intel_hdmi_handle_sink_scrambling(encoder, connector,
crtc_state->hdmi_high_tmds_clock_ratio,
crtc_state->hdmi_scrambling))
- DRM_ERROR("[CONNECTOR:%d:%s] Failed to configure sink scrambling/TMDS bit clock ratio\n",
- connector->base.id, connector->name);
+ DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Failed to configure sink "
+ "scrambling/TMDS bit clock ratio\n",
+ connector->base.id, connector->name);
/* Display WA #1143: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv)) {
@@ -3978,7 +3580,7 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
i915_reg_t reg = gen9_chicken_trans_reg_by_port(dev_priv, port);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (port == PORT_E)
val |= DDIE_TRAINING_OVERRIDE_ENABLE |
@@ -3987,8 +3589,8 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val |= DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
udelay(1);
@@ -3999,15 +3601,15 @@ static void intel_enable_ddi_hdmi(struct intel_encoder *encoder,
val &= ~(DDI_TRAINING_OVERRIDE_ENABLE |
DDI_TRAINING_OVERRIDE_VALUE);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
/* In HDMI/DVI mode, the port width, and swing/emphasis values
* are ignored so nothing special needs to be done besides
* enabling the port.
*/
- I915_WRITE(DDI_BUF_CTL(port),
- dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ dig_port->saved_port_bits | DDI_BUF_CTL_ENABLE);
if (crtc_state->has_audio)
intel_audio_codec_enable(encoder, crtc_state, conn_state);
@@ -4017,6 +3619,12 @@ static void intel_enable_ddi(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_enable_pipe(crtc_state);
+
+ intel_crtc_vblank_on(crtc_state);
+
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_enable_ddi_hdmi(encoder, crtc_state, conn_state);
else
@@ -4096,43 +3704,11 @@ static void intel_ddi_update_pipe(struct intel_encoder *encoder,
const struct intel_crtc_state *crtc_state,
const struct drm_connector_state *conn_state)
{
- struct intel_connector *connector =
- to_intel_connector(conn_state->connector);
- struct intel_hdcp *hdcp = &connector->hdcp;
- bool content_protection_type_changed =
- (conn_state->hdcp_content_type != hdcp->content_type &&
- conn_state->content_protection !=
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
intel_ddi_update_pipe_dp(encoder, crtc_state, conn_state);
- /*
- * During the HDCP encryption session if Type change is requested,
- * disable the HDCP and reenable it with new TYPE value.
- */
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
- content_protection_type_changed)
- intel_hdcp_disable(connector);
-
- /*
- * Mark the hdcp state as DESIRED after the hdcp disable of type
- * change procedure.
- */
- if (content_protection_type_changed) {
- mutex_lock(&hdcp->mutex);
- hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
- schedule_work(&hdcp->prop_work);
- mutex_unlock(&hdcp->mutex);
- }
-
- if (conn_state->content_protection ==
- DRM_MODE_CONTENT_PROTECTION_DESIRED ||
- content_protection_type_changed)
- intel_hdcp_enable(connector,
- crtc_state->cpu_transcoder,
- (u8)conn_state->hdcp_content_type);
+ intel_hdcp_update_pipe(encoder, crtc_state, conn_state);
}
static void
@@ -4197,20 +3773,20 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
u32 dp_tp_ctl, ddi_buf_ctl;
bool wait = false;
- dp_tp_ctl = I915_READ(intel_dp->regs.dp_tp_ctl);
+ dp_tp_ctl = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_tp_ctl & DP_TP_CTL_ENABLE) {
- ddi_buf_ctl = I915_READ(DDI_BUF_CTL(port));
+ ddi_buf_ctl = intel_de_read(dev_priv, DDI_BUF_CTL(port));
if (ddi_buf_ctl & DDI_BUF_CTL_ENABLE) {
- I915_WRITE(DDI_BUF_CTL(port),
- ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
+ intel_de_write(dev_priv, DDI_BUF_CTL(port),
+ ddi_buf_ctl & ~DDI_BUF_CTL_ENABLE);
wait = true;
}
dp_tp_ctl &= ~(DP_TP_CTL_ENABLE | DP_TP_CTL_LINK_TRAIN_MASK);
dp_tp_ctl |= DP_TP_CTL_LINK_TRAIN_PAT1;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (wait)
intel_wait_ddi_buf_idle(dev_priv, port);
@@ -4225,12 +3801,12 @@ static void intel_ddi_prepare_link_retrain(struct intel_dp *intel_dp)
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
dp_tp_ctl |= DP_TP_CTL_ENHANCED_FRAME_ENABLE;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
- POSTING_READ(intel_dp->regs.dp_tp_ctl);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, dp_tp_ctl);
+ intel_de_posting_read(dev_priv, intel_dp->regs.dp_tp_ctl);
intel_dp->DP |= DDI_BUF_CTL_ENABLE;
- I915_WRITE(DDI_BUF_CTL(port), intel_dp->DP);
- POSTING_READ(DDI_BUF_CTL(port));
+ intel_de_write(dev_priv, DDI_BUF_CTL(port), intel_dp->DP);
+ intel_de_posting_read(dev_priv, DDI_BUF_CTL(port));
udelay(600);
}
@@ -4244,14 +3820,18 @@ static bool intel_ddi_is_audio_enabled(struct drm_i915_private *dev_priv,
if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO))
return false;
- return I915_READ(HSW_AUD_PIN_ELD_CP_VLD) &
+ return intel_de_read(dev_priv, HSW_AUD_PIN_ELD_CP_VLD) &
AUDIO_OUTPUT_ENABLE(cpu_transcoder);
}
void intel_ddi_compute_min_voltage_level(struct drm_i915_private *dev_priv,
struct intel_crtc_state *crtc_state)
{
- if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
+ if (INTEL_GEN(dev_priv) >= 12 && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 2;
+ else if (IS_ELKHARTLAKE(dev_priv) && crtc_state->port_clock > 594000)
+ crtc_state->min_voltage_level = 3;
+ else if (INTEL_GEN(dev_priv) >= 11 && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 1;
else if (IS_CANNONLAKE(dev_priv) && crtc_state->port_clock > 594000)
crtc_state->min_voltage_level = 2;
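Note that the ordering of the new cascade matters: Elkhart Lake is a gen 11 platform, so its branch must come before the generic gen >= 11 test, and the gen >= 12 test before both. Condensed, the selection for port clocks above 594 MHz reads (a sketch of the branches above, not new logic):

/*
 * min_voltage_level when port_clock > 594000 kHz:
 *   gen >= 12              -> 2
 *   Elkhart Lake (gen 11)  -> 3
 *   other gen >= 11        -> 1
 *   Cannon Lake            -> 2
 *   anything else          -> left at its default
 */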
@@ -4266,12 +3846,12 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
u32 temp, flags = 0;
/* XXX: DSI transcoder paranoia */
- if (WARN_ON(transcoder_is_dsi(cpu_transcoder)))
+ if (drm_WARN_ON(&dev_priv->drm, transcoder_is_dsi(cpu_transcoder)))
return;
intel_dsc_get_config(encoder, pipe_config);
- temp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ temp = intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (temp & TRANS_DDI_PHSYNC)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -4340,7 +3920,7 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
dp_tp_ctl = TGL_DP_TP_CTL(pipe_config->cpu_transcoder);
pipe_config->fec_enable =
- I915_READ(dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
+ intel_de_read(dev_priv, dp_tp_ctl) & DP_TP_CTL_FEC_ENABLE;
DRM_DEBUG_KMS("[ENCODER:%d:%s] Fec status: %u\n",
encoder->base.base.id, encoder->base.name,
@@ -4363,9 +3943,6 @@ void intel_ddi_get_config(struct intel_encoder *encoder,
break;
}
- if (encoder->type == INTEL_OUTPUT_EDP)
- tgl_dc3co_exitline_get_config(pipe_config);
-
pipe_config->has_audio =
intel_ddi_is_audio_enabled(dev_priv, cpu_transcoder);
@@ -4447,7 +4024,6 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
ret = intel_hdmi_compute_config(encoder, pipe_config, conn_state);
} else {
ret = intel_dp_compute_config(encoder, pipe_config, conn_state);
- tgl_dc3co_exitline_compute_config(encoder, pipe_config);
}
if (ret)
@@ -4468,6 +4044,112 @@ static int intel_ddi_compute_config(struct intel_encoder *encoder,
return 0;
}
+static bool mode_equal(const struct drm_display_mode *mode1,
+ const struct drm_display_mode *mode2)
+{
+ return drm_mode_match(mode1, mode2,
+ DRM_MODE_MATCH_TIMINGS |
+ DRM_MODE_MATCH_FLAGS |
+ DRM_MODE_MATCH_3D_FLAGS) &&
+ mode1->clock == mode2->clock; /* we want an exact match */
+}
+
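DRM_MODE_MATCH_TIMINGS compares the h/v timing fields but not the pixel clock, which sits behind the separate DRM_MODE_MATCH_CLOCK flag; mode_equal() therefore pins the clock explicitly, since port-synced transcoders must run at literally the same rate. An alternative formulation that should be equivalent (illustrative only, not part of the patch):

static bool mode_equal_alt(const struct drm_display_mode *m1,
			   const struct drm_display_mode *m2)
{
	/* DRM_MODE_MATCH_CLOCK performs the same exact clock compare. */
	return drm_mode_match(m1, m2,
			      DRM_MODE_MATCH_TIMINGS |
			      DRM_MODE_MATCH_CLOCK |
			      DRM_MODE_MATCH_FLAGS |
			      DRM_MODE_MATCH_3D_FLAGS);
}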
+static bool m_n_equal(const struct intel_link_m_n *m_n_1,
+ const struct intel_link_m_n *m_n_2)
+{
+ return m_n_1->tu == m_n_2->tu &&
+ m_n_1->gmch_m == m_n_2->gmch_m &&
+ m_n_1->gmch_n == m_n_2->gmch_n &&
+ m_n_1->link_m == m_n_2->link_m &&
+ m_n_1->link_n == m_n_2->link_n;
+}
+
+static bool crtcs_port_sync_compatible(const struct intel_crtc_state *crtc_state1,
+ const struct intel_crtc_state *crtc_state2)
+{
+ return crtc_state1->hw.active && crtc_state2->hw.active &&
+ crtc_state1->output_types == crtc_state2->output_types &&
+ crtc_state1->output_format == crtc_state2->output_format &&
+ crtc_state1->lane_count == crtc_state2->lane_count &&
+ crtc_state1->port_clock == crtc_state2->port_clock &&
+ mode_equal(&crtc_state1->hw.adjusted_mode,
+ &crtc_state2->hw.adjusted_mode) &&
+ m_n_equal(&crtc_state1->dp_m_n, &crtc_state2->dp_m_n);
+}
+
+static u8
+intel_ddi_port_sync_transcoders(const struct intel_crtc_state *ref_crtc_state,
+ int tile_group_id)
+{
+ struct drm_connector *connector;
+ const struct drm_connector_state *conn_state;
+ struct drm_i915_private *dev_priv = to_i915(ref_crtc_state->uapi.crtc->dev);
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(ref_crtc_state->uapi.state);
+ u8 transcoders = 0;
+ int i;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_crtc_has_type(ref_crtc_state, INTEL_OUTPUT_DP))
+ return 0;
+
+ for_each_new_connector_in_state(&state->base, connector, conn_state, i) {
+ struct intel_crtc *crtc = to_intel_crtc(conn_state->crtc);
+ const struct intel_crtc_state *crtc_state;
+
+ if (!crtc)
+ continue;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_group_id)
+ continue;
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ if (!crtcs_port_sync_compatible(ref_crtc_state, crtc_state))
+ continue;
+ transcoders |= BIT(crtc_state->cpu_transcoder);
+ }
+
+ return transcoders;
+}
+
+static int intel_ddi_compute_config_late(struct intel_encoder *encoder,
+ struct intel_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_connector *connector = conn_state->connector;
+ u8 port_sync_transcoders = 0;
+
+ DRM_DEBUG_KMS("[ENCODER:%d:%s] [CRTC:%d:%s]",
+ encoder->base.base.id, encoder->base.name,
+ crtc_state->uapi.crtc->base.id, crtc_state->uapi.crtc->name);
+
+ if (connector->has_tile)
+ port_sync_transcoders = intel_ddi_port_sync_transcoders(crtc_state,
+ connector->tile_group->id);
+
+ /*
+ * eDP transcoders cannot be enslaved;
+ * always make them the master when present.
+ */
+ if (port_sync_transcoders & BIT(TRANSCODER_EDP))
+ crtc_state->master_transcoder = TRANSCODER_EDP;
+ else
+ crtc_state->master_transcoder = ffs(port_sync_transcoders) - 1;
+
+ if (crtc_state->master_transcoder == crtc_state->cpu_transcoder) {
+ crtc_state->master_transcoder = INVALID_TRANSCODER;
+ crtc_state->sync_mode_slaves_mask =
+ port_sync_transcoders & ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ return 0;
+}
+
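To make the selection concrete: suppose a two-tile DP display lands on transcoders A and B, so intel_ddi_port_sync_transcoders() returns the same two-bit mask for both CRTCs. A hypothetical walk-through (values are illustrative):

u8 sync = BIT(TRANSCODER_A) | BIT(TRANSCODER_B);  /* no eDP bit set */
enum transcoder master = ffs(sync) - 1;           /* lowest bit: TRANSCODER_A */

/*
 * The CRTC on transcoder A sees master == cpu_transcoder, so it becomes
 * the master itself (master_transcoder = INVALID_TRANSCODER) and records
 * BIT(TRANSCODER_B) in sync_mode_slaves_mask; the CRTC on transcoder B
 * keeps master_transcoder = TRANSCODER_A. Had the mask included
 * TRANSCODER_EDP, the eDP transcoder would have been forced master.
 */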
static void intel_ddi_encoder_destroy(struct drm_encoder *encoder)
{
struct intel_digital_port *dig_port = enc_to_dig_port(to_intel_encoder(encoder));
@@ -4567,7 +4249,8 @@ static int intel_hdmi_reset_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
+ drm_WARN_ON(&dev_priv->drm,
+ !intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI));
if (!crtc_state->hw.active)
return 0;
@@ -4634,7 +4317,8 @@ intel_ddi_hotplug(struct intel_encoder *encoder,
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
/*
* Unpowered type-c dongles can take some time to boot and be
@@ -4714,7 +4398,7 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
return max_lanes;
if (port == PORT_A || port == PORT_E) {
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
max_lanes = port == PORT_A ? 4 : 0;
else
/* Both A and E share 2 lanes */
@@ -4737,15 +4421,14 @@ intel_ddi_max_lanes(struct intel_digital_port *intel_dport)
void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
{
- struct ddi_vbt_port_info *port_info =
- &dev_priv->vbt.ddi_port_info[port];
struct intel_digital_port *intel_dig_port;
struct intel_encoder *encoder;
bool init_hdmi, init_dp, init_lspcon = false;
enum phy phy = intel_port_to_phy(dev_priv, port);
- init_hdmi = port_info->supports_dvi || port_info->supports_hdmi;
- init_dp = port_info->supports_dp;
+ init_hdmi = intel_bios_port_supports_dvi(dev_priv, port) ||
+ intel_bios_port_supports_hdmi(dev_priv, port);
+ init_dp = intel_bios_port_supports_dp(dev_priv, port);
if (intel_bios_is_lspcon_present(dev_priv, port)) {
/*
@@ -4777,6 +4460,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->hotplug = intel_ddi_hotplug;
encoder->compute_output_type = intel_ddi_compute_output_type;
encoder->compute_config = intel_ddi_compute_config;
+ encoder->compute_config_late = intel_ddi_compute_config_late;
encoder->enable = intel_enable_ddi;
encoder->pre_pll_enable = intel_ddi_pre_pll_enable;
encoder->pre_enable = intel_ddi_pre_enable;
@@ -4795,10 +4479,12 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->pipe_mask = ~0;
if (INTEL_GEN(dev_priv) >= 11)
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+ DDI_BUF_CTL(port)) &
DDI_BUF_PORT_REVERSAL;
else
- intel_dig_port->saved_port_bits = I915_READ(DDI_BUF_CTL(port)) &
+ intel_dig_port->saved_port_bits = intel_de_read(dev_priv,
+ DDI_BUF_CTL(port)) &
(DDI_BUF_PORT_REVERSAL | DDI_A_4_LANES);
intel_dig_port->dp.output_reg = INVALID_MMIO_REG;
@@ -4806,8 +4492,9 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
intel_dig_port->aux_ch = intel_bios_port_aux_ch(dev_priv, port);
if (intel_phy_is_tc(dev_priv, phy)) {
- bool is_legacy = !port_info->supports_typec_usb &&
- !port_info->supports_tbt;
+ bool is_legacy =
+ !intel_bios_port_supports_typec_usb(dev_priv, port) &&
+ !intel_bios_port_supports_tbt(dev_priv, port);
intel_tc_port_init(intel_dig_port, is_legacy);
@@ -4815,7 +4502,7 @@ void intel_ddi_init(struct drm_i915_private *dev_priv, enum port port)
encoder->update_complete = intel_ddi_update_complete;
}
- WARN_ON(port > PORT_I);
+ drm_WARN_ON(&dev_priv->drm, port > PORT_I);
intel_dig_port->ddi_io_power_domain = POWER_DOMAIN_PORT_DDI_A_IO +
port - PORT_A;
diff --git a/drivers/gpu/drm/i915/display/intel_ddi.h b/drivers/gpu/drm/i915/display/intel_ddi.h
index 167c6579d972..55fd72b901fe 100644
--- a/drivers/gpu/drm/i915/display/intel_ddi.h
+++ b/drivers/gpu/drm/i915/display/intel_ddi.h
@@ -6,8 +6,6 @@
#ifndef __INTEL_DDI_H__
#define __INTEL_DDI_H__
-#include <drm/i915_drm.h>
-
#include "intel_display.h"
struct drm_connector_state;
@@ -47,7 +45,5 @@ u8 intel_ddi_dp_pre_emphasis_max(struct intel_encoder *encoder,
int intel_ddi_toggle_hdcp_signalling(struct intel_encoder *intel_encoder,
bool enable);
void icl_sanitize_encoder_pll_mapping(struct intel_encoder *encoder);
-int cnl_calc_wrpll_link(struct drm_i915_private *dev_priv,
- struct intel_dpll_hw_state *state);
#endif /* __INTEL_DDI_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_de.h b/drivers/gpu/drm/i915/display/intel_de.h
new file mode 100644
index 000000000000..00da10bf35f5
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_de.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __INTEL_DE_H__
+#define __INTEL_DE_H__
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "intel_uncore.h"
+
+static inline u32
+intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ intel_uncore_posting_read(&i915->uncore, reg);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline u32
+intel_de_read_fw(struct drm_i915_private *i915, i915_reg_t reg)
+{
+ return intel_uncore_read_fw(&i915->uncore, reg);
+}
+
+static inline void
+intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write(&i915->uncore, reg, val);
+}
+
+/* Note: read the warnings for intel_uncore_*_fw() functions! */
+static inline void
+intel_de_write_fw(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
+{
+ intel_uncore_write_fw(&i915->uncore, reg, val);
+}
+
+static inline void
+intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg, u32 clear, u32 set)
+{
+ intel_uncore_rmw(&i915->uncore, reg, clear, set);
+}
+
+static inline int
+intel_de_wait_for_register(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, u32 value, unsigned int timeout)
+{
+ return intel_wait_for_register(&i915->uncore, reg, mask, value, timeout);
+}
+
+static inline int
+intel_de_wait_for_set(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, mask, timeout);
+}
+
+static inline int
+intel_de_wait_for_clear(struct drm_i915_private *i915, i915_reg_t reg,
+ u32 mask, unsigned int timeout)
+{
+ return intel_de_wait_for_register(i915, reg, mask, 0, timeout);
+}
+
+#endif /* __INTEL_DE_H__ */
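The new header routes display register access through an explicit device pointer rather than the implicit dev_priv the old macros captured; most of this patch is the mechanical conversion below (shown against CLKGATE_DIS_PSL from skl_wa_827() above; the intel_de_rmw() form is an equivalent alternative, not what that hunk uses):

/* Before: implicit dev_priv baked into the macros. */
val = I915_READ(CLKGATE_DIS_PSL(pipe));
I915_WRITE(CLKGATE_DIS_PSL(pipe), val | DUPS1_GATING_DIS);

/* After: explicit device pointer, same register traffic. */
val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val | DUPS1_GATING_DIS);

/* Single read-modify-write via the helper (clear mask, then set mask): */
intel_de_rmw(dev_priv, CLKGATE_DIS_PSL(pipe), 0, DUPS1_GATING_DIS);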
diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c
index 19ea842cfd84..8f23c4d51c33 100644
--- a/drivers/gpu/drm/i915/display/intel_display.c
+++ b/drivers/gpu/drm/i915/display/intel_display.c
@@ -41,7 +41,6 @@
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "display/intel_crt.h"
#include "display/intel_ddi.h"
@@ -203,9 +202,9 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
val = vlv_cck_read(dev_priv, reg);
divider = val & CCK_FREQUENCY_VALUES;
- WARN((val & CCK_FREQUENCY_STATUS) !=
- (divider << CCK_FREQUENCY_STATUS_SHIFT),
- "%s change in progress\n", name);
+ drm_WARN(&dev_priv->drm, (val & CCK_FREQUENCY_STATUS) !=
+ (divider << CCK_FREQUENCY_STATUS_SHIFT),
+ "%s change in progress\n", name);
return DIV_ROUND_CLOSEST(ref_freq << 1, divider + 1);
}
@@ -235,7 +234,8 @@ static void intel_update_czclk(struct drm_i915_private *dev_priv)
dev_priv->czclk_freq = vlv_get_cck_clock_hpll(dev_priv, "czclk",
CCK_CZ_CLOCK_CONTROL);
- DRM_DEBUG_DRIVER("CZ clock rate: %d kHz\n", dev_priv->czclk_freq);
+ drm_dbg(&dev_priv->drm, "CZ clock rate: %d kHz\n",
+ dev_priv->czclk_freq);
}
static inline u32 /* units of 100MHz */
@@ -518,13 +518,11 @@ static void
skl_wa_827(struct drm_i915_private *dev_priv, enum pipe pipe, bool enable)
{
if (enable)
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) |
- DUPS1_GATING_DIS | DUPS2_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DUPS1_GATING_DIS | DUPS2_GATING_DIS);
else
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) &
- ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~(DUPS1_GATING_DIS | DUPS2_GATING_DIS));
}
/* Wa_2006604312:icl */
@@ -533,11 +531,11 @@ icl_wa_scalerclkgating(struct drm_i915_private *dev_priv, enum pipe pipe,
bool enable)
{
if (enable)
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) | DPFR_GATING_DIS);
else
- I915_WRITE(CLKGATE_DIS_PSL(pipe),
- I915_READ(CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe),
+ intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe)) & ~DPFR_GATING_DIS);
}
static bool
@@ -883,7 +881,7 @@ static bool vlv_PLL_is_optimal(struct drm_device *dev, int target_freq,
return calculated_clock->p > best_clock->p;
}
- if (WARN_ON_ONCE(!target_freq))
+ if (drm_WARN_ON_ONCE(dev, !target_freq))
return false;
*error_ppm = div_u64(1000000ULL *
@@ -1049,9 +1047,9 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv,
else
line_mask = DSL_LINEMASK_GEN3;
- line1 = I915_READ(reg) & line_mask;
+ line1 = intel_de_read(dev_priv, reg) & line_mask;
msleep(5);
- line2 = I915_READ(reg) & line_mask;
+ line2 = intel_de_read(dev_priv, reg) & line_mask;
return line1 != line2;
}
@@ -1063,8 +1061,9 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state)
/* Wait for the display line to settle/start moving */
if (wait_for(pipe_scanline_is_moving(dev_priv, pipe) == state, 100))
- DRM_ERROR("pipe %c scanline %s wait timed out\n",
- pipe_name(pipe), onoff(state));
+ drm_err(&dev_priv->drm,
+ "pipe %c scanline %s wait timed out\n",
+ pipe_name(pipe), onoff(state));
}
static void intel_wait_for_pipe_scanline_stopped(struct intel_crtc *crtc)
@@ -1090,7 +1089,8 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state)
/* Wait for the Pipe State to go off */
if (intel_de_wait_for_clear(dev_priv, reg,
I965_PIPECONF_ACTIVE, 100))
- WARN(1, "pipe_off wait timed out\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "pipe_off wait timed out\n");
} else {
intel_wait_for_pipe_scanline_stopped(crtc);
}
@@ -1103,7 +1103,7 @@ void assert_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(DPLL(pipe));
+ val = intel_de_read(dev_priv, DPLL(pipe));
cur_state = !!(val & DPLL_VCO_ENABLE);
I915_STATE_WARN(cur_state != state,
"PLL state assertion failure (expected %s, current %s)\n",
@@ -1139,10 +1139,11 @@ static void assert_fdi_tx(struct drm_i915_private *dev_priv,
* so pipe->transcoder cast is fine here.
*/
enum transcoder cpu_transcoder = (enum transcoder)pipe;
- u32 val = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
cur_state = !!(val & TRANS_DDI_FUNC_ENABLE);
} else {
- u32 val = I915_READ(FDI_TX_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
cur_state = !!(val & FDI_TX_ENABLE);
}
I915_STATE_WARN(cur_state != state,
@@ -1158,7 +1159,7 @@ static void assert_fdi_rx(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(FDI_RX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX state assertion failure (expected %s, current %s)\n",
@@ -1180,7 +1181,7 @@ static void assert_fdi_tx_pll_enabled(struct drm_i915_private *dev_priv,
if (HAS_DDI(dev_priv))
return;
- val = I915_READ(FDI_TX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_TX_CTL(pipe));
I915_STATE_WARN(!(val & FDI_TX_PLL_ENABLE), "FDI TX PLL assertion failure, should be active but is disabled\n");
}
@@ -1190,7 +1191,7 @@ void assert_fdi_rx_pll(struct drm_i915_private *dev_priv,
u32 val;
bool cur_state;
- val = I915_READ(FDI_RX_CTL(pipe));
+ val = intel_de_read(dev_priv, FDI_RX_CTL(pipe));
cur_state = !!(val & FDI_RX_PLL_ENABLE);
I915_STATE_WARN(cur_state != state,
"FDI RX PLL assertion failure (expected %s, current %s)\n",
@@ -1204,14 +1205,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
enum pipe panel_pipe = INVALID_PIPE;
bool locked = true;
- if (WARN_ON(HAS_DDI(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, HAS_DDI(dev_priv)))
return;
if (HAS_PCH_SPLIT(dev_priv)) {
u32 port_sel;
pp_reg = PP_CONTROL(0);
- port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
switch (port_sel) {
case PANEL_PORT_SELECT_LVDS:
@@ -1238,13 +1239,14 @@ void assert_panel_unlocked(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 port_sel;
pp_reg = PP_CONTROL(0);
- port_sel = I915_READ(PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
+ port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(0)) & PANEL_PORT_SELECT_MASK;
- WARN_ON(port_sel != PANEL_PORT_SELECT_LVDS);
+ drm_WARN_ON(&dev_priv->drm,
+ port_sel != PANEL_PORT_SELECT_LVDS);
intel_lvds_port_enabled(dev_priv, LVDS, &panel_pipe);
}
- val = I915_READ(pp_reg);
+ val = intel_de_read(dev_priv, pp_reg);
if (!(val & PANEL_POWER_ON) ||
((val & PANEL_UNLOCK_MASK) == PANEL_UNLOCK_REGS))
locked = false;
@@ -1268,7 +1270,7 @@ void assert_pipe(struct drm_i915_private *dev_priv,
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wakeref) {
- u32 val = I915_READ(PIPECONF(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
intel_display_power_put(dev_priv, power_domain, wakeref);
@@ -1318,7 +1320,7 @@ void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv,
u32 val;
bool enabled;
- val = I915_READ(PCH_TRANSCONF(pipe));
+ val = intel_de_read(dev_priv, PCH_TRANSCONF(pipe));
enabled = !!(val & TRANS_ENABLE);
I915_STATE_WARN(enabled,
"transcoder assertion failed, should be off on pipe %c but is still active\n",
@@ -1392,12 +1394,12 @@ static void _vlv_enable_pll(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- DRM_ERROR("DPLL %d failed to lock\n", pipe);
+ drm_err(&dev_priv->drm, "DPLL %d failed to lock\n", pipe);
}
static void vlv_enable_pll(struct intel_crtc *crtc,
@@ -1414,8 +1416,9 @@ static void vlv_enable_pll(struct intel_crtc *crtc,
if (pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE)
_vlv_enable_pll(crtc, pipe_config);
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
@@ -1442,11 +1445,11 @@ static void _chv_enable_pll(struct intel_crtc *crtc,
udelay(1);
/* Enable PLL */
- I915_WRITE(DPLL(pipe), pipe_config->dpll_hw_state.dpll);
+ intel_de_write(dev_priv, DPLL(pipe), pipe_config->dpll_hw_state.dpll);
/* Check PLL is locked */
if (intel_de_wait_for_set(dev_priv, DPLL(pipe), DPLL_LOCK_VLV, 1))
- DRM_ERROR("PLL %d failed to lock\n", pipe);
+ drm_err(&dev_priv->drm, "PLL %d failed to lock\n", pipe);
}
static void chv_enable_pll(struct intel_crtc *crtc,
@@ -1470,19 +1473,23 @@ static void chv_enable_pll(struct intel_crtc *crtc,
* DPLLCMD is AWOL. Use chicken bits to propagate
* the value from DPLLBMD to either pipe B or C.
*/
- I915_WRITE(CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
- I915_WRITE(DPLL_MD(PIPE_B), pipe_config->dpll_hw_state.dpll_md);
- I915_WRITE(CBR4_VLV, 0);
+ intel_de_write(dev_priv, CBR4_VLV, CBR_DPLLBMD_PIPE(pipe));
+ intel_de_write(dev_priv, DPLL_MD(PIPE_B),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, CBR4_VLV, 0);
dev_priv->chv_dpll_md[pipe] = pipe_config->dpll_hw_state.dpll_md;
/*
* DPLLB VGA mode also seems to cause problems.
* We should always have it disabled.
*/
- WARN_ON((I915_READ(DPLL(PIPE_B)) & DPLL_VGA_MODE_DIS) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) &
+ DPLL_VGA_MODE_DIS) == 0);
} else {
- I915_WRITE(DPLL_MD(pipe), pipe_config->dpll_hw_state.dpll_md);
- POSTING_READ(DPLL_MD(pipe));
+ intel_de_write(dev_priv, DPLL_MD(pipe),
+ pipe_config->dpll_hw_state.dpll_md);
+ intel_de_posting_read(dev_priv, DPLL_MD(pipe));
}
}
@@ -1513,29 +1520,29 @@ static void i9xx_enable_pll(struct intel_crtc *crtc,
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(reg, dpll & ~DPLL_VGA_MODE_DIS);
- I915_WRITE(reg, dpll);
+ intel_de_write(dev_priv, reg, dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, reg, dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE(DPLL_MD(crtc->pipe),
- crtc_state->dpll_hw_state.dpll_md);
+ intel_de_write(dev_priv, DPLL_MD(crtc->pipe),
+ crtc_state->dpll_hw_state.dpll_md);
} else {
/* The pixel multiplier can only be updated once the
* DPLL is enabled and the clocks are stable.
*
* So write it again.
*/
- I915_WRITE(reg, dpll);
+ intel_de_write(dev_priv, reg, dpll);
}
/* We do this three times for luck */
for (i = 0; i < 3; i++) {
- I915_WRITE(reg, dpll);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, dpll);
+ intel_de_posting_read(dev_priv, reg);
udelay(150); /* wait for warmup */
}
}
@@ -1553,8 +1560,8 @@ static void i9xx_disable_pll(const struct intel_crtc_state *crtc_state)
/* Make sure the pipe isn't still relying on us */
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
- I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1569,8 +1576,8 @@ static void vlv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
@@ -1586,8 +1593,8 @@ static void chv_disable_pll(struct drm_i915_private *dev_priv, enum pipe pipe)
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), val);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
vlv_dpio_get(dev_priv);
@@ -1626,9 +1633,11 @@ void vlv_wait_port_ready(struct drm_i915_private *dev_priv,
if (intel_de_wait_for_register(dev_priv, dpll_reg,
port_mask, expected_mask, 1000))
- WARN(1, "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
- dport->base.base.base.id, dport->base.base.name,
- I915_READ(dpll_reg) & port_mask, expected_mask);
+ drm_WARN(&dev_priv->drm, 1,
+ "timed out waiting for [ENCODER:%d:%s] port ready: got 0x%x, expected 0x%x\n",
+ dport->base.base.base.id, dport->base.base.name,
+ intel_de_read(dev_priv, dpll_reg) & port_mask,
+ expected_mask);
}
static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
@@ -1648,7 +1657,7 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
if (HAS_PCH_CPT(dev_priv)) {
reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
/*
* Workaround: Set the timing override bit
* before enabling the pch transcoder.
@@ -1657,12 +1666,12 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
reg = PCH_TRANSCONF(pipe);
- val = I915_READ(reg);
- pipeconf_val = I915_READ(PIPECONF(pipe));
+ val = intel_de_read(dev_priv, reg);
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(pipe));
if (HAS_PCH_IBX(dev_priv)) {
/* Configure frame start delay to match the CPU */
@@ -1692,9 +1701,10 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state)
val |= TRANS_PROGRESSIVE;
}
- I915_WRITE(reg, val | TRANS_ENABLE);
+ intel_de_write(dev_priv, reg, val | TRANS_ENABLE);
if (intel_de_wait_for_set(dev_priv, reg, TRANS_STATE_ENABLE, 100))
- DRM_ERROR("failed to enable transcoder %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "failed to enable transcoder %c\n",
+ pipe_name(pipe));
}
static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1706,16 +1716,16 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_fdi_tx_enabled(dev_priv, (enum pipe) cpu_transcoder);
assert_fdi_rx_enabled(dev_priv, PIPE_A);
- val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
/* Workaround: set timing override bit. */
val |= TRANS_CHICKEN2_TIMING_OVERRIDE;
/* Configure frame start delay to match the CPU */
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
val = TRANS_ENABLE;
- pipeconf_val = I915_READ(PIPECONF(cpu_transcoder));
+ pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder));
if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) ==
PIPECONF_INTERLACED_ILK)
@@ -1723,10 +1733,10 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv,
else
val |= TRANS_PROGRESSIVE;
- I915_WRITE(LPT_TRANSCONF, val);
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 100))
- DRM_ERROR("Failed to enable PCH transcoder\n");
+ drm_err(&dev_priv->drm, "Failed to enable PCH transcoder\n");
}
static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
@@ -1743,19 +1753,20 @@ static void ilk_disable_pch_transcoder(struct drm_i915_private *dev_priv,
assert_pch_ports_disabled(dev_priv, pipe);
reg = PCH_TRANSCONF(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_ENABLE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, reg, TRANS_STATE_ENABLE, 50))
- DRM_ERROR("failed to disable transcoder %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "failed to disable transcoder %c\n",
+ pipe_name(pipe));
if (HAS_PCH_CPT(dev_priv)) {
/* Workaround: Clear the timing override chicken bit again. */
reg = TRANS_CHICKEN2(pipe);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -1763,18 +1774,18 @@ void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(LPT_TRANSCONF);
+ val = intel_de_read(dev_priv, LPT_TRANSCONF);
val &= ~TRANS_ENABLE;
- I915_WRITE(LPT_TRANSCONF, val);
+ intel_de_write(dev_priv, LPT_TRANSCONF, val);
/* wait for PCH transcoder off, transcoder state */
if (intel_de_wait_for_clear(dev_priv, LPT_TRANSCONF,
TRANS_STATE_ENABLE, 50))
- DRM_ERROR("Failed to disable PCH transcoder\n");
+ drm_err(&dev_priv->drm, "Failed to disable PCH transcoder\n");
/* Workaround: clear timing override bit. */
- val = I915_READ(TRANS_CHICKEN2(PIPE_A));
+ val = intel_de_read(dev_priv, TRANS_CHICKEN2(PIPE_A));
val &= ~TRANS_CHICKEN2_TIMING_OVERRIDE;
- I915_WRITE(TRANS_CHICKEN2(PIPE_A), val);
+ intel_de_write(dev_priv, TRANS_CHICKEN2(PIPE_A), val);
}
enum pipe intel_crtc_pch_transcoder(struct intel_crtc *crtc)
@@ -1807,7 +1818,7 @@ static u32 intel_crtc_max_vblank_count(const struct intel_crtc_state *crtc_state
return 0; /* Gen2 doesn't have a hardware frame counter */
}
-static void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
@@ -1825,7 +1836,7 @@ void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state)
assert_vblank_disabled(&crtc->base);
}
-static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(new_crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -1834,7 +1845,7 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
i915_reg_t reg;
u32 val;
- DRM_DEBUG_KMS("enabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "enabling pipe %c\n", pipe_name(pipe));
assert_planes_disabled(crtc);
@@ -1862,15 +1873,15 @@ static void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state)
trace_intel_pipe_enable(crtc);
reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (val & PIPECONF_ENABLE) {
/* we keep both pipes enabled on 830 */
- WARN_ON(!IS_I830(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_I830(dev_priv));
return;
}
- I915_WRITE(reg, val | PIPECONF_ENABLE);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val | PIPECONF_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
/*
* Until the pipe starts PIPEDSL reads will return a stale value,
@@ -1892,7 +1903,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
i915_reg_t reg;
u32 val;
- DRM_DEBUG_KMS("disabling pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c\n", pipe_name(pipe));
/*
* Make sure planes won't keep trying to pump pixels to us,
@@ -1903,7 +1914,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
trace_intel_pipe_disable(crtc);
reg = PIPECONF(cpu_transcoder);
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if ((val & PIPECONF_ENABLE) == 0)
return;
@@ -1918,7 +1929,7 @@ void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state)
if (!IS_I830(dev_priv))
val &= ~PIPECONF_ENABLE;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
if ((val & PIPECONF_ENABLE) == 0)
intel_wait_for_pipe_off(old_crtc_state);
}
@@ -2211,11 +2222,11 @@ intel_pin_and_fence_fb_obj(struct drm_framebuffer *fb,
unsigned int pinctl;
u32 alignment;
- if (WARN_ON(!i915_gem_object_is_framebuffer(obj)))
+ if (drm_WARN_ON(dev, !i915_gem_object_is_framebuffer(obj)))
return ERR_PTR(-EINVAL);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(dev, alignment && !is_power_of_2(alignment)))
return ERR_PTR(-EINVAL);
/* Note that the w/a also requires 64 PTE of padding following the
@@ -2386,7 +2397,7 @@ static u32 intel_adjust_aligned_offset(int *x, int *y,
struct drm_i915_private *dev_priv = to_i915(fb->dev);
unsigned int cpp = fb->format->cpp[color_plane];
- WARN_ON(new_offset > old_offset);
+ drm_WARN_ON(&dev_priv->drm, new_offset > old_offset);
if (!is_surface_linear(fb, color_plane)) {
unsigned int tile_size, tile_width, tile_height;
@@ -2537,8 +2548,9 @@ static int intel_fb_offset_to_xy(int *x, int *y,
alignment = 0;
if (alignment != 0 && fb->offsets[color_plane] % alignment) {
- DRM_DEBUG_KMS("Misaligned offset 0x%08x for color plane %d\n",
- fb->offsets[color_plane], color_plane);
+ drm_dbg_kms(&dev_priv->drm,
+ "Misaligned offset 0x%08x for color plane %d\n",
+ fb->offsets[color_plane], color_plane);
return -EINVAL;
}
@@ -2548,9 +2560,10 @@ static int intel_fb_offset_to_xy(int *x, int *y,
/* Catch potential overflows early */
if (add_overflows_t(u32, mul_u32_u32(height, fb->pitches[color_plane]),
fb->offsets[color_plane])) {
- DRM_DEBUG_KMS("Bad offset 0x%08x or pitch %d for color plane %d\n",
- fb->offsets[color_plane], fb->pitches[color_plane],
- color_plane);
+ drm_dbg_kms(&dev_priv->drm,
+ "Bad offset 0x%08x or pitch %d for color plane %d\n",
+ fb->offsets[color_plane], fb->pitches[color_plane],
+ color_plane);
return -ERANGE;
}
@@ -2706,9 +2719,10 @@ u32 intel_plane_fb_max_stride(struct drm_i915_private *dev_priv,
/*
* We assume the primary plane for pipe A has
- * the highest stride limits of them all.
+ * the highest stride limits of them all;
+ * if pipe A is disabled, use the first pipe from pipe_mask.
*/
- crtc = intel_get_crtc_for_pipe(dev_priv, PIPE_A);
+ crtc = intel_get_first_crtc(dev_priv);
if (!crtc)
return 0;
@@ -3034,8 +3048,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
ret = intel_fb_offset_to_xy(&x, &y, fb, i);
if (ret) {
- DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ drm_dbg_kms(&dev_priv->drm,
+ "bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return ret;
}
@@ -3054,8 +3069,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
*/
if (i == 0 && i915_gem_object_is_tiled(obj) &&
(x + width) * cpp > fb->pitches[i]) {
- DRM_DEBUG_KMS("bad fb plane %d offset: 0x%x\n",
- i, fb->offsets[i]);
+ drm_dbg_kms(&dev_priv->drm,
+ "bad fb plane %d offset: 0x%x\n",
+ i, fb->offsets[i]);
return -EINVAL;
}
@@ -3111,8 +3127,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
}
if (mul_u32_u32(max_size, tile_size) > obj->base.size) {
- DRM_DEBUG_KMS("fb too big for bo (need %llu bytes, have %zu bytes)\n",
- mul_u32_u32(max_size, tile_size), obj->base.size);
+ drm_dbg_kms(&dev_priv->drm,
+ "fb too big for bo (need %llu bytes, have %zu bytes)\n",
+ mul_u32_u32(max_size, tile_size), obj->base.size);
return -EINVAL;
}
@@ -3143,7 +3160,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- WARN_ON(is_ccs_modifier(fb->modifier));
+ drm_WARN_ON(&dev_priv->drm, is_ccs_modifier(fb->modifier));
/* Make src coordinates relative to the viewport */
drm_rect_translate(&plane_state->uapi.src,
@@ -3184,7 +3201,7 @@ intel_plane_remap_gtt(struct intel_plane_state *plane_state)
DRM_MODE_ROTATE_0, tile_size);
offset /= tile_size;
- WARN_ON(i >= ARRAY_SIZE(info->plane));
+ drm_WARN_ON(&dev_priv->drm, i >= ARRAY_SIZE(info->plane));
info->plane[i].offset = offset;
info->plane[i].stride = DIV_ROUND_UP(fb->pitches[i],
tile_width * cpp);
@@ -3377,6 +3394,67 @@ int skl_format_to_fourcc(int format, bool rgb_order, bool alpha)
}
}
+static struct i915_vma *
+initial_plane_vma(struct drm_i915_private *i915,
+ struct intel_initial_plane_config *plane_config)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 base, size;
+
+ if (plane_config->size == 0)
+ return NULL;
+
+ base = round_down(plane_config->base,
+ I915_GTT_MIN_ALIGNMENT);
+ size = round_up(plane_config->base + plane_config->size,
+ I915_GTT_MIN_ALIGNMENT);
+ size -= base;
+
+ /*
+ * If the FB is too big, just don't use it since fbdev is not very
+ * important and we should probably use that space with FBC or other
+ * features.
+ */
+ if (size * 2 > i915->stolen_usable_size)
+ return NULL;
+
+ obj = i915_gem_object_create_stolen_for_preallocated(i915, base, size);
+ if (IS_ERR(obj))
+ return NULL;
+
+ switch (plane_config->tiling) {
+ case I915_TILING_NONE:
+ break;
+ case I915_TILING_X:
+ case I915_TILING_Y:
+ obj->tiling_and_stride =
+ plane_config->fb->base.pitches[0] |
+ plane_config->tiling;
+ break;
+ default:
+ MISSING_CASE(plane_config->tiling);
+ goto err_obj;
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
+ if (IS_ERR(vma))
+ goto err_obj;
+
+ if (i915_ggtt_pin(vma, 0, PIN_MAPPABLE | PIN_OFFSET_FIXED | base))
+ goto err_obj;
+
+ if (i915_gem_object_is_tiled(obj) &&
+ !i915_vma_is_map_and_fenceable(vma))
+ goto err_obj;
+
+ return vma;
+
+err_obj:
+ i915_gem_object_put(obj);
+ return NULL;
+}
+
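The base/size rounding above widens the BIOS-programmed plane window to GTT alignment before claiming the stolen range. With a hypothetical I915_GTT_MIN_ALIGNMENT of 4 KiB and a plane at base 0x1080 spanning 0x3000 bytes:

u32 base = round_down(0x1080, 0x1000);         /* 0x1000 */
u32 size = round_up(0x1080 + 0x3000, 0x1000);  /* 0x5000 */
size -= base;                                  /* 0x4000, covering 0x1080..0x4080 */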
static bool
intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct intel_initial_plane_config *plane_config)
@@ -3385,22 +3463,7 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_mode_fb_cmd2 mode_cmd = { 0 };
struct drm_framebuffer *fb = &plane_config->fb->base;
- u32 base_aligned = round_down(plane_config->base, PAGE_SIZE);
- u32 size_aligned = round_up(plane_config->base + plane_config->size,
- PAGE_SIZE);
- struct drm_i915_gem_object *obj;
- bool ret = false;
-
- size_aligned -= base_aligned;
-
- if (plane_config->size == 0)
- return false;
-
- /* If the FB is too big, just don't use it since fbdev is not very
- * important and we should probably use that space with FBC or other
- * features. */
- if (size_aligned * 2 > dev_priv->stolen_usable_size)
- return false;
+ struct i915_vma *vma;
switch (fb->modifier) {
case DRM_FORMAT_MOD_LINEAR:
@@ -3408,30 +3471,16 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
case I915_FORMAT_MOD_Y_TILED:
break;
default:
- DRM_DEBUG_DRIVER("Unsupported modifier for initial FB: 0x%llx\n",
- fb->modifier);
+ drm_dbg(&dev_priv->drm,
+ "Unsupported modifier for initial FB: 0x%llx\n",
+ fb->modifier);
return false;
}
- obj = i915_gem_object_create_stolen_for_preallocated(dev_priv,
- base_aligned,
- base_aligned,
- size_aligned);
- if (IS_ERR(obj))
+ vma = initial_plane_vma(dev_priv, plane_config);
+ if (!vma)
return false;
- switch (plane_config->tiling) {
- case I915_TILING_NONE:
- break;
- case I915_TILING_X:
- case I915_TILING_Y:
- obj->tiling_and_stride = fb->pitches[0] | plane_config->tiling;
- break;
- default:
- MISSING_CASE(plane_config->tiling);
- goto out;
- }
-
mode_cmd.pixel_format = fb->format->format;
mode_cmd.width = fb->width;
mode_cmd.height = fb->height;
@@ -3439,17 +3488,18 @@ intel_alloc_initial_plane_obj(struct intel_crtc *crtc,
mode_cmd.modifier[0] = fb->modifier;
mode_cmd.flags = DRM_MODE_FB_MODIFIERS;
- if (intel_framebuffer_init(to_intel_framebuffer(fb), obj, &mode_cmd)) {
- DRM_DEBUG_KMS("intel fb init failed\n");
- goto out;
+ if (intel_framebuffer_init(to_intel_framebuffer(fb),
+ vma->obj, &mode_cmd)) {
+ drm_dbg_kms(&dev_priv->drm, "intel fb init failed\n");
+ goto err_vma;
}
+ plane_config->vma = vma;
+ return true;
- DRM_DEBUG_KMS("initial plane fb obj %p\n", obj);
- ret = true;
-out:
- i915_gem_object_put(obj);
- return ret;
+err_vma:
+ i915_vma_put(vma);
+ return false;
}
static void
@@ -3493,9 +3543,10 @@ static void intel_plane_disable_noatomic(struct intel_crtc *crtc,
struct intel_plane_state *plane_state =
to_intel_plane_state(plane->base.state);
- DRM_DEBUG_KMS("Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
- plane->base.base.id, plane->base.name,
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabling [PLANE:%d:%s] on [CRTC:%d:%s]\n",
+ plane->base.base.id, plane->base.name,
+ crtc->base.base.id, crtc->base.name);
intel_set_plane_visible(crtc_state, plane_state, false);
fixup_active_planes(crtc_state);
@@ -3547,17 +3598,17 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
struct intel_plane_state *intel_state =
to_intel_plane_state(plane_state);
struct drm_framebuffer *fb;
+ struct i915_vma *vma;
if (!plane_config->fb)
return;
if (intel_alloc_initial_plane_obj(intel_crtc, plane_config)) {
fb = &plane_config->fb->base;
+ vma = plane_config->vma;
goto valid_fb;
}
- kfree(plane_config->fb);
-
/*
* Failed to alloc the obj, check to see if we should share
* an fb with another CRTC instead
@@ -3577,7 +3628,7 @@ intel_find_initial_plane_obj(struct intel_crtc *intel_crtc,
if (intel_plane_ggtt_offset(state) == plane_config->base) {
fb = state->hw.fb;
- drm_framebuffer_get(fb);
+ vma = state->vma;
goto valid_fb;
}
}
@@ -3600,21 +3651,11 @@ valid_fb:
intel_state->color_plane[0].stride =
intel_fb_pitch(fb, 0, intel_state->hw.rotation);
- intel_state->vma =
- intel_pin_and_fence_fb_obj(fb,
- &intel_state->view,
- intel_plane_uses_fence(intel_state),
- &intel_state->flags);
- if (IS_ERR(intel_state->vma)) {
- DRM_ERROR("failed to pin boot fb on pipe %d: %li\n",
- intel_crtc->pipe, PTR_ERR(intel_state->vma));
-
- intel_state->vma = NULL;
- drm_framebuffer_put(fb);
- return;
- }
-
- intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+ __i915_vma_pin(vma);
+ intel_state->vma = i915_vma_get(vma);
+ if (intel_plane_uses_fence(intel_state) && i915_vma_pin_fence(vma) == 0)
+ if (vma->fence)
+ intel_state->flags |= PLANE_HAS_FENCE;
plane_state->src_x = 0;
plane_state->src_y = 0;
@@ -3633,9 +3674,13 @@ valid_fb:
dev_priv->preserve_bios_swizzle = true;
plane_state->fb = fb;
+ drm_framebuffer_get(fb);
+
plane_state->crtc = &intel_crtc->base;
intel_plane_copy_uapi_to_hw_state(intel_state, intel_state);
+ intel_frontbuffer_flush(to_intel_frontbuffer(fb), ORIGIN_DIRTYFB);
+
atomic_or(to_intel_plane(primary)->frontbuffer_bit,
&to_intel_frontbuffer(fb)->bits);
}
@@ -3798,15 +3843,16 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
max_height = skl_max_plane_height();
if (w > max_width || h > max_height) {
- DRM_DEBUG_KMS("requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ drm_dbg_kms(&dev_priv->drm,
+ "requested Y/RGB source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
return -EINVAL;
}
intel_add_fb_offsets(&x, &y, plane_state, 0);
offset = intel_plane_compute_aligned_offset(&x, &y, plane_state, 0);
alignment = intel_surf_alignment(fb, 0);
- if (WARN_ON(alignment && !is_power_of_2(alignment)))
+ if (drm_WARN_ON(&dev_priv->drm, alignment && !is_power_of_2(alignment)))
return -EINVAL;
/*
@@ -3829,7 +3875,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
while ((x + w) * cpp > plane_state->color_plane[0].stride) {
if (offset == 0) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to X-tiling\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to X-tiling\n");
return -EINVAL;
}
@@ -3854,7 +3901,8 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
if (x != plane_state->color_plane[aux_plane].x ||
y != plane_state->color_plane[aux_plane].y) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
@@ -3875,6 +3923,7 @@ static int skl_check_main_surface(struct intel_plane_state *plane_state)
static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
{
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
unsigned int rotation = plane_state->hw.rotation;
int uv_plane = 1;
@@ -3892,8 +3941,9 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
/* FIXME not quite sure how/if these apply to the chroma plane */
if (w > max_width || h > max_height) {
- DRM_DEBUG_KMS("CbCr source size %dx%d too big (limit %dx%d)\n",
- w, h, max_width, max_height);
+ drm_dbg_kms(&i915->drm,
+ "CbCr source size %dx%d too big (limit %dx%d)\n",
+ w, h, max_width, max_height);
return -EINVAL;
}
@@ -3922,7 +3972,8 @@ static int skl_check_nv12_aux_surface(struct intel_plane_state *plane_state)
if (x != plane_state->color_plane[ccs_plane].x ||
y != plane_state->color_plane[ccs_plane].y) {
- DRM_DEBUG_KMS("Unable to find suitable display surface offset due to CCS\n");
+ drm_dbg_kms(&i915->drm,
+ "Unable to find suitable display surface offset due to CCS\n");
return -EINVAL;
}
}
@@ -4331,7 +4382,8 @@ static void i9xx_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPSTRIDE(i9xx_plane), plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, DSPSTRIDE(i9xx_plane),
+ plane_state->color_plane[0].stride);
if (INTEL_GEN(dev_priv) < 4) {
/*
@@ -4339,21 +4391,26 @@ static void i9xx_update_plane(struct intel_plane *plane,
* generator but let's assume we still need to
* program whatever is there.
*/
- I915_WRITE_FW(DSPPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(DSPSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
} else if (IS_CHERRYVIEW(dev_priv) && i9xx_plane == PLANE_B) {
- I915_WRITE_FW(PRIMPOS(i9xx_plane), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(PRIMSIZE(i9xx_plane),
- ((crtc_h - 1) << 16) | (crtc_w - 1));
- I915_WRITE_FW(PRIMCNSTALPHA(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane),
+ ((crtc_h - 1) << 16) | (crtc_w - 1));
+ intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0);
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(DSPOFFSET(i9xx_plane), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane),
+ (y << 16) | x);
} else if (INTEL_GEN(dev_priv) >= 4) {
- I915_WRITE_FW(DSPLINOFF(i9xx_plane), linear_offset);
- I915_WRITE_FW(DSPTILEOFF(i9xx_plane), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane),
+ linear_offset);
+ intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane),
+ (y << 16) | x);
}
/*
@@ -4361,15 +4418,13 @@ static void i9xx_update_plane(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE_FW(DSPSURF(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
else
- I915_WRITE_FW(DSPADDR(i9xx_plane),
- intel_plane_ggtt_offset(plane_state) +
- dspaddr_offset);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane),
+ intel_plane_ggtt_offset(plane_state) + dspaddr_offset);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -4396,11 +4451,11 @@ static void i9xx_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DSPCNTR(i9xx_plane), dspcntr);
+ intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE_FW(DSPSURF(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, DSPSURF(i9xx_plane), 0);
else
- I915_WRITE_FW(DSPADDR(i9xx_plane), 0);
+ intel_de_write_fw(dev_priv, DSPADDR(i9xx_plane), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -4425,7 +4480,7 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- val = I915_READ(DSPCNTR(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
ret = val & DISPLAY_PLANE_ENABLE;
@@ -4444,10 +4499,15 @@ static void skl_detach_scaler(struct intel_crtc *intel_crtc, int id)
{
struct drm_device *dev = intel_crtc->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
+ unsigned long irqflags;
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_CTRL(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_WIN_POS(intel_crtc->pipe, id), 0);
- I915_WRITE(SKL_PS_WIN_SZ(intel_crtc->pipe, id), 0);
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
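intel_de_write_fw() maps to intel_uncore_write_fw(), which skips the per-write locking of the plain accessor, so skl_detach_scaler() now takes uncore.lock itself and the three scaler writes share a single lock round-trip; i9xx_update_plane() earlier in this file uses the same pattern. In general (reg/val names hypothetical):

spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
intel_de_write_fw(dev_priv, reg_a, val_a);  /* no implicit locking per write */
intel_de_write_fw(dev_priv, reg_b, val_b);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);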
/*
@@ -4791,7 +4851,7 @@ __intel_display_resume(struct drm_device *dev,
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
- WARN_ON(ret == -EDEADLK);
+ drm_WARN_ON(dev, ret == -EDEADLK);
return ret;
}
@@ -4819,7 +4879,8 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
wake_up_bit(&dev_priv->gt.reset.flags, I915_RESET_MODESET);
if (atomic_read(&dev_priv->gpu_error.pending_fb_pin)) {
- DRM_DEBUG_KMS("Modeset potentially stuck, unbreaking through wedging\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Modeset potentially stuck, unbreaking through wedging\n");
intel_gt_set_wedged(&dev_priv->gt);
}
@@ -4843,13 +4904,15 @@ void intel_prepare_reset(struct drm_i915_private *dev_priv)
state = drm_atomic_helper_duplicate_state(dev, ctx);
if (IS_ERR(state)) {
ret = PTR_ERR(state);
- DRM_ERROR("Duplicating state failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Duplicating state failed with %i\n",
+ ret);
return;
}
ret = drm_atomic_helper_disable_all(dev, ctx);
if (ret) {
- DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
drm_atomic_state_put(state);
return;
}
@@ -4878,7 +4941,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
/* for testing only restore the display */
ret = __intel_display_resume(dev, state, ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
} else {
/*
* The display has been reset as well,
@@ -4895,7 +4959,8 @@ void intel_finish_reset(struct drm_i915_private *dev_priv)
ret = __intel_display_resume(dev, state, ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
intel_hpd_init(dev_priv);
}
@@ -4915,7 +4980,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
enum pipe pipe = crtc->pipe;
u32 tmp;
- tmp = I915_READ(PIPE_CHICKEN(pipe));
+ tmp = intel_de_read(dev_priv, PIPE_CHICKEN(pipe));
/*
* Display WA #1153: icl
@@ -4930,7 +4995,7 @@ static void icl_set_pipe_chicken(struct intel_crtc *crtc)
* across pipe
*/
tmp |= PIXEL_ROUNDING_TRUNC_FB_PASSTHRU;
- I915_WRITE(PIPE_CHICKEN(pipe), tmp);
+ intel_de_write(dev_priv, PIPE_CHICKEN(pipe), tmp);
}
static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state)
@@ -4959,8 +5024,9 @@ static void icl_enable_trans_port_sync(const struct intel_crtc_state *crtc_state
/* Enable Transcoder Port Sync */
trans_ddi_func_ctl2_val |= PORT_SYNC_MODE_ENABLE;
- I915_WRITE(TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
- trans_ddi_func_ctl2_val);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL2(crtc_state->cpu_transcoder),
+ trans_ddi_func_ctl2_val);
}
static void intel_fdi_normal_train(struct intel_crtc *crtc)
@@ -4973,7 +5039,7 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
/* enable normal train */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (IS_IVYBRIDGE(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_NONE_IVB | FDI_TX_ENHANCE_FRAME_ENABLE;
@@ -4981,10 +5047,10 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE | FDI_TX_ENHANCE_FRAME_ENABLE;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_NORMAL_CPT;
@@ -4992,16 +5058,16 @@ static void intel_fdi_normal_train(struct intel_crtc *crtc)
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_NONE;
}
- I915_WRITE(reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENHANCE_FRAME_ENABLE);
/* wait one idle pattern time */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(1000);
/* IVB wants error correction enabled */
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE(reg, I915_READ(reg) | FDI_FS_ERRC_ENABLE |
- FDI_FE_ERRC_ENABLE);
+ intel_de_write(dev_priv, reg,
+ intel_de_read(dev_priv, reg) | FDI_FS_ERRC_ENABLE | FDI_FE_ERRC_ENABLE);
}
/* The FDI link training functions for ILK/Ibexpeak. */
@@ -5020,81 +5086,83 @@ static void ilk_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
- I915_READ(reg);
+ intel_de_write(dev_priv, reg, temp);
+ intel_de_read(dev_priv, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
/* Ironlake workaround, enable clock pointer after FDI enable */
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR |
- FDI_RX_PHASE_SYNC_POINTER_EN);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR | FDI_RX_PHASE_SYNC_POINTER_EN);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if ((temp & FDI_RX_BIT_LOCK)) {
- DRM_DEBUG_KMS("FDI train 1 done.\n");
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg, temp | FDI_RX_BIT_LOCK);
break;
}
}
if (tries == 5)
- DRM_ERROR("FDI train 1 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
reg = FDI_RX_IIR(pipe);
for (tries = 0; tries < 5; tries++) {
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm, "FDI train 2 done.\n");
break;
}
}
if (tries == 5)
- DRM_ERROR("FDI train 2 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
- DRM_DEBUG_KMS("FDI train done\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done\n");
}
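
Both training phases above use the same bounded poll-and-ack loop: read FDI_RX_IIR up to five times and, on seeing the lock bit, write the bit back to acknowledge it. A hedged sketch of that idiom; read_iir() and ack_iir() are hypothetical stand-ins for the intel_de_read()/intel_de_write() calls on FDI_RX_IIR:

#define TRAIN_TRIES 5

static bool fdi_wait_for_lock(unsigned int lock_bit)
{
	int tries;

	for (tries = 0; tries < TRAIN_TRIES; tries++) {
		unsigned int iir = read_iir();

		if (iir & lock_bit) {
			ack_iir(iir | lock_bit);	/* write the sticky bit back to clear it */
			return true;
		}
	}
	return false;	/* caller logs "FDI train N fail!" */
}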
@@ -5118,17 +5186,17 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp &= ~FDI_LINK_TRAIN_NONE;
@@ -5136,13 +5204,13 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5150,28 +5218,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
}
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done.\n");
break;
}
udelay(50);
@@ -5180,11 +5250,11 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- DRM_ERROR("FDI train 1 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 1 fail!\n");
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
if (IS_GEN(dev_priv, 6)) {
@@ -5192,10 +5262,10 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
/* SNB-B */
temp |= FDI_LINK_TRAIN_400MV_0DB_SNB_B;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
@@ -5203,28 +5273,30 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_2;
}
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
for (i = 0; i < 4; i++) {
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[i];
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(500);
for (retry = 0; retry < 5; retry++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done.\n");
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done.\n");
break;
}
udelay(50);
@@ -5233,9 +5305,9 @@ static void gen6_fdi_link_train(struct intel_crtc *crtc,
break;
}
if (i == 4)
- DRM_ERROR("FDI train 2 fail!\n");
+ drm_err(&dev_priv->drm, "FDI train 2 fail!\n");
- DRM_DEBUG_KMS("FDI train done.\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
/* Manual link training for Ivy Bridge A0 parts */
@@ -5251,111 +5323,117 @@ static void ivb_manual_fdi_link_train(struct intel_crtc *crtc,
/* Train 1: unmask FDI RX Interrupt symbol_lock and bit_lock bit
for train result */
reg = FDI_RX_IMR(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_RX_SYMBOL_LOCK;
temp &= ~FDI_RX_BIT_LOCK;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(150);
- DRM_DEBUG_KMS("FDI_RX_IIR before link train 0x%x\n",
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR before link train 0x%x\n",
+ intel_de_read(dev_priv, FDI_RX_IIR(pipe)));
/* Try each vswing and preemphasis setting twice before moving on */
for (j = 0; j < ARRAY_SIZE(snb_b_fdi_train_param) * 2; j++) {
/* disable first in case we need to retry */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_LINK_TRAIN_AUTO | FDI_LINK_TRAIN_NONE_IVB);
temp &= ~FDI_TX_ENABLE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_AUTO;
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp &= ~FDI_RX_ENABLE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
/* enable CPU FDI TX and PCH FDI RX */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_DP_PORT_WIDTH_MASK;
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
temp |= FDI_LINK_TRAIN_PATTERN_1_IVB;
temp &= ~FDI_LINK_TRAIN_VOL_EMP_MASK;
temp |= snb_b_fdi_train_param[j/2];
temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_TX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_ENABLE);
- I915_WRITE(FDI_RX_MISC(pipe),
- FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
+ intel_de_write(dev_priv, FDI_RX_MISC(pipe),
+ FDI_RX_TP1_TO_TP2_48 | FDI_RX_FDI_DELAY_90);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
temp |= FDI_COMPOSITE_SYNC;
- I915_WRITE(reg, temp | FDI_RX_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(1); /* should be 0.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_BIT_LOCK ||
- (I915_READ(reg) & FDI_RX_BIT_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_BIT_LOCK);
- DRM_DEBUG_KMS("FDI train 1 done, level %i.\n",
- i);
+ (intel_de_read(dev_priv, reg) & FDI_RX_BIT_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_BIT_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 done, level %i.\n",
+ i);
break;
}
udelay(1); /* should be 0.5us */
}
if (i == 4) {
- DRM_DEBUG_KMS("FDI train 1 fail on vswing %d\n", j / 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 1 fail on vswing %d\n", j / 2);
continue;
}
/* Train 2 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE_IVB;
temp |= FDI_LINK_TRAIN_PATTERN_2_IVB;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_2_CPT;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(2); /* should be 1.5us */
for (i = 0; i < 4; i++) {
reg = FDI_RX_IIR(pipe);
- temp = I915_READ(reg);
- DRM_DEBUG_KMS("FDI_RX_IIR 0x%x\n", temp);
+ temp = intel_de_read(dev_priv, reg);
+ drm_dbg_kms(&dev_priv->drm, "FDI_RX_IIR 0x%x\n", temp);
if (temp & FDI_RX_SYMBOL_LOCK ||
- (I915_READ(reg) & FDI_RX_SYMBOL_LOCK)) {
- I915_WRITE(reg, temp | FDI_RX_SYMBOL_LOCK);
- DRM_DEBUG_KMS("FDI train 2 done, level %i.\n",
- i);
+ (intel_de_read(dev_priv, reg) & FDI_RX_SYMBOL_LOCK)) {
+ intel_de_write(dev_priv, reg,
+ temp | FDI_RX_SYMBOL_LOCK);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 done, level %i.\n",
+ i);
goto train_done;
}
udelay(2); /* should be 1.5us */
}
if (i == 4)
- DRM_DEBUG_KMS("FDI train 2 fail on vswing %d\n", j / 2);
+ drm_dbg_kms(&dev_priv->drm,
+ "FDI train 2 fail on vswing %d\n", j / 2);
}
train_done:
- DRM_DEBUG_KMS("FDI train done.\n");
+ drm_dbg_kms(&dev_priv->drm, "FDI train done.\n");
}
static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
@@ -5368,29 +5446,29 @@ static void ilk_fdi_pll_enable(const struct intel_crtc_state *crtc_state)
/* enable PCH FDI RX PLL, wait warmup plus DMI latency */
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(FDI_DP_PORT_WIDTH_MASK | (0x7 << 16));
temp |= FDI_DP_PORT_WIDTH(crtc_state->fdi_lanes);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp | FDI_RX_PLL_ENABLE);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp | FDI_RX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Switch from Rawclk to PCDclk */
- temp = I915_READ(reg);
- I915_WRITE(reg, temp | FDI_PCDCLK);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp | FDI_PCDCLK);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(200);
/* Enable CPU FDI TX PLL, always on for Ironlake */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if ((temp & FDI_TX_PLL_ENABLE) == 0) {
- I915_WRITE(reg, temp | FDI_TX_PLL_ENABLE);
+ intel_de_write(dev_priv, reg, temp | FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
}
@@ -5405,23 +5483,23 @@ static void ilk_fdi_pll_disable(struct intel_crtc *intel_crtc)
/* Switch from PCDclk to Rawclk */
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_PCDCLK);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_PCDCLK);
/* Disable CPU FDI TX PLL */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_PLL_ENABLE);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_PLL_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_RX_PLL_ENABLE);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_PLL_ENABLE);
/* Wait for the clocks to turn off. */
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
@@ -5434,32 +5512,33 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
/* disable CPU FDI tx and PCH FDI rx */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
- I915_WRITE(reg, temp & ~FDI_TX_ENABLE);
- POSTING_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
+ intel_de_write(dev_priv, reg, temp & ~FDI_TX_ENABLE);
+ intel_de_posting_read(dev_priv, reg);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(0x7 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp & ~FDI_RX_ENABLE);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp & ~FDI_RX_ENABLE);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
/* Ironlake workaround, disable clock pointer after downing FDI */
if (HAS_PCH_IBX(dev_priv))
- I915_WRITE(FDI_RX_CHICKEN(pipe), FDI_RX_PHASE_SYNC_POINTER_OVR);
+ intel_de_write(dev_priv, FDI_RX_CHICKEN(pipe),
+ FDI_RX_PHASE_SYNC_POINTER_OVR);
/* still set train pattern 1 */
reg = FDI_TX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~FDI_LINK_TRAIN_NONE;
temp |= FDI_LINK_TRAIN_PATTERN_1;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
reg = FDI_RX_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
if (HAS_PCH_CPT(dev_priv)) {
temp &= ~FDI_LINK_TRAIN_PATTERN_MASK_CPT;
temp |= FDI_LINK_TRAIN_PATTERN_1_CPT;
@@ -5469,10 +5548,10 @@ static void ilk_fdi_disable(struct intel_crtc *crtc)
}
/* BPC in FDI rx is consistent with that in PIPECONF */
temp &= ~(0x07 << 16);
- temp |= (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
- I915_WRITE(reg, temp);
+ temp |= (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) << 11;
+ intel_de_write(dev_priv, reg, temp);
- POSTING_READ(reg);
+ intel_de_posting_read(dev_priv, reg);
udelay(100);
}
@@ -5505,7 +5584,7 @@ void lpt_disable_iclkip(struct drm_i915_private *dev_priv)
{
u32 temp;
- I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_GATE);
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_GATE);
mutex_lock(&dev_priv->sb_lock);
@@ -5552,17 +5631,14 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
}
/* This should not happen with any sane values */
- WARN_ON(SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
- ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
- WARN_ON(SBI_SSCDIVINTPHASE_DIR(phasedir) &
- ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
-
- DRM_DEBUG_KMS("iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
- clock,
- auxdiv,
- divsel,
- phasedir,
- phaseinc);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIVSEL(divsel) &
+ ~SBI_SSCDIVINTPHASE_DIVSEL_MASK);
+ drm_WARN_ON(&dev_priv->drm, SBI_SSCDIVINTPHASE_DIR(phasedir) &
+ ~SBI_SSCDIVINTPHASE_INCVAL_MASK);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "iCLKIP clock: found settings for %dKHz refresh rate: auxdiv=%x, divsel=%x, phasedir=%x, phaseinc=%x\n",
+ clock, auxdiv, divsel, phasedir, phaseinc);
mutex_lock(&dev_priv->sb_lock);
@@ -5592,7 +5668,7 @@ static void lpt_program_iclkip(const struct intel_crtc_state *crtc_state)
/* Wait for initialization time */
udelay(24);
- I915_WRITE(PIXCLK_GATE, PIXCLK_GATE_UNGATE);
+ intel_de_write(dev_priv, PIXCLK_GATE, PIXCLK_GATE_UNGATE);
}
int lpt_get_iclkip(struct drm_i915_private *dev_priv)
@@ -5603,7 +5679,7 @@ int lpt_get_iclkip(struct drm_i915_private *dev_priv)
u32 desired_divisor;
u32 temp;
- if ((I915_READ(PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
+ if ((intel_de_read(dev_priv, PIXCLK_GATE) & PIXCLK_GATE_UNGATE) == 0)
return 0;
mutex_lock(&dev_priv->sb_lock);
@@ -5639,41 +5715,46 @@ static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_s
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
- I915_WRITE(PCH_TRANS_HTOTAL(pch_transcoder),
- I915_READ(HTOTAL(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_HBLANK(pch_transcoder),
- I915_READ(HBLANK(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_HSYNC(pch_transcoder),
- I915_READ(HSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, HTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HBLANK(pch_transcoder),
+ intel_de_read(dev_priv, HBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_HSYNC(pch_transcoder),
+ intel_de_read(dev_priv, HSYNC(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VTOTAL(pch_transcoder),
- I915_READ(VTOTAL(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VBLANK(pch_transcoder),
- I915_READ(VBLANK(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VSYNC(pch_transcoder),
- I915_READ(VSYNC(cpu_transcoder)));
- I915_WRITE(PCH_TRANS_VSYNCSHIFT(pch_transcoder),
- I915_READ(VSYNCSHIFT(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VTOTAL(pch_transcoder),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VBLANK(pch_transcoder),
+ intel_de_read(dev_priv, VBLANK(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNC(pch_transcoder),
+ intel_de_read(dev_priv, VSYNC(cpu_transcoder)));
+ intel_de_write(dev_priv, PCH_TRANS_VSYNCSHIFT(pch_transcoder),
+ intel_de_read(dev_priv, VSYNCSHIFT(cpu_transcoder)));
}
static void cpt_set_fdi_bc_bifurcation(struct drm_i915_private *dev_priv, bool enable)
{
u32 temp;
- temp = I915_READ(SOUTH_CHICKEN1);
+ temp = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (!!(temp & FDI_BC_BIFURCATION_SELECT) == enable)
return;
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_B)) & FDI_RX_ENABLE);
- WARN_ON(I915_READ(FDI_RX_CTL(PIPE_C)) & FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_B)) &
+ FDI_RX_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, FDI_RX_CTL(PIPE_C)) &
+ FDI_RX_ENABLE);
temp &= ~FDI_BC_BIFURCATION_SELECT;
if (enable)
temp |= FDI_BC_BIFURCATION_SELECT;
- DRM_DEBUG_KMS("%sabling fdi C rx\n", enable ? "en" : "dis");
- I915_WRITE(SOUTH_CHICKEN1, temp);
- POSTING_READ(SOUTH_CHICKEN1);
+ drm_dbg_kms(&dev_priv->drm, "%sabling fdi C rx\n",
+ enable ? "en" : "dis");
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, temp);
+ intel_de_posting_read(dev_priv, SOUTH_CHICKEN1);
}
static void ivb_update_fdi_bc_bifurcation(const struct intel_crtc_state *crtc_state)
@@ -5723,8 +5804,9 @@ intel_get_crtc_new_encoder(const struct intel_atomic_state *state,
num_encoders++;
}
- WARN(num_encoders != 1, "%d encoders for pipe %c\n",
- num_encoders, pipe_name(crtc->pipe));
+ drm_WARN(encoder->base.dev, num_encoders != 1,
+ "%d encoders for pipe %c\n",
+ num_encoders, pipe_name(crtc->pipe));
return encoder;
}
@@ -5753,8 +5835,8 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
/* Write the TU size bits before fdi link training, so that error
* detection works. */
- I915_WRITE(FDI_RX_TUSIZE1(pipe),
- I915_READ(PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
+ intel_de_write(dev_priv, FDI_RX_TUSIZE1(pipe),
+ intel_de_read(dev_priv, PIPE_DATA_M1(pipe)) & TU_SIZE_MASK);
/* For PCH output, training FDI link */
dev_priv->display.fdi_link_train(crtc, crtc_state);
@@ -5764,7 +5846,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
if (HAS_PCH_CPT(dev_priv)) {
u32 sel;
- temp = I915_READ(PCH_DPLL_SEL);
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp |= TRANS_DPLL_ENABLE(pipe);
sel = TRANS_DPLLB_SEL(pipe);
if (crtc_state->shared_dpll ==
@@ -5772,7 +5854,7 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= sel;
else
temp &= ~sel;
- I915_WRITE(PCH_DPLL_SEL, temp);
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
/* XXX: pch pll's can be enabled any time before we enable the PCH
@@ -5795,11 +5877,11 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
intel_crtc_has_dp_encoder(crtc_state)) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
- u32 bpc = (I915_READ(PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
+ u32 bpc = (intel_de_read(dev_priv, PIPECONF(pipe)) & PIPECONF_BPC_MASK) >> 5;
i915_reg_t reg = TRANS_DP_CTL(pipe);
enum port port;
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_PORT_SEL_MASK |
TRANS_DP_SYNC_MASK |
TRANS_DP_BPC_MASK);
@@ -5812,17 +5894,16 @@ static void ilk_pch_enable(const struct intel_atomic_state *state,
temp |= TRANS_DP_VSYNC_ACTIVE_HIGH;
port = intel_get_crtc_new_encoder(state, crtc_state)->port;
- WARN_ON(port < PORT_B || port > PORT_D);
+ drm_WARN_ON(dev, port < PORT_B || port > PORT_D);
temp |= TRANS_DP_PORT_SEL(port);
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
}
ilk_enable_pch_transcoder(crtc_state);
}
-static void lpt_pch_enable(const struct intel_atomic_state *state,
- const struct intel_crtc_state *crtc_state)
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state)
{
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -5844,11 +5925,13 @@ static void cpt_verify_modeset(struct drm_i915_private *dev_priv,
i915_reg_t dslreg = PIPEDSL(pipe);
u32 temp;
- temp = I915_READ(dslreg);
+ temp = intel_de_read(dev_priv, dslreg);
udelay(500);
- if (wait_for(I915_READ(dslreg) != temp, 5)) {
- if (wait_for(I915_READ(dslreg) != temp, 5))
- DRM_ERROR("mode set failed: pipe %c stuck\n", pipe_name(pipe));
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5)) {
+ if (wait_for(intel_de_read(dev_priv, dslreg) != temp, 5))
+ drm_err(&dev_priv->drm,
+ "mode set failed: pipe %c stuck\n",
+ pipe_name(pipe));
}
}
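
cpt_verify_modeset() leans on i915's wait_for(COND, ms) helper, which polls COND until it becomes true or the timeout expires, evaluating to 0 on success and a negative errno on timeout — so "if (wait_for(...))" reads as "if we timed out". A hedged user-space analogue of that contract; sleep_ms() is a hypothetical millisecond sleep:

#include <stdbool.h>
#include <errno.h>

/* Poll cond() for roughly timeout_ms; 0 on success, -ETIMEDOUT otherwise. */
static int wait_for_cond(bool (*cond)(void), int timeout_ms)
{
	int elapsed;

	for (elapsed = 0; elapsed < timeout_ms; elapsed++) {
		if (cond())
			return 0;
		sleep_ms(1);
	}
	return cond() ? 0 : -ETIMEDOUT;	/* one last check before giving up */
}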
@@ -5963,7 +6046,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
*/
if (INTEL_GEN(dev_priv) >= 9 && crtc_state->hw.enable &&
need_scaler && adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("Pipe/Plane scaling not supported with IF-ID mode\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Pipe/Plane scaling not supported with IF-ID mode\n");
return -EINVAL;
}
@@ -5982,10 +6066,11 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
scaler_state->scaler_users &= ~(1 << scaler_user);
scaler_state->scalers[*scaler_id].in_use = 0;
- DRM_DEBUG_KMS("scaler_user index %u.%u: "
- "Staged freeing scaler id %d scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, *scaler_id,
- scaler_state->scaler_users);
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: "
+ "Staged freeing scaler id %d scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, *scaler_id,
+ scaler_state->scaler_users);
*scaler_id = -1;
}
return 0;
@@ -5993,7 +6078,8 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
if (format && intel_format_info_is_yuv_semiplanar(format, modifier) &&
(src_h < SKL_MIN_YUV_420_SRC_H || src_w < SKL_MIN_YUV_420_SRC_W)) {
- DRM_DEBUG_KMS("Planar YUV: src dimensions not met\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Planar YUV: src dimensions not met\n");
return -EINVAL;
}
@@ -6006,18 +6092,20 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach,
(INTEL_GEN(dev_priv) < 11 &&
(src_w > SKL_MAX_SRC_W || src_h > SKL_MAX_SRC_H ||
dst_w > SKL_MAX_DST_W || dst_h > SKL_MAX_DST_H))) {
- DRM_DEBUG_KMS("scaler_user index %u.%u: src %ux%u dst %ux%u "
- "size is out of scaler range\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h);
+ drm_dbg_kms(&dev_priv->drm,
+ "scaler_user index %u.%u: src %ux%u dst %ux%u "
+ "size is out of scaler range\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h,
+ dst_w, dst_h);
return -EINVAL;
}
/* mark this plane as a scaler user in crtc_state */
scaler_state->scaler_users |= (1 << scaler_user);
- DRM_DEBUG_KMS("scaler_user index %u.%u: "
- "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
- intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
- scaler_state->scaler_users);
+ drm_dbg_kms(&dev_priv->drm, "scaler_user index %u.%u: "
+ "staged scaling request for %ux%u->%ux%u scaler_users = 0x%x\n",
+ intel_crtc->pipe, scaler_user, src_w, src_h, dst_w, dst_h,
+ scaler_state->scaler_users);
return 0;
}
@@ -6036,7 +6124,8 @@ int skl_update_scaler_crtc(struct intel_crtc_state *state)
const struct drm_display_mode *adjusted_mode = &state->hw.adjusted_mode;
bool need_scaler = false;
- if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
+ if (state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 ||
+ state->pch_pfit.enabled)
need_scaler = true;
return skl_update_scaler(state, !state->hw.active, SKL_CRTC_INDEX,
@@ -6088,9 +6177,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
/* check colorkey */
if (plane_state->ckey.flags) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] scaling with color key not allowed",
- intel_plane->base.base.id,
- intel_plane->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] scaling with color key not allowed",
+ intel_plane->base.base.id,
+ intel_plane->base.name);
return -EINVAL;
}
@@ -6128,9 +6218,10 @@ static int skl_update_scaler_plane(struct intel_crtc_state *crtc_state,
break;
/* fall through */
default:
- DRM_DEBUG_KMS("[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
- intel_plane->base.base.id, intel_plane->base.name,
- fb->base.id, fb->format->format);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] FB:%d unsupported scaling format 0x%x\n",
+ intel_plane->base.base.id, intel_plane->base.name,
+ fb->base.id, fb->format->format);
return -EINVAL;
}
@@ -6157,9 +6248,11 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
if (crtc_state->pch_pfit.enabled) {
u16 uv_rgb_hphase, uv_rgb_vphase;
int pfit_w, pfit_h, hscale, vscale;
+ unsigned long irqflags;
int id;
- if (WARN_ON(crtc_state->scaler_state.scaler_id < 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ crtc_state->scaler_state.scaler_id < 0))
return;
pfit_w = (crtc_state->pch_pfit.size >> 16) & 0xFFFF;
@@ -6172,14 +6265,21 @@ static void skl_pfit_enable(const struct intel_crtc_state *crtc_state)
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
id = scaler_state->scaler_id;
- I915_WRITE(SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
- PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, id),
- PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE(SKL_PS_WIN_POS(pipe, id), crtc_state->pch_pfit.pos);
- I915_WRITE(SKL_PS_WIN_SZ(pipe, id), crtc_state->pch_pfit.size);
+
+ spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
+
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, id), PS_SCALER_EN |
+ PS_FILTER_MEDIUM | scaler_state->scalers[id].mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, id),
+ PS_Y_PHASE(0) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, id),
+ crtc_state->pch_pfit.pos);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, id),
+ crtc_state->pch_pfit.size);
+
+ spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
}
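
The rewritten skl_pfit_enable() (like skl_detach_scaler() earlier) switches to the *_fw register accessors, which skip the per-access forcewake/lock bookkeeping, and instead wraps the whole burst of writes in a single uncore.lock critical section. A sketch of the pattern, assuming the display power well already holds the needed forcewake; REG_A/REG_B and the values are placeholders:

unsigned long irqflags;

spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

/* raw writes: no per-access locking, serialized by the section above */
intel_de_write_fw(dev_priv, REG_A, val_a);
intel_de_write_fw(dev_priv, REG_B, val_b);

spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);

One critical section for N writes keeps the scaler programming atomic with respect to other uncore users and is cheaper than N individually locked accesses.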
@@ -6195,12 +6295,15 @@ static void ilk_pfit_enable(const struct intel_crtc_state *crtc_state)
* e.g. x201.
*/
if (IS_IVYBRIDGE(dev_priv) || IS_HASWELL(dev_priv))
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3 |
- PF_PIPE_SEL_IVB(pipe));
+ intel_de_write(dev_priv, PF_CTL(pipe),
+ PF_ENABLE | PF_FILTER_MED_3x3 | PF_PIPE_SEL_IVB(pipe));
else
- I915_WRITE(PF_CTL(pipe), PF_ENABLE | PF_FILTER_MED_3x3);
- I915_WRITE(PF_WIN_POS(pipe), crtc_state->pch_pfit.pos);
- I915_WRITE(PF_WIN_SZ(pipe), crtc_state->pch_pfit.size);
+ intel_de_write(dev_priv, PF_CTL(pipe),
+ PF_ENABLE | PF_FILTER_MED_3x3);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe),
+ crtc_state->pch_pfit.pos);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe),
+ crtc_state->pch_pfit.size);
}
}
@@ -6218,25 +6321,26 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state)
* This function is called from post_plane_update, which is run after
* a vblank wait.
*/
- WARN_ON(!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
+ drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR)));
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
- IPS_ENABLE | IPS_PCODE_CONTROL));
+ drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL,
+ IPS_ENABLE | IPS_PCODE_CONTROL));
/* Quoting Art Runyan: "it's not safe to expect any particular
* value in IPS_CTL bit 31 after enabling IPS through the
* mailbox." Moreover, the mailbox may return a bogus state,
* so we need to just enable it and continue on.
*/
} else {
- I915_WRITE(IPS_CTL, IPS_ENABLE);
+ intel_de_write(dev_priv, IPS_CTL, IPS_ENABLE);
/* The bit only becomes 1 in the next vblank, so this wait here
* is essentially intel_wait_for_vblank. If we don't have this
* and don't wait for vblanks until the end of crtc_enable, then
* the HW state readout code will complain that the expected
* IPS_CTL value is not the one we read. */
if (intel_de_wait_for_set(dev_priv, IPS_CTL, IPS_ENABLE, 50))
- DRM_ERROR("Timed out waiting for IPS enable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for IPS enable\n");
}
}
@@ -6250,17 +6354,19 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state)
return;
if (IS_BROADWELL(dev_priv)) {
- WARN_ON(sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
+ drm_WARN_ON(dev,
+ sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0));
/*
* Wait for PCODE to finish disabling IPS. The BSpec specified
* 42ms timeout value leads to occasional timeouts so use 100ms
* instead.
*/
if (intel_de_wait_for_clear(dev_priv, IPS_CTL, IPS_ENABLE, 100))
- DRM_ERROR("Timed out waiting for IPS disable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for IPS disable\n");
} else {
- I915_WRITE(IPS_CTL, 0);
- POSTING_READ(IPS_CTL);
+ intel_de_write(dev_priv, IPS_CTL, 0);
+ intel_de_posting_read(dev_priv, IPS_CTL);
}
/* We need to wait for a vblank before we can disable the plane. */
@@ -6382,13 +6488,10 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(state, primary);
enum pipe pipe = crtc->pipe;
intel_frontbuffer_flip(dev_priv, new_crtc_state->fb_bits);
@@ -6399,8 +6502,7 @@ static void intel_post_plane_update(struct intel_atomic_state *state,
if (hsw_post_update_enable_ips(old_crtc_state, new_crtc_state))
hsw_enable_ips(new_crtc_state);
- if (new_primary_state)
- intel_fbc_post_update(crtc);
+ intel_fbc_post_update(state, crtc);
if (needs_nv12_wa(old_crtc_state) &&
!needs_nv12_wa(new_crtc_state))
@@ -6415,20 +6517,16 @@ static void intel_pre_plane_update(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_plane *primary = to_intel_plane(crtc->base.primary);
const struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
const struct intel_crtc_state *new_crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
- const struct intel_plane_state *new_primary_state =
- intel_atomic_get_new_plane_state(state, primary);
enum pipe pipe = crtc->pipe;
if (hsw_pre_update_disable_ips(old_crtc_state, new_crtc_state))
hsw_disable_ips(old_crtc_state);
- if (new_primary_state &&
- intel_fbc_pre_update(crtc, new_crtc_state, new_primary_state))
+ if (intel_fbc_pre_update(state, crtc))
intel_wait_for_vblank(dev_priv, pipe);
/* Display WA 827 */
@@ -6770,7 +6868,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
/*
@@ -6863,7 +6961,7 @@ static bool hsw_crtc_supports_ips(struct intel_crtc *crtc)
static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
enum pipe pipe, bool apply)
{
- u32 val = I915_READ(CLKGATE_DIS_PSL(pipe));
+ u32 val = intel_de_read(dev_priv, CLKGATE_DIS_PSL(pipe));
u32 mask = DPF_GATING_DIS | DPF_RAM_GATING_DIS | DPFR_GATING_DIS;
if (apply)
@@ -6871,7 +6969,7 @@ static void glk_pipe_scaler_clock_gating_wa(struct drm_i915_private *dev_priv,
else
val &= ~mask;
- I915_WRITE(CLKGATE_DIS_PSL(pipe), val);
+ intel_de_write(dev_priv, CLKGATE_DIS_PSL(pipe), val);
}
static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
@@ -6890,7 +6988,17 @@ static void icl_pipe_mbus_enable(struct intel_crtc *crtc)
val |= MBUS_DBOX_B_CREDIT(8);
}
- I915_WRITE(PIPE_MBUS_DBOX_CTL(pipe), val);
+ intel_de_write(dev_priv, PIPE_MBUS_DBOX_CTL(pipe), val);
+}
+
+static void hsw_set_linetime_wm(const struct intel_crtc_state *crtc_state)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+
+ intel_de_write(dev_priv, WM_LINETIME(crtc->pipe),
+ HSW_LINETIME(crtc_state->linetime) |
+ HSW_IPS_LINETIME(crtc_state->ips_linetime));
}
static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
@@ -6900,10 +7008,10 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state)
i915_reg_t reg = CHICKEN_TRANS(crtc_state->cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
val |= HSW_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
static void hsw_crtc_enable(struct intel_atomic_state *state,
@@ -6916,7 +7024,7 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder;
bool psl_clkgate_wa;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
intel_encoders_pre_pll_enable(state, crtc);
@@ -6926,9 +7034,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
intel_encoders_pre_enable(state, crtc);
- if (intel_crtc_has_dp_encoder(new_crtc_state))
- intel_dp_set_m_n(new_crtc_state, M1_N1);
-
if (!transcoder_is_dsi(cpu_transcoder))
intel_set_pipe_timings(new_crtc_state);
@@ -6939,8 +7044,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(cpu_transcoder))
- I915_WRITE(PIPE_MULT(cpu_transcoder),
- new_crtc_state->pixel_multiplier - 1);
+ intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder),
+ new_crtc_state->pixel_multiplier - 1);
if (new_crtc_state->has_pch_encoder)
intel_cpu_transcoder_set_m_n(new_crtc_state,
@@ -6977,6 +7082,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) < 9)
intel_disable_primary_plane(new_crtc_state);
+ hsw_set_linetime_wm(new_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
@@ -6989,15 +7096,6 @@ static void hsw_crtc_enable(struct intel_atomic_state *state,
if (INTEL_GEN(dev_priv) >= 11)
icl_pipe_mbus_enable(crtc);
- /* XXX: Do the pipe assertions at the right place for BXT DSI. */
- if (!transcoder_is_dsi(cpu_transcoder))
- intel_enable_pipe(new_crtc_state);
-
- if (new_crtc_state->has_pch_encoder)
- lpt_pch_enable(state, new_crtc_state);
-
- intel_crtc_vblank_on(new_crtc_state);
-
intel_encoders_enable(state, crtc);
if (psl_clkgate_wa) {
@@ -7023,9 +7121,9 @@ void ilk_pfit_disable(const struct intel_crtc_state *old_crtc_state)
/* To avoid upsetting the power well on haswell only disable the pfit if
* it's in use. The hw state code will make sure we get this right. */
if (old_crtc_state->pch_pfit.enabled) {
- I915_WRITE(PF_CTL(pipe), 0);
- I915_WRITE(PF_WIN_POS(pipe), 0);
- I915_WRITE(PF_WIN_SZ(pipe), 0);
+ intel_de_write(dev_priv, PF_CTL(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_POS(pipe), 0);
+ intel_de_write(dev_priv, PF_WIN_SZ(pipe), 0);
}
}
@@ -7067,16 +7165,16 @@ static void ilk_crtc_disable(struct intel_atomic_state *state,
/* disable TRANS_DP_CTL */
reg = TRANS_DP_CTL(pipe);
- temp = I915_READ(reg);
+ temp = intel_de_read(dev_priv, reg);
temp &= ~(TRANS_DP_OUTPUT_ENABLE |
TRANS_DP_PORT_SEL_MASK);
temp |= TRANS_DP_PORT_SEL_NONE;
- I915_WRITE(reg, temp);
+ intel_de_write(dev_priv, reg, temp);
/* disable DPLL_SEL */
- temp = I915_READ(PCH_DPLL_SEL);
+ temp = intel_de_read(dev_priv, PCH_DPLL_SEL);
temp &= ~(TRANS_DPLL_ENABLE(pipe) | TRANS_DPLLB_SEL(pipe));
- I915_WRITE(PCH_DPLL_SEL, temp);
+ intel_de_write(dev_priv, PCH_DPLL_SEL, temp);
}
ilk_fdi_pll_disable(crtc);
@@ -7109,15 +7207,17 @@ static void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state)
* The panel fitter should only be adjusted whilst the pipe is disabled,
* according to register description and PRM.
*/
- WARN_ON(I915_READ(PFIT_CONTROL) & PFIT_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, PFIT_CONTROL) & PFIT_ENABLE);
assert_pipe_disabled(dev_priv, crtc_state->cpu_transcoder);
- I915_WRITE(PFIT_PGM_RATIOS, crtc_state->gmch_pfit.pgm_ratios);
- I915_WRITE(PFIT_CONTROL, crtc_state->gmch_pfit.control);
+ intel_de_write(dev_priv, PFIT_PGM_RATIOS,
+ crtc_state->gmch_pfit.pgm_ratios);
+ intel_de_write(dev_priv, PFIT_CONTROL, crtc_state->gmch_pfit.control);
/* Border color in case we don't scale up to the full screen. Black by
* default, change to something else for debugging. */
- I915_WRITE(BCLRPAT(crtc->pipe), 0);
+ intel_de_write(dev_priv, BCLRPAT(crtc->pipe), 0);
}
bool intel_phy_is_combo(struct drm_i915_private *dev_priv, enum phy phy)
@@ -7304,7 +7404,7 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
if (intel_crtc_has_dp_encoder(new_crtc_state))
@@ -7314,8 +7414,8 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state,
intel_set_pipe_src_size(new_crtc_state);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) {
- I915_WRITE(CHV_BLEND(pipe), CHV_BLEND_LEGACY);
- I915_WRITE(CHV_CANVAS(pipe), 0);
+ intel_de_write(dev_priv, CHV_BLEND(pipe), CHV_BLEND_LEGACY);
+ intel_de_write(dev_priv, CHV_CANVAS(pipe), 0);
}
i9xx_set_pipeconf(new_crtc_state);
@@ -7356,8 +7456,10 @@ static void i9xx_set_pll_dividers(const struct intel_crtc_state *crtc_state)
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- I915_WRITE(FP0(crtc->pipe), crtc_state->dpll_hw_state.fp0);
- I915_WRITE(FP1(crtc->pipe), crtc_state->dpll_hw_state.fp1);
+ intel_de_write(dev_priv, FP0(crtc->pipe),
+ crtc_state->dpll_hw_state.fp0);
+ intel_de_write(dev_priv, FP1(crtc->pipe),
+ crtc_state->dpll_hw_state.fp1);
}
static void i9xx_crtc_enable(struct intel_atomic_state *state,
@@ -7368,7 +7470,7 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- if (WARN_ON(crtc->active))
+ if (drm_WARN_ON(&dev_priv->drm, crtc->active))
return;
i9xx_set_pll_dividers(new_crtc_state);
@@ -7418,9 +7520,9 @@ static void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state)
assert_pipe_disabled(dev_priv, old_crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("disabling pfit, current: 0x%08x\n",
- I915_READ(PFIT_CONTROL));
- I915_WRITE(PFIT_CONTROL, 0);
+ drm_dbg_kms(&dev_priv->drm, "disabling pfit, current: 0x%08x\n",
+ intel_de_read(dev_priv, PFIT_CONTROL));
+ intel_de_write(dev_priv, PFIT_CONTROL, 0);
}
static void i9xx_crtc_disable(struct intel_atomic_state *state,
@@ -7477,6 +7579,8 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_bw_state *bw_state =
to_intel_bw_state(dev_priv->bw_obj.state);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(dev_priv->cdclk.obj.state);
struct intel_crtc_state *crtc_state =
to_intel_crtc_state(crtc->base.state);
enum intel_display_power_domain domain;
@@ -7500,8 +7604,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
state = drm_atomic_state_alloc(&dev_priv->drm);
if (!state) {
- DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to disable [CRTC:%d:%s], out of memory",
+ crtc->base.base.id, crtc->base.name);
return;
}
@@ -7511,19 +7616,21 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
temp_crtc_state = intel_atomic_get_crtc_state(state, crtc);
ret = drm_atomic_add_affected_connectors(state, &crtc->base);
- WARN_ON(IS_ERR(temp_crtc_state) || ret);
+ drm_WARN_ON(&dev_priv->drm, IS_ERR(temp_crtc_state) || ret);
dev_priv->display.crtc_disable(to_intel_atomic_state(state), crtc);
drm_atomic_state_put(state);
- DRM_DEBUG_KMS("[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] hw state adjusted, was enabled, now disabled\n",
+ crtc->base.base.id, crtc->base.name);
crtc->active = false;
crtc->base.enabled = false;
- WARN_ON(drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_atomic_set_mode_for_crtc(&crtc_state->uapi, NULL) < 0);
crtc_state->uapi.active = false;
crtc_state->uapi.connector_mask = 0;
crtc_state->uapi.encoder_mask = 0;
@@ -7543,8 +7650,9 @@ static void intel_crtc_disable_noatomic(struct intel_crtc *crtc,
crtc->enabled_power_domains = 0;
dev_priv->active_pipes &= ~BIT(pipe);
- dev_priv->min_cdclk[pipe] = 0;
- dev_priv->min_voltage_level[pipe] = 0;
+ cdclk_state->min_cdclk[pipe] = 0;
+ cdclk_state->min_voltage_level[pipe] = 0;
+ cdclk_state->active_pipes &= ~BIT(pipe);
bw_state->data_rate[pipe] = 0;
bw_state->num_active_planes[pipe] = 0;
@@ -7563,7 +7671,8 @@ int intel_display_suspend(struct drm_device *dev)
state = drm_atomic_helper_suspend(dev);
ret = PTR_ERR_OR_ZERO(state);
if (ret)
- DRM_ERROR("Suspending crtc's failed with %i\n", ret);
+ drm_err(&dev_priv->drm, "Suspending crtc's failed with %i\n",
+ ret);
else
dev_priv->modeset_restore_state = state;
return ret;
@@ -7583,13 +7692,13 @@ static void intel_connector_verify_state(struct intel_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.base.id,
- connector->base.name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.base.id, connector->base.name);
if (connector->get_hw_state(connector)) {
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
I915_STATE_WARN(!crtc_state,
"connector enabled without attached crtc\n");
@@ -7632,18 +7741,21 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
struct intel_crtc *other_crtc;
struct intel_crtc_state *other_crtc_state;
- DRM_DEBUG_KMS("checking fdi config on pipe %c, lanes %i\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "checking fdi config on pipe %c, lanes %i\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
if (pipe_config->fdi_lanes > 4) {
- DRM_DEBUG_KMS("invalid fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("only 2 lanes on haswell, required: %i lanes\n",
- pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on haswell, required: %i lanes\n",
+ pipe_config->fdi_lanes);
return -EINVAL;
} else {
return 0;
@@ -7668,15 +7780,17 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 0) {
- DRM_DEBUG_KMS("invalid shared fdi lane config on pipe %c: %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid shared fdi lane config on pipe %c: %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
return 0;
case PIPE_C:
if (pipe_config->fdi_lanes > 2) {
- DRM_DEBUG_KMS("only 2 lanes on pipe %c: required %i lanes\n",
- pipe_name(pipe), pipe_config->fdi_lanes);
+ drm_dbg_kms(&dev_priv->drm,
+ "only 2 lanes on pipe %c: required %i lanes\n",
+ pipe_name(pipe), pipe_config->fdi_lanes);
return -EINVAL;
}
@@ -7687,7 +7801,8 @@ static int ilk_check_fdi_lanes(struct drm_device *dev, enum pipe pipe,
return PTR_ERR(other_crtc_state);
if (pipe_required_fdi_lanes(other_crtc_state) > 2) {
- DRM_DEBUG_KMS("fdi link B uses too many lanes to enable link C\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "fdi link B uses too many lanes to enable link C\n");
return -EINVAL;
}
return 0;
@@ -7701,6 +7816,7 @@ static int ilk_fdi_compute_config(struct intel_crtc *intel_crtc,
struct intel_crtc_state *pipe_config)
{
struct drm_device *dev = intel_crtc->base.dev;
+ struct drm_i915_private *i915 = to_i915(dev);
const struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int lane, link_bw, fdi_dotclock, ret;
bool needs_recompute = false;
@@ -7713,7 +7829,7 @@ retry:
* Hence the bw of each lane in terms of the mode signal
* is:
*/
- link_bw = intel_fdi_link_freq(to_i915(dev), pipe_config);
+ link_bw = intel_fdi_link_freq(i915, pipe_config);
fdi_dotclock = adjusted_mode->crtc_clock;
@@ -7731,8 +7847,9 @@ retry:
if (ret == -EINVAL && pipe_config->pipe_bpp > 6*3) {
pipe_config->pipe_bpp -= 2*3;
- DRM_DEBUG_KMS("fdi link bw constraint, reducing pipe bpp to %i\n",
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "fdi link bw constraint, reducing pipe bpp to %i\n",
+ pipe_config->pipe_bpp);
needs_recompute = true;
pipe_config->bw_constrained = true;
@@ -7774,15 +7891,17 @@ bool hsw_crtc_state_ips_capable(const struct intel_crtc_state *crtc_state)
return true;
}
-static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
+static int hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv =
to_i915(crtc_state->uapi.crtc->dev);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(crtc_state->uapi.state);
+ crtc_state->ips_enabled = false;
+
if (!hsw_crtc_state_ips_capable(crtc_state))
- return false;
+ return 0;
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -7791,18 +7910,27 @@ static bool hsw_compute_ips_config(struct intel_crtc_state *crtc_state)
* completely disable it.
*/
if (crtc_state->crc_enabled)
- return false;
+ return 0;
/* IPS should be fine as long as at least one plane is enabled. */
if (!(crtc_state->active_planes & ~BIT(PLANE_CURSOR)))
- return false;
+ return 0;
- /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
- if (IS_BROADWELL(dev_priv) &&
- crtc_state->pixel_rate > intel_state->cdclk.logical.cdclk * 95 / 100)
- return false;
+ if (IS_BROADWELL(dev_priv)) {
+ const struct intel_cdclk_state *cdclk_state;
- return true;
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
+
+ /* pixel rate mustn't exceed 95% of cdclk with IPS on BDW */
+ if (crtc_state->pixel_rate > cdclk_state->logical.cdclk * 95 / 100)
+ return 0;
+ }
+
+ crtc_state->ips_enabled = true;
+
+ return 0;
}
static bool intel_crtc_supports_double_wide(const struct intel_crtc *crtc)
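
hsw_compute_ips_config() changes signature from bool to int because intel_atomic_get_cdclk_state() can now fail (e.g. with -EDEADLK during atomic lock acquisition): the decision itself moves into crtc_state->ips_enabled, while the return value carries only 0 or a negative errno. A hedged sketch of that refactor shape; the struct and helper names are placeholders:

static int compute_feature(struct feature_state *s)
{
	struct dep_state *dep;

	s->enabled = false;		/* the decision lives in state now */

	dep = get_dep_state(s);		/* may return ERR_PTR(-EDEADLK) */
	if (IS_ERR(dep))
		return PTR_ERR(dep);	/* propagate the error, don't swallow it */

	s->enabled = feature_allowed(s, dep);
	return 0;
}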
@@ -7884,9 +8012,10 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
}
if (adjusted_mode->crtc_clock > clock_limit) {
- DRM_DEBUG_KMS("requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
- adjusted_mode->crtc_clock, clock_limit,
- yesno(pipe_config->double_wide));
+ drm_dbg_kms(&dev_priv->drm,
+ "requested pixel clock (%d kHz) too high (max: %d kHz, double wide: %s)\n",
+ adjusted_mode->crtc_clock, clock_limit,
+ yesno(pipe_config->double_wide));
return -EINVAL;
}
@@ -7898,7 +8027,8 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
* for output conversion from RGB->YCBCR. So if CTM is already
* applied we can't support YCBCR420 output.
*/
- DRM_DEBUG_KMS("YCBCR420 and CTM together are not possible\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "YCBCR420 and CTM together are not possible\n");
return -EINVAL;
}
@@ -7910,13 +8040,15 @@ static int intel_crtc_compute_config(struct intel_crtc *crtc,
*/
if (pipe_config->pipe_src_w & 1) {
if (pipe_config->double_wide) {
- DRM_DEBUG_KMS("Odd pipe source width not supported with double wide pipe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Odd pipe source width not supported with double wide pipe\n");
return -EINVAL;
}
if (intel_crtc_has_type(pipe_config, INTEL_OUTPUT_LVDS) &&
intel_is_dual_link_lvds(dev_priv)) {
- DRM_DEBUG_KMS("Odd pipe source width not supported with dual link LVDS\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Odd pipe source width not supported with dual link LVDS\n");
return -EINVAL;
}
}
@@ -7997,13 +8129,15 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv)
* indicates as much.
*/
if (HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)) {
- bool bios_lvds_use_ssc = I915_READ(PCH_DREF_CONTROL) &
+ bool bios_lvds_use_ssc = intel_de_read(dev_priv,
+ PCH_DREF_CONTROL) &
DREF_SSC1_ENABLE;
if (dev_priv->vbt.lvds_use_ssc != bios_lvds_use_ssc) {
- DRM_DEBUG_KMS("SSC %s by BIOS, overriding VBT which says %s\n",
- enableddisabled(bios_lvds_use_ssc),
- enableddisabled(dev_priv->vbt.lvds_use_ssc));
+ drm_dbg_kms(&dev_priv->drm,
+ "SSC %s by BIOS, overriding VBT which says %s\n",
+ enableddisabled(bios_lvds_use_ssc),
+ enableddisabled(dev_priv->vbt.lvds_use_ssc));
dev_priv->vbt.lvds_use_ssc = bios_lvds_use_ssc;
}
}
@@ -8090,10 +8224,11 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- I915_WRITE(PCH_TRANS_DATA_M1(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
- I915_WRITE(PCH_TRANS_LINK_M1(pipe), m_n->link_m);
- I915_WRITE(PCH_TRANS_LINK_N1(pipe), m_n->link_n);
+ intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n);
+ intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m);
+ intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n);
}
static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv,
@@ -8119,33 +8254,42 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta
enum transcoder transcoder = crtc_state->cpu_transcoder;
if (INTEL_GEN(dev_priv) >= 5) {
- I915_WRITE(PIPE_DATA_M1(transcoder), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N1(transcoder), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M1(transcoder), m_n->link_m);
- I915_WRITE(PIPE_LINK_N1(transcoder), m_n->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M1(transcoder),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N1(transcoder),
+ m_n->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M1(transcoder),
+ m_n->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N1(transcoder),
+ m_n->link_n);
/*
* M2_N2 registers are set only if DRRS is supported
* (to make sure the registers are not unnecessarily accessed).
*/
if (m2_n2 && crtc_state->has_drrs &&
transcoder_has_m2_n2(dev_priv, transcoder)) {
- I915_WRITE(PIPE_DATA_M2(transcoder),
- TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
- I915_WRITE(PIPE_DATA_N2(transcoder), m2_n2->gmch_n);
- I915_WRITE(PIPE_LINK_M2(transcoder), m2_n2->link_m);
- I915_WRITE(PIPE_LINK_N2(transcoder), m2_n2->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M2(transcoder),
+ TU_SIZE(m2_n2->tu) | m2_n2->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N2(transcoder),
+ m2_n2->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M2(transcoder),
+ m2_n2->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N2(transcoder),
+ m2_n2->link_n);
}
} else {
- I915_WRITE(PIPE_DATA_M_G4X(pipe), TU_SIZE(m_n->tu) | m_n->gmch_m);
- I915_WRITE(PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
- I915_WRITE(PIPE_LINK_M_G4X(pipe), m_n->link_m);
- I915_WRITE(PIPE_LINK_N_G4X(pipe), m_n->link_n);
+ intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe),
+ TU_SIZE(m_n->tu) | m_n->gmch_m);
+ intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n);
+ intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m);
+ intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n);
}
}
void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n)
{
const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
if (m_n == M1_N1) {
dp_m_n = &crtc_state->dp_m_n;
@@ -8158,7 +8302,7 @@ void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_s
*/
dp_m_n = &crtc_state->dp_m2_n2;
} else {
- DRM_ERROR("Unsupported divider value\n");
+ drm_err(&i915->drm, "Unsupported divider value\n");
return;
}
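
[Note: the M/N pairs programmed above encode DisplayPort bandwidth ratios, with the M2/N2 pair present only for DRRS. Illustratively, before the driver's own reduction helpers shrink the values to fit the registers (function and parameter names below are hypothetical):

	/* Illustrative only: the unreduced DP data and link M/N ratios. */
	static void example_link_m_n(int bpp, int nlanes,
				     int pixel_clock_khz, int link_clock_khz,
				     u32 *data_m, u32 *data_n,
				     u32 *link_m, u32 *link_n)
	{
		/* data M/N: stream payload vs. link capacity (8 bits/symbol/lane) */
		*data_m = bpp * pixel_clock_khz;
		*data_n = link_clock_khz * nlanes * 8;
		/* link M/N: pixel clock vs. link symbol clock */
		*link_m = pixel_clock_khz;
		*link_n = link_clock_khz;
	}
]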
@@ -8212,9 +8356,8 @@ static void vlv_prepare_pll(struct intel_crtc *crtc,
u32 coreclk, reg_val;
/* Enable Refclk */
- I915_WRITE(DPLL(pipe),
- pipe_config->dpll_hw_state.dpll &
- ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~(DPLL_VCO_ENABLE | DPLL_EXT_BUFFER_ENABLE_VLV));
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8314,8 +8457,8 @@ static void chv_prepare_pll(struct intel_crtc *crtc,
int vco;
/* Enable Refclk and SSC */
- I915_WRITE(DPLL(pipe),
- pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
+ intel_de_write(dev_priv, DPLL(pipe),
+ pipe_config->dpll_hw_state.dpll & ~DPLL_VCO_ENABLE);
/* No need to actually set up the DPLL with DSI */
if ((pipe_config->dpll_hw_state.dpll & DPLL_VCO_ENABLE) == 0)
@@ -8614,27 +8757,22 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
}
if (INTEL_GEN(dev_priv) > 3)
- I915_WRITE(VSYNCSHIFT(cpu_transcoder), vsyncshift);
-
- I915_WRITE(HTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_hdisplay - 1) |
- ((adjusted_mode->crtc_htotal - 1) << 16));
- I915_WRITE(HBLANK(cpu_transcoder),
- (adjusted_mode->crtc_hblank_start - 1) |
- ((adjusted_mode->crtc_hblank_end - 1) << 16));
- I915_WRITE(HSYNC(cpu_transcoder),
- (adjusted_mode->crtc_hsync_start - 1) |
- ((adjusted_mode->crtc_hsync_end - 1) << 16));
-
- I915_WRITE(VTOTAL(cpu_transcoder),
- (adjusted_mode->crtc_vdisplay - 1) |
- ((crtc_vtotal - 1) << 16));
- I915_WRITE(VBLANK(cpu_transcoder),
- (adjusted_mode->crtc_vblank_start - 1) |
- ((crtc_vblank_end - 1) << 16));
- I915_WRITE(VSYNC(cpu_transcoder),
- (adjusted_mode->crtc_vsync_start - 1) |
- ((adjusted_mode->crtc_vsync_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNCSHIFT(cpu_transcoder),
+ vsyncshift);
+
+ intel_de_write(dev_priv, HTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_hdisplay - 1) | ((adjusted_mode->crtc_htotal - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_hblank_start - 1) | ((adjusted_mode->crtc_hblank_end - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_hsync_start - 1) | ((adjusted_mode->crtc_hsync_end - 1) << 16));
+
+ intel_de_write(dev_priv, VTOTAL(cpu_transcoder),
+ (adjusted_mode->crtc_vdisplay - 1) | ((crtc_vtotal - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(cpu_transcoder),
+ (adjusted_mode->crtc_vblank_start - 1) | ((crtc_vblank_end - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(cpu_transcoder),
+ (adjusted_mode->crtc_vsync_start - 1) | ((adjusted_mode->crtc_vsync_end - 1) << 16));
/* Workaround: when the EDP input selection is B, the VTOTAL_B must be
* programmed with the VTOTAL_EDP value. Same for VTOTAL_C. This is
@@ -8642,7 +8780,8 @@ static void intel_set_pipe_timings(const struct intel_crtc_state *crtc_state)
* bits. */
if (IS_HASWELL(dev_priv) && cpu_transcoder == TRANSCODER_EDP &&
(pipe == PIPE_B || pipe == PIPE_C))
- I915_WRITE(VTOTAL(pipe), I915_READ(VTOTAL(cpu_transcoder)));
+ intel_de_write(dev_priv, VTOTAL(pipe),
+ intel_de_read(dev_priv, VTOTAL(cpu_transcoder)));
}
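
[Note: each timing register packs two 16-bit fields, both stored minus one. A hypothetical 1080p example:

	/* e.g. hdisplay = 1920, htotal = 2200: */
	u32 htotal = (1920 - 1) | ((2200 - 1) << 16);	/* == 0x0897077f */
]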
@@ -8655,9 +8794,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state)
/* pipesrc controls the size that is scaled from, which should
* always be the user's requested size.
*/
- I915_WRITE(PIPESRC(pipe),
- ((crtc_state->pipe_src_w - 1) << 16) |
- (crtc_state->pipe_src_h - 1));
+ intel_de_write(dev_priv, PIPESRC(pipe),
+ ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1));
}
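
[Note: PIPESRC uses the same minus-one convention, with width in the high half; the readback paths later in this file undo it with the matching "+ 1":

	/* e.g. a 1920x1080 source rectangle: */
	u32 pipesrc = ((1920 - 1) << 16) | (1080 - 1);	/* == 0x077f0437 */
]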
static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
@@ -8670,9 +8808,9 @@ static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state)
if (INTEL_GEN(dev_priv) >= 9 ||
IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK_HSW;
else
- return I915_READ(PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
+ return intel_de_read(dev_priv, PIPECONF(cpu_transcoder)) & PIPECONF_INTERLACE_MASK;
}
static void intel_get_pipe_timings(struct intel_crtc *crtc,
@@ -8683,33 +8821,33 @@ static void intel_get_pipe_timings(struct intel_crtc *crtc,
enum transcoder cpu_transcoder = pipe_config->cpu_transcoder;
u32 tmp;
- tmp = I915_READ(HTOTAL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HTOTAL(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hdisplay = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_htotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = I915_READ(HBLANK(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HBLANK(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hblank_start =
(tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_hblank_end =
((tmp >> 16) & 0xffff) + 1;
}
- tmp = I915_READ(HSYNC(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, HSYNC(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_hsync_start = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_hsync_end = ((tmp >> 16) & 0xffff) + 1;
- tmp = I915_READ(VTOTAL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VTOTAL(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vdisplay = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vtotal = ((tmp >> 16) & 0xffff) + 1;
if (!transcoder_is_dsi(cpu_transcoder)) {
- tmp = I915_READ(VBLANK(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VBLANK(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vblank_start =
(tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vblank_end =
((tmp >> 16) & 0xffff) + 1;
}
- tmp = I915_READ(VSYNC(cpu_transcoder));
+ tmp = intel_de_read(dev_priv, VSYNC(cpu_transcoder));
pipe_config->hw.adjusted_mode.crtc_vsync_start = (tmp & 0xffff) + 1;
pipe_config->hw.adjusted_mode.crtc_vsync_end = ((tmp >> 16) & 0xffff) + 1;
@@ -8727,7 +8865,7 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
- tmp = I915_READ(PIPESRC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe));
pipe_config->pipe_src_h = (tmp & 0xffff) + 1;
pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1;
@@ -8768,7 +8906,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
/* we keep both pipes enabled on 830 */
if (IS_I830(dev_priv))
- pipeconf |= I915_READ(PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
+ pipeconf |= intel_de_read(dev_priv, PIPECONF(crtc->pipe)) & PIPECONF_ENABLE;
if (crtc_state->double_wide)
pipeconf |= PIPECONF_DOUBLE_WIDE;
@@ -8815,8 +8953,8 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state)
pipeconf |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(PIPECONF(crtc->pipe), pipeconf);
- POSTING_READ(PIPECONF(crtc->pipe));
+ intel_de_write(dev_priv, PIPECONF(crtc->pipe), pipeconf);
+ intel_de_posting_read(dev_priv, PIPECONF(crtc->pipe));
}
static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
@@ -8833,7 +8971,9 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &intel_limits_i8xx_lvds;
@@ -8846,7 +8986,8 @@ static int i8xx_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8868,7 +9009,9 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
if (intel_is_dual_link_lvds(dev_priv))
@@ -8888,7 +9031,8 @@ static int g4x_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8911,7 +9055,9 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &pnv_limits_lvds;
@@ -8922,7 +9068,8 @@ static int pnv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!pnv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8945,7 +9092,9 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
refclk = dev_priv->vbt.lvds_ssc_freq;
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n", refclk);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ refclk);
}
limit = &intel_limits_i9xx_lvds;
@@ -8956,7 +9105,8 @@ static int i9xx_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!i9xx_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8970,6 +9120,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_chv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8977,7 +9128,7 @@ static int chv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!chv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -8991,6 +9142,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
{
int refclk = 100000;
const struct intel_limit *limit = &intel_limits_vlv;
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
@@ -8998,7 +9150,7 @@ static int vlv_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!vlv_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&i915->drm, "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
@@ -9025,7 +9177,7 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
if (!i9xx_has_pfit(dev_priv))
return;
- tmp = I915_READ(PFIT_CONTROL);
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
if (!(tmp & PFIT_ENABLE))
return;
@@ -9039,7 +9191,8 @@ static void i9xx_get_pfit_config(struct intel_crtc *crtc,
}
pipe_config->gmch_pfit.control = tmp;
- pipe_config->gmch_pfit.pgm_ratios = I915_READ(PFIT_PGM_RATIOS);
+ pipe_config->gmch_pfit.pgm_ratios = intel_de_read(dev_priv,
+ PFIT_PGM_RATIOS);
}
static void vlv_crtc_clock_get(struct intel_crtc *crtc,
@@ -9087,11 +9240,11 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}
@@ -9099,7 +9252,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(DSPCNTR(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
if (INTEL_GEN(dev_priv) >= 4) {
if (val & DISPPLANE_TILED) {
@@ -9120,34 +9273,37 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc,
fb->format = drm_format_info(fourcc);
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- offset = I915_READ(DSPOFFSET(i9xx_plane));
- base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else if (INTEL_GEN(dev_priv) >= 4) {
if (plane_config->tiling)
- offset = I915_READ(DSPTILEOFF(i9xx_plane));
+ offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i9xx_plane));
else
- offset = I915_READ(DSPLINOFF(i9xx_plane));
- base = I915_READ(DSPSURF(i9xx_plane)) & 0xfffff000;
+ offset = intel_de_read(dev_priv,
+ DSPLINOFF(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000;
} else {
- base = I915_READ(DSPADDR(i9xx_plane));
+ base = intel_de_read(dev_priv, DSPADDR(i9xx_plane));
}
plane_config->base = base;
- val = I915_READ(PIPESRC(pipe));
+ val = intel_de_read(dev_priv, PIPESRC(pipe));
fb->width = ((val >> 16) & 0xfff) + 1;
fb->height = ((val >> 0) & 0xfff) + 1;
- val = I915_READ(DSPSTRIDE(i9xx_plane));
+ val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane));
fb->pitches[0] = val & 0xffffffc0;
aligned_height = intel_fb_align_height(fb, 0, fb->height);
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
plane_config->fb = intel_fb;
}
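
[Note: to make the size computation concrete, assuming a linear XRGB8888 framebuffer (so aligned_height equals the plane height):

	u32 pitch = 1920 * 4;		/* 7680 bytes; satisfies the 0xffffffc0 stride mask */
	u32 size  = pitch * 1080;	/* 8294400 bytes, ~7.9 MiB */
]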
@@ -9192,11 +9348,12 @@ bdw_get_pipemisc_output_format(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
if (tmp & PIPEMISC_YUV420_ENABLE) {
/* We support 4:2:0 in full blend mode only */
- WARN_ON((tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (tmp & PIPEMISC_YUV420_MODE_FULL_BLEND) == 0);
return INTEL_OUTPUT_FORMAT_YCBCR420;
} else if (tmp & PIPEMISC_OUTPUT_COLORSPACE_YUV) {
@@ -9214,7 +9371,7 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state)
enum i9xx_plane_id i9xx_plane = plane->i9xx_plane;
u32 tmp;
- tmp = I915_READ(DSPCNTR(i9xx_plane));
+ tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane));
if (tmp & DISPPLANE_GAMMA_ENABLE)
crtc_state->gamma_enable = true;
@@ -9245,7 +9402,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
ret = false;
- tmp = I915_READ(PIPECONF(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
goto out;
@@ -9274,7 +9431,8 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
PIPECONF_GAMMA_MODE_SHIFT;
if (IS_CHERRYVIEW(dev_priv))
- pipe_config->cgm_mode = I915_READ(CGM_PIPE_MODE(crtc->pipe));
+ pipe_config->cgm_mode = intel_de_read(dev_priv,
+ CGM_PIPE_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
@@ -9292,14 +9450,14 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
if (IS_CHERRYVIEW(dev_priv) && crtc->pipe != PIPE_A)
tmp = dev_priv->chv_dpll_md[crtc->pipe];
else
- tmp = I915_READ(DPLL_MD(crtc->pipe));
+ tmp = intel_de_read(dev_priv, DPLL_MD(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & DPLL_MD_UDI_MULTIPLIER_MASK)
>> DPLL_MD_UDI_MULTIPLIER_SHIFT) + 1;
pipe_config->dpll_hw_state.dpll_md = tmp;
} else if (IS_I945G(dev_priv) || IS_I945GM(dev_priv) ||
IS_G33(dev_priv) || IS_PINEVIEW(dev_priv)) {
- tmp = I915_READ(DPLL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, DPLL(crtc->pipe));
pipe_config->pixel_multiplier =
((tmp & SDVO_MULTIPLIER_MASK)
>> SDVO_MULTIPLIER_SHIFT_HIRES) + 1;
@@ -9309,10 +9467,13 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc,
* function. */
pipe_config->pixel_multiplier = 1;
}
- pipe_config->dpll_hw_state.dpll = I915_READ(DPLL(crtc->pipe));
+ pipe_config->dpll_hw_state.dpll = intel_de_read(dev_priv,
+ DPLL(crtc->pipe));
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv)) {
- pipe_config->dpll_hw_state.fp0 = I915_READ(FP0(crtc->pipe));
- pipe_config->dpll_hw_state.fp1 = I915_READ(FP1(crtc->pipe));
+ pipe_config->dpll_hw_state.fp0 = intel_de_read(dev_priv,
+ FP0(crtc->pipe));
+ pipe_config->dpll_hw_state.fp1 = intel_de_read(dev_priv,
+ FP1(crtc->pipe));
} else {
/* Mask out read-only status bits. */
pipe_config->dpll_hw_state.dpll &= ~(DPLL_LOCK_VLV |
@@ -9381,8 +9542,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
/* Check if any DPLLs are using the SSC source */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- u32 temp = I915_READ(PCH_DPLL(i));
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ u32 temp = intel_de_read(dev_priv, PCH_DPLL(i));
if (!(temp & DPLL_VCO_ENABLE))
continue;
@@ -9394,15 +9555,16 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
}
}
- DRM_DEBUG_KMS("has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
- has_panel, has_lvds, has_ck505, using_ssc_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "has_panel %d has_lvds %d has_ck505 %d using_ssc_source %d\n",
+ has_panel, has_lvds, has_ck505, using_ssc_source);
/* Ironlake: try to setup display ref clock before DPLL
* enabling. This is only under driver's control after
* PCH B stepping, previous chipset stepping should be
* ignoring this setting.
*/
- val = I915_READ(PCH_DREF_CONTROL);
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
/* As we must carefully and slowly disable/enable each source in turn,
* compute the final state we want first and check if we need to
@@ -9454,14 +9616,14 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* SSC must be turned on before enabling the CPU output */
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on panel\n");
+ drm_dbg_kms(&dev_priv->drm, "Using SSC on panel\n");
val |= DREF_SSC1_ENABLE;
} else
val &= ~DREF_SSC1_ENABLE;
/* Get SSC going before enabling the outputs */
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
@@ -9469,30 +9631,31 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Enable CPU source on CPU attached eDP */
if (has_cpu_edp) {
if (intel_panel_use_ssc(dev_priv) && can_ssc) {
- DRM_DEBUG_KMS("Using SSC on eDP\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Using SSC on eDP\n");
val |= DREF_CPU_SOURCE_OUTPUT_DOWNSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_NONSPREAD;
} else
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
} else {
- DRM_DEBUG_KMS("Disabling CPU source output\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling CPU source output\n");
val &= ~DREF_CPU_SOURCE_OUTPUT_MASK;
/* Turn off CPU output */
val |= DREF_CPU_SOURCE_OUTPUT_DISABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
if (!using_ssc_source) {
- DRM_DEBUG_KMS("Disabling SSC source\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling SSC source\n");
/* Turn off the SSC source */
val &= ~DREF_SSC_SOURCE_MASK;
@@ -9501,8 +9664,8 @@ static void ilk_init_pch_refclk(struct drm_i915_private *dev_priv)
/* Turn off SSC1 */
val &= ~DREF_SSC1_ENABLE;
- I915_WRITE(PCH_DREF_CONTROL, val);
- POSTING_READ(PCH_DREF_CONTROL);
+ intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
+ intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);
udelay(200);
}
}
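
[Note: the DREF sequencing above repeats one read-modify-write pattern; sketched with a hypothetical field:

	val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
	val &= ~HYPOTHETICAL_FIELD_MASK;
	val |= HYPOTHETICAL_FIELD_VALUE;
	intel_de_write(dev_priv, PCH_DREF_CONTROL, val);
	intel_de_posting_read(dev_priv, PCH_DREF_CONTROL);	/* flush the write */
	udelay(200);	/* let the reference source settle before the next step */
]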
@@ -9514,21 +9677,21 @@ static void lpt_reset_fdi_mphy(struct drm_i915_private *dev_priv)
{
u32 tmp;
- tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp |= FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
- if (wait_for_us(I915_READ(SOUTH_CHICKEN2) &
+ if (wait_for_us(intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS, 100))
- DRM_ERROR("FDI mPHY reset assert timeout\n");
+ drm_err(&dev_priv->drm, "FDI mPHY reset assert timeout\n");
- tmp = I915_READ(SOUTH_CHICKEN2);
+ tmp = intel_de_read(dev_priv, SOUTH_CHICKEN2);
tmp &= ~FDI_MPHY_IOSFSB_RESET_CTL;
- I915_WRITE(SOUTH_CHICKEN2, tmp);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, tmp);
- if (wait_for_us((I915_READ(SOUTH_CHICKEN2) &
+ if (wait_for_us((intel_de_read(dev_priv, SOUTH_CHICKEN2) &
FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100))
- DRM_ERROR("FDI mPHY reset de-assert timeout\n");
+ drm_err(&dev_priv->drm, "FDI mPHY reset de-assert timeout\n");
}
/* WaMPhyProgramming:hsw */
@@ -9617,10 +9780,11 @@ static void lpt_enable_clkout_dp(struct drm_i915_private *dev_priv,
{
u32 reg, tmp;
- if (WARN(with_fdi && !with_spread, "FDI requires downspread\n"))
+ if (drm_WARN(&dev_priv->drm, with_fdi && !with_spread,
+ "FDI requires downspread\n"))
with_spread = true;
- if (WARN(HAS_PCH_LPT_LP(dev_priv) &&
- with_fdi, "LP PCH doesn't have FDI\n"))
+ if (drm_WARN(&dev_priv->drm, HAS_PCH_LPT_LP(dev_priv) &&
+ with_fdi, "LP PCH doesn't have FDI\n"))
with_fdi = false;
mutex_lock(&dev_priv->sb_lock);
@@ -9714,10 +9878,10 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
u32 tmp;
int idx = BEND_IDX(steps);
- if (WARN_ON(steps % 5 != 0))
+ if (drm_WARN_ON(&dev_priv->drm, steps % 5 != 0))
return;
- if (WARN_ON(idx >= ARRAY_SIZE(sscdivintphase)))
+ if (drm_WARN_ON(&dev_priv->drm, idx >= ARRAY_SIZE(sscdivintphase)))
return;
mutex_lock(&dev_priv->sb_lock);
@@ -9740,8 +9904,8 @@ static void lpt_bend_clkout_dp(struct drm_i915_private *dev_priv, int steps)
static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
{
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 ctl = I915_READ(SPLL_CTL);
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, SPLL_CTL);
if ((ctl & SPLL_PLL_ENABLE) == 0)
return false;
@@ -9760,8 +9924,8 @@ static bool spll_uses_pch_ssc(struct drm_i915_private *dev_priv)
static bool wrpll_uses_pch_ssc(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- u32 fuse_strap = I915_READ(FUSE_STRAP);
- u32 ctl = I915_READ(WRPLL_CTL(id));
+ u32 fuse_strap = intel_de_read(dev_priv, FUSE_STRAP);
+ u32 ctl = intel_de_read(dev_priv, WRPLL_CTL(id));
if ((ctl & WRPLL_PLL_ENABLE) == 0)
return false;
@@ -9810,17 +9974,17 @@ static void lpt_init_pch_refclk(struct drm_i915_private *dev_priv)
dev_priv->pch_ssc_use = 0;
if (spll_uses_pch_ssc(dev_priv)) {
- DRM_DEBUG_KMS("SPLL using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "SPLL using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_SPLL);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL1)) {
- DRM_DEBUG_KMS("WRPLL1 using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "WRPLL1 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL1);
}
if (wrpll_uses_pch_ssc(dev_priv, DPLL_ID_WRPLL2)) {
- DRM_DEBUG_KMS("WRPLL2 using PCH SSC\n");
+ drm_dbg_kms(&dev_priv->drm, "WRPLL2 using PCH SSC\n");
dev_priv->pch_ssc_use |= BIT(DPLL_ID_WRPLL2);
}
@@ -9885,8 +10049,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
* This would end up with an odd purple hue over
* the entire display. Make sure we don't do it.
*/
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->limited_color_range)
val |= PIPECONF_COLOR_RANGE_SELECT;
@@ -9898,8 +10062,8 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state)
val |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(PIPECONF(pipe), val);
- POSTING_READ(PIPECONF(pipe));
+ intel_de_write(dev_priv, PIPECONF(pipe), val);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
}
static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
@@ -9921,8 +10085,8 @@ static void hsw_set_pipeconf(const struct intel_crtc_state *crtc_state)
crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB)
val |= PIPECONF_OUTPUT_COLORSPACE_YUV_HSW;
- I915_WRITE(PIPECONF(cpu_transcoder), val);
- POSTING_READ(PIPECONF(cpu_transcoder));
+ intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val);
+ intel_de_posting_read(dev_priv, PIPECONF(cpu_transcoder));
}
static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
@@ -9965,7 +10129,10 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state)
BIT(PLANE_CURSOR))) == 0)
val |= PIPEMISC_HDR_MODE_PRECISION;
- I915_WRITE(PIPEMISC(crtc->pipe), val);
+ if (INTEL_GEN(dev_priv) >= 12)
+ val |= PIPEMISC_PIXEL_ROUNDING_TRUNC;
+
+ intel_de_write(dev_priv, PIPEMISC(crtc->pipe), val);
}
int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
@@ -9973,7 +10140,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
u32 tmp;
- tmp = I915_READ(PIPEMISC(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe));
switch (tmp & PIPEMISC_DITHER_BPC_MASK) {
case PIPEMISC_DITHER_6_BPC:
@@ -10126,8 +10293,9 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_LVDS)) {
if (intel_panel_use_ssc(dev_priv)) {
- DRM_DEBUG_KMS("using SSC reference clock of %d kHz\n",
- dev_priv->vbt.lvds_ssc_freq);
+ drm_dbg_kms(&dev_priv->drm,
+ "using SSC reference clock of %d kHz\n",
+ dev_priv->vbt.lvds_ssc_freq);
refclk = dev_priv->vbt.lvds_ssc_freq;
}
@@ -10149,15 +10317,17 @@ static int ilk_crtc_compute_clock(struct intel_crtc *crtc,
if (!crtc_state->clock_set &&
!g4x_find_best_dpll(limit, crtc_state, crtc_state->port_clock,
refclk, NULL, &crtc_state->dpll)) {
- DRM_ERROR("Couldn't find PLL settings for mode!\n");
+ drm_err(&dev_priv->drm,
+ "Couldn't find PLL settings for mode!\n");
return -EINVAL;
}
ilk_compute_dpll(crtc, crtc_state, NULL);
if (!intel_reserve_shared_dplls(state, crtc, NULL)) {
- DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
@@ -10171,12 +10341,12 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
enum pipe pipe = crtc->pipe;
- m_n->link_m = I915_READ(PCH_TRANS_LINK_M1(pipe));
- m_n->link_n = I915_READ(PCH_TRANS_LINK_N1(pipe));
- m_n->gmch_m = I915_READ(PCH_TRANS_DATA_M1(pipe))
+ m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe));
+ m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe));
+ m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PCH_TRANS_DATA_N1(pipe));
- m_n->tu = ((I915_READ(PCH_TRANS_DATA_M1(pipe))
+ m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe));
+ m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
@@ -10189,30 +10359,38 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc,
enum pipe pipe = crtc->pipe;
if (INTEL_GEN(dev_priv) >= 5) {
- m_n->link_m = I915_READ(PIPE_LINK_M1(transcoder));
- m_n->link_n = I915_READ(PIPE_LINK_N1(transcoder));
- m_n->gmch_m = I915_READ(PIPE_DATA_M1(transcoder))
+ m_n->link_m = intel_de_read(dev_priv,
+ PIPE_LINK_M1(transcoder));
+ m_n->link_n = intel_de_read(dev_priv,
+ PIPE_LINK_N1(transcoder));
+ m_n->gmch_m = intel_de_read(dev_priv,
+ PIPE_DATA_M1(transcoder))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PIPE_DATA_N1(transcoder));
- m_n->tu = ((I915_READ(PIPE_DATA_M1(transcoder))
+ m_n->gmch_n = intel_de_read(dev_priv,
+ PIPE_DATA_N1(transcoder));
+ m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) {
- m2_n2->link_m = I915_READ(PIPE_LINK_M2(transcoder));
- m2_n2->link_n = I915_READ(PIPE_LINK_N2(transcoder));
- m2_n2->gmch_m = I915_READ(PIPE_DATA_M2(transcoder))
+ m2_n2->link_m = intel_de_read(dev_priv,
+ PIPE_LINK_M2(transcoder));
+ m2_n2->link_n = intel_de_read(dev_priv,
+ PIPE_LINK_N2(transcoder));
+ m2_n2->gmch_m = intel_de_read(dev_priv,
+ PIPE_DATA_M2(transcoder))
& ~TU_SIZE_MASK;
- m2_n2->gmch_n = I915_READ(PIPE_DATA_N2(transcoder));
- m2_n2->tu = ((I915_READ(PIPE_DATA_M2(transcoder))
+ m2_n2->gmch_n = intel_de_read(dev_priv,
+ PIPE_DATA_N2(transcoder));
+ m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
} else {
- m_n->link_m = I915_READ(PIPE_LINK_M_G4X(pipe));
- m_n->link_n = I915_READ(PIPE_LINK_N_G4X(pipe));
- m_n->gmch_m = I915_READ(PIPE_DATA_M_G4X(pipe))
+ m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe));
+ m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe));
+ m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
& ~TU_SIZE_MASK;
- m_n->gmch_n = I915_READ(PIPE_DATA_N_G4X(pipe));
- m_n->tu = ((I915_READ(PIPE_DATA_M_G4X(pipe))
+ m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe));
+ m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe))
& TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1;
}
}
@@ -10247,12 +10425,14 @@ static void skl_get_pfit_config(struct intel_crtc *crtc,
/* find scaler attached to this pipe */
for (i = 0; i < crtc->num_scalers; i++) {
- ps_ctrl = I915_READ(SKL_PS_CTRL(crtc->pipe, i));
+ ps_ctrl = intel_de_read(dev_priv, SKL_PS_CTRL(crtc->pipe, i));
if (ps_ctrl & PS_SCALER_EN && !(ps_ctrl & PS_PLANE_SEL_MASK)) {
id = i;
pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(SKL_PS_WIN_POS(crtc->pipe, i));
- pipe_config->pch_pfit.size = I915_READ(SKL_PS_WIN_SZ(crtc->pipe, i));
+ pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+ SKL_PS_WIN_POS(crtc->pipe, i));
+ pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+ SKL_PS_WIN_SZ(crtc->pipe, i));
scaler_state->scalers[i].in_use = true;
break;
}
@@ -10284,11 +10464,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
if (!plane->get_hw_state(plane, &pipe))
return;
- WARN_ON(pipe != crtc->pipe);
+ drm_WARN_ON(dev, pipe != crtc->pipe);
intel_fb = kzalloc(sizeof(*intel_fb), GFP_KERNEL);
if (!intel_fb) {
- DRM_DEBUG_KMS("failed to alloc fb\n");
+ drm_dbg_kms(&dev_priv->drm, "failed to alloc fb\n");
return;
}
@@ -10296,7 +10476,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
fb->dev = dev;
- val = I915_READ(PLANE_CTL(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id));
if (INTEL_GEN(dev_priv) >= 11)
pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK;
@@ -10304,7 +10484,8 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
pixel_format = val & PLANE_CTL_FORMAT_MASK;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv)) {
- alpha = I915_READ(PLANE_COLOR_CTL(pipe, plane_id));
+ alpha = intel_de_read(dev_priv,
+ PLANE_COLOR_CTL(pipe, plane_id));
alpha &= PLANE_COLOR_ALPHA_MASK;
} else {
alpha = val & PLANE_CTL_ALPHA_MASK;
@@ -10368,16 +10549,16 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
val & PLANE_CTL_FLIP_HORIZONTAL)
plane_config->rotation |= DRM_MODE_REFLECT_X;
- base = I915_READ(PLANE_SURF(pipe, plane_id)) & 0xfffff000;
+ base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000;
plane_config->base = base;
- offset = I915_READ(PLANE_OFFSET(pipe, plane_id));
+ offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id));
- val = I915_READ(PLANE_SIZE(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id));
fb->height = ((val >> 16) & 0xffff) + 1;
fb->width = ((val >> 0) & 0xffff) + 1;
- val = I915_READ(PLANE_STRIDE(pipe, plane_id));
+ val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id));
stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0);
fb->pitches[0] = (val & 0x3ff) * stride_mult;
@@ -10385,10 +10566,11 @@ skl_get_initial_plane_config(struct intel_crtc *crtc,
plane_config->size = fb->pitches[0] * aligned_height;
- DRM_DEBUG_KMS("%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
- crtc->base.name, plane->base.name, fb->width, fb->height,
- fb->format->cpp[0] * 8, base, fb->pitches[0],
- plane_config->size);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s/%s with fb: size=%dx%d@%d, offset=%x, pitch %d, size 0x%x\n",
+ crtc->base.name, plane->base.name, fb->width, fb->height,
+ fb->format->cpp[0] * 8, base, fb->pitches[0],
+ plane_config->size);
plane_config->fb = intel_fb;
return;
@@ -10404,19 +10586,21 @@ static void ilk_get_pfit_config(struct intel_crtc *crtc,
struct drm_i915_private *dev_priv = to_i915(dev);
u32 tmp;
- tmp = I915_READ(PF_CTL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PF_CTL(crtc->pipe));
if (tmp & PF_ENABLE) {
pipe_config->pch_pfit.enabled = true;
- pipe_config->pch_pfit.pos = I915_READ(PF_WIN_POS(crtc->pipe));
- pipe_config->pch_pfit.size = I915_READ(PF_WIN_SZ(crtc->pipe));
+ pipe_config->pch_pfit.pos = intel_de_read(dev_priv,
+ PF_WIN_POS(crtc->pipe));
+ pipe_config->pch_pfit.size = intel_de_read(dev_priv,
+ PF_WIN_SZ(crtc->pipe));
/* We currently do not free assignments of panel fitters on
* ivb/hsw (since we don't use the higher upscaling modes which
* differentiates them) so just WARN about this case for now. */
if (IS_GEN(dev_priv, 7)) {
- WARN_ON((tmp & PF_PIPE_SEL_MASK_IVB) !=
- PF_PIPE_SEL_IVB(crtc->pipe));
+ drm_WARN_ON(dev, (tmp & PF_PIPE_SEL_MASK_IVB) !=
+ PF_PIPE_SEL_IVB(crtc->pipe));
}
}
}
@@ -10441,7 +10625,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->master_transcoder = INVALID_TRANSCODER;
ret = false;
- tmp = I915_READ(PIPECONF(crtc->pipe));
+ tmp = intel_de_read(dev_priv, PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
goto out;
@@ -10478,18 +10662,19 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >>
PIPECONF_GAMMA_MODE_SHIFT;
- pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
i9xx_get_pipe_color_config(pipe_config);
intel_color_get_config(pipe_config);
- if (I915_READ(PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
+ if (intel_de_read(dev_priv, PCH_TRANSCONF(crtc->pipe)) & TRANS_ENABLE) {
struct intel_shared_dpll *pll;
enum intel_dpll_id pll_id;
pipe_config->has_pch_encoder = true;
- tmp = I915_READ(FDI_RX_CTL(crtc->pipe));
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(crtc->pipe));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -10502,7 +10687,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
*/
pll_id = (enum intel_dpll_id) crtc->pipe;
} else {
- tmp = I915_READ(PCH_DPLL_SEL);
+ tmp = intel_de_read(dev_priv, PCH_DPLL_SEL);
if (tmp & TRANS_DPLLB_SEL(crtc->pipe))
pll_id = DPLL_ID_PCH_PLL_B;
else
@@ -10513,8 +10698,8 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc,
intel_get_shared_dpll_by_id(dev_priv, pll_id);
pll = pipe_config->shared_dpll;
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
- &pipe_config->dpll_hw_state));
+ drm_WARN_ON(dev, !pll->info->funcs->get_hw_state(dev_priv, pll,
+ &pipe_config->dpll_hw_state));
tmp = pipe_config->dpll_hw_state.dpll;
pipe_config->pixel_multiplier =
@@ -10552,8 +10737,9 @@ static int hsw_crtc_compute_clock(struct intel_crtc *crtc,
intel_get_crtc_new_encoder(state, crtc_state);
if (!intel_reserve_shared_dplls(state, crtc, encoder)) {
- DRM_DEBUG_KMS("failed to find PLL for pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to find PLL for pipe %c\n",
+ pipe_name(crtc->pipe));
return -EINVAL;
}
}
@@ -10567,10 +10753,10 @@ static void cnl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
enum intel_dpll_id id;
u32 temp;
- temp = I915_READ(DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
+ temp = intel_de_read(dev_priv, DPCLKA_CFGCR0) & DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(port);
id = temp >> DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(port);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL2))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL2))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10585,24 +10771,25 @@ static void icl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
u32 temp;
if (intel_phy_is_combo(dev_priv, phy)) {
- temp = I915_READ(ICL_DPCLKA_CFGCR0) &
+ temp = intel_de_read(dev_priv, ICL_DPCLKA_CFGCR0) &
ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_MASK(phy);
id = temp >> ICL_DPCLKA_CFGCR0_DDI_CLK_SEL_SHIFT(phy);
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
} else if (intel_phy_is_tc(dev_priv, phy)) {
- u32 clk_sel = I915_READ(DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
+ u32 clk_sel = intel_de_read(dev_priv, DDI_CLK_SEL(port)) & DDI_CLK_SEL_MASK;
if (clk_sel == DDI_CLK_SEL_MG) {
id = icl_tc_port_to_pll_id(intel_port_to_tc(dev_priv,
port));
port_dpll_id = ICL_PORT_DPLL_MG_PHY;
} else {
- WARN_ON(clk_sel < DDI_CLK_SEL_TBT_162);
+ drm_WARN_ON(&dev_priv->drm,
+ clk_sel < DDI_CLK_SEL_TBT_162);
id = DPLL_ID_ICL_TBTPLL;
port_dpll_id = ICL_PORT_DPLL_DEFAULT;
}
} else {
- WARN(1, "Invalid port %x\n", port);
+ drm_WARN(&dev_priv->drm, 1, "Invalid port %x\n", port);
return;
}
@@ -10629,7 +10816,7 @@ static void bxt_get_ddi_pll(struct drm_i915_private *dev_priv,
id = DPLL_ID_SKL_DPLL2;
break;
default:
- DRM_ERROR("Incorrect port type\n");
+ drm_err(&dev_priv->drm, "Incorrect port type\n");
return;
}
@@ -10642,10 +10829,10 @@ static void skl_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
enum intel_dpll_id id;
u32 temp;
- temp = I915_READ(DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
+ temp = intel_de_read(dev_priv, DPLL_CTRL2) & DPLL_CTRL2_DDI_CLK_SEL_MASK(port);
id = temp >> (port * 3 + 1);
- if (WARN_ON(id < SKL_DPLL0 || id > SKL_DPLL3))
+ if (drm_WARN_ON(&dev_priv->drm, id < SKL_DPLL0 || id > SKL_DPLL3))
return;
pipe_config->shared_dpll = intel_get_shared_dpll_by_id(dev_priv, id);
@@ -10655,7 +10842,7 @@ static void hsw_get_ddi_pll(struct drm_i915_private *dev_priv, enum port port,
struct intel_crtc_state *pipe_config)
{
enum intel_dpll_id id;
- u32 ddi_pll_sel = I915_READ(PORT_CLK_SEL(port));
+ u32 ddi_pll_sel = intel_de_read(dev_priv, PORT_CLK_SEL(port));
switch (ddi_pll_sel) {
case PORT_CLK_SEL_WRPLL1:
@@ -10723,7 +10910,8 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
bool force_thru = false;
enum pipe trans_pipe;
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(panel_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(panel_transcoder));
if (!(tmp & TRANS_DDI_FUNC_ENABLE))
continue;
@@ -10738,8 +10926,9 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
switch (tmp & TRANS_DDI_EDP_INPUT_MASK) {
default:
- WARN(1, "unknown pipe linked to transcoder %s\n",
- transcoder_name(panel_transcoder));
+ drm_WARN(dev, 1,
+ "unknown pipe linked to transcoder %s\n",
+ transcoder_name(panel_transcoder));
/* fall through */
case TRANS_DDI_EDP_INPUT_A_ONOFF:
force_thru = true;
@@ -10767,11 +10956,11 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
/*
* Valid combos: none, eDP, DSI0, DSI1, DSI0+DSI1
*/
- WARN_ON((enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
- enabled_panel_transcoders != BIT(TRANSCODER_EDP));
+ drm_WARN_ON(dev, (enabled_panel_transcoders & BIT(TRANSCODER_EDP)) &&
+ enabled_panel_transcoders != BIT(TRANSCODER_EDP));
power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10780,7 +10969,7 @@ static bool hsw_get_transcoder_state(struct intel_crtc *crtc,
wakerefs[power_domain] = wf;
*power_domain_mask |= BIT_ULL(power_domain);
- tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ tmp = intel_de_read(dev_priv, PIPECONF(pipe_config->cpu_transcoder));
return tmp & PIPECONF_ENABLE;
}
@@ -10805,7 +10994,7 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
cpu_transcoder = TRANSCODER_DSI_C;
power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
- WARN_ON(*power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(dev, *power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wf)
@@ -10825,11 +11014,11 @@ static bool bxt_get_dsi_transcoder_state(struct intel_crtc *crtc,
break;
/* XXX: this works for video mode only */
- tmp = I915_READ(BXT_MIPI_PORT_CTRL(port));
+ tmp = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
if (!(tmp & DPI_ENABLE))
continue;
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
if ((tmp & BXT_PIPE_SELECT_MASK) != BXT_PIPE_SELECT(crtc->pipe))
continue;
@@ -10853,7 +11042,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
port = (cpu_transcoder == TRANSCODER_DSI_A) ?
PORT_A : PORT_B;
} else {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder));
+ tmp = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 12)
port = TGL_TRANS_DDI_FUNC_CTL_VAL_TO_PORT(tmp);
else
@@ -10873,7 +11063,8 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
pll = pipe_config->shared_dpll;
if (pll) {
- WARN_ON(!pll->info->funcs->get_hw_state(dev_priv, pll,
+ drm_WARN_ON(&dev_priv->drm,
+ !pll->info->funcs->get_hw_state(dev_priv, pll,
&pipe_config->dpll_hw_state));
}
@@ -10883,10 +11074,10 @@ static void hsw_get_ddi_port_state(struct intel_crtc *crtc,
* the PCH transcoder is on.
*/
if (INTEL_GEN(dev_priv) < 9 &&
- (port == PORT_E) && I915_READ(LPT_TRANSCONF) & TRANS_ENABLE) {
+ (port == PORT_E) && intel_de_read(dev_priv, LPT_TRANSCONF) & TRANS_ENABLE) {
pipe_config->has_pch_encoder = true;
- tmp = I915_READ(FDI_RX_CTL(PIPE_A));
+ tmp = intel_de_read(dev_priv, FDI_RX_CTL(PIPE_A));
pipe_config->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >>
FDI_DP_PORT_WIDTH_SHIFT) + 1;
@@ -10899,7 +11090,8 @@ static enum transcoder transcoder_master_readout(struct drm_i915_private *dev_pr
{
u32 trans_port_sync, master_select;
- trans_port_sync = I915_READ(TRANS_DDI_FUNC_CTL2(cpu_transcoder));
+ trans_port_sync = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL2(cpu_transcoder));
if ((trans_port_sync & PORT_SYNC_MODE_ENABLE) == 0)
return INVALID_TRANSCODER;
@@ -10943,8 +11135,9 @@ static void icl_get_trans_port_sync_config(struct intel_crtc_state *crtc_state)
intel_display_power_put(dev_priv, power_domain, trans_wakeref);
}
- WARN_ON(crtc_state->master_transcoder != INVALID_TRANSCODER &&
- crtc_state->sync_mode_slaves_mask);
+ drm_WARN_ON(&dev_priv->drm,
+ crtc_state->master_transcoder != INVALID_TRANSCODER &&
+ crtc_state->sync_mode_slaves_mask);
}
static bool hsw_get_pipe_config(struct intel_crtc *crtc,
@@ -10955,6 +11148,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
enum intel_display_power_domain power_domain;
u64 power_domain_mask;
bool active;
+ u32 tmp;
pipe_config->master_transcoder = INVALID_TRANSCODER;
@@ -10974,7 +11168,7 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (IS_GEN9_LP(dev_priv) &&
bxt_get_dsi_transcoder_state(crtc, pipe_config,
&power_domain_mask, wakerefs)) {
- WARN_ON(active);
+ drm_WARN_ON(&dev_priv->drm, active);
active = true;
}
@@ -10990,7 +11184,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_get_pipe_src_size(crtc, pipe_config);
if (IS_HASWELL(dev_priv)) {
- u32 tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
+ u32 tmp = intel_de_read(dev_priv,
+ PIPECONF(pipe_config->cpu_transcoder));
if (tmp & PIPECONF_OUTPUT_COLORSPACE_YUV_HSW)
pipe_config->output_format = INTEL_OUTPUT_FORMAT_YCBCR444;
@@ -11013,12 +11208,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
pipe_config->output_format == INTEL_OUTPUT_FORMAT_YCBCR444;
}
- pipe_config->gamma_mode = I915_READ(GAMMA_MODE(crtc->pipe));
+ pipe_config->gamma_mode = intel_de_read(dev_priv,
+ GAMMA_MODE(crtc->pipe));
- pipe_config->csc_mode = I915_READ(PIPE_CSC_MODE(crtc->pipe));
+ pipe_config->csc_mode = intel_de_read(dev_priv,
+ PIPE_CSC_MODE(crtc->pipe));
if (INTEL_GEN(dev_priv) >= 9) {
- u32 tmp = I915_READ(SKL_BOTTOM_COLOR(crtc->pipe));
+ tmp = intel_de_read(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe));
if (tmp & SKL_BOTTOM_COLOR_GAMMA_ENABLE)
pipe_config->gamma_enable = true;
@@ -11031,8 +11228,14 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
intel_color_get_config(pipe_config);
+ tmp = intel_de_read(dev_priv, WM_LINETIME(crtc->pipe));
+ pipe_config->linetime = REG_FIELD_GET(HSW_LINETIME_MASK, tmp);
+ if (IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ pipe_config->ips_linetime =
+ REG_FIELD_GET(HSW_IPS_LINETIME_MASK, tmp);
+
power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
- WARN_ON(power_domain_mask & BIT_ULL(power_domain));
+ drm_WARN_ON(&dev_priv->drm, power_domain_mask & BIT_ULL(power_domain));
wf = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (wf) {
@@ -11047,7 +11250,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (hsw_crtc_supports_ips(crtc)) {
if (IS_HASWELL(dev_priv))
- pipe_config->ips_enabled = I915_READ(IPS_CTL) & IPS_ENABLE;
+ pipe_config->ips_enabled = intel_de_read(dev_priv,
+ IPS_CTL) & IPS_ENABLE;
else {
/*
* We cannot read out IPS state on Broadwell, set to
@@ -11061,7 +11265,8 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc,
if (pipe_config->cpu_transcoder != TRANSCODER_EDP &&
!transcoder_is_dsi(pipe_config->cpu_transcoder)) {
pipe_config->pixel_multiplier =
- I915_READ(PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
+ intel_de_read(dev_priv,
+ PIPE_MULT(pipe_config->cpu_transcoder)) + 1;
} else {
pipe_config->pixel_multiplier = 1;
}
@@ -11087,7 +11292,7 @@ static u32 intel_cursor_base(const struct intel_plane_state *plane_state)
u32 base;
if (INTEL_INFO(dev_priv)->display.cursor_needs_physical)
- base = obj->phys_handle->busaddr;
+ base = sg_dma_address(obj->mm.pages->sgl);
else
base = intel_plane_ggtt_offset(plane_state);
@@ -11150,7 +11355,8 @@ static int intel_cursor_check_surface(struct intel_plane_state *plane_state)
plane_state, 0);
if (src_x != 0 || src_y != 0) {
- DRM_DEBUG_KMS("Arbitrary cursor panning not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Arbitrary cursor panning not supported\n");
return -EINVAL;
}
@@ -11181,10 +11387,11 @@ static int intel_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
if (fb && fb->modifier != DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("cursor cannot be tiled\n");
+ drm_dbg_kms(&i915->drm, "cursor cannot be tiled\n");
return -EINVAL;
}
@@ -11255,6 +11462,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
struct intel_plane_state *plane_state)
{
const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev);
int ret;
ret = intel_check_cursor(crtc_state, plane_state);
@@ -11267,14 +11475,15 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i845_cursor_size_ok(plane_state)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg_kms(&i915->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&i915->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
switch (fb->pitches[0]) {
case 256:
@@ -11283,8 +11492,8 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state,
case 2048:
break;
default:
- DRM_DEBUG_KMS("Invalid cursor stride (%u)\n",
- fb->pitches[0]);
+ drm_dbg_kms(&i915->drm, "Invalid cursor stride (%u)\n",
+ fb->pitches[0]);
return -EINVAL;
}
@@ -11322,17 +11531,17 @@ static void i845_update_cursor(struct intel_plane *plane,
if (plane->cursor.base != base ||
plane->cursor.size != size ||
plane->cursor.cntl != cntl) {
- I915_WRITE_FW(CURCNTR(PIPE_A), 0);
- I915_WRITE_FW(CURBASE(PIPE_A), base);
- I915_WRITE_FW(CURSIZE, size);
- I915_WRITE_FW(CURPOS(PIPE_A), pos);
- I915_WRITE_FW(CURCNTR(PIPE_A), cntl);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0);
+ intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base);
+ intel_de_write_fw(dev_priv, CURSIZE, size);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl);
plane->cursor.base = base;
plane->cursor.size = size;
plane->cursor.cntl = cntl;
} else {
- I915_WRITE_FW(CURPOS(PIPE_A), pos);
+ intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11357,7 +11566,7 @@ static bool i845_cursor_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(CURCNTR(PIPE_A)) & CURSOR_ENABLE;
+ ret = intel_de_read(dev_priv, CURCNTR(PIPE_A)) & CURSOR_ENABLE;
*pipe = PIPE_A;
@@ -11483,20 +11692,22 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
/* Check for which cursor types we support */
if (!i9xx_cursor_size_ok(plane_state)) {
- DRM_DEBUG("Cursor dimension %dx%d not supported\n",
- drm_rect_width(&plane_state->uapi.dst),
- drm_rect_height(&plane_state->uapi.dst));
+ drm_dbg(&dev_priv->drm,
+ "Cursor dimension %dx%d not supported\n",
+ drm_rect_width(&plane_state->uapi.dst),
+ drm_rect_height(&plane_state->uapi.dst));
return -EINVAL;
}
- WARN_ON(plane_state->uapi.visible &&
- plane_state->color_plane[0].stride != fb->pitches[0]);
+ drm_WARN_ON(&dev_priv->drm, plane_state->uapi.visible &&
+ plane_state->color_plane[0].stride != fb->pitches[0]);
if (fb->pitches[0] !=
drm_rect_width(&plane_state->uapi.dst) * fb->format->cpp[0]) {
- DRM_DEBUG_KMS("Invalid cursor stride (%u) (cursor width %d)\n",
- fb->pitches[0],
- drm_rect_width(&plane_state->uapi.dst));
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid cursor stride (%u) (cursor width %d)\n",
+ fb->pitches[0],
+ drm_rect_width(&plane_state->uapi.dst));
return -EINVAL;
}
@@ -11512,7 +11723,8 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state,
*/
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_C &&
plane_state->uapi.visible && plane_state->uapi.dst.x1 < 0) {
- DRM_DEBUG_KMS("CHV cursor C not allowed to straddle the left screen edge\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CHV cursor C not allowed to straddle the left screen edge\n");
return -EINVAL;
}
@@ -11573,17 +11785,18 @@ static void i9xx_update_cursor(struct intel_plane *plane,
plane->cursor.size != fbc_ctl ||
plane->cursor.cntl != cntl) {
if (HAS_CUR_FBC(dev_priv))
- I915_WRITE_FW(CUR_FBC_CTL(pipe), fbc_ctl);
- I915_WRITE_FW(CURCNTR(pipe), cntl);
- I915_WRITE_FW(CURPOS(pipe), pos);
- I915_WRITE_FW(CURBASE(pipe), base);
+ intel_de_write_fw(dev_priv, CUR_FBC_CTL(pipe),
+ fbc_ctl);
+ intel_de_write_fw(dev_priv, CURCNTR(pipe), cntl);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
plane->cursor.base = base;
plane->cursor.size = fbc_ctl;
plane->cursor.cntl = cntl;
} else {
- I915_WRITE_FW(CURPOS(pipe), pos);
- I915_WRITE_FW(CURBASE(pipe), base);
+ intel_de_write_fw(dev_priv, CURPOS(pipe), pos);
+ intel_de_write_fw(dev_priv, CURBASE(pipe), base);
}
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
@@ -11614,7 +11827,7 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- val = I915_READ(CURCNTR(plane->pipe));
+ val = intel_de_read(dev_priv, CURCNTR(plane->pipe));
ret = val & MCURSOR_MODE;
@@ -11700,13 +11913,13 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
struct intel_crtc_state *crtc_state;
int ret, i = -1;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
old->restore_state = NULL;
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
+ drm_WARN_ON(dev, !drm_modeset_is_locked(&config->connection_mutex));
/*
* Algorithm gets a little messy:
@@ -11753,7 +11966,8 @@ int intel_get_load_detect_pipe(struct drm_connector *connector,
* If we didn't find an unused CRTC, don't use any.
*/
if (!crtc) {
- DRM_DEBUG_KMS("no pipe available for load-detect\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no pipe available for load-detect\n");
ret = -ENODEV;
goto fail;
}
@@ -11804,13 +12018,16 @@ found:
if (!ret)
ret = drm_atomic_add_affected_planes(restore_state, crtc);
if (ret) {
- DRM_DEBUG_KMS("Failed to create a copy of old state to restore: %i\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to create a copy of old state to restore: %i\n",
+ ret);
goto fail;
}
ret = drm_atomic_commit(state);
if (ret) {
- DRM_DEBUG_KMS("failed to set mode on load-detect pipe\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to set mode on load-detect pipe\n");
goto fail;
}
@@ -11843,20 +12060,22 @@ void intel_release_load_detect_pipe(struct drm_connector *connector,
{
struct intel_encoder *intel_encoder =
intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(intel_encoder->base.dev);
struct drm_encoder *encoder = &intel_encoder->base;
struct drm_atomic_state *state = old->restore_state;
int ret;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
- connector->base.id, connector->name,
- encoder->base.id, encoder->name);
+ drm_dbg_kms(&i915->drm, "[CONNECTOR:%d:%s], [ENCODER:%d:%s]\n",
+ connector->base.id, connector->name,
+ encoder->base.id, encoder->name);
if (!state)
return;
ret = drm_atomic_helper_commit_duplicated_state(state, ctx);
if (ret)
- DRM_DEBUG_KMS("Couldn't release load detect pipe: %i\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Couldn't release load detect pipe: %i\n", ret);
drm_atomic_state_put(state);
}
@@ -11921,8 +12140,9 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
7 : 14;
break;
default:
- DRM_DEBUG_KMS("Unknown DPLL mode %08x in programmed "
- "mode\n", (int)(dpll & DPLL_MODE_MASK));
+ drm_dbg_kms(&dev_priv->drm,
+ "Unknown DPLL mode %08x in programmed "
+ "mode\n", (int)(dpll & DPLL_MODE_MASK));
return;
}
@@ -11931,7 +12151,8 @@ static void i9xx_crtc_clock_get(struct intel_crtc *crtc,
else
port_clock = i9xx_calc_dpll_params(refclk, &clock);
} else {
- u32 lvds = IS_I830(dev_priv) ? 0 : I915_READ(LVDS);
+ u32 lvds = IS_I830(dev_priv) ? 0 : intel_de_read(dev_priv,
+ LVDS);
bool is_lvds = (pipe == 1) && (lvds & LVDS_PORT_EN);
if (is_lvds) {
@@ -12142,7 +12363,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
was_visible = old_plane_state->uapi.visible;
visible = plane_state->uapi.visible;
- if (!was_crtc_enabled && WARN_ON(was_visible))
+ if (!was_crtc_enabled && drm_WARN_ON(&dev_priv->drm, was_visible))
was_visible = false;
/*
@@ -12168,11 +12389,12 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat
turn_off = was_visible && (!visible || mode_changed);
turn_on = visible && (!was_visible || mode_changed);
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
- crtc->base.base.id, crtc->base.name,
- plane->base.base.id, plane->base.name,
- was_visible, visible,
- turn_off, turn_on, mode_changed);
+ drm_dbg_atomic(&dev_priv->drm,
+ "[CRTC:%d:%s] with [PLANE:%d:%s] visible %i -> %i, off %i, on %i, ms %i\n",
+ crtc->base.base.id, crtc->base.name,
+ plane->base.base.id, plane->base.name,
+ was_visible, visible,
+ turn_off, turn_on, mode_changed);
if (turn_on) {
if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
@@ -12349,8 +12571,9 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
}
if (!linked_state) {
- DRM_DEBUG_KMS("Need %d free Y planes for planar YUV\n",
- hweight8(crtc_state->nv12_planes));
+ drm_dbg_kms(&dev_priv->drm,
+ "Need %d free Y planes for planar YUV\n",
+ hweight8(crtc_state->nv12_planes));
return -EINVAL;
}
@@ -12361,11 +12584,13 @@ static int icl_check_nv12_planes(struct intel_crtc_state *crtc_state)
linked_state->planar_linked_plane = plane;
crtc_state->active_planes |= BIT(linked->id);
crtc_state->update_planes |= BIT(linked->id);
- DRM_DEBUG_KMS("Using %s as Y plane for %s\n", linked->base.name, plane->base.name);
+ drm_dbg_kms(&dev_priv->drm, "Using %s as Y plane for %s\n",
+ linked->base.name, plane->base.name);
/* Copy parameters to slave plane */
linked_state->ctl = plane_state->ctl | PLANE_CTL_YUV420_Y_PLANE;
linked_state->color_ctl = plane_state->color_ctl;
+ linked_state->view = plane_state->view;
memcpy(linked_state->color_plane, plane_state->color_plane,
sizeof(linked_state->color_plane));
@@ -12397,120 +12622,74 @@ static bool c8_planes_changed(const struct intel_crtc_state *new_crtc_state)
return !old_crtc_state->c8_planes != !new_crtc_state->c8_planes;
}
-static bool
-intel_atomic_is_master_connector(struct intel_crtc_state *crtc_state)
+static u16 hsw_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_connector *connector;
- struct drm_connector_state *connector_state;
- int i;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc != crtc)
- continue;
- if (connector->has_tile &&
- connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
- return true;
- }
+ if (!crtc_state->hw.enable)
+ return 0;
- return false;
+ return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ adjusted_mode->crtc_clock);
}
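/*
 * Worked example (illustrative numbers, not from this patch): for a
 * 1920x1080@60 mode with crtc_htotal = 2200 and crtc_clock = 148500 kHz,
 * hsw_linetime_wm() returns DIV_ROUND_CLOSEST(2200 * 1000 * 8, 148500)
 * = 119, i.e. a line time of ~14.8 us expressed in 1/8 us units.
 * hsw_ips_linetime_wm() below is the same computation against the
 * logical cdclk instead of the pixel clock.
 */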
-static void reset_port_sync_mode_state(struct intel_crtc_state *crtc_state)
+static u16 hsw_ips_linetime_wm(const struct intel_crtc_state *crtc_state,
+ const struct intel_cdclk_state *cdclk_state)
{
- crtc_state->master_transcoder = INVALID_TRANSCODER;
- crtc_state->sync_mode_slaves_mask = 0;
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+
+ if (!crtc_state->hw.enable)
+ return 0;
+
+ return DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
+ cdclk_state->logical.cdclk);
}
-static int icl_compute_port_sync_crtc_state(struct drm_connector *connector,
- struct intel_crtc_state *crtc_state,
- int num_tiled_conns)
+static u16 skl_linetime_wm(const struct intel_crtc_state *crtc_state)
{
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct drm_connector *master_connector;
- struct drm_connector_list_iter conn_iter;
- struct drm_crtc *master_crtc = NULL;
- struct drm_crtc_state *master_crtc_state;
- struct intel_crtc_state *master_pipe_config;
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ const struct drm_display_mode *adjusted_mode =
+ &crtc_state->hw.adjusted_mode;
+ u16 linetime_wm;
- if (INTEL_GEN(dev_priv) < 11)
+ if (!crtc_state->hw.enable)
return 0;
- if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP))
- return 0;
+ linetime_wm = DIV_ROUND_UP(adjusted_mode->crtc_htotal * 1000 * 8,
+ crtc_state->pixel_rate);
- /*
- * In case of tiled displays there could be one or more slaves but there is
- * only one master. Lets make the CRTC used by the connector corresponding
- * to the last horizonal and last vertical tile a master/genlock CRTC.
- * All the other CRTCs corresponding to other tiles of the same Tile group
- * are the slave CRTCs and hold a pointer to their genlock CRTC.
- * If all tiles not present do not make master slave assignments.
- */
- if (!connector->has_tile ||
- crtc_state->hw.mode.hdisplay != connector->tile_h_size ||
- crtc_state->hw.mode.vdisplay != connector->tile_v_size ||
- num_tiled_conns < connector->num_h_tile * connector->num_v_tile) {
- reset_port_sync_mode_state(crtc_state);
- return 0;
- }
- /* Last Horizontal and last vertical tile connector is a master
- * Master's crtc state is already populated in slave for port sync
- */
- if (connector->tile_h_loc == connector->num_h_tile - 1 &&
- connector->tile_v_loc == connector->num_v_tile - 1)
- return 0;
+ /* Display WA #1135: BXT:ALL GLK:ALL */
+ if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
+ linetime_wm /= 2;
- /* Loop through all connectors and configure the Slave crtc_state
- * to point to the correct master.
- */
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(master_connector, &conn_iter) {
- struct drm_connector_state *master_conn_state = NULL;
+ return linetime_wm;
+}
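/*
 * Continuing the illustrative numbers above (and assuming pixel_rate is
 * also 148500 kHz): DIV_ROUND_UP(2200 * 1000 * 8, 148500) = 119, which
 * the WA #1135 halving on BXT/GLK with IPC enabled reduces to 59.
 */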
- if (!(master_connector->has_tile &&
- master_connector->tile_group->id == connector->tile_group->id))
- continue;
- if (master_connector->tile_h_loc != master_connector->num_h_tile - 1 ||
- master_connector->tile_v_loc != master_connector->num_v_tile - 1)
- continue;
+static int hsw_compute_linetime_wm(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_cdclk_state *cdclk_state;
- master_conn_state = drm_atomic_get_connector_state(&state->base,
- master_connector);
- if (IS_ERR(master_conn_state)) {
- drm_connector_list_iter_end(&conn_iter);
- return PTR_ERR(master_conn_state);
- }
- if (master_conn_state->crtc) {
- master_crtc = master_conn_state->crtc;
- break;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (INTEL_GEN(dev_priv) >= 9)
+ crtc_state->linetime = skl_linetime_wm(crtc_state);
+ else
+ crtc_state->linetime = hsw_linetime_wm(crtc_state);
- if (!master_crtc) {
- DRM_DEBUG_KMS("Could not find Master CRTC for Slave CRTC %d\n",
- crtc->base.id);
- return -EINVAL;
- }
+ if (!hsw_crtc_supports_ips(crtc))
+ return 0;
- master_crtc_state = drm_atomic_get_crtc_state(&state->base,
- master_crtc);
- if (IS_ERR(master_crtc_state))
- return PTR_ERR(master_crtc_state);
+ cdclk_state = intel_atomic_get_cdclk_state(state);
+ if (IS_ERR(cdclk_state))
+ return PTR_ERR(cdclk_state);
- master_pipe_config = to_intel_crtc_state(master_crtc_state);
- crtc_state->master_transcoder = master_pipe_config->cpu_transcoder;
- master_pipe_config->sync_mode_slaves_mask |=
- BIT(crtc_state->cpu_transcoder);
- DRM_DEBUG_KMS("Master Transcoder = %s added for Slave CRTC = %d, slave transcoder bitmask = %d\n",
- transcoder_name(crtc_state->master_transcoder),
- crtc->base.id,
- master_pipe_config->sync_mode_slaves_mask);
+ crtc_state->ips_linetime = hsw_ips_linetime_wm(crtc_state,
+ cdclk_state);
return 0;
}
@@ -12530,7 +12709,7 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
if (mode_changed && crtc_state->hw.enable &&
dev_priv->display.crtc_compute_clock &&
- !WARN_ON(crtc_state->shared_dpll)) {
+ !drm_WARN_ON(&dev_priv->drm, crtc_state->shared_dpll)) {
ret = dev_priv->display.crtc_compute_clock(crtc, crtc_state);
if (ret)
return ret;
@@ -12550,17 +12729,18 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
return ret;
}
- ret = 0;
if (dev_priv->display.compute_pipe_wm) {
ret = dev_priv->display.compute_pipe_wm(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("Target pipe watermarks are invalid\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Target pipe watermarks are invalid\n");
return ret;
}
}
if (dev_priv->display.compute_intermediate_wm) {
- if (WARN_ON(!dev_priv->display.compute_pipe_wm))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !dev_priv->display.compute_pipe_wm))
return 0;
/*
@@ -12570,23 +12750,39 @@ static int intel_crtc_atomic_check(struct intel_atomic_state *state,
*/
ret = dev_priv->display.compute_intermediate_wm(crtc_state);
if (ret) {
- DRM_DEBUG_KMS("No valid intermediate pipe watermarks are possible\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No valid intermediate pipe watermarks are possible\n");
return ret;
}
}
if (INTEL_GEN(dev_priv) >= 9) {
- if (mode_changed || crtc_state->update_pipe)
+ if (mode_changed || crtc_state->update_pipe) {
ret = skl_update_scaler_crtc(crtc_state);
- if (!ret)
- ret = intel_atomic_setup_scalers(dev_priv, crtc,
- crtc_state);
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_atomic_setup_scalers(dev_priv, crtc, crtc_state);
+ if (ret)
+ return ret;
}
- if (HAS_IPS(dev_priv))
- crtc_state->ips_enabled = hsw_compute_ips_config(crtc_state);
+ if (HAS_IPS(dev_priv)) {
+ ret = hsw_compute_ips_config(crtc_state);
+ if (ret)
+ return ret;
+ }
- return ret;
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv)) {
+ ret = hsw_compute_linetime_wm(state, crtc);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
}
static void intel_modeset_update_connector_atomic_state(struct drm_device *dev)
@@ -12619,6 +12815,7 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
struct intel_crtc_state *pipe_config)
{
struct drm_connector *connector = conn_state->connector;
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
const struct drm_display_info *info = &connector->display_info;
int bpp;
@@ -12640,11 +12837,13 @@ compute_sink_pipe_bpp(const struct drm_connector_state *conn_state,
}
if (bpp < pipe_config->pipe_bpp) {
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
- "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
- connector->base.id, connector->name,
- bpp, 3 * info->bpc, 3 * conn_state->max_requested_bpc,
- pipe_config->pipe_bpp);
+ drm_dbg_kms(&i915->drm,
+ "[CONNECTOR:%d:%s] Limiting display bpp to %d instead of "
+ "EDID bpp %d, requested bpp %d, max platform bpp %d\n",
+ connector->base.id, connector->name,
+ bpp, 3 * info->bpc,
+ 3 * conn_state->max_requested_bpc,
+ pipe_config->pipe_bpp);
pipe_config->pipe_bpp = bpp;
}
@@ -12704,10 +12903,13 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config,
const char *id, unsigned int lane_count,
const struct intel_link_m_n *m_n)
{
- DRM_DEBUG_KMS("%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
- id, lane_count,
- m_n->gmch_m, m_n->gmch_n,
- m_n->link_m, m_n->link_n, m_n->tu);
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
+
+ drm_dbg_kms(&i915->drm,
+ "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n",
+ id, lane_count,
+ m_n->gmch_m, m_n->gmch_n,
+ m_n->link_m, m_n->link_n, m_n->tu);
}
static void
@@ -12783,27 +12985,31 @@ static const char *output_formats(enum intel_output_format format)
static void intel_dump_plane_state(const struct intel_plane_state *plane_state)
{
struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane);
+ struct drm_i915_private *i915 = to_i915(plane->base.dev);
const struct drm_framebuffer *fb = plane_state->hw.fb;
struct drm_format_name_buf format_name;
if (!fb) {
- DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
- plane->base.base.id, plane->base.name,
- yesno(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [NOFB], visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ yesno(plane_state->uapi.visible));
return;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
- plane->base.base.id, plane->base.name,
- fb->base.id, fb->width, fb->height,
- drm_get_format_name(fb->format->format, &format_name),
- yesno(plane_state->uapi.visible));
- DRM_DEBUG_KMS("\trotation: 0x%x, scaler: %d\n",
- plane_state->hw.rotation, plane_state->scaler_id);
+ drm_dbg_kms(&i915->drm,
+ "[PLANE:%d:%s] fb: [FB:%d] %ux%u format = %s, visible: %s\n",
+ plane->base.base.id, plane->base.name,
+ fb->base.id, fb->width, fb->height,
+ drm_get_format_name(fb->format->format, &format_name),
+ yesno(plane_state->uapi.visible));
+ drm_dbg_kms(&i915->drm, "\trotation: 0x%x, scaler: %d\n",
+ plane_state->hw.rotation, plane_state->scaler_id);
if (plane_state->uapi.visible)
- DRM_DEBUG_KMS("\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst));
+ drm_dbg_kms(&i915->drm,
+ "\tsrc: " DRM_RECT_FP_FMT " dst: " DRM_RECT_FMT "\n",
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst));
}
static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
@@ -12817,22 +13023,24 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
char buf[64];
int i;
- DRM_DEBUG_KMS("[CRTC:%d:%s] enable: %s %s\n",
- crtc->base.base.id, crtc->base.name,
- yesno(pipe_config->hw.enable), context);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] enable: %s %s\n",
+ crtc->base.base.id, crtc->base.name,
+ yesno(pipe_config->hw.enable), context);
if (!pipe_config->hw.enable)
goto dump_planes;
snprintf_output_types(buf, sizeof(buf), pipe_config->output_types);
- DRM_DEBUG_KMS("active: %s, output_types: %s (0x%x), output format: %s\n",
- yesno(pipe_config->hw.active),
- buf, pipe_config->output_types,
- output_formats(pipe_config->output_format));
+ drm_dbg_kms(&dev_priv->drm,
+ "active: %s, output_types: %s (0x%x), output format: %s\n",
+ yesno(pipe_config->hw.active),
+ buf, pipe_config->output_types,
+ output_formats(pipe_config->output_format));
- DRM_DEBUG_KMS("cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
- transcoder_name(pipe_config->cpu_transcoder),
- pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu_transcoder: %s, pipe bpp: %i, dithering: %i\n",
+ transcoder_name(pipe_config->cpu_transcoder),
+ pipe_config->pipe_bpp, pipe_config->dither);
if (pipe_config->has_pch_encoder)
intel_dump_m_n_config(pipe_config, "fdi",
@@ -12848,13 +13056,15 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
&pipe_config->dp_m2_n2);
}
- DRM_DEBUG_KMS("audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
- pipe_config->has_audio, pipe_config->has_infoframe,
- pipe_config->infoframes.enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "audio: %i, infoframes: %i, infoframes enabled: 0x%x\n",
+ pipe_config->has_audio, pipe_config->has_infoframe,
+ pipe_config->infoframes.enable);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL))
- DRM_DEBUG_KMS("GCP: 0x%x\n", pipe_config->infoframes.gcp);
+ drm_dbg_kms(&dev_priv->drm, "GCP: 0x%x\n",
+ pipe_config->infoframes.gcp);
if (pipe_config->infoframes.enable &
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.avi);
@@ -12865,50 +13075,59 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR))
intel_dump_infoframe(dev_priv, &pipe_config->infoframes.hdmi);
- DRM_DEBUG_KMS("requested mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "requested mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.mode);
- DRM_DEBUG_KMS("adjusted mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "adjusted mode:\n");
drm_mode_debug_printmodeline(&pipe_config->hw.adjusted_mode);
intel_dump_crtc_timings(&pipe_config->hw.adjusted_mode);
- DRM_DEBUG_KMS("port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
- pipe_config->port_clock,
- pipe_config->pipe_src_w, pipe_config->pipe_src_h,
- pipe_config->pixel_rate);
+ drm_dbg_kms(&dev_priv->drm,
+ "port clock: %d, pipe src size: %dx%d, pixel rate %d\n",
+ pipe_config->port_clock,
+ pipe_config->pipe_src_w, pipe_config->pipe_src_h,
+ pipe_config->pixel_rate);
+
+ drm_dbg_kms(&dev_priv->drm, "linetime: %d, ips linetime: %d\n",
+ pipe_config->linetime, pipe_config->ips_linetime);
if (INTEL_GEN(dev_priv) >= 9)
- DRM_DEBUG_KMS("num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
- crtc->num_scalers,
- pipe_config->scaler_state.scaler_users,
- pipe_config->scaler_state.scaler_id);
+ drm_dbg_kms(&dev_priv->drm,
+ "num_scalers: %d, scaler_users: 0x%x, scaler_id: %d\n",
+ crtc->num_scalers,
+ pipe_config->scaler_state.scaler_users,
+ pipe_config->scaler_state.scaler_id);
if (HAS_GMCH(dev_priv))
- DRM_DEBUG_KMS("gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
- pipe_config->gmch_pfit.control,
- pipe_config->gmch_pfit.pgm_ratios,
- pipe_config->gmch_pfit.lvds_border_bits);
+ drm_dbg_kms(&dev_priv->drm,
+ "gmch pfit: control: 0x%08x, ratios: 0x%08x, lvds border: 0x%08x\n",
+ pipe_config->gmch_pfit.control,
+ pipe_config->gmch_pfit.pgm_ratios,
+ pipe_config->gmch_pfit.lvds_border_bits);
else
- DRM_DEBUG_KMS("pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
- pipe_config->pch_pfit.pos,
- pipe_config->pch_pfit.size,
- enableddisabled(pipe_config->pch_pfit.enabled),
- yesno(pipe_config->pch_pfit.force_thru));
+ drm_dbg_kms(&dev_priv->drm,
+ "pch pfit: pos: 0x%08x, size: 0x%08x, %s, force thru: %s\n",
+ pipe_config->pch_pfit.pos,
+ pipe_config->pch_pfit.size,
+ enableddisabled(pipe_config->pch_pfit.enabled),
+ yesno(pipe_config->pch_pfit.force_thru));
- DRM_DEBUG_KMS("ips: %i, double wide: %i\n",
- pipe_config->ips_enabled, pipe_config->double_wide);
+ drm_dbg_kms(&dev_priv->drm, "ips: %i, double wide: %i\n",
+ pipe_config->ips_enabled, pipe_config->double_wide);
intel_dpll_dump_hw_state(dev_priv, &pipe_config->dpll_hw_state);
if (IS_CHERRYVIEW(dev_priv))
- DRM_DEBUG_KMS("cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->cgm_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "cgm_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->cgm_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
else
- DRM_DEBUG_KMS("csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
- pipe_config->csc_mode, pipe_config->gamma_mode,
- pipe_config->gamma_enable, pipe_config->csc_enable);
+ drm_dbg_kms(&dev_priv->drm,
+ "csc_mode: 0x%x gamma_mode: 0x%x gamma_enable: %d csc_enable: %d\n",
+ pipe_config->csc_mode, pipe_config->gamma_mode,
+ pipe_config->gamma_enable, pipe_config->csc_enable);
- DRM_DEBUG_KMS("MST master transcoder: %s\n",
- transcoder_name(pipe_config->mst_master_transcoder));
+ drm_dbg_kms(&dev_priv->drm, "MST master transcoder: %s\n",
+ transcoder_name(pipe_config->mst_master_transcoder));
dump_planes:
if (!state)
@@ -12956,24 +13175,21 @@ static bool check_digital_port_conflicts(struct intel_atomic_state *state)
encoder = to_intel_encoder(connector_state->best_encoder);
- WARN_ON(!connector_state->crtc);
+ drm_WARN_ON(dev, !connector_state->crtc);
switch (encoder->type) {
- unsigned int port_mask;
case INTEL_OUTPUT_DDI:
- if (WARN_ON(!HAS_DDI(to_i915(dev))))
+ if (drm_WARN_ON(dev, !HAS_DDI(to_i915(dev))))
break;
/* else, fall through */
case INTEL_OUTPUT_DP:
case INTEL_OUTPUT_HDMI:
case INTEL_OUTPUT_EDP:
- port_mask = 1 << encoder->port;
-
/* the same port mustn't appear more than once */
- if (used_ports & port_mask)
+ if (used_ports & BIT(encoder->port))
ret = false;
- used_ports |= port_mask;
+ used_ports |= BIT(encoder->port);
break;
case INTEL_OUTPUT_DP_MST:
used_mst_ports |=
@@ -13054,15 +13270,6 @@ intel_crtc_prepare_cleared_state(struct intel_crtc_state *crtc_state)
if (IS_G4X(dev_priv) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
saved_state->wm = crtc_state->wm;
- /*
- * Save the slave bitmask which gets filled for master crtc state during
- * slave atomic check call. For all other CRTCs reset the port sync variables
- * crtc_state->master_transcoder needs to be set to INVALID
- */
- reset_port_sync_mode_state(saved_state);
- if (intel_atomic_is_master_connector(crtc_state))
- saved_state->sync_mode_slaves_mask =
- crtc_state->sync_mode_slaves_mask;
memcpy(crtc_state, saved_state, sizeof(*crtc_state));
kfree(saved_state);
@@ -13077,11 +13284,10 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
{
struct drm_crtc *crtc = pipe_config->uapi.crtc;
struct drm_atomic_state *state = pipe_config->uapi.state;
- struct intel_encoder *encoder;
+ struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev);
struct drm_connector *connector;
struct drm_connector_state *connector_state;
- int base_bpp, ret;
- int i, tile_group_id = -1, num_tiled_conns = 0;
+ int base_bpp, ret, i;
bool retry = true;
pipe_config->cpu_transcoder =
@@ -13120,13 +13326,15 @@ intel_modeset_pipe_config(struct intel_crtc_state *pipe_config)
&pipe_config->pipe_src_h);
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
if (connector_state->crtc != crtc)
continue;
- encoder = to_intel_encoder(connector_state->best_encoder);
-
if (!check_single_encoder_cloning(state, to_intel_crtc(crtc), encoder)) {
- DRM_DEBUG_KMS("rejecting invalid cloning configuration\n");
+ drm_dbg_kms(&i915->drm,
+ "rejecting invalid cloning configuration\n");
return -EINVAL;
}
@@ -13151,47 +13359,24 @@ encoder_retry:
drm_mode_set_crtcinfo(&pipe_config->hw.adjusted_mode,
CRTC_STEREO_DOUBLE);
- /* Get tile_group_id of tiled connector */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector_state->crtc == crtc &&
- connector->has_tile) {
- tile_group_id = connector->tile_group->id;
- break;
- }
- }
-
- /* Get total number of tiled connectors in state that belong to
- * this tile group.
- */
- for_each_new_connector_in_state(state, connector, connector_state, i) {
- if (connector->has_tile &&
- connector->tile_group->id == tile_group_id)
- num_tiled_conns++;
- }
-
/* Pass our mode to the connectors and the CRTC to give them a chance to
* adjust it according to limitations or connector properties, and also
* a chance to reject the mode entirely.
*/
for_each_new_connector_in_state(state, connector, connector_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(connector_state->best_encoder);
+
if (connector_state->crtc != crtc)
continue;
- ret = icl_compute_port_sync_crtc_state(connector, pipe_config,
- num_tiled_conns);
- if (ret) {
- DRM_DEBUG_KMS("Cannot assign Sync Mode CRTCs: %d\n",
- ret);
- return ret;
- }
-
- encoder = to_intel_encoder(connector_state->best_encoder);
ret = encoder->compute_config(encoder, pipe_config,
connector_state);
if (ret < 0) {
if (ret != -EDEADLK)
- DRM_DEBUG_KMS("Encoder config failure: %d\n",
- ret);
+ drm_dbg_kms(&i915->drm,
+ "Encoder config failure: %d\n",
+ ret);
return ret;
}
}
@@ -13206,15 +13391,16 @@ encoder_retry:
if (ret == -EDEADLK)
return ret;
if (ret < 0) {
- DRM_DEBUG_KMS("CRTC fixup failed\n");
+ drm_dbg_kms(&i915->drm, "CRTC fixup failed\n");
return ret;
}
if (ret == RETRY) {
- if (WARN(!retry, "loop in pipe configuration computation\n"))
+ if (drm_WARN(&i915->drm, !retry,
+ "loop in pipe configuration computation\n"))
return -EINVAL;
- DRM_DEBUG_KMS("CRTC bw constrained, retrying\n");
+ drm_dbg_kms(&i915->drm, "CRTC bw constrained, retrying\n");
retry = false;
goto encoder_retry;
}
@@ -13225,8 +13411,9 @@ encoder_retry:
*/
pipe_config->dither = (pipe_config->pipe_bpp == 6*3) &&
!pipe_config->dither_force_disable;
- DRM_DEBUG_KMS("hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
- base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
+ drm_dbg_kms(&i915->drm,
+ "hw max bpp: %i, pipe bpp: %i, dithering: %i\n",
+ base_bpp, pipe_config->pipe_bpp, pipe_config->dither);
/*
* Make drm_calc_timestamping_constants in
@@ -13237,6 +13424,35 @@ encoder_retry:
return 0;
}
+static int
+intel_modeset_pipe_config_late(struct intel_crtc_state *crtc_state)
+{
+ struct intel_atomic_state *state =
+ to_intel_atomic_state(crtc_state->uapi.state);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ int i;
+
+ for_each_new_connector_in_state(&state->base, connector,
+ conn_state, i) {
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ int ret;
+
+ if (conn_state->crtc != &crtc->base ||
+ !encoder->compute_config_late)
+ continue;
+
+ ret = encoder->compute_config_late(encoder, crtc_state,
+ conn_state);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
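/*
 * This second pass runs once every crtc in the state has been through
 * intel_modeset_pipe_config(), so encoders implementing
 * compute_config_late() can resolve cross-crtc dependencies (e.g. a
 * master transcoder choice) that a single-crtc compute_config() pass
 * cannot see.
 */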
+
bool intel_fuzzy_clock_check(int clock1, int clock2)
{
int diff;
@@ -13315,16 +13531,17 @@ pipe_config_infoframe_mismatch(struct drm_i915_private *dev_priv,
if (!drm_debug_enabled(DRM_UT_KMS))
return;
- DRM_DEBUG_KMS("fastset mismatch in %s infoframe\n", name);
- DRM_DEBUG_KMS("expected:\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "fastset mismatch in %s infoframe\n", name);
+ drm_dbg_kms(&dev_priv->drm, "expected:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, a);
- DRM_DEBUG_KMS("found:\n");
+ drm_dbg_kms(&dev_priv->drm, "found:\n");
hdmi_infoframe_log(KERN_DEBUG, dev_priv->drm.dev, b);
} else {
- DRM_ERROR("mismatch in %s infoframe\n", name);
- DRM_ERROR("expected:\n");
+ drm_err(&dev_priv->drm, "mismatch in %s infoframe\n", name);
+ drm_err(&dev_priv->drm, "expected:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, a);
- DRM_ERROR("found:\n");
+ drm_err(&dev_priv->drm, "found:\n");
hdmi_infoframe_log(KERN_ERR, dev_priv->drm.dev, b);
}
}
@@ -13333,6 +13550,7 @@ static void __printf(4, 5)
pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
const char *name, const char *format, ...)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct va_format vaf;
va_list args;
@@ -13341,11 +13559,12 @@ pipe_config_mismatch(bool fastset, const struct intel_crtc *crtc,
vaf.va = &args;
if (fastset)
- DRM_DEBUG_KMS("[CRTC:%d:%s] fastset mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ drm_dbg_kms(&i915->drm,
+ "[CRTC:%d:%s] fastset mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
else
- DRM_ERROR("[CRTC:%d:%s] mismatch in %s %pV\n",
- crtc->base.base.id, crtc->base.name, name, &vaf);
+ drm_err(&i915->drm, "[CRTC:%d:%s] mismatch in %s %pV\n",
+ crtc->base.base.id, crtc->base.name, name, &vaf);
va_end(args);
}
@@ -13381,7 +13600,8 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
!(pipe_config->hw.mode.private_flags & I915_MODE_FLAG_INHERITED);
if (fixup_inherited && !fastboot_enabled(dev_priv)) {
- DRM_DEBUG_KMS("initial modeset and fastboot not set\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "initial modeset and fastboot not set\n");
ret = false;
}
@@ -13583,7 +13803,6 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_I(pixel_multiplier);
PIPE_CONF_CHECK_I(output_format);
- PIPE_CONF_CHECK_I(dc3co_exitline);
PIPE_CONF_CHECK_BOOL(has_hdmi_sink);
if ((INTEL_GEN(dev_priv) < 8 && !IS_HASWELL(dev_priv)) ||
IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -13643,10 +13862,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_BOOL(gamma_enable);
PIPE_CONF_CHECK_BOOL(csc_enable);
+ PIPE_CONF_CHECK_I(linetime);
+ PIPE_CONF_CHECK_I(ips_linetime);
+
bp_gamma = intel_color_get_gamma_bit_precision(pipe_config);
if (bp_gamma)
PIPE_CONF_CHECK_COLOR_LUT(gamma_mode, hw.gamma_lut, bp_gamma);
-
}
PIPE_CONF_CHECK_BOOL(double_wide);
@@ -13702,7 +13923,7 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config,
PIPE_CONF_CHECK_INFOFRAME(hdmi);
PIPE_CONF_CHECK_INFOFRAME(drm);
- PIPE_CONF_CHECK_I(sync_mode_slaves_mask);
+ PIPE_CONF_CHECK_X(sync_mode_slaves_mask);
PIPE_CONF_CHECK_I(master_transcoder);
PIPE_CONF_CHECK_I(dsc.compression_enable);
@@ -13736,9 +13957,10 @@ static void intel_pipe_config_sanity_check(struct drm_i915_private *dev_priv,
* FDI already provided one idea for the dotclock.
* Yell if the encoder disagrees.
*/
- WARN(!intel_fuzzy_clock_check(fdi_dotclock, dotclock),
- "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
- fdi_dotclock, dotclock);
+ drm_WARN(&dev_priv->drm,
+ !intel_fuzzy_clock_check(fdi_dotclock, dotclock),
+ "FDI dotclock and encoder dotclock mismatch, fdi: %i, encoder: %i\n",
+ fdi_dotclock, dotclock);
}
}
@@ -13749,12 +13971,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
struct skl_hw_state {
struct skl_ddb_entry ddb_y[I915_MAX_PLANES];
struct skl_ddb_entry ddb_uv[I915_MAX_PLANES];
- struct skl_ddb_allocation ddb;
struct skl_pipe_wm wm;
} *hw;
- struct skl_ddb_allocation *sw_ddb;
struct skl_pipe_wm *sw_wm;
struct skl_ddb_entry *hw_ddb_entry, *sw_ddb_entry;
+ u8 hw_enabled_slices;
const enum pipe pipe = crtc->pipe;
int plane, level, max_level = ilk_wm_max_level(dev_priv);
@@ -13770,14 +13991,14 @@ static void verify_wm_state(struct intel_crtc *crtc,
skl_pipe_ddb_get_hw_state(crtc, hw->ddb_y, hw->ddb_uv);
- skl_ddb_get_hw_state(dev_priv, &hw->ddb);
- sw_ddb = &dev_priv->wm.skl_hw.ddb;
+ hw_enabled_slices = intel_enabled_dbuf_slices_mask(dev_priv);
if (INTEL_GEN(dev_priv) >= 11 &&
- hw->ddb.enabled_slices != sw_ddb->enabled_slices)
- DRM_ERROR("mismatch in DBUF Slices (expected %u, got %u)\n",
- sw_ddb->enabled_slices,
- hw->ddb.enabled_slices);
+ hw_enabled_slices != dev_priv->enabled_dbuf_slices_mask)
+ drm_err(&dev_priv->drm,
+ "mismatch in DBUF Slices (expected 0x%x, got 0x%x)\n",
+ dev_priv->enabled_dbuf_slices_mask,
+ hw_enabled_slices);
/* planes */
for_each_universal_plane(dev_priv, pipe, plane) {
@@ -13792,26 +14013,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
&sw_plane_wm->wm[level]))
continue;
- DRM_ERROR("mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1, level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in WM pipe %c plane %d level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1, level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
}
if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
&sw_plane_wm->trans_wm)) {
- DRM_ERROR("mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), plane + 1,
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in trans WM pipe %c plane %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), plane + 1,
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
}
/* DDB */
@@ -13819,10 +14042,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[plane];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe), plane + 1,
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
+ drm_err(&dev_priv->drm,
+ "mismatch in DDB state pipe %c plane %d (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe), plane + 1,
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
@@ -13844,26 +14068,28 @@ static void verify_wm_state(struct intel_crtc *crtc,
&sw_plane_wm->wm[level]))
continue;
- DRM_ERROR("mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe), level,
- sw_plane_wm->wm[level].plane_en,
- sw_plane_wm->wm[level].plane_res_b,
- sw_plane_wm->wm[level].plane_res_l,
- hw_plane_wm->wm[level].plane_en,
- hw_plane_wm->wm[level].plane_res_b,
- hw_plane_wm->wm[level].plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in WM pipe %c cursor level %d (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe), level,
+ sw_plane_wm->wm[level].plane_en,
+ sw_plane_wm->wm[level].plane_res_b,
+ sw_plane_wm->wm[level].plane_res_l,
+ hw_plane_wm->wm[level].plane_en,
+ hw_plane_wm->wm[level].plane_res_b,
+ hw_plane_wm->wm[level].plane_res_l);
}
if (!skl_wm_level_equals(&hw_plane_wm->trans_wm,
&sw_plane_wm->trans_wm)) {
- DRM_ERROR("mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
- pipe_name(pipe),
- sw_plane_wm->trans_wm.plane_en,
- sw_plane_wm->trans_wm.plane_res_b,
- sw_plane_wm->trans_wm.plane_res_l,
- hw_plane_wm->trans_wm.plane_en,
- hw_plane_wm->trans_wm.plane_res_b,
- hw_plane_wm->trans_wm.plane_res_l);
+ drm_err(&dev_priv->drm,
+ "mismatch in trans WM pipe %c cursor (expected e=%d b=%u l=%u, got e=%d b=%u l=%u)\n",
+ pipe_name(pipe),
+ sw_plane_wm->trans_wm.plane_en,
+ sw_plane_wm->trans_wm.plane_res_b,
+ sw_plane_wm->trans_wm.plane_res_l,
+ hw_plane_wm->trans_wm.plane_en,
+ hw_plane_wm->trans_wm.plane_res_b,
+ hw_plane_wm->trans_wm.plane_res_l);
}
/* DDB */
@@ -13871,10 +14097,11 @@ static void verify_wm_state(struct intel_crtc *crtc,
sw_ddb_entry = &new_crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
if (!skl_ddb_entry_equal(hw_ddb_entry, sw_ddb_entry)) {
- DRM_ERROR("mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
- pipe_name(pipe),
- sw_ddb_entry->start, sw_ddb_entry->end,
- hw_ddb_entry->start, hw_ddb_entry->end);
+ drm_err(&dev_priv->drm,
+ "mismatch in DDB state pipe %c cursor (expected (%u,%u), found (%u,%u))\n",
+ pipe_name(pipe),
+ sw_ddb_entry->start, sw_ddb_entry->end,
+ hw_ddb_entry->start, hw_ddb_entry->end);
}
}
@@ -13918,9 +14145,9 @@ verify_encoder_state(struct drm_i915_private *dev_priv, struct intel_atomic_stat
bool enabled = false, found = false;
enum pipe pipe;
- DRM_DEBUG_KMS("[ENCODER:%d:%s]\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[ENCODER:%d:%s]\n",
+ encoder->base.base.id,
+ encoder->base.name);
for_each_oldnew_connector_in_state(&state->base, connector, old_conn_state,
new_conn_state, i) {
@@ -13972,7 +14199,8 @@ verify_crtc_state(struct intel_crtc *crtc,
intel_crtc_state_reset(old_crtc_state, crtc);
old_crtc_state->uapi.state = state;
- DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.base.id, crtc->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s]\n", crtc->base.base.id,
+ crtc->base.name);
active = dev_priv->display.get_pipe_config(crtc, pipe_config);
@@ -14047,7 +14275,7 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
memset(&dpll_hw_state, 0, sizeof(dpll_hw_state));
- DRM_DEBUG_KMS("%s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "%s\n", pll->info->name);
active = pll->info->funcs->get_hw_state(dev_priv, pll, &dpll_hw_state);
@@ -14074,11 +14302,11 @@ verify_single_dpll_state(struct drm_i915_private *dev_priv,
if (new_crtc_state->hw.active)
I915_STATE_WARN(!(pll->active_mask & crtc_mask),
"pll active mismatch (expected pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
else
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask 0x%02x)\n",
- pipe_name(drm_crtc_index(&crtc->base)), pll->active_mask);
+ pipe_name(crtc->pipe), pll->active_mask);
I915_STATE_WARN(!(pll->state.crtc_mask & crtc_mask),
"pll enabled crtcs mismatch (expected 0x%x in 0x%02x)\n",
@@ -14107,10 +14335,10 @@ verify_shared_dpll_state(struct intel_crtc *crtc,
I915_STATE_WARN(pll->active_mask & crtc_mask,
"pll active mismatch (didn't expect pipe %c in active mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
I915_STATE_WARN(pll->state.crtc_mask & crtc_mask,
"pll enabled crtcs mismatch (found %x in enabled mask)\n",
- pipe_name(drm_crtc_index(&crtc->base)));
+ pipe_name(crtc->pipe));
}
}
@@ -14134,8 +14362,10 @@ verify_disabled_dpll_state(struct drm_i915_private *dev_priv)
{
int i;
- for (i = 0; i < dev_priv->num_shared_dpll; i++)
- verify_single_dpll_state(dev_priv, &dev_priv->shared_dplls[i], NULL, NULL);
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++)
+ verify_single_dpll_state(dev_priv,
+ &dev_priv->dpll.shared_dplls[i],
+ NULL, NULL);
}
static void
@@ -14279,35 +14509,35 @@ static int hsw_mode_set_planes_workaround(struct intel_atomic_state *state)
return 0;
}
-static int intel_modeset_checks(struct intel_atomic_state *state)
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes)
{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ const struct intel_crtc_state *crtc_state;
struct intel_crtc *crtc;
- int ret, i;
+ int i;
- /* keep the current setting */
- if (!state->cdclk.force_min_cdclk_changed)
- state->cdclk.force_min_cdclk = dev_priv->cdclk.force_min_cdclk;
+ for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
+ if (crtc_state->hw.active)
+ active_pipes |= BIT(crtc->pipe);
+ else
+ active_pipes &= ~BIT(crtc->pipe);
+ }
- state->modeset = true;
- state->active_pipes = dev_priv->active_pipes;
- state->cdclk.logical = dev_priv->cdclk.logical;
- state->cdclk.actual = dev_priv->cdclk.actual;
+ return active_pipes;
+}
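/*
 * E.g. (illustrative): if pipes A and B are currently active
 * (active_pipes = 0b011) and this state enables pipe C while disabling
 * pipe B, the result is 0b101; the XOR against dev_priv->active_pipes
 * in intel_modeset_checks() below then flags pipes B and C in
 * active_pipe_changes (0b110).
 */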
- for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
- new_crtc_state, i) {
- if (new_crtc_state->hw.active)
- state->active_pipes |= BIT(crtc->pipe);
- else
- state->active_pipes &= ~BIT(crtc->pipe);
+static int intel_modeset_checks(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ int ret;
- if (old_crtc_state->hw.active != new_crtc_state->hw.active)
- state->active_pipe_changes |= BIT(crtc->pipe);
- }
+ state->modeset = true;
+ state->active_pipes = intel_calc_active_pipes(state, dev_priv->active_pipes);
+
+ state->active_pipe_changes = state->active_pipes ^ dev_priv->active_pipes;
if (state->active_pipe_changes) {
- ret = intel_atomic_lock_global_state(state);
+ ret = _intel_atomic_lock_global_state(state);
if (ret)
return ret;
}
@@ -14398,7 +14628,7 @@ static bool active_planes_affects_min_cdclk(struct drm_i915_private *dev_priv)
}
static int intel_atomic_check_planes(struct intel_atomic_state *state,
- bool *need_modeset)
+ bool *need_cdclk_calc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
@@ -14414,8 +14644,9 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
ret = intel_plane_atomic_check(state, plane);
if (ret) {
- DRM_DEBUG_ATOMIC("[PLANE:%d:%s] atomic driver check failed\n",
- plane->base.base.id, plane->base.name);
+ drm_dbg_atomic(&dev_priv->drm,
+ "[PLANE:%d:%s] atomic driver check failed\n",
+ plane->base.base.id, plane->base.name);
return ret;
}
}
@@ -14452,8 +14683,11 @@ static int intel_atomic_check_planes(struct intel_atomic_state *state,
* affected planes are part of the state. We can now
* compute the minimum cdclk for each plane.
*/
- for_each_new_intel_plane_in_state(state, plane, plane_state, i)
- *need_modeset |= intel_plane_calc_min_cdclk(state, plane);
+ for_each_new_intel_plane_in_state(state, plane, plane_state, i) {
+ ret = intel_plane_calc_min_cdclk(state, plane, need_cdclk_calc);
+ if (ret)
+ return ret;
+ }
return 0;
}
@@ -14466,9 +14700,11 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
for_each_new_intel_crtc_in_state(state, crtc, crtc_state, i) {
int ret = intel_crtc_atomic_check(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
if (ret) {
- DRM_DEBUG_ATOMIC("[CRTC:%d:%s] atomic driver check failed\n",
- crtc->base.base.id, crtc->base.name);
+ drm_dbg_atomic(&i915->drm,
+ "[CRTC:%d:%s] atomic driver check failed\n",
+ crtc->base.base.id, crtc->base.name);
return ret;
}
}
@@ -14476,105 +14712,21 @@ static int intel_atomic_check_crtcs(struct intel_atomic_state *state)
return 0;
}
-static bool intel_cpu_transcoder_needs_modeset(struct intel_atomic_state *state,
- enum transcoder transcoder)
+static bool intel_cpu_transcoders_need_modeset(struct intel_atomic_state *state,
+ u8 transcoders)
{
- struct intel_crtc_state *new_crtc_state;
+ const struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int i;
- for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i)
- if (new_crtc_state->cpu_transcoder == transcoder)
- return needs_modeset(new_crtc_state);
-
- return false;
-}
-
-static void
-intel_modeset_synced_crtcs(struct intel_atomic_state *state,
- u8 transcoders)
-{
- struct intel_crtc_state *new_crtc_state;
- struct intel_crtc *crtc;
- int i;
-
- for_each_new_intel_crtc_in_state(state, crtc,
- new_crtc_state, i) {
- if (transcoders & BIT(new_crtc_state->cpu_transcoder)) {
- new_crtc_state->uapi.mode_changed = true;
- new_crtc_state->update_pipe = false;
- }
- }
-}
-
-static int
-intel_modeset_all_tiles(struct intel_atomic_state *state, int tile_grp_id)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- int ret = 0;
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct drm_connector_state *conn_state;
- struct drm_crtc_state *crtc_state;
-
- if (!connector->has_tile ||
- connector->tile_group->id != tile_grp_id)
- continue;
- conn_state = drm_atomic_get_connector_state(&state->base,
- connector);
- if (IS_ERR(conn_state)) {
- ret = PTR_ERR(conn_state);
- break;
- }
-
- if (!conn_state->crtc)
- continue;
-
- crtc_state = drm_atomic_get_crtc_state(&state->base,
- conn_state->crtc);
- if (IS_ERR(crtc_state)) {
- ret = PTR_ERR(crtc_state);
- break;
- }
- crtc_state->mode_changed = true;
- ret = drm_atomic_add_affected_connectors(&state->base,
- conn_state->crtc);
- if (ret)
- break;
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return ret;
-}
-
-static int
-intel_atomic_check_tiled_conns(struct intel_atomic_state *state)
-{
- struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct drm_connector *connector;
- struct drm_connector_state *old_conn_state, *new_conn_state;
- int i, ret;
-
- if (INTEL_GEN(dev_priv) < 11)
- return 0;
-
- /* Is tiled, mark all other tiled CRTCs as needing a modeset */
- for_each_oldnew_connector_in_state(&state->base, connector,
- old_conn_state, new_conn_state, i) {
- if (!connector->has_tile)
- continue;
- if (!intel_connector_needs_modeset(state, connector))
- continue;
-
- ret = intel_modeset_all_tiles(state, connector->tile_group->id);
- if (ret)
- return ret;
+ for_each_new_intel_crtc_in_state(state, crtc, new_crtc_state, i) {
+ if (new_crtc_state->hw.enable &&
+ transcoders & BIT(new_crtc_state->cpu_transcoder) &&
+ needs_modeset(new_crtc_state))
+ return true;
}
- return 0;
+ return false;
}
/**
@@ -14588,6 +14740,7 @@ static int intel_atomic_check(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_atomic_state *state = to_intel_atomic_state(_state);
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
+ struct intel_cdclk_state *new_cdclk_state;
struct intel_crtc *crtc;
int ret, i;
bool any_ms = false;
@@ -14604,21 +14757,6 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- /**
- * This check adds all the connectors in current state that belong to
- * the same tile group to a full modeset.
- * This function directly sets the mode_changed to true and we also call
- * drm_atomic_add_affected_connectors(). Hence we are not explicitly
- * calling drm_atomic_helper_check_modeset() after this.
- *
- * Fixme: Handle some corner cases where one of the
- * tiled connectors gets disconnected and tile info is lost but since it
- * was previously synced to other conn, we need to add that to the modeset.
- */
- ret = intel_atomic_check_tiled_conns(state);
- if (ret)
- goto fail;
-
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
if (!needs_modeset(new_crtc_state)) {
@@ -14628,18 +14766,26 @@ static int intel_atomic_check(struct drm_device *dev,
continue;
}
- if (!new_crtc_state->uapi.enable) {
- intel_crtc_copy_uapi_to_hw_state(new_crtc_state);
- continue;
- }
-
ret = intel_crtc_prepare_cleared_state(new_crtc_state);
if (ret)
goto fail;
+ if (!new_crtc_state->hw.enable)
+ continue;
+
ret = intel_modeset_pipe_config(new_crtc_state);
if (ret)
goto fail;
+ }
+
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ if (!needs_modeset(new_crtc_state))
+ continue;
+
+ ret = intel_modeset_pipe_config_late(new_crtc_state);
+ if (ret)
+ goto fail;
intel_crtc_check_fastset(old_crtc_state, new_crtc_state);
}
@@ -14662,15 +14808,22 @@ static int intel_atomic_check(struct drm_device *dev,
if (intel_dp_mst_is_slave_trans(new_crtc_state)) {
enum transcoder master = new_crtc_state->mst_master_transcoder;
- if (intel_cpu_transcoder_needs_modeset(state, master)) {
+ if (intel_cpu_transcoders_need_modeset(state, BIT(master))) {
new_crtc_state->uapi.mode_changed = true;
new_crtc_state->update_pipe = false;
}
- } else if (is_trans_port_sync_mode(new_crtc_state)) {
- u8 trans = new_crtc_state->sync_mode_slaves_mask |
- BIT(new_crtc_state->master_transcoder);
+ }
+
+ if (is_trans_port_sync_mode(new_crtc_state)) {
+ u8 trans = new_crtc_state->sync_mode_slaves_mask;
- intel_modeset_synced_crtcs(state, trans);
+ if (new_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ trans |= BIT(new_crtc_state->master_transcoder);
+
+ if (intel_cpu_transcoders_need_modeset(state, trans)) {
+ new_crtc_state->uapi.mode_changed = true;
+ new_crtc_state->update_pipe = false;
+ }
}
}
@@ -14688,7 +14841,8 @@ static int intel_atomic_check(struct drm_device *dev,
}
if (any_ms && !check_digital_port_conflicts(state)) {
- DRM_DEBUG_KMS("rejecting conflicting digital port configuration\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "rejecting conflicting digital port configuration\n");
ret = -EINVAL;
goto fail;
}
@@ -14697,18 +14851,32 @@ static int intel_atomic_check(struct drm_device *dev,
if (ret)
goto fail;
- any_ms |= state->cdclk.force_min_cdclk_changed;
-
ret = intel_atomic_check_planes(state, &any_ms);
if (ret)
goto fail;
+ new_cdclk_state = intel_atomic_get_new_cdclk_state(state);
+ if (new_cdclk_state && new_cdclk_state->force_min_cdclk_changed)
+ any_ms = true;
+
+ /*
+ * distrust_bios_wm will force a full dbuf recomputation
+ * but the hardware state will only get updated accordingly
+ * if state->modeset==true. Hence distrust_bios_wm==true &&
+ * state->modeset==false is an invalid combination which
+ * would cause the hardware and software dbuf state to get
+ * out of sync. We must prevent that.
+ *
+ * FIXME clean up this mess and introduce better
+ * state tracking for dbuf.
+ */
+ if (dev_priv->wm.distrust_bios_wm)
+ any_ms = true;
+
if (any_ms) {
ret = intel_modeset_checks(state);
if (ret)
goto fail;
- } else {
- state->cdclk.logical = dev_priv->cdclk.logical;
}
ret = intel_atomic_check_crtcs(state);
@@ -14814,6 +14982,18 @@ static void intel_pipe_fastset(const struct intel_crtc_state *old_crtc_state,
ilk_pfit_disable(old_crtc_state);
}
+ /*
+ * The register is supposedly single buffered so perhaps
+ * not 100% correct to do this here. But SKL+ calculate
+ * this based on the adjusted pixel rate, so pfit changes do
+ * affect it and so it must be updated for fastsets.
+ * HSW/BDW only really need this here for fastboot, after
+ * that the value should not change without a full modeset.
+ */
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_BROADWELL(dev_priv) || IS_HASWELL(dev_priv))
+ hsw_set_linetime_wm(new_crtc_state);
+
if (INTEL_GEN(dev_priv) >= 11)
icl_set_pipe_chicken(crtc);
}
@@ -14856,9 +15036,6 @@ static void intel_update_crtc(struct intel_crtc *crtc,
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
bool modeset = needs_modeset(new_crtc_state);
- struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state,
- to_intel_plane(crtc->base.primary));
if (modeset) {
intel_crtc_update_active_timings(new_crtc_state);
@@ -14881,8 +15058,8 @@ static void intel_update_crtc(struct intel_crtc *crtc,
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
intel_fbc_disable(crtc);
- else if (new_plane_state)
- intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+ else
+ intel_fbc_enable(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -14912,7 +15089,8 @@ static struct intel_crtc *intel_get_slave_crtc(const struct intel_crtc_state *ne
struct drm_i915_private *dev_priv = to_i915(new_crtc_state->uapi.crtc->dev);
enum transcoder slave_transcoder;
- WARN_ON(!is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
+ drm_WARN_ON(&dev_priv->drm,
+ !is_power_of_2(new_crtc_state->sync_mode_slaves_mask));
slave_transcoder = ffs(new_crtc_state->sync_mode_slaves_mask) - 1;
return intel_get_crtc_for_pipe(dev_priv,
@@ -15029,7 +15207,7 @@ static void intel_set_dp_tp_ctl_normal(struct intel_crtc *crtc,
if (conn_state->crtc == &crtc->base)
break;
}
- intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(conn)));
+ intel_dp = intel_attached_dp(to_intel_connector(conn));
intel_dp_stop_link_train(intel_dp);
}
@@ -15044,15 +15222,12 @@ static void intel_post_crtc_enable_updates(struct intel_crtc *crtc,
intel_atomic_get_new_crtc_state(state, crtc);
struct intel_crtc_state *old_crtc_state =
intel_atomic_get_old_crtc_state(state, crtc);
- struct intel_plane_state *new_plane_state =
- intel_atomic_get_new_plane_state(state,
- to_intel_plane(crtc->base.primary));
bool modeset = needs_modeset(new_crtc_state);
if (new_crtc_state->update_pipe && !new_crtc_state->enable_fbc)
intel_fbc_disable(crtc);
- else if (new_plane_state)
- intel_fbc_enable(crtc, new_crtc_state, new_plane_state);
+ else
+ intel_fbc_enable(state, crtc);
/* Perform vblank evasion around commit operation */
intel_pipe_update_start(new_crtc_state);
@@ -15076,18 +15251,20 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
struct intel_crtc_state *old_crtc_state,
struct intel_crtc_state *new_crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_crtc *slave_crtc = intel_get_slave_crtc(new_crtc_state);
struct intel_crtc_state *new_slave_crtc_state =
intel_atomic_get_new_crtc_state(state, slave_crtc);
struct intel_crtc_state *old_slave_crtc_state =
intel_atomic_get_old_crtc_state(state, slave_crtc);
- WARN_ON(!slave_crtc || !new_slave_crtc_state ||
- !old_slave_crtc_state);
+ drm_WARN_ON(&i915->drm, !slave_crtc || !new_slave_crtc_state ||
+ !old_slave_crtc_state);
- DRM_DEBUG_KMS("Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
- crtc->base.base.id, crtc->base.name, slave_crtc->base.base.id,
- slave_crtc->base.name);
+ drm_dbg_kms(&i915->drm,
+ "Updating Transcoder Port Sync Master CRTC = %d %s and Slave CRTC %d %s\n",
+ crtc->base.base.id, crtc->base.name,
+ slave_crtc->base.base.id, slave_crtc->base.name);
/* Enable seq for slave with DP_TP_CTL left Idle until the
* master is ready
@@ -15117,35 +15294,53 @@ static void intel_update_trans_port_sync_crtcs(struct intel_crtc *crtc,
state);
}
+static void icl_dbuf_slice_pre_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 required_slices = state->enabled_dbuf_slices_mask;
+ u8 slices_union = hw_enabled_slices | required_slices;
+
+ /* If a 2nd DBuf slice is required, enable it here */
+ if (INTEL_GEN(dev_priv) >= 11 && slices_union != hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, slices_union);
+}
+
+static void icl_dbuf_slice_post_update(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ u8 hw_enabled_slices = dev_priv->enabled_dbuf_slices_mask;
+ u8 required_slices = state->enabled_dbuf_slices_mask;
+
+ /* If the 2nd DBuf slice is no longer required, disable it */
+ if (INTEL_GEN(dev_priv) >= 11 && required_slices != hw_enabled_slices)
+ icl_dbuf_slices_update(dev_priv, required_slices);
+}
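/*
 * Illustrative sequence: going from slice 1 only (0b01) to slice 2 only
 * (0b10), the pre-update hook enables the union 0b11 so both the old
 * and the new ddb allocations stay valid while the pipes are
 * reprogrammed, and the post-update hook trims back to 0b10 afterwards.
 */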
+
static void skl_commit_modeset_enables(struct intel_atomic_state *state)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc *crtc;
struct intel_crtc_state *old_crtc_state, *new_crtc_state;
- u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- u8 required_slices = state->wm_results.ddb.enabled_slices;
struct skl_ddb_entry entries[I915_MAX_PIPES] = {};
- const u8 num_pipes = INTEL_NUM_PIPES(dev_priv);
u8 update_pipes = 0, modeset_pipes = 0;
int i;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
+ enum pipe pipe = crtc->pipe;
+
if (!new_crtc_state->hw.active)
continue;
/* ignore allocations for crtc's that have been turned off. */
if (!needs_modeset(new_crtc_state)) {
- entries[i] = old_crtc_state->wm.skl.ddb;
- update_pipes |= BIT(crtc->pipe);
+ entries[pipe] = old_crtc_state->wm.skl.ddb;
+ update_pipes |= BIT(pipe);
} else {
- modeset_pipes |= BIT(crtc->pipe);
+ modeset_pipes |= BIT(pipe);
}
}
- /* If 2nd DBuf slice required, enable it here */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices > hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
-
/*
* Whenever the number of active pipes changes, we need to make sure we
* update the pipes in the right order so that their ddb allocations
@@ -15164,10 +15359,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
continue;
if (skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i))
+ entries, I915_MAX_PIPES, pipe))
continue;
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
update_pipes &= ~BIT(pipe);
intel_update_crtc(crtc, state, old_crtc_state,
@@ -15201,10 +15396,10 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
is_trans_port_sync_slave(new_crtc_state))
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
if (is_trans_port_sync_mode(new_crtc_state)) {
@@ -15236,20 +15431,17 @@ static void skl_commit_modeset_enables(struct intel_atomic_state *state)
if ((modeset_pipes & BIT(pipe)) == 0)
continue;
- WARN_ON(skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
- entries, num_pipes, i));
+ drm_WARN_ON(&dev_priv->drm, skl_ddb_allocation_overlaps(&new_crtc_state->wm.skl.ddb,
+ entries, I915_MAX_PIPES, pipe));
- entries[i] = new_crtc_state->wm.skl.ddb;
+ entries[pipe] = new_crtc_state->wm.skl.ddb;
modeset_pipes &= ~BIT(pipe);
intel_update_crtc(crtc, state, old_crtc_state, new_crtc_state);
}
- WARN_ON(modeset_pipes);
+ drm_WARN_ON(&dev_priv->drm, modeset_pipes);
- /* If 2nd DBuf slice is no more required disable it */
- if (INTEL_GEN(dev_priv) >= 11 && required_slices < hw_enabled_slices)
- icl_dbuf_slices_update(dev_priv, required_slices);
}
static void intel_atomic_helper_free_state(struct drm_i915_private *dev_priv)
@@ -15346,10 +15538,7 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset) {
drm_atomic_helper_update_legacy_modeset_state(dev, &state->base);
- intel_set_cdclk_pre_plane_update(dev_priv,
- &state->cdclk.actual,
- &dev_priv->cdclk.actual,
- state->cdclk.pipe);
+ intel_set_cdclk_pre_plane_update(state);
/*
* SKL workaround: bspec recommends we disable the SAGV when we
@@ -15379,16 +15568,17 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
if (state->modeset)
intel_encoders_update_prepare(state);
+ /* Enable all the new slices we might need */
+ if (state->modeset)
+ icl_dbuf_slice_pre_update(state);
+
/* Now enable the clocks, plane, pipe, and connectors that we set up. */
dev_priv->display.commit_modeset_enables(state);
if (state->modeset) {
intel_encoders_update_complete(state);
- intel_set_cdclk_post_plane_update(dev_priv,
- &state->cdclk.actual,
- &dev_priv->cdclk.actual,
- state->cdclk.pipe);
+ intel_set_cdclk_post_plane_update(state);
}
/* FIXME: We should call drm_atomic_helper_commit_hw_done() here
@@ -15435,6 +15625,10 @@ static void intel_atomic_commit_tail(struct intel_atomic_state *state)
dev_priv->display.optimize_watermarks(state, crtc);
}
+ /* Disable all the slices we no longer need */
+ if (state->modeset)
+ icl_dbuf_slice_post_update(state);
+
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
intel_post_plane_update(state, crtc);
@@ -15578,7 +15772,8 @@ static int intel_atomic_commit(struct drm_device *dev,
ret = intel_atomic_prepare_commit(state);
if (ret) {
- DRM_DEBUG_ATOMIC("Preparing state failed with %i\n", ret);
+ drm_dbg_atomic(&dev_priv->drm,
+ "Preparing state failed with %i\n", ret);
i915_sw_fence_commit(&state->commit_ready);
intel_runtime_pm_put(&dev_priv->runtime_pm, state->wakeref);
return ret;
@@ -15587,6 +15782,8 @@ static int intel_atomic_commit(struct drm_device *dev,
ret = drm_atomic_helper_setup_commit(&state->base, nonblock);
if (!ret)
ret = drm_atomic_helper_swap_state(&state->base, true);
+ if (!ret)
+ intel_atomic_swap_global_state(state);
if (ret) {
i915_sw_fence_commit(&state->commit_ready);
@@ -15602,14 +15799,7 @@ static int intel_atomic_commit(struct drm_device *dev,
if (state->global_state_changed) {
assert_global_state_locked(dev_priv);
- memcpy(dev_priv->min_cdclk, state->min_cdclk,
- sizeof(state->min_cdclk));
- memcpy(dev_priv->min_voltage_level, state->min_voltage_level,
- sizeof(state->min_voltage_level));
dev_priv->active_pipes = state->active_pipes;
- dev_priv->cdclk.force_min_cdclk = state->cdclk.force_min_cdclk;
-
- intel_cdclk_swap_state(state);
}
drm_atomic_state_get(&state->base);
@@ -15737,7 +15927,7 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
/**
* intel_prepare_plane_fb - Prepare fb for usage on plane
- * @plane: drm plane to prepare for
+ * @_plane: drm plane to prepare for
* @_new_plane_state: the plane state being prepared
*
* Prepares a framebuffer for usage on a display plane. Generally this
@@ -15748,23 +15938,25 @@ static void fb_obj_bump_render_priority(struct drm_i915_gem_object *obj)
* Returns 0 on success, negative error code on failure.
*/
int
-intel_prepare_plane_fb(struct drm_plane *plane,
+intel_prepare_plane_fb(struct drm_plane *_plane,
struct drm_plane_state *_new_plane_state)
{
+ struct intel_plane *plane = to_intel_plane(_plane);
struct intel_plane_state *new_plane_state =
to_intel_plane_state(_new_plane_state);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(new_plane_state->uapi.state);
- struct drm_i915_private *dev_priv = to_i915(plane->dev);
- struct drm_framebuffer *fb = new_plane_state->hw.fb;
- struct drm_i915_gem_object *obj = intel_fb_obj(fb);
- struct drm_i915_gem_object *old_obj = intel_fb_obj(plane->state->fb);
+ struct drm_i915_private *dev_priv = to_i915(plane->base.dev);
+ const struct intel_plane_state *old_plane_state =
+ intel_atomic_get_old_plane_state(state, plane);
+ struct drm_i915_gem_object *obj = intel_fb_obj(new_plane_state->hw.fb);
+ struct drm_i915_gem_object *old_obj = intel_fb_obj(old_plane_state->hw.fb);
int ret;
if (old_obj) {
- struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(intel_state,
- to_intel_crtc(plane->state->crtc));
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state,
+ to_intel_crtc(old_plane_state->hw.crtc));
/* Big Hammer, we also need to ensure that any pending
* MI_WAIT_FOR_EVENT inside a user batch buffer on the
@@ -15778,7 +15970,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* can safely continue.
*/
if (needs_modeset(crtc_state)) {
- ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
old_obj->base.resv, NULL,
false, 0,
GFP_KERNEL);
@@ -15788,7 +15980,7 @@ intel_prepare_plane_fb(struct drm_plane *plane,
}
if (new_plane_state->uapi.fence) { /* explicit fencing */
- ret = i915_sw_fence_await_dma_fence(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_dma_fence(&state->commit_ready,
new_plane_state->uapi.fence,
I915_FENCE_TIMEOUT,
GFP_KERNEL);
@@ -15815,12 +16007,12 @@ intel_prepare_plane_fb(struct drm_plane *plane,
if (!new_plane_state->uapi.fence) { /* implicit fencing */
struct dma_fence *fence;
- ret = i915_sw_fence_await_reservation(&intel_state->commit_ready,
+ ret = i915_sw_fence_await_reservation(&state->commit_ready,
obj->base.resv, NULL,
false, I915_FENCE_TIMEOUT,
GFP_KERNEL);
if (ret < 0)
- return ret;
+ goto unpin_fb;
fence = dma_resv_get_excl_rcu(obj->base.resv);
if (fence) {
@@ -15841,12 +16033,17 @@ intel_prepare_plane_fb(struct drm_plane *plane,
* that are not quite steady state without resorting to forcing
* maximum clocks following a vblank miss (see do_rps_boost()).
*/
- if (!intel_state->rps_interactive) {
+ if (!state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, true);
- intel_state->rps_interactive = true;
+ state->rps_interactive = true;
}
return 0;
+
+unpin_fb:
+ intel_plane_unpin_fb(new_plane_state);
+
+ return ret;
}
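
The "- return ret;" → "+ goto unpin_fb;" change above fixes a leak: a failure while waiting for the implicit fence used to bail out without undoing the earlier fb pin. A sketch of that goto-unwind idiom (the helpers below are hypothetical stand-ins for the fb pin/unpin step, not the i915 ones):

#include <errno.h>
#include <stdio.h>

static int pin_fb(void)       { puts("pin");   return 0; }
static void unpin_fb(void)    { puts("unpin"); }
static int await_fences(void) { return -EINTR; /* simulate a failure */ }

static int prepare_plane(void)
{
	int ret;

	ret = pin_fb();
	if (ret)
		return ret;

	/*
	 * Anything that can fail after the pin must unwind through the
	 * label below rather than returning directly -- that was the bug.
	 */
	ret = await_fences();
	if (ret < 0)
		goto unpin;

	return 0;

unpin:
	unpin_fb();
	return ret;
}

int main(void)
{
	printf("prepare_plane() = %d\n", prepare_plane());
	return 0;
}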
/**
@@ -15862,13 +16059,17 @@ intel_cleanup_plane_fb(struct drm_plane *plane,
{
struct intel_plane_state *old_plane_state =
to_intel_plane_state(_old_plane_state);
- struct intel_atomic_state *intel_state =
+ struct intel_atomic_state *state =
to_intel_atomic_state(old_plane_state->uapi.state);
struct drm_i915_private *dev_priv = to_i915(plane->dev);
+ struct drm_i915_gem_object *obj = intel_fb_obj(old_plane_state->hw.fb);
+
+ if (!obj)
+ return;
- if (intel_state->rps_interactive) {
+ if (state->rps_interactive) {
intel_rps_mark_interactive(&dev_priv->gt.rps, false);
- intel_state->rps_interactive = false;
+ state->rps_interactive = false;
}
/* Should only be called after a successful intel_prepare_plane_fb()! */
@@ -16037,6 +16238,8 @@ intel_legacy_cursor_update(struct drm_plane *_plane,
new_plane_state->uapi.crtc_w = crtc_w;
new_plane_state->uapi.crtc_h = crtc_h;
+ intel_plane_copy_uapi_to_hw_state(new_plane_state, new_plane_state);
+
ret = intel_plane_atomic_check_with_state(crtc_state, new_crtc_state,
old_plane_state, new_plane_state);
if (ret)
@@ -16121,7 +16324,6 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u32 *formats;
int num_formats;
int ret, zpos;
@@ -16202,18 +16404,16 @@ intel_primary_plane_create(struct drm_i915_private *dev_priv, enum pipe pipe)
plane->get_hw_state = i9xx_plane_get_hw_state;
plane->check_plane = i9xx_plane_check;
- possible_crtcs = BIT(pipe);
-
if (INTEL_GEN(dev_priv) >= 5 || IS_G4X(dev_priv))
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
"primary %c", pipe_name(pipe));
else
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats,
i9xx_format_modifiers,
DRM_PLANE_TYPE_PRIMARY,
@@ -16255,7 +16455,6 @@ static struct intel_plane *
intel_cursor_plane_create(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- unsigned int possible_crtcs;
struct intel_plane *cursor;
int ret, zpos;
@@ -16288,10 +16487,8 @@ intel_cursor_plane_create(struct drm_i915_private *dev_priv,
if (IS_I845G(dev_priv) || IS_I865G(dev_priv) || HAS_CUR_FBC(dev_priv))
cursor->cursor.size = ~0;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &cursor->base,
- possible_crtcs, &intel_cursor_plane_funcs,
+ 0, &intel_cursor_plane_funcs,
intel_cursor_formats,
ARRAY_SIZE(intel_cursor_formats),
cursor_format_modifiers,
@@ -16336,6 +16533,7 @@ static const struct drm_crtc_funcs bdw_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = bdw_enable_vblank,
.disable_vblank = bdw_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs ilk_crtc_funcs = {
@@ -16344,6 +16542,7 @@ static const struct drm_crtc_funcs ilk_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = ilk_enable_vblank,
.disable_vblank = ilk_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs g4x_crtc_funcs = {
@@ -16352,6 +16551,7 @@ static const struct drm_crtc_funcs g4x_crtc_funcs = {
.get_vblank_counter = g4x_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i965_crtc_funcs = {
@@ -16360,6 +16560,7 @@ static const struct drm_crtc_funcs i965_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i965_enable_vblank,
.disable_vblank = i965_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915gm_crtc_funcs = {
@@ -16368,6 +16569,7 @@ static const struct drm_crtc_funcs i915gm_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i915gm_enable_vblank,
.disable_vblank = i915gm_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i915_crtc_funcs = {
@@ -16376,6 +16578,7 @@ static const struct drm_crtc_funcs i915_crtc_funcs = {
.get_vblank_counter = i915_get_vblank_counter,
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
static const struct drm_crtc_funcs i8xx_crtc_funcs = {
@@ -16384,6 +16587,7 @@ static const struct drm_crtc_funcs i8xx_crtc_funcs = {
/* no hw vblank counter */
.enable_vblank = i8xx_enable_vblank,
.disable_vblank = i8xx_disable_vblank,
+ .get_vblank_timestamp = intel_crtc_get_vblank_timestamp,
};
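
Every per-platform drm_crtc_funcs table gains the same .get_vblank_timestamp = intel_crtc_get_vblank_timestamp entry, so the DRM core reaches one shared implementation through whichever vtable was selected for the hardware. A compact sketch of that ops-table pattern (names hypothetical):

#include <stdio.h>

/* Hypothetical miniature of struct drm_crtc_funcs. */
struct crtc_funcs {
	const char *name;
	unsigned int (*get_vblank_counter)(void);
	int (*get_vblank_timestamp)(void);
};

static unsigned int g4x_counter(void)  { return 42; }
static unsigned int i915_counter(void) { return 7; }

/* One shared hook wired into every table, as in the diff above. */
static int common_vblank_timestamp(void) { return 123456; }

static const struct crtc_funcs bdw_funcs = {
	.name = "bdw",
	.get_vblank_counter = g4x_counter,
	.get_vblank_timestamp = common_vblank_timestamp,
};

static const struct crtc_funcs i915_funcs = {
	.name = "i915",
	.get_vblank_counter = i915_counter,
	.get_vblank_timestamp = common_vblank_timestamp,
};

int main(void)
{
	const struct crtc_funcs *funcs = &bdw_funcs; /* chosen per platform */

	printf("%s: counter=%u ts=%d\n", funcs->name,
	       funcs->get_vblank_counter(),
	       funcs->get_vblank_timestamp());
	return 0;
}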
static struct intel_crtc *intel_crtc_alloc(void)
@@ -16413,6 +16617,18 @@ static void intel_crtc_free(struct intel_crtc *crtc)
kfree(crtc);
}
+static void intel_plane_possible_crtcs_init(struct drm_i915_private *dev_priv)
+{
+ struct intel_plane *plane;
+
+ for_each_intel_plane(&dev_priv->drm, plane) {
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv,
+ plane->pipe);
+
+ plane->base.possible_crtcs = drm_crtc_mask(&crtc->base);
+ }
+}
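
Planes are now registered with possible_crtcs = 0 (see the drm_universal_plane_init() changes above) and the real mask is filled in afterwards by intel_plane_possible_crtcs_init(), presumably because the owning CRTC does not exist yet at plane-creation time. The mask itself is one bit per CRTC index, as drm_crtc_mask() computes; sketched:

#include <stdio.h>

/* drm_crtc_mask() boils down to BIT(drm_crtc_index(crtc)). */
static unsigned int crtc_mask(unsigned int crtc_index)
{
	return 1u << crtc_index;
}

int main(void)
{
	/* A plane fixed to pipe B (index 1) may only bind to that CRTC. */
	unsigned int possible_crtcs = crtc_mask(1);

	for (unsigned int i = 0; i < 3; i++)
		printf("crtc %u allowed: %s\n", i,
		       possible_crtcs & crtc_mask(i) ? "yes" : "no");
	return 0;
}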
+
static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_plane *primary, *cursor;
@@ -16491,7 +16707,9 @@ static int intel_crtc_init(struct drm_i915_private *dev_priv, enum pipe pipe)
intel_color_init(crtc);
- WARN_ON(drm_crtc_index(&crtc->base) != crtc->pipe);
+ intel_crtc_crc_init(crtc);
+
+ drm_WARN_ON(&dev_priv->drm, drm_crtc_index(&crtc->base) != crtc->pipe);
return 0;
@@ -16551,10 +16769,10 @@ static bool ilk_has_edp_a(struct drm_i915_private *dev_priv)
if (!IS_MOBILE(dev_priv))
return false;
- if ((I915_READ(DP_A) & DP_DETECTED) == 0)
+ if ((intel_de_read(dev_priv, DP_A) & DP_DETECTED) == 0)
return false;
- if (IS_GEN(dev_priv, 5) && (I915_READ(FUSE_STRAP) & ILK_eDP_A_DISABLE))
+ if (IS_GEN(dev_priv, 5) && (intel_de_read(dev_priv, FUSE_STRAP) & ILK_eDP_A_DISABLE))
return false;
return true;
@@ -16569,11 +16787,11 @@ static bool intel_ddi_crt_present(struct drm_i915_private *dev_priv)
return false;
if (HAS_PCH_LPT_H(dev_priv) &&
- I915_READ(SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
+ intel_de_read(dev_priv, SFUSE_STRAP) & SFUSE_STRAP_CRT_DISABLED)
return false;
/* DDI E can't be used if DDI A requires 4 lanes */
- if (I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
+ if (intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_A_4_LANES)
return false;
if (!dev_priv->vbt.int_crt_support)
@@ -16599,10 +16817,10 @@ void intel_pps_unlock_regs_wa(struct drm_i915_private *dev_priv)
pps_num = 1;
for (pps_idx = 0; pps_idx < pps_num; pps_idx++) {
- u32 val = I915_READ(PP_CONTROL(pps_idx));
+ u32 val = intel_de_read(dev_priv, PP_CONTROL(pps_idx));
val = (val & ~PANEL_UNLOCK_MASK) | PANEL_UNLOCK_REGS;
- I915_WRITE(PP_CONTROL(pps_idx), val);
+ intel_de_write(dev_priv, PP_CONTROL(pps_idx), val);
}
}
@@ -16682,14 +16900,14 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* On SKL pre-D0 the strap isn't connected, so we assume
* it's there.
*/
- found = I915_READ(DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
+ found = intel_de_read(dev_priv, DDI_BUF_CTL(PORT_A)) & DDI_INIT_DISPLAY_DETECTED;
/* WaIgnoreDDIAStrap: skl */
if (found || IS_GEN9_BC(dev_priv))
intel_ddi_init(dev_priv, PORT_A);
/* DDI B, C, D, and F detection is indicated by the SFUSE_STRAP
* register */
- found = I915_READ(SFUSE_STRAP);
+ found = intel_de_read(dev_priv, SFUSE_STRAP);
if (found & SFUSE_STRAP_DDIB_DETECTED)
intel_ddi_init(dev_priv, PORT_B);
@@ -16722,25 +16940,25 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
if (ilk_has_edp_a(dev_priv))
intel_dp_init(dev_priv, DP_A, PORT_A);
- if (I915_READ(PCH_HDMIB) & SDVO_DETECTED) {
+ if (intel_de_read(dev_priv, PCH_HDMIB) & SDVO_DETECTED) {
/* PCH SDVOB multiplex with HDMIB */
found = intel_sdvo_init(dev_priv, PCH_SDVOB, PORT_B);
if (!found)
intel_hdmi_init(dev_priv, PCH_HDMIB, PORT_B);
- if (!found && (I915_READ(PCH_DP_B) & DP_DETECTED))
+ if (!found && (intel_de_read(dev_priv, PCH_DP_B) & DP_DETECTED))
intel_dp_init(dev_priv, PCH_DP_B, PORT_B);
}
- if (I915_READ(PCH_HDMIC) & SDVO_DETECTED)
+ if (intel_de_read(dev_priv, PCH_HDMIC) & SDVO_DETECTED)
intel_hdmi_init(dev_priv, PCH_HDMIC, PORT_C);
- if (!dpd_is_edp && I915_READ(PCH_HDMID) & SDVO_DETECTED)
+ if (!dpd_is_edp && intel_de_read(dev_priv, PCH_HDMID) & SDVO_DETECTED)
intel_hdmi_init(dev_priv, PCH_HDMID, PORT_D);
- if (I915_READ(PCH_DP_C) & DP_DETECTED)
+ if (intel_de_read(dev_priv, PCH_DP_C) & DP_DETECTED)
intel_dp_init(dev_priv, PCH_DP_C, PORT_C);
- if (I915_READ(PCH_DP_D) & DP_DETECTED)
+ if (intel_de_read(dev_priv, PCH_DP_D) & DP_DETECTED)
intel_dp_init(dev_priv, PCH_DP_D, PORT_D);
} else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
bool has_edp, has_port;
@@ -16765,16 +16983,16 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
*/
has_edp = intel_dp_is_port_edp(dev_priv, PORT_B);
has_port = intel_bios_is_port_present(dev_priv, PORT_B);
- if (I915_READ(VLV_DP_B) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, VLV_DP_B) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_B, PORT_B);
- if ((I915_READ(VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(dev_priv, VLV_HDMIB) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIB, PORT_B);
has_edp = intel_dp_is_port_edp(dev_priv, PORT_C);
has_port = intel_bios_is_port_present(dev_priv, PORT_C);
- if (I915_READ(VLV_DP_C) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, VLV_DP_C) & DP_DETECTED || has_port)
has_edp &= intel_dp_init(dev_priv, VLV_DP_C, PORT_C);
- if ((I915_READ(VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
+ if ((intel_de_read(dev_priv, VLV_HDMIC) & SDVO_DETECTED || has_port) && !has_edp)
intel_hdmi_init(dev_priv, VLV_HDMIC, PORT_C);
if (IS_CHERRYVIEW(dev_priv)) {
@@ -16783,9 +17001,9 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
* so no need to worry about it
*/
has_port = intel_bios_is_port_present(dev_priv, PORT_D);
- if (I915_READ(CHV_DP_D) & DP_DETECTED || has_port)
+ if (intel_de_read(dev_priv, CHV_DP_D) & DP_DETECTED || has_port)
intel_dp_init(dev_priv, CHV_DP_D, PORT_D);
- if (I915_READ(CHV_HDMID) & SDVO_DETECTED || has_port)
+ if (intel_de_read(dev_priv, CHV_HDMID) & SDVO_DETECTED || has_port)
intel_hdmi_init(dev_priv, CHV_HDMID, PORT_D);
}
@@ -16801,11 +17019,12 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
intel_crt_init(dev_priv);
- if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOB\n");
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOB\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOB, PORT_B);
if (!found && IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOB\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOB\n");
intel_hdmi_init(dev_priv, GEN4_HDMIB, PORT_B);
}
@@ -16815,22 +17034,23 @@ static void intel_setup_outputs(struct drm_i915_private *dev_priv)
/* Before G4X SDVOC doesn't have its own detect register */
- if (I915_READ(GEN3_SDVOB) & SDVO_DETECTED) {
- DRM_DEBUG_KMS("probing SDVOC\n");
+ if (intel_de_read(dev_priv, GEN3_SDVOB) & SDVO_DETECTED) {
+ drm_dbg_kms(&dev_priv->drm, "probing SDVOC\n");
found = intel_sdvo_init(dev_priv, GEN3_SDVOC, PORT_C);
}
- if (!found && (I915_READ(GEN3_SDVOC) & SDVO_DETECTED)) {
+ if (!found && (intel_de_read(dev_priv, GEN3_SDVOC) & SDVO_DETECTED)) {
if (IS_G4X(dev_priv)) {
- DRM_DEBUG_KMS("probing HDMI on SDVOC\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "probing HDMI on SDVOC\n");
intel_hdmi_init(dev_priv, GEN4_HDMIC, PORT_C);
}
if (IS_G4X(dev_priv))
intel_dp_init(dev_priv, DP_C, PORT_C);
}
- if (IS_G4X(dev_priv) && (I915_READ(DP_D) & DP_DETECTED))
+ if (IS_G4X(dev_priv) && (intel_de_read(dev_priv, DP_D) & DP_DETECTED))
intel_dp_init(dev_priv, DP_D, PORT_D);
if (SUPPORTS_TV(dev_priv))
@@ -16872,9 +17092,11 @@ static int intel_user_framebuffer_create_handle(struct drm_framebuffer *fb,
unsigned int *handle)
{
struct drm_i915_gem_object *obj = intel_fb_obj(fb);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
if (obj->userptr.mm) {
- DRM_DEBUG("attempting to use a userptr for a framebuffer, denied\n");
+ drm_dbg(&i915->drm,
+ "attempting to use a userptr for a framebuffer, denied\n");
return -EINVAL;
}
@@ -16928,14 +17150,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (tiling != I915_TILING_NONE &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG_KMS("tiling_mode doesn't match fb modifier\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode doesn't match fb modifier\n");
goto err;
}
} else {
if (tiling == I915_TILING_X) {
mode_cmd->modifier[0] = I915_FORMAT_MOD_X_TILED;
} else if (tiling == I915_TILING_Y) {
- DRM_DEBUG_KMS("No Y tiling for legacy addfb\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No Y tiling for legacy addfb\n");
goto err;
}
}
@@ -16945,10 +17169,11 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
mode_cmd->modifier[0])) {
struct drm_format_name_buf format_name;
- DRM_DEBUG_KMS("unsupported pixel format %s / modifier 0x%llx\n",
- drm_get_format_name(mode_cmd->pixel_format,
- &format_name),
- mode_cmd->modifier[0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "unsupported pixel format %s / modifier 0x%llx\n",
+ drm_get_format_name(mode_cmd->pixel_format,
+ &format_name),
+ mode_cmd->modifier[0]);
goto err;
}
@@ -16958,17 +17183,19 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
*/
if (INTEL_GEN(dev_priv) < 4 &&
tiling != intel_fb_modifier_to_tiling(mode_cmd->modifier[0])) {
- DRM_DEBUG_KMS("tiling_mode must match fb modifier exactly on gen2/3\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "tiling_mode must match fb modifier exactly on gen2/3\n");
goto err;
}
max_stride = intel_fb_max_stride(dev_priv, mode_cmd->pixel_format,
mode_cmd->modifier[0]);
if (mode_cmd->pitches[0] > max_stride) {
- DRM_DEBUG_KMS("%s pitch (%u) must be at most %d\n",
- mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
- "tiled" : "linear",
- mode_cmd->pitches[0], max_stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "%s pitch (%u) must be at most %d\n",
+ mode_cmd->modifier[0] != DRM_FORMAT_MOD_LINEAR ?
+ "tiled" : "linear",
+ mode_cmd->pitches[0], max_stride);
goto err;
}
@@ -16977,15 +17204,17 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
* the fb pitch and fence stride match.
*/
if (tiling != I915_TILING_NONE && mode_cmd->pitches[0] != stride) {
- DRM_DEBUG_KMS("pitch (%d) must match tiling stride (%d)\n",
- mode_cmd->pitches[0], stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "pitch (%d) must match tiling stride (%d)\n",
+ mode_cmd->pitches[0], stride);
goto err;
}
/* FIXME need to adjust LINOFF/TILEOFF accordingly. */
if (mode_cmd->offsets[0] != 0) {
- DRM_DEBUG_KMS("plane 0 offset (0x%08x) must be 0\n",
- mode_cmd->offsets[0]);
+ drm_dbg_kms(&dev_priv->drm,
+ "plane 0 offset (0x%08x) must be 0\n",
+ mode_cmd->offsets[0]);
goto err;
}
@@ -16995,14 +17224,16 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
u32 stride_alignment;
if (mode_cmd->handles[i] != mode_cmd->handles[0]) {
- DRM_DEBUG_KMS("bad plane %d handle\n", i);
+ drm_dbg_kms(&dev_priv->drm, "bad plane %d handle\n",
+ i);
goto err;
}
stride_alignment = intel_fb_stride_alignment(fb, i);
if (fb->pitches[i] & (stride_alignment - 1)) {
- DRM_DEBUG_KMS("plane %d pitch (%d) must be at least %u byte aligned\n",
- i, fb->pitches[i], stride_alignment);
+ drm_dbg_kms(&dev_priv->drm,
+ "plane %d pitch (%d) must be at least %u byte aligned\n",
+ i, fb->pitches[i], stride_alignment);
goto err;
}
@@ -17010,9 +17241,10 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
int ccs_aux_stride = gen12_ccs_aux_stride(fb, i);
if (fb->pitches[i] != ccs_aux_stride) {
- DRM_DEBUG_KMS("ccs aux plane %d pitch (%d) must be %d\n",
- i,
- fb->pitches[i], ccs_aux_stride);
+ drm_dbg_kms(&dev_priv->drm,
+ "ccs aux plane %d pitch (%d) must be %d\n",
+ i,
+ fb->pitches[i], ccs_aux_stride);
goto err;
}
}
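
The validation messages in this function all move to drm_dbg_kms() with an explicit device argument; the checks themselves are untouched. The stride check a few lines up assumes the alignment is a power of two, so alignment - 1 forms a low-bit mask; sketched:

#include <stdbool.h>
#include <stdio.h>

/* pitch must be a multiple of alignment; alignment is a power of two. */
static bool pitch_misaligned(unsigned int pitch, unsigned int alignment)
{
	return pitch & (alignment - 1);
}

int main(void)
{
	printf("%d\n", pitch_misaligned(4096, 64)); /* 0: aligned  */
	printf("%d\n", pitch_misaligned(4100, 64)); /* 1: rejected */
	return 0;
}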
@@ -17026,7 +17258,7 @@ static int intel_framebuffer_init(struct intel_framebuffer *intel_fb,
ret = drm_framebuffer_init(&dev_priv->drm, fb, &intel_fb_funcs);
if (ret) {
- DRM_ERROR("framebuffer init failed %d\n", ret);
+ drm_err(&dev_priv->drm, "framebuffer init failed %d\n", ret);
goto err;
}
@@ -17056,17 +17288,6 @@ intel_user_framebuffer_create(struct drm_device *dev,
return fb;
}
-static void intel_atomic_state_free(struct drm_atomic_state *state)
-{
- struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
-
- drm_atomic_state_default_release(state);
-
- i915_sw_fence_fini(&intel_state->commit_ready);
-
- kfree(state);
-}
-
static enum drm_mode_status
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
@@ -17298,9 +17519,36 @@ void intel_init_display_hooks(struct drm_i915_private *dev_priv)
void intel_modeset_init_hw(struct drm_i915_private *i915)
{
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(i915->cdclk.obj.state);
+
intel_update_cdclk(i915);
- intel_dump_cdclk_state(&i915->cdclk.hw, "Current CDCLK");
- i915->cdclk.logical = i915->cdclk.actual = i915->cdclk.hw;
+ intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK");
+ cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw;
+}
+
+static int sanitize_watermarks_add_affected(struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+
+ drm_for_each_crtc(crtc, state->dev) {
+ struct drm_crtc_state *crtc_state;
+
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ drm_for_each_plane(plane, state->dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state))
+ return PTR_ERR(plane_state);
+ }
+
+ return 0;
}
/*
@@ -17313,9 +17561,8 @@ void intel_modeset_init_hw(struct drm_i915_private *i915)
* through the atomic check code to calculate new watermark values in the
* state object.
*/
-static void sanitize_watermarks(struct drm_device *dev)
+static void sanitize_watermarks(struct drm_i915_private *dev_priv)
{
- struct drm_i915_private *dev_priv = to_i915(dev);
struct drm_atomic_state *state;
struct intel_atomic_state *intel_state;
struct intel_crtc *crtc;
@@ -17328,26 +17575,17 @@ static void sanitize_watermarks(struct drm_device *dev)
if (!dev_priv->display.optimize_watermarks)
return;
- /*
- * We need to hold connection_mutex before calling duplicate_state so
- * that the connector loop is protected.
- */
- drm_modeset_acquire_init(&ctx, 0);
-retry:
- ret = drm_modeset_lock_all_ctx(dev, &ctx);
- if (ret == -EDEADLK) {
- drm_modeset_backoff(&ctx);
- goto retry;
- } else if (WARN_ON(ret)) {
- goto fail;
- }
-
- state = drm_atomic_helper_duplicate_state(dev, &ctx);
- if (WARN_ON(IS_ERR(state)))
- goto fail;
+ state = drm_atomic_state_alloc(&dev_priv->drm);
+ if (drm_WARN_ON(&dev_priv->drm, !state))
+ return;
intel_state = to_intel_atomic_state(state);
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ state->acquire_ctx = &ctx;
+
/*
* Hardware readout is the only time we don't want to calculate
* intermediate watermarks (since we don't trust the current
@@ -17356,22 +17594,13 @@ retry:
if (!HAS_GMCH(dev_priv))
intel_state->skip_intermediate_wm = true;
- ret = intel_atomic_check(dev, state);
- if (ret) {
- /*
- * If we fail here, it means that the hardware appears to be
- * programmed in a way that shouldn't be possible, given our
- * understanding of watermark requirements. This might mean a
- * mistake in the hardware readout code or a mistake in the
- * watermark calculations for a given platform. Raise a WARN
- * so that this is noticeable.
- *
- * If this actually happens, we'll have to just leave the
- * BIOS-programmed watermarks untouched and hope for the best.
- */
- WARN(true, "Could not determine valid watermarks for inherited state\n");
- goto put_state;
- }
+ ret = sanitize_watermarks_add_affected(state);
+ if (ret)
+ goto fail;
+
+ ret = intel_atomic_check(&dev_priv->drm, state);
+ if (ret)
+ goto fail;
/* Write calculated watermark values back */
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
@@ -17381,9 +17610,29 @@ retry:
to_intel_crtc_state(crtc->base.state)->wm = crtc_state->wm;
}
-put_state:
- drm_atomic_state_put(state);
fail:
+ if (ret == -EDEADLK) {
+ drm_atomic_state_clear(state);
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+
+ /*
+ * If we fail here, it means that the hardware appears to be
+ * programmed in a way that shouldn't be possible, given our
+ * understanding of watermark requirements. This might mean a
+ * mistake in the hardware readout code or a mistake in the
+ * watermark calculations for a given platform. Raise a WARN
+ * so that this is noticeable.
+ *
+ * If this actually happens, we'll have to just leave the
+ * BIOS-programmed watermarks untouched and hope for the best.
+ */
+ drm_WARN(&dev_priv->drm, ret,
+ "Could not determine valid watermarks for inherited state\n");
+
+ drm_atomic_state_put(state);
+
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
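
The rewritten sanitize_watermarks() no longer takes every modeset lock up front via drm_modeset_lock_all_ctx(); instead the drm_atomic_get_*_state() calls take locks on demand and the whole body retries from retry: when one of them returns -EDEADLK. A schematic of that clear/backoff/retry loop (lock API simulated):

#include <errno.h>
#include <stdio.h>

static int attempts;

/* Hypothetical stand-in for the drm_modeset_lock machinery. */
static int acquire_states(void)
{
	/* First pass loses a lock race, second pass succeeds. */
	return ++attempts < 2 ? -EDEADLK : 0;
}
static void state_clear(void) { puts("clear state"); }
static void backoff(void)     { puts("drop locks, wait"); }

int main(void)
{
	int ret;

retry:
	ret = acquire_states();
	if (ret == -EDEADLK) {
		/* Same shape as the fail: path above. */
		state_clear();
		backoff();
		goto retry;
	}

	printf("done after %d attempt(s), ret=%d\n", attempts, ret);
	return 0;
}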
@@ -17392,7 +17641,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
{
if (IS_GEN(dev_priv, 5)) {
u32 fdi_pll_clk =
- I915_READ(FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
+ intel_de_read(dev_priv, FDI_PLL_BIOS_0) & FDI_PLL_FB_CLOCK_MASK;
dev_priv->fdi_pll_freq = (fdi_pll_clk + 2) * 10000;
} else if (IS_GEN(dev_priv, 6) || IS_IVYBRIDGE(dev_priv)) {
@@ -17401,7 +17650,7 @@ static void intel_update_fdi_pll_freq(struct drm_i915_private *dev_priv)
return;
}
- DRM_DEBUG_DRIVER("FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
+ drm_dbg(&dev_priv->drm, "FDI PLL freq=%d\n", dev_priv->fdi_pll_freq);
}
static int intel_initial_commit(struct drm_device *dev)
@@ -17441,6 +17690,24 @@ retry:
* have readout for pipe gamma enable.
*/
crtc_state->uapi.color_mgmt_changed = true;
+
+ /*
+ * FIXME hack to force full modeset when DSC is being
+ * used.
+ *
+ * As long as we do not have full state readout and
+ * config comparison of crtc_state->dsc, we have no way
+ * to ensure reliable fastset. Remove once we have
+ * readout for DSC.
+ */
+ if (crtc_state->dsc.compression_enable) {
+ ret = drm_atomic_add_affected_connectors(state,
+ &crtc->base);
+ if (ret)
+ goto out;
+ crtc_state->uapi.mode_changed = true;
+ drm_dbg_kms(dev, "Force full modeset for DSC\n");
+ }
}
}
@@ -17466,6 +17733,7 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
struct drm_mode_config *mode_config = &i915->drm.mode_config;
drm_mode_config_init(&i915->drm);
+ INIT_LIST_HEAD(&i915->global_obj_list);
mode_config->min_width = 0;
mode_config->min_height = 0;
@@ -17507,11 +17775,31 @@ static void intel_mode_config_init(struct drm_i915_private *i915)
}
}
-int intel_modeset_init(struct drm_i915_private *i915)
+static void intel_mode_config_cleanup(struct drm_i915_private *i915)
+{
+ intel_atomic_global_obj_cleanup(i915);
+ drm_mode_config_cleanup(&i915->drm);
+}
+
+static void plane_config_fini(struct intel_initial_plane_config *plane_config)
+{
+ if (plane_config->fb) {
+ struct drm_framebuffer *fb = &plane_config->fb->base;
+
+ /* We may only have the stub and not a full framebuffer */
+ if (drm_framebuffer_read_refcount(fb))
+ drm_framebuffer_put(fb);
+ else
+ kfree(fb);
+ }
+
+ if (plane_config->vma)
+ i915_vma_put(plane_config->vma);
+}
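
plane_config_fini() has to tell a registered framebuffer apart from a bare stub: per its comment, hardware readout can leave an fb struct that was never initialized with the DRM core, so its refcount is still zero and only a plain kfree() is valid. A sketch of that either/or teardown (miniature hypothetical fb):

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical miniature framebuffer with a DRM-style refcount. */
struct fb {
	int refcount; /* 0 => stub that was never registered */
};

static void fb_put(struct fb *fb)
{
	if (--fb->refcount == 0) {
		puts("last ref dropped, freeing via DRM");
		free(fb);
	}
}

static void plane_config_fini(struct fb *fb)
{
	if (!fb)
		return;
	/* Registered fb: drop our reference. Bare stub: plain free. */
	if (fb->refcount)
		fb_put(fb);
	else
		free(fb);
}

int main(void)
{
	struct fb *stub = calloc(1, sizeof(*stub)); /* never registered */
	struct fb *real = calloc(1, sizeof(*real));
	real->refcount = 1;                         /* registered once  */

	plane_config_fini(stub);
	plane_config_fini(real);
	return 0;
}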
+
+/* part #1: call before irq install */
+int intel_modeset_init_noirq(struct drm_i915_private *i915)
{
- struct drm_device *dev = &i915->drm;
- enum pipe pipe;
- struct intel_crtc *crtc;
int ret;
i915->modeset_wq = alloc_ordered_workqueue("i915_modeset", 0);
@@ -17520,6 +17808,10 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_mode_config_init(i915);
+ ret = intel_cdclk_init(i915);
+ if (ret)
+ return ret;
+
ret = intel_bw_init(i915);
if (ret)
return ret;
@@ -17532,26 +17824,38 @@ int intel_modeset_init(struct drm_i915_private *i915)
intel_fbc_init(i915);
+ return 0;
+}
+
+/* part #2: call after irq install */
+int intel_modeset_init(struct drm_i915_private *i915)
+{
+ struct drm_device *dev = &i915->drm;
+ enum pipe pipe;
+ struct intel_crtc *crtc;
+ int ret;
+
intel_init_pm(i915);
intel_panel_sanitize_ssc(i915);
intel_gmbus_setup(i915);
- DRM_DEBUG_KMS("%d display pipe%s available.\n",
- INTEL_NUM_PIPES(i915),
- INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
+ drm_dbg_kms(&i915->drm, "%d display pipe%s available.\n",
+ INTEL_NUM_PIPES(i915),
+ INTEL_NUM_PIPES(i915) > 1 ? "s" : "");
if (HAS_DISPLAY(i915) && INTEL_DISPLAY_ENABLED(i915)) {
for_each_pipe(i915, pipe) {
ret = intel_crtc_init(i915, pipe);
if (ret) {
- drm_mode_config_cleanup(dev);
+ intel_mode_config_cleanup(i915);
return ret;
}
}
}
+ intel_plane_possible_crtcs_init(i915);
intel_shared_dpll_init(dev);
intel_update_fdi_pll_freq(i915);
@@ -17591,6 +17895,8 @@ int intel_modeset_init(struct drm_i915_private *i915)
* just get the first one.
*/
intel_find_initial_plane_obj(crtc, &plane_config);
+
+ plane_config_fini(&plane_config);
}
/*
@@ -17599,7 +17905,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
* since the watermark calculation done here will use pstate->fb.
*/
if (!HAS_GMCH(i915))
- sanitize_watermarks(dev);
+ sanitize_watermarks(i915);
/*
* Force all active planes to recompute their states. So that on
@@ -17609,7 +17915,7 @@ int intel_modeset_init(struct drm_i915_private *i915)
*/
ret = intel_initial_commit(dev);
if (ret)
- DRM_DEBUG_KMS("Initial commit in probe failed.\n");
+ drm_dbg_kms(&i915->drm, "Initial commit in probe failed.\n");
return 0;
}
@@ -17628,10 +17934,12 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
u32 dpll, fp;
int i;
- WARN_ON(i9xx_calc_dpll_params(48000, &clock) != 25154);
+ drm_WARN_ON(&dev_priv->drm,
+ i9xx_calc_dpll_params(48000, &clock) != 25154);
- DRM_DEBUG_KMS("enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
- pipe_name(pipe), clock.vco, clock.dot);
+ drm_dbg_kms(&dev_priv->drm,
+ "enabling pipe %c due to force quirk (vco=%d dot=%d)\n",
+ pipe_name(pipe), clock.vco, clock.dot);
fp = i9xx_dpll_compute_fp(&clock);
dpll = DPLL_DVO_2X_MODE |
@@ -17641,27 +17949,27 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
PLL_REF_INPUT_DREFCLK |
DPLL_VCO_ENABLE;
- I915_WRITE(FP0(pipe), fp);
- I915_WRITE(FP1(pipe), fp);
+ intel_de_write(dev_priv, FP0(pipe), fp);
+ intel_de_write(dev_priv, FP1(pipe), fp);
- I915_WRITE(HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
- I915_WRITE(HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
- I915_WRITE(HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
- I915_WRITE(VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
- I915_WRITE(VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
- I915_WRITE(VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
- I915_WRITE(PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
+ intel_de_write(dev_priv, HTOTAL(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HBLANK(pipe), (640 - 1) | ((800 - 1) << 16));
+ intel_de_write(dev_priv, HSYNC(pipe), (656 - 1) | ((752 - 1) << 16));
+ intel_de_write(dev_priv, VTOTAL(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VBLANK(pipe), (480 - 1) | ((525 - 1) << 16));
+ intel_de_write(dev_priv, VSYNC(pipe), (490 - 1) | ((492 - 1) << 16));
+ intel_de_write(dev_priv, PIPESRC(pipe), ((640 - 1) << 16) | (480 - 1));
/*
* Apparently we need to have VGA mode enabled prior to changing
* the P1/P2 dividers. Otherwise the DPLL will keep using the old
* dividers, even though the register value does change.
*/
- I915_WRITE(DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
- I915_WRITE(DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll & ~DPLL_VGA_MODE_DIS);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(DPLL(pipe));
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -17669,17 +17977,18 @@ void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
*
* So write it again.
*/
- I915_WRITE(DPLL(pipe), dpll);
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
/* We do this three times for luck */
for (i = 0; i < 3 ; i++) {
- I915_WRITE(DPLL(pipe), dpll);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), dpll);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
udelay(150); /* wait for warmup */
}
- I915_WRITE(PIPECONF(pipe), PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
- POSTING_READ(PIPECONF(pipe));
+ intel_de_write(dev_priv, PIPECONF(pipe),
+ PIPECONF_ENABLE | PIPECONF_PROGRESSIVE);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_moving(crtc);
}
@@ -17688,22 +17997,30 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- DRM_DEBUG_KMS("disabling pipe %c due to force quirk\n",
- pipe_name(pipe));
-
- WARN_ON(I915_READ(DSPCNTR(PLANE_A)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(DSPCNTR(PLANE_B)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE);
- WARN_ON(I915_READ(CURCNTR(PIPE_A)) & MCURSOR_MODE);
- WARN_ON(I915_READ(CURCNTR(PIPE_B)) & MCURSOR_MODE);
-
- I915_WRITE(PIPECONF(pipe), 0);
- POSTING_READ(PIPECONF(pipe));
+ drm_dbg_kms(&dev_priv->drm, "disabling pipe %c due to force quirk\n",
+ pipe_name(pipe));
+
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_A)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_B)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, DSPCNTR(PLANE_C)) &
+ DISPLAY_PLANE_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE);
+
+ intel_de_write(dev_priv, PIPECONF(pipe), 0);
+ intel_de_posting_read(dev_priv, PIPECONF(pipe));
intel_wait_for_pipe_scanline_stopped(crtc);
- I915_WRITE(DPLL(pipe), DPLL_VGA_MODE_DIS);
- POSTING_READ(DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe), DPLL_VGA_MODE_DIS);
+ intel_de_posting_read(dev_priv, DPLL(pipe));
}
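
Both i830 quirk routines are converted wholesale from I915_WRITE()/POSTING_READ() to the intel_de_* display-engine accessors; the pattern is unchanged: write a register, then read it back so the write is flushed to hardware before the following delay or poll. A userspace-flavored sketch with simulated MMIO (register index hypothetical):

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_mmio[16]; /* simulated register file */

static void reg_write(unsigned int reg, uint32_t val)
{
	*(volatile uint32_t *)&fake_mmio[reg] = val;
}

static uint32_t reg_read(unsigned int reg)
{
	return *(volatile uint32_t *)&fake_mmio[reg];
}

#define DPLL 4 /* hypothetical register index */

int main(void)
{
	reg_write(DPLL, 0x80000000);

	/*
	 * Posting read: the value is discarded; the point is forcing the
	 * preceding write out before any subsequent delay or poll.
	 */
	(void)reg_read(DPLL);

	printf("DPLL = 0x%08x\n", reg_read(DPLL));
	return 0;
}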
static void
@@ -17726,8 +18043,9 @@ intel_sanitize_plane_mapping(struct drm_i915_private *dev_priv)
if (pipe == crtc->pipe)
continue;
- DRM_DEBUG_KMS("[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
- plane->base.base.id, plane->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] attached to the wrong pipe, disabling plane\n",
+ plane->base.base.id, plane->base.name);
plane_crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
intel_plane_disable_noatomic(plane_crtc, plane);
@@ -17777,18 +18095,18 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
if (transcoder_is_dsi(cpu_transcoder))
return;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~HSW_FRAME_START_DELAY_MASK;
val |= HSW_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
} else {
i915_reg_t reg = PIPECONF(cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~PIPECONF_FRAME_START_DELAY_MASK;
val |= PIPECONF_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
if (!crtc_state->has_pch_encoder)
@@ -17798,19 +18116,19 @@ static void intel_sanitize_frame_start_delay(const struct intel_crtc_state *crtc
i915_reg_t reg = PCH_TRANSCONF(crtc->pipe);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_FRAME_START_DELAY_MASK;
val |= TRANS_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
} else {
enum pipe pch_transcoder = intel_crtc_pch_transcoder(crtc);
i915_reg_t reg = TRANS_CHICKEN2(pch_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val &= ~TRANS_CHICKEN2_FRAME_START_DELAY_MASK;
val |= TRANS_CHICKEN2_FRAME_START_DELAY(0);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -17842,9 +18160,8 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc,
* gamma and CSC to match how we program our planes.
*/
if (INTEL_GEN(dev_priv) >= 9)
- I915_WRITE(SKL_BOTTOM_COLOR(crtc->pipe),
- SKL_BOTTOM_COLOR_GAMMA_ENABLE |
- SKL_BOTTOM_COLOR_CSC_ENABLE);
+ intel_de_write(dev_priv, SKL_BOTTOM_COLOR(crtc->pipe),
+ SKL_BOTTOM_COLOR_GAMMA_ENABLE | SKL_BOTTOM_COLOR_CSC_ENABLE);
}
/* Adjust the state of the output pipe according to whether we
@@ -17916,16 +18233,18 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
crtc_state->hw.active;
if (crtc_state && has_bogus_dpll_config(crtc_state)) {
- DRM_DEBUG_KMS("BIOS has misprogrammed the hardware. Disabling pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "BIOS has misprogrammed the hardware. Disabling pipe %c\n",
+ pipe_name(crtc->pipe));
has_active_crtc = false;
}
connector = intel_encoder_find_connector(encoder);
if (connector && !has_active_crtc) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] has active connectors but no active pipe!\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] has active connectors but no active pipe!\n",
+ encoder->base.base.id,
+ encoder->base.name);
/* Connector is active, but has no active pipe. This is
* fallout from our resume register restoring. Disable
@@ -17933,9 +18252,10 @@ static void intel_sanitize_encoder(struct intel_encoder *encoder)
if (crtc_state) {
struct drm_encoder *best_encoder;
- DRM_DEBUG_KMS("[ENCODER:%d:%s] manually disabled\n",
- encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] manually disabled\n",
+ encoder->base.base.id,
+ encoder->base.name);
/* avoid oopsing in case the hooks consult best_encoder */
best_encoder = connector->base.state->best_encoder;
@@ -17988,9 +18308,10 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
intel_set_plane_visible(crtc_state, plane_state, visible);
- DRM_DEBUG_KMS("[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
- plane->base.base.id, plane->base.name,
- enableddisabled(visible), pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] hw state readout: %s, pipe %c\n",
+ plane->base.base.id, plane->base.name,
+ enableddisabled(visible), pipe_name(pipe));
}
for_each_intel_crtc(&dev_priv->drm, crtc) {
@@ -18004,14 +18325,14 @@ static void readout_plane_state(struct drm_i915_private *dev_priv)
static void intel_modeset_readout_hw_state(struct drm_device *dev)
{
struct drm_i915_private *dev_priv = to_i915(dev);
+ struct intel_cdclk_state *cdclk_state =
+ to_intel_cdclk_state(dev_priv->cdclk.obj.state);
enum pipe pipe;
struct intel_crtc *crtc;
struct intel_encoder *encoder;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
- int i;
-
- dev_priv->active_pipes = 0;
+ u8 active_pipes = 0;
for_each_intel_crtc(dev, crtc) {
struct intel_crtc_state *crtc_state =
@@ -18028,41 +18349,19 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc->active = crtc_state->hw.active;
if (crtc_state->hw.active)
- dev_priv->active_pipes |= BIT(crtc->pipe);
+ active_pipes |= BIT(crtc->pipe);
- DRM_DEBUG_KMS("[CRTC:%d:%s] hw state readout: %s\n",
- crtc->base.base.id, crtc->base.name,
- enableddisabled(crtc_state->hw.active));
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] hw state readout: %s\n",
+ crtc->base.base.id, crtc->base.name,
+ enableddisabled(crtc_state->hw.active));
}
- readout_plane_state(dev_priv);
-
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- pll->on = pll->info->funcs->get_hw_state(dev_priv, pll,
- &pll->state.hw_state);
-
- if (IS_ELKHARTLAKE(dev_priv) && pll->on &&
- pll->info->id == DPLL_ID_EHL_DPLL4) {
- pll->wakeref = intel_display_power_get(dev_priv,
- POWER_DOMAIN_DPLL_DC_OFF);
- }
-
- pll->state.crtc_mask = 0;
- for_each_intel_crtc(dev, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
+ dev_priv->active_pipes = cdclk_state->active_pipes = active_pipes;
- if (crtc_state->hw.active &&
- crtc_state->shared_dpll == pll)
- pll->state.crtc_mask |= 1 << crtc->pipe;
- }
- pll->active_mask = pll->state.crtc_mask;
+ readout_plane_state(dev_priv);
- DRM_DEBUG_KMS("%s hw state readout: crtc_mask 0x%08x, on %i\n",
- pll->info->name, pll->state.crtc_mask, pll->on);
- }
+ intel_dpll_readout_hw_state(dev_priv);
for_each_intel_encoder(dev, encoder) {
pipe = 0;
@@ -18079,10 +18378,11 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
encoder->base.crtc = NULL;
}
- DRM_DEBUG_KMS("[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
- encoder->base.base.id, encoder->base.name,
- enableddisabled(encoder->base.crtc),
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] hw state readout: %s, pipe %c\n",
+ encoder->base.base.id, encoder->base.name,
+ enableddisabled(encoder->base.crtc),
+ pipe_name(pipe));
}
drm_connector_list_iter_begin(dev, &conn_iter);
@@ -18093,7 +18393,7 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.dpms = DRM_MODE_DPMS_ON;
- encoder = connector->encoder;
+ encoder = intel_attached_encoder(connector);
connector->base.encoder = &encoder->base;
crtc = to_intel_crtc(encoder->base.crtc);
@@ -18114,9 +18414,10 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
connector->base.dpms = DRM_MODE_DPMS_OFF;
connector->base.encoder = NULL;
}
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] hw state readout: %s\n",
- connector->base.base.id, connector->base.name,
- enableddisabled(connector->base.encoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] hw state readout: %s\n",
+ connector->base.base.id, connector->base.name,
+ enableddisabled(connector->base.encoder));
}
drm_connector_list_iter_end(&conn_iter);
@@ -18180,19 +18481,20 @@ static void intel_modeset_readout_hw_state(struct drm_device *dev)
crtc_state->min_cdclk[plane->id] =
crtc_state->pixel_rate;
}
- DRM_DEBUG_KMS("[PLANE:%d:%s] min_cdclk %d kHz\n",
- plane->base.base.id, plane->base.name,
- crtc_state->min_cdclk[plane->id]);
+ drm_dbg_kms(&dev_priv->drm,
+ "[PLANE:%d:%s] min_cdclk %d kHz\n",
+ plane->base.base.id, plane->base.name,
+ crtc_state->min_cdclk[plane->id]);
}
if (crtc_state->hw.active) {
min_cdclk = intel_crtc_compute_min_cdclk(crtc_state);
- if (WARN_ON(min_cdclk < 0))
+ if (drm_WARN_ON(dev, min_cdclk < 0))
min_cdclk = 0;
}
- dev_priv->min_cdclk[crtc->pipe] = min_cdclk;
- dev_priv->min_voltage_level[crtc->pipe] =
+ cdclk_state->min_cdclk[crtc->pipe] = min_cdclk;
+ cdclk_state->min_voltage_level[crtc->pipe] =
crtc_state->min_voltage_level;
intel_bw_crtc_update(bw_state, crtc_state);
@@ -18231,53 +18533,55 @@ static void intel_early_display_was(struct drm_i915_private *dev_priv)
* Also known as Wa_14010480278.
*/
if (IS_GEN_RANGE(dev_priv, 10, 12) || IS_GEMINILAKE(dev_priv))
- I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
- DARBF_GATING_DIS);
+ intel_de_write(dev_priv, GEN9_CLKGATE_DIS_0,
+ intel_de_read(dev_priv, GEN9_CLKGATE_DIS_0) | DARBF_GATING_DIS);
if (IS_HASWELL(dev_priv)) {
/*
* WaRsPkgCStateDisplayPMReq:hsw
* System hang if this isn't done before disabling all planes!
*/
- I915_WRITE(CHICKEN_PAR1_1,
- I915_READ(CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
+ intel_de_write(dev_priv, CHICKEN_PAR1_1,
+ intel_de_read(dev_priv, CHICKEN_PAR1_1) | FORCE_ARB_IDLE_PLANES);
}
}
static void ibx_sanitize_pch_hdmi_port(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t hdmi_reg)
{
- u32 val = I915_READ(hdmi_reg);
+ u32 val = intel_de_read(dev_priv, hdmi_reg);
if (val & SDVO_ENABLE ||
(val & SDVO_PIPE_SEL_MASK) == SDVO_PIPE_SEL(PIPE_A))
return;
- DRM_DEBUG_KMS("Sanitizing transcoder select for HDMI %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for HDMI %c\n",
+ port_name(port));
val &= ~SDVO_PIPE_SEL_MASK;
val |= SDVO_PIPE_SEL(PIPE_A);
- I915_WRITE(hdmi_reg, val);
+ intel_de_write(dev_priv, hdmi_reg, val);
}
static void ibx_sanitize_pch_dp_port(struct drm_i915_private *dev_priv,
enum port port, i915_reg_t dp_reg)
{
- u32 val = I915_READ(dp_reg);
+ u32 val = intel_de_read(dev_priv, dp_reg);
if (val & DP_PORT_EN ||
(val & DP_PIPE_SEL_MASK) == DP_PIPE_SEL(PIPE_A))
return;
- DRM_DEBUG_KMS("Sanitizing transcoder select for DP %c\n",
- port_name(port));
+ drm_dbg_kms(&dev_priv->drm,
+ "Sanitizing transcoder select for DP %c\n",
+ port_name(port));
val &= ~DP_PIPE_SEL_MASK;
val |= DP_PIPE_SEL(PIPE_A);
- I915_WRITE(dp_reg, val);
+ intel_de_write(dev_priv, dp_reg, val);
}
static void ibx_sanitize_pch_ports(struct drm_i915_private *dev_priv)
@@ -18314,7 +18618,6 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
struct intel_encoder *encoder;
struct intel_crtc *crtc;
intel_wakeref_t wakeref;
- int i;
wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
@@ -18367,18 +18670,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
intel_modeset_update_connector_atomic_state(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- if (!pll->on || pll->active_mask)
- continue;
-
- DRM_DEBUG_KMS("%s enabled but not in use, disabling\n",
- pll->info->name);
-
- pll->info->funcs->disable(dev_priv, pll);
- pll->on = false;
- }
+ intel_dpll_sanitize_state(dev_priv);
if (IS_G4X(dev_priv)) {
g4x_wm_get_hw_state(dev_priv);
@@ -18398,7 +18690,7 @@ intel_modeset_setup_hw_state(struct drm_device *dev,
u64 put_domains;
put_domains = modeset_get_crtc_power_domains(crtc_state);
- if (WARN_ON(put_domains))
+ if (drm_WARN_ON(dev, put_domains))
modeset_put_power_domains(dev_priv, put_domains);
}
@@ -18434,7 +18726,8 @@ void intel_display_resume(struct drm_device *dev)
drm_modeset_acquire_fini(&ctx);
if (ret)
- DRM_ERROR("Restoring old state failed with %i\n", ret);
+ drm_err(&dev_priv->drm,
+ "Restoring old state failed with %i\n", ret);
if (state)
drm_atomic_state_put(state);
}
@@ -18457,21 +18750,19 @@ static void intel_hpd_poll_fini(struct drm_i915_private *i915)
drm_connector_list_iter_end(&conn_iter);
}
+/* part #1: call before irq uninstall */
void intel_modeset_driver_remove(struct drm_i915_private *i915)
{
flush_workqueue(i915->flip_wq);
flush_workqueue(i915->modeset_wq);
flush_work(&i915->atomic_helper.free_work);
- WARN_ON(!llist_empty(&i915->atomic_helper.free_list));
-
- /*
- * Interrupts and polling as the first thing to avoid creating havoc.
- * Too much stuff here (turning off connectors, ...) would
- * experience fancy races otherwise.
- */
- intel_irq_uninstall(i915);
+ drm_WARN_ON(&i915->drm, !llist_empty(&i915->atomic_helper.free_list));
+}
+
+/* part #2: call after irq uninstall */
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915)
+{
/*
* Due to the hpd irq storm handling the hotplug work can re-arm the
* poll handlers. Hence disable polling after hpd handling is shut down.
@@ -18497,14 +18788,12 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
intel_hdcp_component_fini(i915);
- drm_mode_config_cleanup(&i915->drm);
+ intel_mode_config_cleanup(i915);
intel_overlay_cleanup(i915);
intel_gmbus_teardown(i915);
- intel_bw_cleanup(i915);
-
destroy_workqueue(i915->flip_wq);
destroy_workqueue(i915->modeset_wq);
@@ -18513,6 +18802,15 @@ void intel_modeset_driver_remove(struct drm_i915_private *i915)
#if IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)
+static bool
+has_transcoder(struct drm_i915_private *dev_priv, enum transcoder cpu_transcoder)
+{
+ if (cpu_transcoder == TRANSCODER_EDP)
+ return HAS_TRANSCODER_EDP(dev_priv);
+ else
+ return INTEL_INFO(dev_priv)->pipe_mask & BIT(cpu_transcoder);
+}
+
struct intel_display_error_state {
u32 power_well_driver;
@@ -18579,7 +18877,8 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
return NULL;
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- error->power_well_driver = I915_READ(HSW_PWR_WELL_CTL2);
+ error->power_well_driver = intel_de_read(dev_priv,
+ HSW_PWR_WELL_CTL2);
for_each_pipe(dev_priv, i) {
error->pipe[i].power_domain_on =
@@ -18588,33 +18887,39 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
if (!error->pipe[i].power_domain_on)
continue;
- error->cursor[i].control = I915_READ(CURCNTR(i));
- error->cursor[i].position = I915_READ(CURPOS(i));
- error->cursor[i].base = I915_READ(CURBASE(i));
+ error->cursor[i].control = intel_de_read(dev_priv, CURCNTR(i));
+ error->cursor[i].position = intel_de_read(dev_priv, CURPOS(i));
+ error->cursor[i].base = intel_de_read(dev_priv, CURBASE(i));
- error->plane[i].control = I915_READ(DSPCNTR(i));
- error->plane[i].stride = I915_READ(DSPSTRIDE(i));
+ error->plane[i].control = intel_de_read(dev_priv, DSPCNTR(i));
+ error->plane[i].stride = intel_de_read(dev_priv, DSPSTRIDE(i));
if (INTEL_GEN(dev_priv) <= 3) {
- error->plane[i].size = I915_READ(DSPSIZE(i));
- error->plane[i].pos = I915_READ(DSPPOS(i));
+ error->plane[i].size = intel_de_read(dev_priv,
+ DSPSIZE(i));
+ error->plane[i].pos = intel_de_read(dev_priv,
+ DSPPOS(i));
}
if (INTEL_GEN(dev_priv) <= 7 && !IS_HASWELL(dev_priv))
- error->plane[i].addr = I915_READ(DSPADDR(i));
+ error->plane[i].addr = intel_de_read(dev_priv,
+ DSPADDR(i));
if (INTEL_GEN(dev_priv) >= 4) {
- error->plane[i].surface = I915_READ(DSPSURF(i));
- error->plane[i].tile_offset = I915_READ(DSPTILEOFF(i));
+ error->plane[i].surface = intel_de_read(dev_priv,
+ DSPSURF(i));
+ error->plane[i].tile_offset = intel_de_read(dev_priv,
+ DSPTILEOFF(i));
}
- error->pipe[i].source = I915_READ(PIPESRC(i));
+ error->pipe[i].source = intel_de_read(dev_priv, PIPESRC(i));
if (HAS_GMCH(dev_priv))
- error->pipe[i].stat = I915_READ(PIPESTAT(i));
+ error->pipe[i].stat = intel_de_read(dev_priv,
+ PIPESTAT(i));
}
for (i = 0; i < ARRAY_SIZE(error->transcoder); i++) {
enum transcoder cpu_transcoder = transcoders[i];
- if (!INTEL_INFO(dev_priv)->trans_offsets[cpu_transcoder])
+ if (!has_transcoder(dev_priv, cpu_transcoder))
continue;
error->transcoder[i].available = true;
@@ -18626,13 +18931,20 @@ intel_display_capture_error_state(struct drm_i915_private *dev_priv)
error->transcoder[i].cpu_transcoder = cpu_transcoder;
- error->transcoder[i].conf = I915_READ(PIPECONF(cpu_transcoder));
- error->transcoder[i].htotal = I915_READ(HTOTAL(cpu_transcoder));
- error->transcoder[i].hblank = I915_READ(HBLANK(cpu_transcoder));
- error->transcoder[i].hsync = I915_READ(HSYNC(cpu_transcoder));
- error->transcoder[i].vtotal = I915_READ(VTOTAL(cpu_transcoder));
- error->transcoder[i].vblank = I915_READ(VBLANK(cpu_transcoder));
- error->transcoder[i].vsync = I915_READ(VSYNC(cpu_transcoder));
+ error->transcoder[i].conf = intel_de_read(dev_priv,
+ PIPECONF(cpu_transcoder));
+ error->transcoder[i].htotal = intel_de_read(dev_priv,
+ HTOTAL(cpu_transcoder));
+ error->transcoder[i].hblank = intel_de_read(dev_priv,
+ HBLANK(cpu_transcoder));
+ error->transcoder[i].hsync = intel_de_read(dev_priv,
+ HSYNC(cpu_transcoder));
+ error->transcoder[i].vtotal = intel_de_read(dev_priv,
+ VTOTAL(cpu_transcoder));
+ error->transcoder[i].vblank = intel_de_read(dev_priv,
+ VBLANK(cpu_transcoder));
+ error->transcoder[i].vsync = intel_de_read(dev_priv,
+ VSYNC(cpu_transcoder));
}
return error;
diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h
index 028aab728514..adb1225a3480 100644
--- a/drivers/gpu/drm/i915/display/intel_display.h
+++ b/drivers/gpu/drm/i915/display/intel_display.h
@@ -26,7 +26,6 @@
#define _INTEL_DISPLAY_H_
#include <drm/drm_util.h>
-#include <drm/i915_drm.h>
enum link_m_n_set;
struct dpll;
@@ -40,12 +39,15 @@ struct drm_framebuffer;
struct drm_i915_error_state_buf;
struct drm_i915_gem_object;
struct drm_i915_private;
+struct drm_mode_fb_cmd2;
struct drm_modeset_acquire_ctx;
struct drm_plane;
struct drm_plane_state;
struct i915_ggtt_view;
+struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_digital_port;
struct intel_dp;
struct intel_encoder;
@@ -54,7 +56,6 @@ struct intel_plane;
struct intel_plane_state;
struct intel_remapped_info;
struct intel_rotation_info;
-struct intel_crtc_state;
enum i915_gpio {
GPIOA,
@@ -312,10 +313,11 @@ enum phy_fia {
};
#define for_each_pipe(__dev_priv, __p) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++)
+ for ((__p) = 0; (__p) < I915_MAX_PIPES; (__p)++) \
+ for_each_if(INTEL_INFO(__dev_priv)->pipe_mask & BIT(__p))
#define for_each_pipe_masked(__dev_priv, __p, __mask) \
- for ((__p) = 0; (__p) < INTEL_NUM_PIPES(__dev_priv); (__p)++) \
+ for_each_pipe(__dev_priv, __p) \
for_each_if((__mask) & BIT(__p))
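
for_each_pipe() now walks the full 0..I915_MAX_PIPES range and filters through pipe_mask, so platforms with fused-off (non-contiguous) pipes iterate correctly; the old INTEL_NUM_PIPES() bound assumed pipes 0..N-1 all exist. The expanded loop, spelled out:

#include <stdio.h>

#define MAX_PIPES 4

int main(void)
{
	/* Hypothetical mask: pipes A and C present, pipe B fused off. */
	unsigned int pipe_mask = (1u << 0) | (1u << 2);

	/* Expansion of the new for_each_pipe(): bound by MAX, gated by mask. */
	for (unsigned int pipe = 0; pipe < MAX_PIPES; pipe++) {
		if (!(pipe_mask & (1u << pipe)))
			continue;
		printf("pipe %c present\n", 'A' + pipe);
	}
	return 0;
}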
#define for_each_cpu_transcoder_masked(__dev_priv, __t, __mask) \
@@ -469,6 +471,8 @@ enum phy_fia {
((connector) = to_intel_connector((__state)->base.connectors[__i].ptr), \
(new_connector_state) = to_intel_digital_connector_state((__state)->base.connectors[__i].new_state), 1))
+u8 intel_calc_active_pipes(struct intel_atomic_state *state,
+ u8 active_pipes);
void intel_link_compute_m_n(u16 bpp, int nlanes,
int pixel_clock, int link_clock,
struct intel_link_m_n *m_n,
@@ -486,6 +490,7 @@ enum phy intel_port_to_phy(struct drm_i915_private *i915, enum port port);
bool is_trans_port_sync_mode(const struct intel_crtc_state *state);
void intel_plane_destroy(struct drm_plane *plane);
+void intel_enable_pipe(const struct intel_crtc_state *new_crtc_state);
void intel_disable_pipe(const struct intel_crtc_state *old_crtc_state);
void i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe);
@@ -495,6 +500,7 @@ int vlv_get_cck_clock(struct drm_i915_private *dev_priv,
const char *name, u32 reg, int ref_freq);
int vlv_get_cck_clock_hpll(struct drm_i915_private *dev_priv,
const char *name, u32 reg);
+void lpt_pch_enable(const struct intel_crtc_state *crtc_state);
void lpt_disable_pch_transcoder(struct drm_i915_private *dev_priv);
void lpt_disable_iclkip(struct drm_i915_private *dev_priv);
void intel_init_display_hooks(struct drm_i915_private *dev_priv);
@@ -520,6 +526,7 @@ enum tc_port intel_port_to_tc(struct drm_i915_private *dev_priv,
int intel_get_pipe_from_crtc_id_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
u32 intel_crtc_get_vblank_counter(struct intel_crtc *crtc);
+void intel_crtc_vblank_on(const struct intel_crtc_state *crtc_state);
void intel_crtc_vblank_off(const struct intel_crtc_state *crtc_state);
int ilk_get_lanes_required(int target_clock, int link_bw, int bpp);
@@ -608,8 +615,10 @@ intel_format_info_is_yuv_semiplanar(const struct drm_format_info *info,
/* modesetting */
void intel_modeset_init_hw(struct drm_i915_private *i915);
+int intel_modeset_init_noirq(struct drm_i915_private *i915);
int intel_modeset_init(struct drm_i915_private *i915);
void intel_modeset_driver_remove(struct drm_i915_private *i915);
+void intel_modeset_driver_remove_noirq(struct drm_i915_private *i915);
void intel_display_resume(struct drm_device *dev);
void intel_init_pch_refclk(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
new file mode 100644
index 000000000000..1e6eb7f2f72d
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c
@@ -0,0 +1,2134 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <drm/drm_debugfs.h>
+#include <drm/drm_fourcc.h>
+
+#include "i915_debugfs.h"
+#include "intel_csr.h"
+#include "intel_display_debugfs.h"
+#include "intel_display_types.h"
+#include "intel_dp.h"
+#include "intel_fbc.h"
+#include "intel_hdcp.h"
+#include "intel_hdmi.h"
+#include "intel_pm.h"
+#include "intel_psr.h"
+#include "intel_sideband.h"
+
+static inline struct drm_i915_private *node_to_i915(struct drm_info_node *node)
+{
+ return to_i915(node->minor->dev);
+}
+
+static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+
+ seq_printf(m, "FB tracking busy bits: 0x%08x\n",
+ dev_priv->fb_tracking.busy_bits);
+
+ seq_printf(m, "FB tracking flip bits: 0x%08x\n",
+ dev_priv->fb_tracking.flip_bits);
+
+ return 0;
+}
+
+static int i915_fbc_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+ intel_wakeref_t wakeref;
+
+ if (!HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&fbc->lock);
+
+ if (intel_fbc_is_active(dev_priv))
+ seq_puts(m, "FBC enabled\n");
+ else
+ seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
+
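+	/*
+	 * The compressed-segment status register and its mask differ
+	 * between generations, so pick the matching pair before
+	 * sampling it.
+	 */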
+ if (intel_fbc_is_active(dev_priv)) {
+ u32 mask;
+
+ if (INTEL_GEN(dev_priv) >= 8)
+ mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 7)
+ mask = intel_de_read(dev_priv, IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
+ else if (INTEL_GEN(dev_priv) >= 5)
+ mask = intel_de_read(dev_priv, ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
+ else if (IS_G4X(dev_priv))
+ mask = intel_de_read(dev_priv, DPFC_STATUS) & DPFC_COMP_SEG_MASK;
+ else
+ mask = intel_de_read(dev_priv, FBC_STATUS) &
+ (FBC_STAT_COMPRESSING | FBC_STAT_COMPRESSED);
+
+ seq_printf(m, "Compressing: %s\n", yesno(mask));
+ }
+
+ mutex_unlock(&fbc->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_fbc_false_color_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ *val = dev_priv->fbc.false_color;
+
+ return 0;
+}
+
+static int i915_fbc_false_color_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ u32 reg;
+
+ if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
+ return -ENODEV;
+
+ mutex_lock(&dev_priv->fbc.lock);
+
+ reg = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
+ dev_priv->fbc.false_color = val;
+
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL,
+ val ? (reg | FBC_CTL_FALSE_COLOR) : (reg & ~FBC_CTL_FALSE_COLOR));
+
+ mutex_unlock(&dev_priv->fbc.lock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
+ i915_fbc_false_color_get, i915_fbc_false_color_set,
+ "%llu\n");
+
+static int i915_ips_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+
+ if (!HAS_IPS(dev_priv))
+ return -ENODEV;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "Enabled by kernel parameter: %s\n",
+ yesno(i915_modparams.enable_ips));
+
+ if (INTEL_GEN(dev_priv) >= 8) {
+ seq_puts(m, "Currently: unknown\n");
+ } else {
+ if (intel_de_read(dev_priv, IPS_CTL) & IPS_ENABLE)
+ seq_puts(m, "Currently: enabled\n");
+ else
+ seq_puts(m, "Currently: disabled\n");
+ }
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_sr_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ bool sr_enabled = false;
+
+ wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ /* no global SR status; inspect per-plane WM */;
+ else if (HAS_PCH_SPLIT(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, WM1_LP_ILK) & WM1_LP_SR_EN;
+ else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
+ IS_I945G(dev_priv) || IS_I945GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF) & FW_BLC_SELF_EN;
+ else if (IS_I915GM(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, INSTPM) & INSTPM_SELF_EN;
+ else if (IS_PINEVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ sr_enabled = intel_de_read(dev_priv, FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
+
+ seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
+
+ return 0;
+}
+
+static int i915_opregion(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->header)
+ seq_write(m, opregion->header, OPREGION_SIZE);
+
+ return 0;
+}
+
+static int i915_vbt(struct seq_file *m, void *unused)
+{
+ struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
+
+ if (opregion->vbt)
+ seq_write(m, opregion->vbt, opregion->vbt_size);
+
+ return 0;
+}
+
+static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_framebuffer *fbdev_fb = NULL;
+ struct drm_framebuffer *drm_fb;
+
+#ifdef CONFIG_DRM_FBDEV_EMULATION
+ if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
+ fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
+
+ seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fbdev_fb->base.width,
+ fbdev_fb->base.height,
+ fbdev_fb->base.format->depth,
+ fbdev_fb->base.format->cpp[0] * 8,
+ fbdev_fb->base.modifier,
+ drm_framebuffer_read_refcount(&fbdev_fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fbdev_fb->base));
+ seq_putc(m, '\n');
+ }
+#endif
+
+ mutex_lock(&dev->mode_config.fb_lock);
+ drm_for_each_fb(drm_fb, dev) {
+ struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
+ if (fb == fbdev_fb)
+ continue;
+
+ seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
+ fb->base.width,
+ fb->base.height,
+ fb->base.format->depth,
+ fb->base.format->cpp[0] * 8,
+ fb->base.modifier,
+ drm_framebuffer_read_refcount(&fb->base));
+ i915_debugfs_describe_obj(m, intel_fb_obj(&fb->base));
+ seq_putc(m, '\n');
+ }
+ mutex_unlock(&dev->mode_config.fb_lock);
+
+ return 0;
+}
+
+static int i915_psr_sink_status_show(struct seq_file *m, void *data)
+{
+ u8 val;
+ static const char * const sink_status[] = {
+ "inactive",
+ "transition to active, capture and display",
+ "active, display from RFB",
+ "active, capture and display on sink device timings",
+ "transition to inactive, capture and display, timing re-sync",
+ "reserved",
+ "reserved",
+ "sink internal error",
+ };
+ struct drm_connector *connector = m->private;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+ int ret;
+
+ if (!CAN_PSR(dev_priv)) {
+ seq_puts(m, "PSR Unsupported\n");
+ return -ENODEV;
+ }
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
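+	/* drm_dp_dpcd_readb() returns the number of bytes read on success */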
+ ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
+
+ if (ret == 1) {
+ const char *str = "unknown";
+
+ val &= DP_PSR_SINK_STATE_MASK;
+ if (val < ARRAY_SIZE(sink_status))
+ str = sink_status[val];
+ seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
+ } else {
+ return ret;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
+
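+/*
+ * Decode the live PSR state machine status. PSR1 and PSR2 report through
+ * different status registers with different state encodings, hence the
+ * two lookup tables below.
+ */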
+static void
+psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
+{
+ u32 val, status_val;
+ const char *status = "unknown";
+
+ if (dev_priv->psr.psr2_enabled) {
+ static const char * const live_status[] = {
+ "IDLE",
+ "CAPTURE",
+ "CAPTURE_FS",
+ "SLEEP",
+ "BUFON_FW",
+ "ML_UP",
+ "SU_STANDBY",
+ "FAST_SLEEP",
+ "DEEP_SLEEP",
+ "BUF_ON",
+ "TG_ON"
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
+ EDP_PSR2_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ } else {
+ static const char * const live_status[] = {
+ "IDLE",
+ "SRDONACK",
+ "SRDENT",
+ "BUFOFF",
+ "BUFON",
+ "AUXACK",
+ "SRDOFFACK",
+ "SRDENT_ON",
+ };
+ val = intel_de_read(dev_priv,
+ EDP_PSR_STATUS(dev_priv->psr.transcoder));
+ status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
+ EDP_PSR_STATUS_STATE_SHIFT;
+ if (status_val < ARRAY_SIZE(live_status))
+ status = live_status[status_val];
+ }
+
+ seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
+}
+
+static int i915_edp_psr_status(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_psr *psr = &dev_priv->psr;
+ intel_wakeref_t wakeref;
+ const char *status;
+ bool enabled;
+ u32 val;
+
+ if (!HAS_PSR(dev_priv))
+ return -ENODEV;
+
+ seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
+ if (psr->dp)
+ seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
+ seq_puts(m, "\n");
+
+ if (!psr->sink_support)
+ return 0;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+ mutex_lock(&psr->lock);
+
+ if (psr->enabled)
+ status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
+ else
+ status = "disabled";
+ seq_printf(m, "PSR mode: %s\n", status);
+
+ if (!psr->enabled) {
+ seq_printf(m, "PSR sink not reliable: %s\n",
+ yesno(psr->sink_not_reliable));
+
+ goto unlock;
+ }
+
+ if (psr->psr2_enabled) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR2_ENABLE;
+ } else {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ enabled = val & EDP_PSR_ENABLE;
+ }
+ seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
+ enableddisabled(enabled), val);
+ psr_source_status(dev_priv, m);
+ seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
+ psr->busy_frontbuffer_bits);
+
+	/*
+	 * The SKL+ perf counter is reset to 0 every time a DC state is
+	 * entered, so only HSW/BDW report a meaningful value here.
+	 */
+ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
+ val = intel_de_read(dev_priv,
+ EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
+ val &= EDP_PSR_PERF_CNT_MASK;
+ seq_printf(m, "Performance counter: %u\n", val);
+ }
+
+ if (psr->debug & I915_PSR_DEBUG_IRQ) {
+ seq_printf(m, "Last attempted entry at: %lld\n",
+ psr->last_entry_attempt);
+ seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
+ }
+
+ if (psr->psr2_enabled) {
+ u32 su_frames_val[3];
+ int frame;
+
+		/*
+		 * Each PSR2_SU_STATUS register packs the SU block counts
+		 * for three frames, so read them all beforehand to
+		 * minimize the chance of crossing a frame boundary
+		 * between register reads.
+		 */
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
+ val = intel_de_read(dev_priv,
+ PSR2_SU_STATUS(dev_priv->psr.transcoder, frame));
+ su_frames_val[frame / 3] = val;
+ }
+
+ seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
+
+ for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
+ u32 su_blocks;
+
+ su_blocks = su_frames_val[frame / 3] &
+ PSR2_SU_STATUS_MASK(frame);
+ su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
+ seq_printf(m, "%d\t%d\n", frame, su_blocks);
+ }
+ }
+
+unlock:
+ mutex_unlock(&psr->lock);
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int
+i915_edp_psr_debug_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ intel_wakeref_t wakeref;
+ int ret;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ drm_dbg_kms(&dev_priv->drm, "Setting PSR debug to %llx\n", val);
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ ret = intel_psr_debug_set(dev_priv, val);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return ret;
+}
+
+static int
+i915_edp_psr_debug_get(void *data, u64 *val)
+{
+ struct drm_i915_private *dev_priv = data;
+
+ if (!CAN_PSR(dev_priv))
+ return -ENODEV;
+
+ *val = READ_ONCE(dev_priv->psr.debug);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
+ i915_edp_psr_debug_get, i915_edp_psr_debug_set,
+ "%llu\n");
+
+static int i915_power_domain_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ int i;
+
+ mutex_lock(&power_domains->lock);
+
+ seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
+ for (i = 0; i < power_domains->power_well_count; i++) {
+ struct i915_power_well *power_well;
+ enum intel_display_power_domain power_domain;
+
+ power_well = &power_domains->power_wells[i];
+ seq_printf(m, "%-25s %d\n", power_well->desc->name,
+ power_well->count);
+
+ for_each_power_domain(power_domain, power_well->desc->domains)
+ seq_printf(m, " %-23s %d\n",
+ intel_display_power_domain_str(power_domain),
+ power_domains->domain_use_count[power_domain]);
+ }
+
+ mutex_unlock(&power_domains->lock);
+
+ return 0;
+}
+
+static int i915_dmc_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ intel_wakeref_t wakeref;
+ struct intel_csr *csr;
+ i915_reg_t dc5_reg, dc6_reg = {};
+
+ if (!HAS_CSR(dev_priv))
+ return -ENODEV;
+
+ csr = &dev_priv->csr;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
+ seq_printf(m, "path: %s\n", csr->fw_path);
+
+ if (!csr->dmc_payload)
+ goto out;
+
+ seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
+ CSR_VERSION_MINOR(csr->version));
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
+ dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
+		/*
+		 * NOTE: DMC_DEBUG3 is a general purpose reg.
+		 * According to B.Specs:49196 the DMC f/w reuses the DC5/6
+		 * counter reg for DC3CO debugging and validation, but the
+		 * TGL DMC f/w uses DMC_DEBUG3 as the DC3CO counter.
+		 */
+ seq_printf(m, "DC3CO count: %d\n",
+ intel_de_read(dev_priv, DMC_DEBUG3));
+ } else {
+ dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
+ SKL_CSR_DC3_DC5_COUNT;
+ if (!IS_GEN9_LP(dev_priv))
+ dc6_reg = SKL_CSR_DC5_DC6_COUNT;
+ }
+
+ seq_printf(m, "DC3 -> DC5 count: %d\n",
+ intel_de_read(dev_priv, dc5_reg));
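+	/* dc6_reg is left zero on platforms without a DC6 counter */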
+ if (dc6_reg.reg)
+ seq_printf(m, "DC5 -> DC6 count: %d\n",
+ intel_de_read(dev_priv, dc6_reg));
+
+out:
+ seq_printf(m, "program base: 0x%08x\n",
+ intel_de_read(dev_priv, CSR_PROGRAM(0)));
+ seq_printf(m, "ssp base: 0x%08x\n",
+ intel_de_read(dev_priv, CSR_SSP_BASE));
+ seq_printf(m, "htp: 0x%08x\n", intel_de_read(dev_priv, CSR_HTP_SKL));
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static void intel_seq_print_mode(struct seq_file *m, int tabs,
+ const struct drm_display_mode *mode)
+{
+ int i;
+
+ for (i = 0; i < tabs; i++)
+ seq_putc(m, '\t');
+
+ seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_encoder_info(struct seq_file *m,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+
+ seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
+ encoder->base.base.id, encoder->base.name);
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ const struct drm_connector_state *conn_state =
+ connector->state;
+
+ if (conn_state->best_encoder != &encoder->base)
+ continue;
+
+ seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+}
+
+static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
+{
+ const struct drm_display_mode *mode = panel->fixed_mode;
+
+ seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
+}
+
+static void intel_hdcp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ bool hdcp_cap, hdcp2_cap;
+
+ hdcp_cap = intel_hdcp_capable(intel_connector);
+ hdcp2_cap = intel_hdcp2_capable(intel_connector);
+
+ if (hdcp_cap)
+ seq_puts(m, "HDCP1.4 ");
+ if (hdcp2_cap)
+ seq_puts(m, "HDCP2.2 ");
+
+ if (!hdcp_cap && !hdcp2_cap)
+ seq_puts(m, "None");
+
+ seq_puts(m, "\n");
+}
+
+static void intel_dp_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
+
+ seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
+ if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
+ intel_panel_info(m, &intel_connector->panel);
+
+ drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
+ &intel_dp->aux);
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_dp_mst_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_dp_mst_encoder *intel_mst =
+ enc_to_mst(intel_encoder);
+ struct intel_digital_port *intel_dig_port = intel_mst->primary;
+ struct intel_dp *intel_dp = &intel_dig_port->dp;
+ bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
+ intel_connector->port);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
+}
+
+static void intel_hdmi_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ struct intel_encoder *intel_encoder = intel_attached_encoder(intel_connector);
+ struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
+
+ seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
+ if (intel_connector->hdcp.shim) {
+ seq_puts(m, "\tHDCP version: ");
+ intel_hdcp_info(m, intel_connector);
+ }
+}
+
+static void intel_lvds_info(struct seq_file *m,
+ struct intel_connector *intel_connector)
+{
+ intel_panel_info(m, &intel_connector->panel);
+}
+
+static void intel_connector_info(struct seq_file *m,
+ struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ const struct drm_connector_state *conn_state = connector->state;
+ struct intel_encoder *encoder =
+ to_intel_encoder(conn_state->best_encoder);
+ const struct drm_display_mode *mode;
+
+ seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
+ connector->base.id, connector->name,
+ drm_get_connector_status_name(connector->status));
+
+ if (connector->status == connector_status_disconnected)
+ return;
+
+ seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
+ connector->display_info.width_mm,
+ connector->display_info.height_mm);
+ seq_printf(m, "\tsubpixel order: %s\n",
+ drm_get_subpixel_order_name(connector->display_info.subpixel_order));
+ seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
+
+ if (!encoder)
+ return;
+
+ switch (connector->connector_type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ if (encoder->type == INTEL_OUTPUT_DP_MST)
+ intel_dp_mst_info(m, intel_connector);
+ else
+ intel_dp_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_LVDS:
+ if (encoder->type == INTEL_OUTPUT_LVDS)
+ intel_lvds_info(m, intel_connector);
+ break;
+ case DRM_MODE_CONNECTOR_HDMIA:
+ if (encoder->type == INTEL_OUTPUT_HDMI ||
+ encoder->type == INTEL_OUTPUT_DDI)
+ intel_hdmi_info(m, intel_connector);
+ break;
+ default:
+ break;
+ }
+
+ seq_printf(m, "\tmodes:\n");
+ list_for_each_entry(mode, &connector->modes, head)
+ intel_seq_print_mode(m, 2, mode);
+}
+
+static const char *plane_type(enum drm_plane_type type)
+{
+ switch (type) {
+ case DRM_PLANE_TYPE_OVERLAY:
+ return "OVL";
+ case DRM_PLANE_TYPE_PRIMARY:
+ return "PRI";
+ case DRM_PLANE_TYPE_CURSOR:
+ return "CUR";
+ /*
+ * Deliberately omitting default: to generate compiler warnings
+ * when a new drm_plane_type gets added.
+ */
+ }
+
+ return "unknown";
+}
+
+static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
+{
+	/*
+	 * According to the docs only one DRM_MODE_ROTATE_ value is
+	 * allowed, but print them all so any misuse is easy to spot.
+	 */
+ snprintf(buf, bufsize,
+ "%s%s%s%s%s%s(0x%08x)",
+ (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
+ (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
+ (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
+ (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
+ (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
+ (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
+ rotation);
+}
+
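+/*
+ * Dump both the uapi plane state (what userspace requested) and the hw
+ * plane state (what was actually programmed into the hardware).
+ */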
+static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->uapi.fb;
+ struct drm_format_name_buf format_name;
+ struct drm_rect src, dst;
+ char rot_str[48];
+
+ src = drm_plane_state_src(&plane_state->uapi);
+ dst = drm_plane_state_dest(&plane_state->uapi);
+
+ if (fb)
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->uapi.rotation);
+
+ seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
+ fb ? fb->width : 0, fb ? fb->height : 0,
+ DRM_RECT_FP_ARG(&src),
+ DRM_RECT_ARG(&dst),
+ rot_str);
+}
+
+static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
+{
+ const struct intel_plane_state *plane_state =
+ to_intel_plane_state(plane->base.state);
+ const struct drm_framebuffer *fb = plane_state->hw.fb;
+ struct drm_format_name_buf format_name;
+ char rot_str[48];
+
+ if (!fb)
+ return;
+
+ drm_get_format_name(fb->format->format, &format_name);
+
+ plane_rotation(rot_str, sizeof(rot_str),
+ plane_state->hw.rotation);
+
+ seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
+ fb->base.id, format_name.str,
+ fb->width, fb->height,
+ yesno(plane_state->uapi.visible),
+ DRM_RECT_FP_ARG(&plane_state->uapi.src),
+ DRM_RECT_ARG(&plane_state->uapi.dst),
+ rot_str);
+}
+
+static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_plane *plane;
+
+ for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
+ seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
+ plane->base.base.id, plane->base.name,
+ plane_type(plane->base.type));
+ intel_plane_uapi_info(m, plane);
+ intel_plane_hw_info(m, plane);
+ }
+}
+
+static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ int num_scalers = crtc->num_scalers;
+ int i;
+
+	/* Not all platforms have a scaler */
+ if (num_scalers) {
+ seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
+ num_scalers,
+ crtc_state->scaler_state.scaler_users,
+ crtc_state->scaler_state.scaler_id);
+
+ for (i = 0; i < num_scalers; i++) {
+ const struct intel_scaler *sc =
+ &crtc_state->scaler_state.scalers[i];
+
+ seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
+ i, yesno(sc->in_use), sc->mode);
+ }
+ seq_puts(m, "\n");
+ } else {
+ seq_puts(m, "\tNo scalers available on this platform\n");
+ }
+}
+
+static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ const struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ struct intel_encoder *encoder;
+
+ seq_printf(m, "[CRTC:%d:%s]:\n",
+ crtc->base.base.id, crtc->base.name);
+
+ seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->uapi.enable),
+ yesno(crtc_state->uapi.active),
+ DRM_MODE_ARG(&crtc_state->uapi.mode));
+
+ if (crtc_state->hw.enable) {
+ seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
+ yesno(crtc_state->hw.active),
+ DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
+
+ seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
+ crtc_state->pipe_src_w, crtc_state->pipe_src_h,
+ yesno(crtc_state->dither), crtc_state->pipe_bpp);
+
+ intel_scaler_info(m, crtc);
+ }
+
+ for_each_intel_encoder_mask(&dev_priv->drm, encoder,
+ crtc_state->uapi.encoder_mask)
+ intel_encoder_info(m, crtc, encoder);
+
+ intel_plane_info(m, crtc);
+
+ seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
+ yesno(!crtc->cpu_fifo_underrun_disabled),
+ yesno(!crtc->pch_fifo_underrun_disabled));
+}
+
+static int i915_display_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ intel_wakeref_t wakeref;
+
+ wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "CRTC info\n");
+ seq_printf(m, "---------\n");
+ for_each_intel_crtc(dev, crtc)
+ intel_crtc_info(m, crtc);
+
+ seq_printf(m, "\n");
+ seq_printf(m, "Connector info\n");
+ seq_printf(m, "--------------\n");
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter)
+ intel_connector_info(m, connector);
+ drm_connector_list_iter_end(&conn_iter);
+
+ drm_modeset_unlock_all(dev);
+
+ intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
+
+ return 0;
+}
+
+static int i915_shared_dplls_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ int i;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "PLL refclks: non-SSC: %d kHz, SSC: %d kHz\n",
+ dev_priv->dpll.ref_clks.nssc,
+ dev_priv->dpll.ref_clks.ssc);
+
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
+
+ seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
+ pll->info->id);
+ seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
+ pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
+ seq_printf(m, " tracked hardware state:\n");
+ seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
+ seq_printf(m, " dpll_md: 0x%08x\n",
+ pll->state.hw_state.dpll_md);
+ seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
+ seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
+ seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
+ seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
+ seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
+ seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
+ pll->state.hw_state.mg_refclkin_ctl);
+ seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_coreclkctl1);
+ seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
+ pll->state.hw_state.mg_clktop2_hsclkctl);
+ seq_printf(m, " mg_pll_div0: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div0);
+ seq_printf(m, " mg_pll_div1: 0x%08x\n",
+ pll->state.hw_state.mg_pll_div1);
+ seq_printf(m, " mg_pll_lf: 0x%08x\n",
+ pll->state.hw_state.mg_pll_lf);
+ seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
+ pll->state.hw_state.mg_pll_frac_lock);
+ seq_printf(m, " mg_pll_ssc: 0x%08x\n",
+ pll->state.hw_state.mg_pll_ssc);
+ seq_printf(m, " mg_pll_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_bias);
+ seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
+ pll->state.hw_state.mg_pll_tdc_coldst_bias);
+ }
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static int i915_ipc_status_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Isochronous Priority Control: %s\n",
+ yesno(dev_priv->ipc_enabled));
+ return 0;
+}
+
+static int i915_ipc_status_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (!HAS_IPC(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, i915_ipc_status_show, dev_priv);
+}
+
+static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ intel_wakeref_t wakeref;
+ bool enable;
+ int ret;
+
+ ret = kstrtobool_from_user(ubuf, len, &enable);
+ if (ret < 0)
+ return ret;
+
+ with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
+ if (!dev_priv->ipc_enabled && enable)
+ drm_info(&dev_priv->drm,
+ "Enabling IPC: WM will be proper only after next commit\n");
+ dev_priv->wm.distrust_bios_wm = true;
+ dev_priv->ipc_enabled = enable;
+ intel_enable_ipc(dev_priv);
+ }
+
+ return len;
+}
+
+static const struct file_operations i915_ipc_status_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_ipc_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_ipc_status_write
+};
+
+static int i915_ddb_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct skl_ddb_entry *entry;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 9)
+ return -ENODEV;
+
+ drm_modeset_lock_all(dev);
+
+ seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+ enum pipe pipe = crtc->pipe;
+ enum plane_id plane_id;
+
+ seq_printf(m, "Pipe %c\n", pipe_name(pipe));
+
+ for_each_plane_id_on_crtc(crtc, plane_id) {
+ entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
+ seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
+ entry->start, entry->end,
+ skl_ddb_entry_size(entry));
+ }
+
+ entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
+ seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
+ entry->end, skl_ddb_entry_size(entry));
+ }
+
+ drm_modeset_unlock_all(dev);
+
+ return 0;
+}
+
+static void drrs_status_per_crtc(struct seq_file *m,
+ struct drm_device *dev,
+ struct intel_crtc *intel_crtc)
+{
+ struct drm_i915_private *dev_priv = to_i915(dev);
+ struct i915_drrs *drrs = &dev_priv->drrs;
+ int vrefresh = 0;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->state->crtc != &intel_crtc->base)
+ continue;
+
+ seq_printf(m, "%s:\n", connector->name);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ seq_puts(m, "\n");
+
+ if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
+ struct intel_panel *panel;
+
+ mutex_lock(&drrs->mutex);
+ /* DRRS Supported */
+ seq_puts(m, "\tDRRS Supported: Yes\n");
+
+		/* intel_edp_drrs_disable() will make drrs->dp NULL */
+ if (!drrs->dp) {
+ seq_puts(m, "Idleness DRRS: Disabled\n");
+ if (dev_priv->psr.enabled)
+ seq_puts(m,
+ "\tAs PSR is enabled, DRRS is not enabled\n");
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+
+ panel = &drrs->dp->attached_connector->panel;
+ seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
+ drrs->busy_frontbuffer_bits);
+
+ seq_puts(m, "\n\t\t");
+ if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
+ seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
+ vrefresh = panel->fixed_mode->vrefresh;
+ } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
+ seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
+ vrefresh = panel->downclock_mode->vrefresh;
+ } else {
+ seq_printf(m, "DRRS_State: Unknown(%d)\n",
+ drrs->refresh_rate_type);
+ mutex_unlock(&drrs->mutex);
+ return;
+ }
+ seq_printf(m, "\t\tVrefresh: %d", vrefresh);
+
+ seq_puts(m, "\n\t\t");
+ mutex_unlock(&drrs->mutex);
+ } else {
+		/* DRRS not supported */
+		seq_puts(m, "\tDRRS Supported: No");
+ }
+ seq_puts(m, "\n");
+}
+
+static int i915_drrs_status(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *intel_crtc;
+ int active_crtc_cnt = 0;
+
+ drm_modeset_lock_all(dev);
+ for_each_intel_crtc(dev, intel_crtc) {
+ if (intel_crtc->base.state->active) {
+ active_crtc_cnt++;
+ seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
+
+ drrs_status_per_crtc(m, dev, intel_crtc);
+ }
+ }
+ drm_modeset_unlock_all(dev);
+
+ if (!active_crtc_cnt)
+ seq_puts(m, "No active crtc found\n");
+
+ return 0;
+}
+
+static int i915_dp_mst_info(struct seq_file *m, void *unused)
+{
+ struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_encoder *intel_encoder;
+ struct intel_digital_port *intel_dig_port;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ intel_encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ intel_dig_port = enc_to_dig_port(intel_encoder);
+ if (!intel_dig_port->dp.can_mst)
+ continue;
+
+ seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
+ drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static ssize_t i915_displayport_test_active_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ char *input_buffer;
+ int status = 0;
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+ int val = 0;
+
+ dev = ((struct seq_file *)file->private_data)->private;
+
+ if (len == 0)
+ return 0;
+
+ input_buffer = memdup_user_nul(ubuf, len);
+ if (IS_ERR(input_buffer))
+ return PTR_ERR(input_buffer);
+
+ drm_dbg(&to_i915(dev)->drm,
+ "Copied %d bytes from user\n", (unsigned int)len);
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ status = kstrtoint(input_buffer, 10, &val);
+ if (status < 0)
+ break;
+ drm_dbg(&to_i915(dev)->drm,
+ "Got %d for test active\n", val);
+ /* To prevent erroneous activation of the compliance
+ * testing code, only accept an actual value of 1 here
+ */
+ if (val == 1)
+ intel_dp->compliance.test_active = true;
+ else
+ intel_dp->compliance.test_active = false;
+ }
+ }
+ drm_connector_list_iter_end(&conn_iter);
+ kfree(input_buffer);
+ if (status < 0)
+ return status;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_displayport_test_active_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_active)
+ seq_puts(m, "1");
+ else
+ seq_puts(m, "0");
+		} else {
+			seq_puts(m, "0");
+		}
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+
+static int i915_displayport_test_active_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_displayport_test_active_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_displayport_test_active_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_displayport_test_active_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_displayport_test_active_write
+};
+
+static int i915_displayport_test_data_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_EDID_READ)
+ seq_printf(m, "%lx",
+ intel_dp->compliance.test_data.edid);
+ else if (intel_dp->compliance.test_type ==
+ DP_TEST_LINK_VIDEO_PATTERN) {
+ seq_printf(m, "hdisplay: %d\n",
+ intel_dp->compliance.test_data.hdisplay);
+ seq_printf(m, "vdisplay: %d\n",
+ intel_dp->compliance.test_data.vdisplay);
+ seq_printf(m, "bpc: %u\n",
+ intel_dp->compliance.test_data.bpc);
+ }
+		} else {
+			seq_puts(m, "0");
+		}
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
+
+static int i915_displayport_test_type_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_dp *intel_dp;
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+
+ if (connector->connector_type !=
+ DRM_MODE_CONNECTOR_DisplayPort)
+ continue;
+
+ encoder = to_intel_encoder(connector->encoder);
+ if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
+ continue;
+
+ if (encoder && connector->status == connector_status_connected) {
+ intel_dp = enc_to_intel_dp(encoder);
+ seq_printf(m, "%02lx", intel_dp->compliance.test_type);
+		} else {
+			seq_puts(m, "0");
+		}
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
+
+static void wm_latency_show(struct seq_file *m, const u16 wm[8])
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ int level;
+ int num_levels;
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++) {
+ unsigned int latency = wm[level];
+
+		/*
+		 * - WM1+ latency values are in 0.5us units
+		 * - latencies are in us on gen9+/vlv/chv/g4x
+		 */
+ if (INTEL_GEN(dev_priv) >= 9 ||
+ IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_G4X(dev_priv))
+ latency *= 10;
+ else if (level > 0)
+ latency *= 5;
+
+ seq_printf(m, "WM%d %u (%u.%u usec)\n",
+ level, wm[level], latency / 10, latency % 10);
+ }
+
+ drm_modeset_unlock_all(dev);
+}
+
+static int pri_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.pri_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int spr_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.spr_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int cur_wm_latency_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ const u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.cur_latency;
+
+ wm_latency_show(m, latencies);
+
+ return 0;
+}
+
+static int pri_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, pri_wm_latency_show, dev_priv);
+}
+
+static int spr_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, spr_wm_latency_show, dev_priv);
+}
+
+static int cur_wm_latency_open(struct inode *inode, struct file *file)
+{
+ struct drm_i915_private *dev_priv = inode->i_private;
+
+ if (HAS_GMCH(dev_priv))
+ return -ENODEV;
+
+ return single_open(file, cur_wm_latency_show, dev_priv);
+}
+
+static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp, u16 wm[8])
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct drm_device *dev = &dev_priv->drm;
+ u16 new[8] = { 0 };
+ int num_levels;
+ int level;
+ int ret;
+ char tmp[32];
+
+ if (IS_CHERRYVIEW(dev_priv))
+ num_levels = 3;
+ else if (IS_VALLEYVIEW(dev_priv))
+ num_levels = 1;
+ else if (IS_G4X(dev_priv))
+ num_levels = 3;
+ else
+ num_levels = ilk_wm_max_level(dev_priv) + 1;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
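+	/* Expect exactly one latency value per watermark level */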
+ ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
+ &new[0], &new[1], &new[2], &new[3],
+ &new[4], &new[5], &new[6], &new[7]);
+ if (ret != num_levels)
+ return -EINVAL;
+
+ drm_modeset_lock_all(dev);
+
+ for (level = 0; level < num_levels; level++)
+ wm[level] = new[level];
+
+ drm_modeset_unlock_all(dev);
+
+ return len;
+}
+
+static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.pri_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.spr_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ u16 *latencies;
+
+ if (INTEL_GEN(dev_priv) >= 9)
+ latencies = dev_priv->wm.skl_latency;
+ else
+ latencies = dev_priv->wm.cur_latency;
+
+ return wm_latency_write(file, ubuf, len, offp, latencies);
+}
+
+static const struct file_operations i915_pri_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = pri_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = pri_wm_latency_write
+};
+
+static const struct file_operations i915_spr_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = spr_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = spr_wm_latency_write
+};
+
+static const struct file_operations i915_cur_wm_latency_fops = {
+ .owner = THIS_MODULE,
+ .open = cur_wm_latency_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = cur_wm_latency_write
+};
+
+static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+
+	/*
+	 * Synchronize with everything first in case there's been an HPD
+	 * storm that the kernel hasn't finished handling yet.
+	 */
+ intel_synchronize_irq(dev_priv);
+ flush_work(&dev_priv->hotplug.dig_port_work);
+ flush_delayed_work(&dev_priv->hotplug.hotplug_work);
+
+ seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
+ seq_printf(m, "Detected: %s\n",
+ yesno(delayed_work_pending(&hotplug->reenable_work)));
+
+ return 0;
+}
+
+static ssize_t i915_hpd_storm_ctl_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ unsigned int new_threshold;
+ int i;
+ char *newline;
+ char tmp[16];
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ if (strcmp(tmp, "reset") == 0)
+ new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
+ else if (kstrtouint(tmp, 10, &new_threshold) != 0)
+ return -EINVAL;
+
+ if (new_threshold > 0)
+ drm_dbg_kms(&dev_priv->drm,
+ "Setting HPD storm detection threshold to %d\n",
+ new_threshold);
+ else
+ drm_dbg_kms(&dev_priv->drm, "Disabling HPD storm detection\n");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_storm_threshold = new_threshold;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
+}
+
+static const struct file_operations i915_hpd_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_storm_ctl_write
+};
+
+static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
+{
+ struct drm_i915_private *dev_priv = m->private;
+
+ seq_printf(m, "Enabled: %s\n",
+ yesno(dev_priv->hotplug.hpd_short_storm_enabled));
+
+ return 0;
+}
+
+static int
+i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_hpd_short_storm_ctl_show,
+ inode->i_private);
+}
+
+static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ struct drm_i915_private *dev_priv = m->private;
+ struct i915_hotplug *hotplug = &dev_priv->hotplug;
+ char *newline;
+ char tmp[16];
+ int i;
+ bool new_state;
+
+ if (len >= sizeof(tmp))
+ return -EINVAL;
+
+ if (copy_from_user(tmp, ubuf, len))
+ return -EFAULT;
+
+ tmp[len] = '\0';
+
+ /* Strip newline, if any */
+ newline = strchr(tmp, '\n');
+ if (newline)
+ *newline = '\0';
+
+ /* Reset to the "default" state for this system */
+ if (strcmp(tmp, "reset") == 0)
+ new_state = !HAS_DP_MST(dev_priv);
+ else if (kstrtobool(tmp, &new_state) != 0)
+ return -EINVAL;
+
+ drm_dbg_kms(&dev_priv->drm, "%sabling HPD short storm detection\n",
+ new_state ? "En" : "Dis");
+
+ spin_lock_irq(&dev_priv->irq_lock);
+ hotplug->hpd_short_storm_enabled = new_state;
+ /* Reset the HPD storm stats so we don't accidentally trigger a storm */
+ for_each_hpd_pin(i)
+ hotplug->stats[i].count = 0;
+ spin_unlock_irq(&dev_priv->irq_lock);
+
+ /* Re-enable hpd immediately if we were in an irq storm */
+ flush_delayed_work(&dev_priv->hotplug.reenable_work);
+
+ return len;
+}
+
+static const struct file_operations i915_hpd_short_storm_ctl_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_hpd_short_storm_ctl_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_hpd_short_storm_ctl_write,
+};
+
+static int i915_drrs_ctl_set(void *data, u64 val)
+{
+ struct drm_i915_private *dev_priv = data;
+ struct drm_device *dev = &dev_priv->drm;
+ struct intel_crtc *crtc;
+
+ if (INTEL_GEN(dev_priv) < 7)
+ return -ENODEV;
+
+ for_each_intel_crtc(dev, crtc) {
+ struct drm_connector_list_iter conn_iter;
+ struct intel_crtc_state *crtc_state;
+ struct drm_connector *connector;
+ struct drm_crtc_commit *commit;
+ int ret;
+
+ ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
+ if (ret)
+ return ret;
+
+ crtc_state = to_intel_crtc_state(crtc->base.state);
+
+ if (!crtc_state->hw.active ||
+ !crtc_state->has_drrs)
+ goto out;
+
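+		/*
+		 * Wait for any pending commit on this crtc to land before
+		 * toggling DRRS against its state.
+		 */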
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (ret)
+ goto out;
+ }
+
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct intel_encoder *encoder;
+ struct intel_dp *intel_dp;
+
+ if (!(crtc_state->uapi.connector_mask &
+ drm_connector_mask(connector)))
+ continue;
+
+ encoder = intel_attached_encoder(to_intel_connector(connector));
+ if (encoder->type != INTEL_OUTPUT_EDP)
+ continue;
+
+ drm_dbg(&dev_priv->drm,
+ "Manually %sabling DRRS. %llu\n",
+ val ? "en" : "dis", val);
+
+ intel_dp = enc_to_intel_dp(encoder);
+ if (val)
+ intel_edp_drrs_enable(intel_dp,
+ crtc_state);
+ else
+ intel_edp_drrs_disable(intel_dp,
+ crtc_state);
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+out:
+ drm_modeset_unlock(&crtc->base.mutex);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
+
+static ssize_t
+i915_fifo_underrun_reset_write(struct file *filp,
+ const char __user *ubuf,
+ size_t cnt, loff_t *ppos)
+{
+ struct drm_i915_private *dev_priv = filp->private_data;
+ struct intel_crtc *intel_crtc;
+ struct drm_device *dev = &dev_priv->drm;
+ int ret;
+ bool reset;
+
+ ret = kstrtobool_from_user(ubuf, cnt, &reset);
+ if (ret)
+ return ret;
+
+ if (!reset)
+ return cnt;
+
+ for_each_intel_crtc(dev, intel_crtc) {
+ struct drm_crtc_commit *commit;
+ struct intel_crtc_state *crtc_state;
+
+ ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
+ if (ret)
+ return ret;
+
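+		/*
+		 * Wait for any in-flight commit to complete before
+		 * re-arming underrun reporting on an active pipe.
+		 */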
+ crtc_state = to_intel_crtc_state(intel_crtc->base.state);
+ commit = crtc_state->uapi.commit;
+ if (commit) {
+ ret = wait_for_completion_interruptible(&commit->hw_done);
+ if (!ret)
+ ret = wait_for_completion_interruptible(&commit->flip_done);
+ }
+
+ if (!ret && crtc_state->hw.active) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Re-arming FIFO underruns on pipe %c\n",
+ pipe_name(intel_crtc->pipe));
+
+ intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
+ }
+
+ drm_modeset_unlock(&intel_crtc->base.mutex);
+
+ if (ret)
+ return ret;
+ }
+
+ ret = intel_fbc_reset_underrun(dev_priv);
+ if (ret)
+ return ret;
+
+ return cnt;
+}
+
+static const struct file_operations i915_fifo_underrun_reset_ops = {
+ .owner = THIS_MODULE,
+ .open = simple_open,
+ .write = i915_fifo_underrun_reset_write,
+ .llseek = default_llseek,
+};
+
+static const struct drm_info_list intel_display_debugfs_list[] = {
+ {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
+ {"i915_fbc_status", i915_fbc_status, 0},
+ {"i915_ips_status", i915_ips_status, 0},
+ {"i915_sr_status", i915_sr_status, 0},
+ {"i915_opregion", i915_opregion, 0},
+ {"i915_vbt", i915_vbt, 0},
+ {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
+ {"i915_edp_psr_status", i915_edp_psr_status, 0},
+ {"i915_power_domain_info", i915_power_domain_info, 0},
+ {"i915_dmc_info", i915_dmc_info, 0},
+ {"i915_display_info", i915_display_info, 0},
+ {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
+ {"i915_dp_mst_info", i915_dp_mst_info, 0},
+ {"i915_ddb_info", i915_ddb_info, 0},
+ {"i915_drrs_status", i915_drrs_status, 0},
+};
+
+static const struct {
+ const char *name;
+ const struct file_operations *fops;
+} intel_display_debugfs_files[] = {
+ {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
+ {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
+ {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
+ {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
+ {"i915_fbc_false_color", &i915_fbc_false_color_fops},
+ {"i915_dp_test_data", &i915_displayport_test_data_fops},
+ {"i915_dp_test_type", &i915_displayport_test_type_fops},
+ {"i915_dp_test_active", &i915_displayport_test_active_fops},
+ {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
+ {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
+ {"i915_ipc_status", &i915_ipc_status_fops},
+ {"i915_drrs_ctl", &i915_drrs_ctl_fops},
+ {"i915_edp_psr_debug", &i915_edp_psr_debug_fops},
+};
+
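+/*
+ * The writable controls above are registered one by one; the read-only
+ * status nodes go through drm_debugfs_create_files() in a single call.
+ */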
+int intel_display_debugfs_register(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) {
+ debugfs_create_file(intel_display_debugfs_files[i].name,
+ S_IRUGO | S_IWUSR,
+ minor->debugfs_root,
+ to_i915(minor->dev),
+ intel_display_debugfs_files[i].fops);
+ }
+
+ return drm_debugfs_create_files(intel_display_debugfs_list,
+ ARRAY_SIZE(intel_display_debugfs_list),
+ minor->debugfs_root, minor);
+}
+
+static int i915_panel_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_dp *intel_dp =
+ intel_attached_dp(to_intel_connector(connector));
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+ seq_printf(m, "Panel power up delay: %d\n",
+ intel_dp->panel_power_up_delay);
+ seq_printf(m, "Panel power down delay: %d\n",
+ intel_dp->panel_power_down_delay);
+ seq_printf(m, "Backlight on delay: %d\n",
+ intel_dp->backlight_on_delay);
+ seq_printf(m, "Backlight off delay: %d\n",
+ intel_dp->backlight_off_delay);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_panel);
+
+static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ if (connector->status != connector_status_connected)
+ return -ENODEV;
+
+	/* No shim means the connector has no HDCP support */
+ if (!intel_connector->hdcp.shim)
+ return -EINVAL;
+
+ seq_printf(m, "%s:%d HDCP version: ", connector->name,
+ connector->base.id);
+ intel_hdcp_info(m, intel_connector);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
+
+static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
+{
+ struct drm_connector *connector = m->private;
+ struct drm_device *dev = connector->dev;
+ struct drm_crtc *crtc;
+ struct intel_dp *intel_dp;
+ struct drm_modeset_acquire_ctx ctx;
+ struct intel_crtc_state *crtc_state = NULL;
+ int ret = 0;
+ bool try_again = false;
+
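+	/*
+	 * Take connection_mutex and the crtc lock with the full -EDEADLK
+	 * backoff-and-retry dance the drm locking protocol requires.
+	 */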
+ drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
+
+ do {
+ try_again = false;
+ ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
+ &ctx);
+ if (ret) {
+ if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
+ try_again = true;
+ continue;
+ }
+ break;
+ }
+ crtc = connector->state->crtc;
+ if (connector->status != connector_status_connected || !crtc) {
+ ret = -ENODEV;
+ break;
+ }
+ ret = drm_modeset_lock(&crtc->mutex, &ctx);
+ if (ret == -EDEADLK) {
+ ret = drm_modeset_backoff(&ctx);
+ if (!ret) {
+ try_again = true;
+ continue;
+ }
+ break;
+ } else if (ret) {
+ break;
+ }
+ intel_dp = intel_attached_dp(to_intel_connector(connector));
+ crtc_state = to_intel_crtc_state(crtc->state);
+ seq_printf(m, "DSC_Enabled: %s\n",
+ yesno(crtc_state->dsc.compression_enable));
+ seq_printf(m, "DSC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
+ seq_printf(m, "Force_DSC_Enable: %s\n",
+ yesno(intel_dp->force_dsc_en));
+ if (!intel_dp_is_edp(intel_dp))
+ seq_printf(m, "FEC_Sink_Support: %s\n",
+ yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
+ } while (try_again);
+
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ return ret;
+}
+
+static ssize_t i915_dsc_fec_support_write(struct file *file,
+ const char __user *ubuf,
+ size_t len, loff_t *offp)
+{
+ bool dsc_enable = false;
+ int ret;
+ struct drm_connector *connector =
+ ((struct seq_file *)file->private_data)->private;
+ struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
+ struct drm_i915_private *i915 = to_i915(encoder->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
+
+ if (len == 0)
+ return 0;
+
+ drm_dbg(&i915->drm,
+ "Copied %zu bytes from user to force DSC\n", len);
+
+ ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
+ if (ret < 0)
+ return ret;
+
+ drm_dbg(&i915->drm, "Got %s for DSC Enable\n",
+ (dsc_enable) ? "true" : "false");
+ intel_dp->force_dsc_en = dsc_enable;
+
+ *offp += len;
+ return len;
+}
+
+static int i915_dsc_fec_support_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, i915_dsc_fec_support_show,
+ inode->i_private);
+}
+
+static const struct file_operations i915_dsc_fec_support_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_dsc_fec_support_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = i915_dsc_fec_support_write
+};
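A hand-rolled file_operations is needed here only because the file is writable: DEFINE_SHOW_ATTRIBUTE() generates no .write hook (newer kernels grew DEFINE_SHOW_STORE_ATTRIBUTE for exactly this case). The write side follows the canonical boolean-knob pattern — a minimal sketch with hypothetical names:

	static ssize_t example_knob_write(struct file *file, const char __user *ubuf,
					  size_t len, loff_t *offp)
	{
		bool enable;
		int ret;

		/* Accepts "0"/"1", "y"/"n" and similar spellings */
		ret = kstrtobool_from_user(ubuf, len, &enable);
		if (ret)
			return ret;

		/* ... apply 'enable' to the driver state ... */

		*offp += len;
		return len;
	}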
+
+/**
+ * intel_connector_debugfs_add - add i915-specific connector debugfs files
+ * @connector: pointer to a registered drm_connector
+ *
+ * Cleanup will be done by drm_connector_unregister() through a call to
+ * drm_debugfs_connector_remove().
+ *
+ * Returns 0 on success, a negative error code on failure.
+ */
+int intel_connector_debugfs_add(struct drm_connector *connector)
+{
+ struct dentry *root = connector->debugfs_entry;
+ struct drm_i915_private *dev_priv = to_i915(connector->dev);
+
+ /* The connector must have been registered beforehand. */
+ if (!root)
+ return -ENODEV;
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ debugfs_create_file("i915_panel_timings", S_IRUGO, root,
+ connector, &i915_panel_fops);
+ debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
+ connector, &i915_psr_sink_status_fops);
+ }
+
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
+ connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
+ debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
+ connector, &i915_hdcp_sink_capability_fops);
+ }
+
+ if (INTEL_GEN(dev_priv) >= 10 &&
+ (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
+ connector->connector_type == DRM_MODE_CONNECTOR_eDP))
+ debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
+ connector, &i915_dsc_fec_support_fops);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.h b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
new file mode 100644
index 000000000000..a3bea1ce04c2
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DISPLAY_DEBUGFS_H__
+#define __INTEL_DISPLAY_DEBUGFS_H__
+
+struct drm_connector;
+struct drm_i915_private;
+
+#ifdef CONFIG_DEBUG_FS
+int intel_display_debugfs_register(struct drm_i915_private *i915);
+int intel_connector_debugfs_add(struct drm_connector *connector);
+#else
+static inline int intel_display_debugfs_register(struct drm_i915_private *i915) { return 0; }
+static inline int intel_connector_debugfs_add(struct drm_connector *connector) { return 0; }
+#endif
+
+#endif /* __INTEL_DISPLAY_DEBUGFS_H__ */
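The #else branch above is the usual static-inline stub idiom: call sites build identically whether or not CONFIG_DEBUG_FS is set, with no #ifdef at the caller. A hypothetical call site, for illustration:

	static void example_connector_register(struct drm_connector *connector)
	{
		/* Safe to call unconditionally: with CONFIG_DEBUG_FS=n the
		 * stub compiles to 'return 0' and the call folds away. */
		intel_connector_debugfs_add(connector);
	}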
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c
index 21561acfa3ac..246e406bb385 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.c
+++ b/drivers/gpu/drm/i915/display/intel_display_power.c
@@ -15,6 +15,7 @@
#include "intel_display_types.h"
#include "intel_dpio_phy.h"
#include "intel_hotplug.h"
+#include "intel_pm.h"
#include "intel_sideband.h"
#include "intel_tc.h"
#include "intel_vga.h"
@@ -159,7 +160,7 @@ intel_display_power_domain_str(enum intel_display_power_domain domain)
static void intel_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("enabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", power_well->desc->name);
power_well->desc->ops->enable(dev_priv, power_well);
power_well->hw_enabled = true;
}
@@ -167,7 +168,7 @@ static void intel_power_well_enable(struct drm_i915_private *dev_priv,
static void intel_power_well_disable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- DRM_DEBUG_KMS("disabling %s\n", power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", power_well->desc->name);
power_well->hw_enabled = false;
power_well->desc->ops->disable(dev_priv, power_well);
}
@@ -182,8 +183,9 @@ static void intel_power_well_get(struct drm_i915_private *dev_priv,
static void intel_power_well_put(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- WARN(!power_well->count, "Use count on power well %s is already zero",
- power_well->desc->name);
+ drm_WARN(&dev_priv->drm, !power_well->count,
+ "Use count on power well %s is already zero",
+ power_well->desc->name);
if (!--power_well->count)
intel_power_well_disable(dev_priv, power_well);
@@ -289,11 +291,11 @@ static void hsw_wait_for_power_well_enable(struct drm_i915_private *dev_priv,
/* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
if (intel_de_wait_for_set(dev_priv, regs->driver,
HSW_PWR_WELL_CTL_STATE(pw_idx), 1)) {
- DRM_DEBUG_KMS("%s power well enable timeout\n",
- power_well->desc->name);
+ drm_dbg_kms(&dev_priv->drm, "%s power well enable timeout\n",
+ power_well->desc->name);
/* An AUX timeout is expected if the TBT DP tunnel is down. */
- WARN_ON(!power_well->desc->hsw.is_tc_tbt);
+ drm_WARN_ON(&dev_priv->drm, !power_well->desc->hsw.is_tc_tbt);
}
}
@@ -304,11 +306,11 @@ static u32 hsw_power_well_requesters(struct drm_i915_private *dev_priv,
u32 req_mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
u32 ret;
- ret = I915_READ(regs->bios) & req_mask ? 1 : 0;
- ret |= I915_READ(regs->driver) & req_mask ? 2 : 0;
+ ret = intel_de_read(dev_priv, regs->bios) & req_mask ? 1 : 0;
+ ret |= intel_de_read(dev_priv, regs->driver) & req_mask ? 2 : 0;
if (regs->kvmr.reg)
- ret |= I915_READ(regs->kvmr) & req_mask ? 4 : 0;
- ret |= I915_READ(regs->debug) & req_mask ? 8 : 0;
+ ret |= intel_de_read(dev_priv, regs->kvmr) & req_mask ? 4 : 0;
+ ret |= intel_de_read(dev_priv, regs->debug) & req_mask ? 8 : 0;
return ret;
}
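Most of this file's churn is the mechanical I915_READ()/I915_WRITE() to intel_de_read()/intel_de_write() conversion. The old macros pick up dev_priv implicitly from the enclosing scope; the new helpers take the device explicitly. Conceptually (a sketch, not the exact i915 definitions):

	/* Old: 'dev_priv' must exist in the surrounding scope */
	#define I915_READ(reg)		intel_uncore_read(&dev_priv->uncore, (reg))
	#define I915_WRITE(reg, val)	intel_uncore_write(&dev_priv->uncore, (reg), (val))

	/* New: the device is an explicit parameter, so the data flow is visible */
	static inline u32 intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		return intel_uncore_read(&i915->uncore, reg);
	}

	static inline void intel_de_write(struct drm_i915_private *i915,
					  i915_reg_t reg, u32 val)
	{
		intel_uncore_write(&i915->uncore, reg, val);
	}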
@@ -330,23 +332,25 @@ static void hsw_wait_for_power_well_disable(struct drm_i915_private *dev_priv,
* Skip the wait in case any of the request bits are set and print a
* diagnostic message.
*/
- wait_for((disabled = !(I915_READ(regs->driver) &
+ wait_for((disabled = !(intel_de_read(dev_priv, regs->driver) &
HSW_PWR_WELL_CTL_STATE(pw_idx))) ||
(reqs = hsw_power_well_requesters(dev_priv, regs, pw_idx)), 1);
if (disabled)
return;
- DRM_DEBUG_KMS("%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
- power_well->desc->name,
- !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
+ drm_dbg_kms(&dev_priv->drm,
+ "%s forced on (bios:%d driver:%d kvmr:%d debug:%d)\n",
+ power_well->desc->name,
+ !!(reqs & 1), !!(reqs & 2), !!(reqs & 4), !!(reqs & 8));
}
static void gen9_wait_for_power_well_fuses(struct drm_i915_private *dev_priv,
enum skl_power_gate pg)
{
/* Timeout 5us for PG#0, for other PGs 1us */
- WARN_ON(intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
- SKL_FUSE_PG_DIST_STATUS(pg), 1));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_wait_for_set(dev_priv, SKL_FUSE_STATUS,
+ SKL_FUSE_PG_DIST_STATUS(pg), 1));
}
static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
@@ -372,17 +376,18 @@ static void hsw_power_well_enable(struct drm_i915_private *dev_priv,
gen9_wait_for_power_well_fuses(dev_priv, SKL_PG0);
}
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_enable(dev_priv, power_well);
/* Display WA #1178: cnl */
if (IS_CANNONLAKE(dev_priv) &&
pw_idx >= GLK_PW_CTL_IDX_AUX_B &&
pw_idx <= CNL_PW_CTL_IDX_AUX_F) {
- val = I915_READ(CNL_AUX_ANAOVRD1(pw_idx));
+ val = intel_de_read(dev_priv, CNL_AUX_ANAOVRD1(pw_idx));
val |= CNL_AUX_ANAOVRD1_ENABLE | CNL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(CNL_AUX_ANAOVRD1(pw_idx), val);
+ intel_de_write(dev_priv, CNL_AUX_ANAOVRD1(pw_idx), val);
}
if (wait_fuses)
@@ -403,8 +408,9 @@ static void hsw_power_well_disable(struct drm_i915_private *dev_priv,
hsw_power_well_pre_disable(dev_priv,
power_well->desc->hsw.irq_pipe_mask);
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -419,14 +425,16 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val | HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val | HSW_PWR_WELL_CTL_REQ(pw_idx));
if (INTEL_GEN(dev_priv) < 12) {
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val | ICL_LANE_ENABLE_AUX);
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val | ICL_LANE_ENABLE_AUX);
}
hsw_wait_for_power_well_enable(dev_priv, power_well);
@@ -434,9 +442,9 @@ icl_combo_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
/* Display WA #1178: icl */
if (pw_idx >= ICL_PW_CTL_IDX_AUX_A && pw_idx <= ICL_PW_CTL_IDX_AUX_B &&
!intel_bios_is_port_edp(dev_priv, (enum port)phy)) {
- val = I915_READ(ICL_AUX_ANAOVRD1(pw_idx));
+ val = intel_de_read(dev_priv, ICL_AUX_ANAOVRD1(pw_idx));
val |= ICL_AUX_ANAOVRD1_ENABLE | ICL_AUX_ANAOVRD1_LDO_BYPASS;
- I915_WRITE(ICL_AUX_ANAOVRD1(pw_idx), val);
+ intel_de_write(dev_priv, ICL_AUX_ANAOVRD1(pw_idx), val);
}
}
@@ -449,13 +457,15 @@ icl_combo_phy_aux_power_well_disable(struct drm_i915_private *dev_priv,
enum phy phy = ICL_AUX_PW_TO_PHY(pw_idx);
u32 val;
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
- val = I915_READ(ICL_PORT_CL_DW12(phy));
- I915_WRITE(ICL_PORT_CL_DW12(phy), val & ~ICL_LANE_ENABLE_AUX);
+ val = intel_de_read(dev_priv, ICL_PORT_CL_DW12(phy));
+ intel_de_write(dev_priv, ICL_PORT_CL_DW12(phy),
+ val & ~ICL_LANE_ENABLE_AUX);
- val = I915_READ(regs->driver);
- I915_WRITE(regs->driver, val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
+ val = intel_de_read(dev_priv, regs->driver);
+ intel_de_write(dev_priv, regs->driver,
+ val & ~HSW_PWR_WELL_CTL_REQ(pw_idx));
hsw_wait_for_power_well_disable(dev_priv, power_well);
}
@@ -485,7 +495,7 @@ static int power_well_async_ref_count(struct drm_i915_private *dev_priv,
int refs = hweight64(power_well->desc->domains &
async_put_domains_mask(&dev_priv->power_domains));
- WARN_ON(refs > power_well->count);
+ drm_WARN_ON(&dev_priv->drm, refs > power_well->count);
return refs;
}
@@ -515,7 +525,7 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
continue;
dig_port = enc_to_dig_port(encoder);
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
continue;
if (dig_port->aux_ch != aux_ch) {
@@ -526,10 +536,10 @@ static void icl_tc_port_assert_ref_held(struct drm_i915_private *dev_priv,
break;
}
- if (WARN_ON(!dig_port))
+ if (drm_WARN_ON(&dev_priv->drm, !dig_port))
return;
- WARN_ON(!intel_tc_port_ref_held(dig_port));
+ drm_WARN_ON(&dev_priv->drm, !intel_tc_port_ref_held(dig_port));
}
#else
@@ -552,11 +562,11 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
icl_tc_port_assert_ref_held(dev_priv, power_well);
- val = I915_READ(DP_AUX_CH_CTL(aux_ch));
+ val = intel_de_read(dev_priv, DP_AUX_CH_CTL(aux_ch));
val &= ~DP_AUX_CH_CTL_TBT_IO;
if (power_well->desc->hsw.is_tc_tbt)
val |= DP_AUX_CH_CTL_TBT_IO;
- I915_WRITE(DP_AUX_CH_CTL(aux_ch), val);
+ intel_de_write(dev_priv, DP_AUX_CH_CTL(aux_ch), val);
hsw_power_well_enable(dev_priv, power_well);
@@ -564,11 +574,13 @@ icl_tc_phy_aux_power_well_enable(struct drm_i915_private *dev_priv,
enum tc_port tc_port;
tc_port = TGL_AUX_PW_TO_TC_PORT(power_well->desc->hsw.idx);
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
if (intel_de_wait_for_set(dev_priv, DKL_CMN_UC_DW_27(tc_port),
DKL_CMN_UC_DW27_UC_HEALTH, 1))
- DRM_WARN("Timeout waiting TC uC health\n");
+ drm_warn(&dev_priv->drm,
+ "Timeout waiting TC uC health\n");
}
}
@@ -596,7 +608,7 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
HSW_PWR_WELL_CTL_STATE(pw_idx);
u32 val;
- val = I915_READ(regs->driver);
+ val = intel_de_read(dev_priv, regs->driver);
/*
* On GEN9 big core due to a DMC bug the driver's request bits for PW1
@@ -606,22 +618,26 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
*/
if (IS_GEN(dev_priv, 9) && !IS_GEN9_LP(dev_priv) &&
(id == SKL_DISP_PW_1 || id == SKL_DISP_PW_MISC_IO))
- val |= I915_READ(regs->bios);
+ val |= intel_de_read(dev_priv, regs->bios);
return (val & mask) == mask;
}
static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC9),
- "DC9 already programmed to be enabled.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled to enable DC9.\n");
- WARN_ONCE(I915_READ(HSW_PWR_WELL_CTL2) &
- HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
- "Power well 2 on.\n");
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC9),
+ "DC9 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled to enable DC9.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, HSW_PWR_WELL_CTL2) &
+ HSW_PWR_WELL_CTL_REQ(SKL_PW_CTL_IDX_PW_2),
+ "Power well 2 on.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
/*
* TODO: check for the following to verify the conditions to enter DC9
@@ -634,10 +650,12 @@ static void assert_can_enable_dc9(struct drm_i915_private *dev_priv)
static void assert_can_disable_dc9(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(intel_irqs_enabled(dev_priv),
- "Interrupts not disabled yet.\n");
- WARN_ONCE(I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5,
- "DC5 still not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm, intel_irqs_enabled(dev_priv),
+ "Interrupts not disabled yet.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5,
+ "DC5 still not disabled.\n");
/*
* TODO: check for the following to verify DC9 state was indeed
@@ -655,7 +673,7 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
int rereads = 0;
u32 v;
- I915_WRITE(DC_STATE_EN, state);
+ intel_de_write(dev_priv, DC_STATE_EN, state);
/* It has been observed that disabling the dc6 state sometimes
* doesn't stick and dmc keeps returning old value. Make sure
@@ -663,10 +681,10 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
* we are confident that state is exactly what we want.
*/
do {
- v = I915_READ(DC_STATE_EN);
+ v = intel_de_read(dev_priv, DC_STATE_EN);
if (v != state) {
- I915_WRITE(DC_STATE_EN, state);
+ intel_de_write(dev_priv, DC_STATE_EN, state);
rewrites++;
rereads = 0;
} else if (rereads++ > 5) {
@@ -676,13 +694,15 @@ static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
} while (rewrites < 100);
if (v != state)
- DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
- state, v);
+ drm_err(&dev_priv->drm,
+ "Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
/* Most of the times we need one retry, avoid spam */
if (rewrites > 1)
- DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
- state, rewrites);
+ drm_dbg_kms(&dev_priv->drm,
+ "Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
}
static u32 gen9_dc_mask(struct drm_i915_private *dev_priv)
@@ -708,10 +728,11 @@ static void gen9_sanitize_dc_state(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(DC_STATE_EN) & gen9_dc_mask(dev_priv);
+ val = intel_de_read(dev_priv, DC_STATE_EN) & gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Resetting DC state tracking from %02x to %02x\n",
- dev_priv->csr.dc_state, val);
+ drm_dbg_kms(&dev_priv->drm,
+ "Resetting DC state tracking from %02x to %02x\n",
+ dev_priv->csr.dc_state, val);
dev_priv->csr.dc_state = val;
}
@@ -743,18 +764,19 @@ static void gen9_set_dc_state(struct drm_i915_private *dev_priv, u32 state)
u32 val;
u32 mask;
- if (WARN_ON_ONCE(state & ~dev_priv->csr.allowed_dc_mask))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ state & ~dev_priv->csr.allowed_dc_mask))
state &= dev_priv->csr.allowed_dc_mask;
- val = I915_READ(DC_STATE_EN);
+ val = intel_de_read(dev_priv, DC_STATE_EN);
mask = gen9_dc_mask(dev_priv);
- DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
- val & mask, state);
+ drm_dbg_kms(&dev_priv->drm, "Setting DC state from %02x to %02x\n",
+ val & mask, state);
/* Check if DMC is ignoring our DC state requests */
if ((val & mask) != dev_priv->csr.dc_state)
- DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
- dev_priv->csr.dc_state, val & mask);
+ drm_err(&dev_priv->drm, "DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
val &= ~mask;
val |= state;
@@ -791,7 +813,7 @@ sanitize_target_dc_state(struct drm_i915_private *dev_priv,
static void tgl_enable_dc3co(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_KMS("Enabling DC3CO\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC3CO\n");
gen9_set_dc_state(dev_priv, DC_STATE_EN_DC3CO);
}
@@ -799,10 +821,10 @@ static void tgl_disable_dc3co(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Disabling DC3CO\n");
- val = I915_READ(DC_STATE_EN);
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC3CO\n");
+ val = intel_de_read(dev_priv, DC_STATE_EN);
val &= ~DC_STATE_DC3CO_STATUS;
- I915_WRITE(DC_STATE_EN, val);
+ intel_de_write(dev_priv, DC_STATE_EN, val);
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
/*
* Delay of 200us DC3CO Exit time B.Spec 49196
@@ -814,7 +836,7 @@ static void bxt_enable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc9(dev_priv);
- DRM_DEBUG_KMS("Enabling DC9\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC9\n");
/*
* Power sequencer reset is not needed on
* platforms with South Display Engine on PCH,
@@ -829,7 +851,7 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
{
assert_can_disable_dc9(dev_priv);
- DRM_DEBUG_KMS("Disabling DC9\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling DC9\n");
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
@@ -838,10 +860,13 @@ static void bxt_disable_dc9(struct drm_i915_private *dev_priv)
static void assert_csr_loaded(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(!I915_READ(CSR_PROGRAM(0)),
- "CSR program storage start is NULL\n");
- WARN_ONCE(!I915_READ(CSR_SSP_BASE), "CSR SSP Base Not fine\n");
- WARN_ONCE(!I915_READ(CSR_HTP_SKL), "CSR HTP Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ !intel_de_read(dev_priv, CSR_PROGRAM(0)),
+ "CSR program storage start is NULL\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_SSP_BASE),
+ "CSR SSP Base Not fine\n");
+ drm_WARN_ONCE(&dev_priv->drm, !intel_de_read(dev_priv, CSR_HTP_SKL),
+ "CSR HTP Not fine\n");
}
static struct i915_power_well *
@@ -861,7 +886,9 @@ lookup_power_well(struct drm_i915_private *dev_priv,
* the first power well and hope the WARN gets reported so we can fix
* our driver.
*/
- WARN(1, "Power well %d not defined for this platform\n", power_well_id);
+ drm_WARN(&dev_priv->drm, 1,
+ "Power well %d not defined for this platform\n",
+ power_well_id);
return &dev_priv->power_domains.power_wells[0];
}
@@ -884,7 +911,7 @@ void intel_display_power_set_target_dc_state(struct drm_i915_private *dev_priv,
mutex_lock(&power_domains->lock);
power_well = lookup_power_well(dev_priv, SKL_DISP_DC_OFF);
- if (WARN_ON(!power_well))
+ if (drm_WARN_ON(&dev_priv->drm, !power_well))
goto unlock;
state = sanitize_target_dc_state(dev_priv, state);
@@ -912,13 +939,22 @@ unlock:
static void assert_can_enable_dc5(struct drm_i915_private *dev_priv)
{
- bool pg2_enabled = intel_display_power_well_is_enabled(dev_priv,
- SKL_DISP_PW_2);
+ enum i915_power_well_id high_pg;
- WARN_ONCE(pg2_enabled, "PG2 not disabled to enable DC5.\n");
+ /* Power wells at this level and above must be disabled for DC5 entry */
+ if (INTEL_GEN(dev_priv) >= 12)
+ high_pg = TGL_DISP_PW_3;
+ else
+ high_pg = SKL_DISP_PW_2;
+
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_display_power_well_is_enabled(dev_priv, high_pg),
+ "Power wells above platform's DC5 limit still enabled.\n");
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5),
- "DC5 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC5),
+ "DC5 already programmed to be enabled.\n");
assert_rpm_wakelock_held(&dev_priv->runtime_pm);
assert_csr_loaded(dev_priv);
@@ -928,22 +964,25 @@ static void gen9_enable_dc5(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc5(dev_priv);
- DRM_DEBUG_KMS("Enabling DC5\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC5\n");
/* Wa Display #1183: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC5);
}
static void assert_can_enable_dc6(struct drm_i915_private *dev_priv)
{
- WARN_ONCE(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
- "Backlight is not disabled.\n");
- WARN_ONCE((I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC6),
- "DC6 already programmed to be enabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ "Backlight is not disabled.\n");
+ drm_WARN_ONCE(&dev_priv->drm,
+ (intel_de_read(dev_priv, DC_STATE_EN) &
+ DC_STATE_EN_UPTO_DC6),
+ "DC6 already programmed to be enabled.\n");
assert_csr_loaded(dev_priv);
}
@@ -952,12 +991,12 @@ static void skl_enable_dc6(struct drm_i915_private *dev_priv)
{
assert_can_enable_dc6(dev_priv);
- DRM_DEBUG_KMS("Enabling DC6\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling DC6\n");
/* Wa Display #1183: skl,kbl,cfl */
if (IS_GEN9_BC(dev_priv))
- I915_WRITE(GEN8_CHICKEN_DCPR_1, I915_READ(GEN8_CHICKEN_DCPR_1) |
- SKL_SELECT_ALTERNATE_DC_EXIT);
+ intel_de_write(dev_priv, GEN8_CHICKEN_DCPR_1,
+ intel_de_read(dev_priv, GEN8_CHICKEN_DCPR_1) | SKL_SELECT_ALTERNATE_DC_EXIT);
gen9_set_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
}
@@ -968,15 +1007,15 @@ static void hsw_power_well_sync_hw(struct drm_i915_private *dev_priv,
const struct i915_power_well_regs *regs = power_well->desc->hsw.regs;
int pw_idx = power_well->desc->hsw.idx;
u32 mask = HSW_PWR_WELL_CTL_REQ(pw_idx);
- u32 bios_req = I915_READ(regs->bios);
+ u32 bios_req = intel_de_read(dev_priv, regs->bios);
/* Take over the request bit if set by BIOS. */
if (bios_req & mask) {
- u32 drv_req = I915_READ(regs->driver);
+ u32 drv_req = intel_de_read(dev_priv, regs->driver);
if (!(drv_req & mask))
- I915_WRITE(regs->driver, drv_req | mask);
- I915_WRITE(regs->bios, bios_req & ~mask);
+ intel_de_write(dev_priv, regs->driver, drv_req | mask);
+ intel_de_write(dev_priv, regs->bios, bios_req & ~mask);
}
}
@@ -1022,22 +1061,25 @@ static void bxt_verify_ddi_phy_power_wells(struct drm_i915_private *dev_priv)
static bool gen9_dc_off_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return ((I915_READ(DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
- (I915_READ(DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
+ return ((intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_DC3CO) == 0 &&
+ (intel_de_read(dev_priv, DC_STATE_EN) & DC_STATE_EN_UPTO_DC5_DC6_MASK) == 0);
}
static void gen9_assert_dbuf_enabled(struct drm_i915_private *dev_priv)
{
- u32 tmp = I915_READ(DBUF_CTL);
+ u8 hw_enabled_dbuf_slices = intel_enabled_dbuf_slices_mask(dev_priv);
+ u8 enabled_dbuf_slices = dev_priv->enabled_dbuf_slices_mask;
- WARN((tmp & (DBUF_POWER_STATE | DBUF_POWER_REQUEST)) !=
- (DBUF_POWER_STATE | DBUF_POWER_REQUEST),
- "Unexpected DBuf power power state (0x%08x)\n", tmp);
+ drm_WARN(&dev_priv->drm,
+ hw_enabled_dbuf_slices != enabled_dbuf_slices,
+ "Unexpected DBuf power power state (0x%08x, expected 0x%08x)\n",
+ hw_enabled_dbuf_slices,
+ enabled_dbuf_slices);
}
static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
{
- struct intel_cdclk_state cdclk_state = {};
+ struct intel_cdclk_config cdclk_config = {};
if (dev_priv->csr.target_dc_state == DC_STATE_EN_DC3CO) {
tgl_disable_dc3co(dev_priv);
@@ -1046,9 +1088,11 @@ static void gen9_disable_dc_states(struct drm_i915_private *dev_priv)
gen9_set_dc_state(dev_priv, DC_STATE_DISABLE);
- dev_priv->display.get_cdclk(dev_priv, &cdclk_state);
+ dev_priv->display.get_cdclk(dev_priv, &cdclk_config);
/* Can't read out voltage_level so can't use intel_cdclk_changed() */
- WARN_ON(intel_cdclk_needs_modeset(&dev_priv->cdclk.hw, &cdclk_state));
+ drm_WARN_ON(&dev_priv->drm,
+ intel_cdclk_needs_modeset(&dev_priv->cdclk.hw,
+ &cdclk_config));
gen9_assert_dbuf_enabled(dev_priv);
@@ -1108,9 +1152,9 @@ static bool i9xx_always_on_power_well_enabled(struct drm_i915_private *dev_priv,
static void i830_pipes_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- if ((I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_A);
- if ((I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
+ if ((intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE) == 0)
i830_enable_pipe(dev_priv, PIPE_B);
}
@@ -1124,8 +1168,8 @@ static void i830_pipes_power_well_disable(struct drm_i915_private *dev_priv,
static bool i830_pipes_power_well_enabled(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
- return I915_READ(PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
- I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ return intel_de_read(dev_priv, PIPECONF(PIPE_A)) & PIPECONF_ENABLE &&
+ intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
}
static void i830_pipes_power_well_sync_hw(struct drm_i915_private *dev_priv,
@@ -1163,9 +1207,10 @@ static void vlv_set_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_PWRGT_CTRL, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL));
#undef COND
@@ -1204,8 +1249,8 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
- state != PUNIT_PWRGT_PWR_GATE(pw_idx));
+ drm_WARN_ON(&dev_priv->drm, state != PUNIT_PWRGT_PWR_ON(pw_idx) &&
+ state != PUNIT_PWRGT_PWR_GATE(pw_idx));
if (state == ctrl)
enabled = true;
@@ -1214,7 +1259,7 @@ static bool vlv_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_PWRGT_CTRL) & mask;
- WARN_ON(ctrl != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl != state);
vlv_punit_put(dev_priv);
@@ -1231,21 +1276,22 @@ static void vlv_init_display_clock_gating(struct drm_i915_private *dev_priv)
* (and never recovering) in this case. intel_dsi_post_disable() will
* clear it when we turn off the display.
*/
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val &= DPOUNIT_CLOCK_GATE_DISABLE;
val |= VRHUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
/*
* Disable trickle feed and enable pnd deadline calculation
*/
- I915_WRITE(MI_ARB_VLV, MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
- I915_WRITE(CBR1_VLV, 0);
+ intel_de_write(dev_priv, MI_ARB_VLV,
+ MI_ARB_DISPLAY_TRICKLE_FEED_DISABLE);
+ intel_de_write(dev_priv, CBR1_VLV, 0);
- WARN_ON(dev_priv->rawclk_freq == 0);
-
- I915_WRITE(RAWCLK_FREQ_VLV,
- DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 1000));
+ drm_WARN_ON(&dev_priv->drm, RUNTIME_INFO(dev_priv)->rawclk_freq == 0);
+ intel_de_write(dev_priv, RAWCLK_FREQ_VLV,
+ DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq,
+ 1000));
}
static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
@@ -1262,13 +1308,13 @@ static void vlv_display_power_well_init(struct drm_i915_private *dev_priv)
* CHV DPLL B/C have some issues if VGA mode is enabled.
*/
for_each_pipe(dev_priv, pipe) {
- u32 val = I915_READ(DPLL(pipe));
+ u32 val = intel_de_read(dev_priv, DPLL(pipe));
val |= DPLL_REF_CLK_ENABLE_VLV | DPLL_VGA_MODE_DIS;
if (pipe != PIPE_A)
val |= DPLL_INTEGRATED_CRI_CLK_VLV;
- I915_WRITE(DPLL(pipe), val);
+ intel_de_write(dev_priv, DPLL(pipe), val);
}
vlv_init_display_clock_gating(dev_priv);
@@ -1348,7 +1394,8 @@ static void vlv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
* both PLLs disabled, or we risk losing DPIO and PLL
* synchronization.
*/
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) | DPIO_CMNRST);
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) | DPIO_CMNRST);
}
static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
@@ -1360,7 +1407,8 @@ static void vlv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
assert_pll_disabled(dev_priv, pipe);
/* Assert common reset */
- I915_WRITE(DPIO_CTL, I915_READ(DPIO_CTL) & ~DPIO_CMNRST);
+ intel_de_write(dev_priv, DPIO_CTL,
+ intel_de_read(dev_priv, DPIO_CTL) & ~DPIO_CMNRST);
vlv_set_power_well(dev_priv, power_well, false);
}
@@ -1422,7 +1470,7 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (BITS_SET(phy_control,
PHY_CH_POWER_DOWN_OVRD(0xf, DPIO_PHY0, DPIO_CH1)) &&
- (I915_READ(DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
+ (intel_de_read(dev_priv, DPLL(PIPE_B)) & DPLL_VCO_ENABLE) == 0)
phy_status |= PHY_STATUS_CMN_LDO(DPIO_PHY0, DPIO_CH1);
if (BITS_SET(phy_control,
@@ -1467,9 +1515,10 @@ static void assert_chv_phy_status(struct drm_i915_private *dev_priv)
*/
if (intel_de_wait_for_register(dev_priv, DISPLAY_PHY_STATUS,
phy_status_mask, phy_status, 10))
- DRM_ERROR("Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
- I915_READ(DISPLAY_PHY_STATUS) & phy_status_mask,
- phy_status, dev_priv->chv_phy_control);
+ drm_err(&dev_priv->drm,
+ "Unexpected PHY_STATUS 0x%08x, expected 0x%08x (PHY_CONTROL=0x%08x)\n",
+ intel_de_read(dev_priv, DISPLAY_PHY_STATUS) & phy_status_mask,
+ phy_status, dev_priv->chv_phy_control);
}
#undef BITS_SET
@@ -1481,8 +1530,9 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
enum pipe pipe;
u32 tmp;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
pipe = PIPE_A;
@@ -1499,7 +1549,8 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
/* Poll for phypwrgood signal */
if (intel_de_wait_for_set(dev_priv, DISPLAY_PHY_STATUS,
PHY_POWERGOOD(phy), 1))
- DRM_ERROR("Display PHY %d is not power up\n", phy);
+ drm_err(&dev_priv->drm, "Display PHY %d is not power up\n",
+ phy);
vlv_dpio_get(dev_priv);
@@ -1527,10 +1578,12 @@ static void chv_dpio_cmn_power_well_enable(struct drm_i915_private *dev_priv,
vlv_dpio_put(dev_priv);
dev_priv->chv_phy_control |= PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Enabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
}
@@ -1540,8 +1593,9 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
{
enum dpio_phy phy;
- WARN_ON_ONCE(power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
- power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
+ drm_WARN_ON_ONCE(&dev_priv->drm,
+ power_well->desc->id != VLV_DISP_PW_DPIO_CMN_BC &&
+ power_well->desc->id != CHV_DISP_PW_DPIO_CMN_D);
if (power_well->desc->id == VLV_DISP_PW_DPIO_CMN_BC) {
phy = DPIO_PHY0;
@@ -1553,12 +1607,14 @@ static void chv_dpio_cmn_power_well_disable(struct drm_i915_private *dev_priv,
}
dev_priv->chv_phy_control &= ~PHY_COM_LANE_RESET_DEASSERT(phy);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
vlv_set_power_well(dev_priv, power_well, false);
- DRM_DEBUG_KMS("Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
- phy, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Disabled DPIO PHY%d (PHY_CONTROL=0x%08x)\n",
+ phy, dev_priv->chv_phy_control);
/* PHY is fully reset now, so we can enable the PHY state asserts */
dev_priv->chv_phy_assert[phy] = true;
@@ -1621,11 +1677,13 @@ static void assert_chv_phy_powergate(struct drm_i915_private *dev_priv, enum dpi
actual = val >> DPIO_ANYDL_POWERDOWN_SHIFT_CH1;
actual &= DPIO_ALLDL_POWERDOWN | DPIO_ANYDL_POWERDOWN;
- WARN(actual != expected,
- "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
- !!(actual & DPIO_ALLDL_POWERDOWN), !!(actual & DPIO_ANYDL_POWERDOWN),
- !!(expected & DPIO_ALLDL_POWERDOWN), !!(expected & DPIO_ANYDL_POWERDOWN),
- reg, val);
+ drm_WARN(&dev_priv->drm, actual != expected,
+ "Unexpected DPIO lane power down: all %d, any %d. Expected: all %d, any %d. (0x%x = 0x%08x)\n",
+ !!(actual & DPIO_ALLDL_POWERDOWN),
+ !!(actual & DPIO_ANYDL_POWERDOWN),
+ !!(expected & DPIO_ALLDL_POWERDOWN),
+ !!(expected & DPIO_ANYDL_POWERDOWN),
+ reg, val);
}
bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
@@ -1646,10 +1704,12 @@ bool chv_phy_powergate_ch(struct drm_i915_private *dev_priv, enum dpio_phy phy,
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
- phy, ch, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d (DPIO_PHY_CONTROL=0x%08x)\n",
+ phy, ch, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1677,10 +1737,12 @@ void chv_phy_powergate_lanes(struct intel_encoder *encoder,
else
dev_priv->chv_phy_control &= ~PHY_CH_POWER_DOWN_OVRD_EN(phy, ch);
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
- phy, ch, mask, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm,
+ "Power gating DPIO PHY%d CH%d lanes 0x%x (PHY_CONTROL=0x%08x)\n",
+ phy, ch, mask, dev_priv->chv_phy_control);
assert_chv_phy_status(dev_priv);
@@ -1703,7 +1765,8 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* We only ever set the power-on and power-gate states, anything
* else is unexpected.
*/
- WARN_ON(state != DP_SSS_PWR_ON(pipe) && state != DP_SSS_PWR_GATE(pipe));
+ drm_WARN_ON(&dev_priv->drm, state != DP_SSS_PWR_ON(pipe) &&
+ state != DP_SSS_PWR_GATE(pipe));
enabled = state == DP_SSS_PWR_ON(pipe);
/*
@@ -1711,7 +1774,7 @@ static bool chv_pipe_power_well_enabled(struct drm_i915_private *dev_priv,
* is poking at the power controls too.
*/
ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
- WARN_ON(ctrl << 16 != state);
+ drm_WARN_ON(&dev_priv->drm, ctrl << 16 != state);
vlv_punit_put(dev_priv);
@@ -1742,9 +1805,10 @@ static void chv_set_pipe_power_well(struct drm_i915_private *dev_priv,
vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
if (wait_for(COND, 100))
- DRM_ERROR("timeout setting power well state %08x (%08x)\n",
- state,
- vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
+ drm_err(&dev_priv->drm,
+ "timeout setting power well state %08x (%08x)\n",
+ state,
+ vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
#undef COND
@@ -1752,6 +1816,13 @@ out:
vlv_punit_put(dev_priv);
}
+static void chv_pipe_power_well_sync_hw(struct drm_i915_private *dev_priv,
+ struct i915_power_well *power_well)
+{
+ intel_de_write(dev_priv, DISPLAY_PHY_CONTROL,
+ dev_priv->chv_phy_control);
+}
+
static void chv_pipe_power_well_enable(struct drm_i915_private *dev_priv,
struct i915_power_well *power_well)
{
@@ -1981,12 +2052,13 @@ __intel_display_power_put_domain(struct drm_i915_private *dev_priv,
power_domains = &dev_priv->power_domains;
- WARN(!power_domains->domain_use_count[domain],
- "Use count on domain %s is already zero\n",
- name);
- WARN(async_put_domains_mask(power_domains) & BIT_ULL(domain),
- "Async disabling of domain %s is pending\n",
- name);
+ drm_WARN(&dev_priv->drm, !power_domains->domain_use_count[domain],
+ "Use count on domain %s is already zero\n",
+ name);
+ drm_WARN(&dev_priv->drm,
+ async_put_domains_mask(power_domains) & BIT_ULL(domain),
+ "Async disabling of domain %s is pending\n",
+ name);
power_domains->domain_use_count[domain]--;
@@ -2131,7 +2203,7 @@ void __intel_display_power_put_async(struct drm_i915_private *i915,
goto out_verify;
}
- WARN_ON(power_domains->domain_use_count[domain] != 1);
+ drm_WARN_ON(&i915->drm, power_domains->domain_use_count[domain] != 1);
/* Let a pending work requeue itself or queue a new one. */
if (power_domains->async_put_wakeref) {
@@ -2206,7 +2278,7 @@ intel_display_power_flush_work_sync(struct drm_i915_private *i915)
verify_async_put_domains_state(power_domains);
- WARN_ON(power_domains->async_put_wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->async_put_wakeref);
}
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
@@ -2674,7 +2746,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
BIT_ULL(POWER_DOMAIN_INIT))
#define TGL_DISPLAY_DC_OFF_POWER_DOMAINS ( \
- TGL_PW_2_POWER_DOMAINS | \
+ TGL_PW_3_POWER_DOMAINS | \
BIT_ULL(POWER_DOMAIN_MODESET) | \
BIT_ULL(POWER_DOMAIN_AUX_A) | \
BIT_ULL(POWER_DOMAIN_AUX_B) | \
@@ -2734,7 +2806,7 @@ static const struct i915_power_well_ops i9xx_always_on_power_well_ops = {
};
static const struct i915_power_well_ops chv_pipe_power_well_ops = {
- .sync_hw = i9xx_power_well_sync_hw_noop,
+ .sync_hw = chv_pipe_power_well_sync_hw,
.enable = chv_pipe_power_well_enable,
.disable = chv_pipe_power_well_disable,
.is_enabled = chv_pipe_power_well_enabled,
@@ -3870,7 +3942,7 @@ static const struct i915_power_well_desc tgl_power_wells[] = {
.name = "power well 3",
.domains = TGL_PW_3_POWER_DOMAINS,
.ops = &hsw_power_well_ops,
- .id = DISP_PW_ID_NONE,
+ .id = TGL_DISP_PW_3,
{
.hsw.regs = &hsw_power_well_regs,
.hsw.idx = ICL_PW_CTL_IDX_PW_3,
@@ -4204,11 +4276,13 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
} else if (enable_dc == -1) {
requested_dc = max_dc;
} else if (enable_dc > max_dc && enable_dc <= 4) {
- DRM_DEBUG_KMS("Adjusting requested max DC state (%d->%d)\n",
- enable_dc, max_dc);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adjusting requested max DC state (%d->%d)\n",
+ enable_dc, max_dc);
requested_dc = max_dc;
} else {
- DRM_ERROR("Unexpected value for enable_dc (%d)\n", enable_dc);
+ drm_err(&dev_priv->drm,
+ "Unexpected value for enable_dc (%d)\n", enable_dc);
requested_dc = max_dc;
}
@@ -4227,7 +4301,7 @@ static u32 get_allowed_dc_mask(const struct drm_i915_private *dev_priv,
break;
}
- DRM_DEBUG_KMS("Allowed DC state mask %02x\n", mask);
+ drm_dbg_kms(&dev_priv->drm, "Allowed DC state mask %02x\n", mask);
return mask;
}
@@ -4371,16 +4445,16 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
{
u32 val, status;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
val = enable ? (val | DBUF_POWER_REQUEST) : (val & ~DBUF_POWER_REQUEST);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
udelay(10);
- status = I915_READ(reg) & DBUF_POWER_STATE;
+ status = intel_de_read(dev_priv, reg) & DBUF_POWER_STATE;
if ((enable && !status) || (!enable && status)) {
- DRM_ERROR("DBus power %s timeout!\n",
- enable ? "enable" : "disable");
+ drm_err(&dev_priv->drm, "DBus power %s timeout!\n",
+ enable ? "enable" : "disable");
return false;
}
return true;
@@ -4388,97 +4462,85 @@ bool intel_dbuf_slice_set(struct drm_i915_private *dev_priv,
static void gen9_dbuf_enable(struct drm_i915_private *dev_priv)
{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, true);
+ icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));
}
static void gen9_dbuf_disable(struct drm_i915_private *dev_priv)
{
- intel_dbuf_slice_set(dev_priv, DBUF_CTL, false);
-}
-
-static u8 intel_dbuf_max_slices(struct drm_i915_private *dev_priv)
-{
- if (INTEL_GEN(dev_priv) < 11)
- return 1;
- return 2;
+ icl_dbuf_slices_update(dev_priv, 0);
}
void icl_dbuf_slices_update(struct drm_i915_private *dev_priv,
u8 req_slices)
{
- const u8 hw_enabled_slices = dev_priv->wm.skl_hw.ddb.enabled_slices;
- bool ret;
+ int i;
+ int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
- if (req_slices > intel_dbuf_max_slices(dev_priv)) {
- DRM_ERROR("Invalid number of dbuf slices requested\n");
- return;
- }
+ drm_WARN(&dev_priv->drm, hweight8(req_slices) > max_slices,
+ "Invalid number of dbuf slices requested\n");
- if (req_slices == hw_enabled_slices || req_slices == 0)
- return;
+ DRM_DEBUG_KMS("Updating dbuf slices to 0x%x\n", req_slices);
- if (req_slices > hw_enabled_slices)
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, true);
- else
- ret = intel_dbuf_slice_set(dev_priv, DBUF_CTL_S2, false);
+ /*
+ * This might run in parallel with gen9_dc_off_power_well_enable
+ * being called from intel_dp_detect, for instance. That would
+ * trigger the assertion in a race: gen9_assert_dbuf_enabled could
+ * preempt us after the registers were updated but before dev_priv
+ * was brought in sync.
+ */
+ mutex_lock(&power_domains->lock);
+
+ for (i = 0; i < max_slices; i++) {
+ intel_dbuf_slice_set(dev_priv,
+ DBUF_CTL_S(i),
+ (req_slices & BIT(i)) != 0);
+ }
- if (ret)
- dev_priv->wm.skl_hw.ddb.enabled_slices = req_slices;
+ dev_priv->enabled_dbuf_slices_mask = req_slices;
+
+ mutex_unlock(&power_domains->lock);
}
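After this rework, callers describe the DBuf configuration as a bitmask of slices instead of a slice count, and the update is serialized against the power-domain code via power_domains->lock. A hedged usage sketch (DBUF_S1/DBUF_S2 are the driver's slice enum values; the call sites are illustrative):

	/* Power up both slices on a two-slice platform ... */
	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1) | BIT(DBUF_S2));

	/* ... and later fall back to the minimal single-slice config */
	icl_dbuf_slices_update(dev_priv, BIT(DBUF_S1));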
static void icl_dbuf_enable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) | DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) | DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if (!(I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power enable timeout\n");
- else
- /*
- * FIXME: for now pretend that we only have 1 slice, see
- * intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ skl_ddb_get_hw_state(dev_priv);
+ /*
+ * Just power up at least one slice; we will figure out
+ * later which slices we have and which we need.
+ */
+ icl_dbuf_slices_update(dev_priv, dev_priv->enabled_dbuf_slices_mask |
+ BIT(DBUF_S1));
}
static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
{
- I915_WRITE(DBUF_CTL_S1, I915_READ(DBUF_CTL_S1) & ~DBUF_POWER_REQUEST);
- I915_WRITE(DBUF_CTL_S2, I915_READ(DBUF_CTL_S2) & ~DBUF_POWER_REQUEST);
- POSTING_READ(DBUF_CTL_S2);
-
- udelay(10);
-
- if ((I915_READ(DBUF_CTL_S1) & DBUF_POWER_STATE) ||
- (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
- DRM_ERROR("DBuf power disable timeout!\n");
- else
- /*
- * FIXME: for now pretend that the first slice is always
- * enabled, see intel_enabled_dbuf_slices_num().
- */
- dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
+ icl_dbuf_slices_update(dev_priv, 0);
}
static void icl_mbus_init(struct drm_i915_private *dev_priv)
{
- u32 val;
+ u32 mask, val;
+ mask = MBUS_ABOX_BT_CREDIT_POOL1_MASK |
+ MBUS_ABOX_BT_CREDIT_POOL2_MASK |
+ MBUS_ABOX_B_CREDIT_MASK |
+ MBUS_ABOX_BW_CREDIT_MASK;
val = MBUS_ABOX_BT_CREDIT_POOL1(16) |
- MBUS_ABOX_BT_CREDIT_POOL2(16) |
- MBUS_ABOX_B_CREDIT(1) |
- MBUS_ABOX_BW_CREDIT(1);
+ MBUS_ABOX_BT_CREDIT_POOL2(16) |
+ MBUS_ABOX_B_CREDIT(1) |
+ MBUS_ABOX_BW_CREDIT(1);
- I915_WRITE(MBUS_ABOX_CTL, val);
+ intel_de_rmw(dev_priv, MBUS_ABOX_CTL, mask, val);
+ if (INTEL_GEN(dev_priv) >= 12) {
+ intel_de_rmw(dev_priv, MBUS_ABOX1_CTL, mask, val);
+ intel_de_rmw(dev_priv, MBUS_ABOX2_CTL, mask, val);
+ }
}
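intel_de_rmw() condenses the read-modify-write sequences the old code spelled out by hand. Roughly — a sketch, not the exact helper from intel_de.h, which may also skip redundant writes:

	static u32 example_de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
				  u32 clear, u32 set)
	{
		u32 old = intel_de_read(i915, reg);

		/* drop the 'clear' bits, then apply 'set' */
		intel_de_write(i915, reg, (old & ~clear) | set);
		return old;
	}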
static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
{
- u32 val = I915_READ(LCPLL_CTL);
+ u32 val = intel_de_read(dev_priv, LCPLL_CTL);
/*
* The LCPLL register should be turned on by the BIOS. For now
@@ -4487,13 +4549,13 @@ static void hsw_assert_cdclk(struct drm_i915_private *dev_priv)
*/
if (val & LCPLL_CD_SOURCE_FCLK)
- DRM_ERROR("CDCLK source is not LCPLL\n");
+ drm_err(&dev_priv->drm, "CDCLK source is not LCPLL\n");
if (val & LCPLL_PLL_DISABLE)
- DRM_ERROR("LCPLL is disabled\n");
+ drm_err(&dev_priv->drm, "LCPLL is disabled\n");
if ((val & LCPLL_REF_MASK) != LCPLL_REF_NON_SSC)
- DRM_ERROR("LCPLL not using non-SSC reference\n");
+ drm_err(&dev_priv->drm, "LCPLL not using non-SSC reference\n");
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -4505,26 +4567,26 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
I915_STATE_WARN(crtc->active, "CRTC for pipe %c enabled\n",
pipe_name(crtc->pipe));
- I915_STATE_WARN(I915_READ(HSW_PWR_WELL_CTL2),
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_PWR_WELL_CTL2),
"Display power well on\n");
- I915_STATE_WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, SPLL_CTL) & SPLL_PLL_ENABLE,
"SPLL enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(0)) & WRPLL_PLL_ENABLE,
"WRPLL1 enabled\n");
- I915_STATE_WARN(I915_READ(WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, WRPLL_CTL(1)) & WRPLL_PLL_ENABLE,
"WRPLL2 enabled\n");
- I915_STATE_WARN(I915_READ(PP_STATUS(0)) & PP_ON,
+ I915_STATE_WARN(intel_de_read(dev_priv, PP_STATUS(0)) & PP_ON,
"Panel power on\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_CPU_CTL2) & BLM_PWM_ENABLE,
"CPU PWM1 enabled\n");
if (IS_HASWELL(dev_priv))
- I915_STATE_WARN(I915_READ(HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, HSW_BLC_PWM2_CTL) & BLM_PWM_ENABLE,
"CPU PWM2 enabled\n");
- I915_STATE_WARN(I915_READ(BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, BLC_PWM_PCH_CTL1) & BLM_PCH_PWM_ENABLE,
"PCH PWM1 enabled\n");
- I915_STATE_WARN(I915_READ(UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, UTIL_PIN_CTL) & UTIL_PIN_ENABLE,
"Utility pin enabled\n");
- I915_STATE_WARN(I915_READ(PCH_GTC_CTL) & PCH_GTC_ENABLE,
+ I915_STATE_WARN(intel_de_read(dev_priv, PCH_GTC_CTL) & PCH_GTC_ENABLE,
"PCH GTC enabled\n");
/*
@@ -4539,9 +4601,9 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv)
{
if (IS_HASWELL(dev_priv))
- return I915_READ(D_COMP_HSW);
+ return intel_de_read(dev_priv, D_COMP_HSW);
else
- return I915_READ(D_COMP_BDW);
+ return intel_de_read(dev_priv, D_COMP_BDW);
}
static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
@@ -4549,10 +4611,11 @@ static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val)
if (IS_HASWELL(dev_priv)) {
if (sandybridge_pcode_write(dev_priv,
GEN6_PCODE_WRITE_D_COMP, val))
- DRM_DEBUG_KMS("Failed to write to D_COMP\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to write to D_COMP\n");
} else {
- I915_WRITE(D_COMP_BDW, val);
- POSTING_READ(D_COMP_BDW);
+ intel_de_write(dev_priv, D_COMP_BDW, val);
+ intel_de_posting_read(dev_priv, D_COMP_BDW);
}
}
@@ -4571,25 +4634,25 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
assert_can_disable_lcpll(dev_priv);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
if (switch_to_fclk) {
val |= LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us(I915_READ(LCPLL_CTL) &
+ if (wait_for_us(intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE, 1))
- DRM_ERROR("Switching to FCLK failed\n");
+ drm_err(&dev_priv->drm, "Switching to FCLK failed\n");
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
}
val |= LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
if (intel_de_wait_for_clear(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 1))
- DRM_ERROR("LCPLL still locked\n");
+ drm_err(&dev_priv->drm, "LCPLL still locked\n");
val = hsw_read_dcomp(dev_priv);
val |= D_COMP_COMP_DISABLE;
@@ -4598,13 +4661,13 @@ static void hsw_disable_lcpll(struct drm_i915_private *dev_priv,
if (wait_for((hsw_read_dcomp(dev_priv) &
D_COMP_RCOMP_IN_PROGRESS) == 0, 1))
- DRM_ERROR("D_COMP RCOMP still in progress\n");
+ drm_err(&dev_priv->drm, "D_COMP RCOMP still in progress\n");
if (allow_power_down) {
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val |= LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
}
}
@@ -4616,7 +4679,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
{
u32 val;
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
if ((val & (LCPLL_PLL_LOCK | LCPLL_PLL_DISABLE | LCPLL_CD_SOURCE_FCLK |
LCPLL_POWER_DOWN_ALLOW)) == LCPLL_PLL_LOCK)
@@ -4630,8 +4693,8 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
if (val & LCPLL_POWER_DOWN_ALLOW) {
val &= ~LCPLL_POWER_DOWN_ALLOW;
- I915_WRITE(LCPLL_CTL, val);
- POSTING_READ(LCPLL_CTL);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
+ intel_de_posting_read(dev_priv, LCPLL_CTL);
}
val = hsw_read_dcomp(dev_priv);
@@ -4639,27 +4702,28 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv)
val &= ~D_COMP_COMP_DISABLE;
hsw_write_dcomp(dev_priv, val);
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_PLL_DISABLE;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
if (intel_de_wait_for_set(dev_priv, LCPLL_CTL, LCPLL_PLL_LOCK, 5))
- DRM_ERROR("LCPLL not locked yet\n");
+ drm_err(&dev_priv->drm, "LCPLL not locked yet\n");
if (val & LCPLL_CD_SOURCE_FCLK) {
- val = I915_READ(LCPLL_CTL);
+ val = intel_de_read(dev_priv, LCPLL_CTL);
val &= ~LCPLL_CD_SOURCE_FCLK;
- I915_WRITE(LCPLL_CTL, val);
+ intel_de_write(dev_priv, LCPLL_CTL, val);
- if (wait_for_us((I915_READ(LCPLL_CTL) &
+ if (wait_for_us((intel_de_read(dev_priv, LCPLL_CTL) &
LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1))
- DRM_ERROR("Switching back to LCPLL failed\n");
+ drm_err(&dev_priv->drm,
+ "Switching back to LCPLL failed\n");
}
intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
intel_update_cdclk(dev_priv);
- intel_dump_cdclk_state(&dev_priv->cdclk.hw, "Current CDCLK");
+ intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK");
}
/*
@@ -4689,12 +4753,12 @@ static void hsw_enable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Enabling package C8+\n");
+ drm_dbg_kms(&dev_priv->drm, "Enabling package C8+\n");
if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val &= ~PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
lpt_disable_clkout_dp(dev_priv);
@@ -4705,15 +4769,15 @@ static void hsw_disable_pc8(struct drm_i915_private *dev_priv)
{
u32 val;
- DRM_DEBUG_KMS("Disabling package C8+\n");
+ drm_dbg_kms(&dev_priv->drm, "Disabling package C8+\n");
hsw_restore_lcpll(dev_priv);
intel_init_pch_refclk(dev_priv);
if (HAS_PCH_LPT_LP(dev_priv)) {
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
val |= PCH_LP_PARTITION_LEVEL_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
}
@@ -4731,14 +4795,14 @@ static void intel_pch_reset_handshake(struct drm_i915_private *dev_priv,
reset_bits = RESET_PCH_HANDSHAKE_ENABLE;
}
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (enable)
val |= reset_bits;
else
val &= ~reset_bits;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
static void skl_display_core_init(struct drm_i915_private *dev_priv,
@@ -4763,7 +4827,7 @@ static void skl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -4780,7 +4844,7 @@ static void skl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
/* disable PG1 and Misc I/O */
@@ -4824,7 +4888,7 @@ static void bxt_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
gen9_dbuf_enable(dev_priv);
@@ -4841,7 +4905,7 @@ static void bxt_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/* The spec doesn't call for removing the reset handshake flag */
@@ -4883,7 +4947,7 @@ static void cnl_display_core_init(struct drm_i915_private *dev_priv, bool resume
mutex_unlock(&power_domains->lock);
/* 5. Enable CD clock */
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
/* 6. Enable DBUF */
gen9_dbuf_enable(dev_priv);
@@ -4905,7 +4969,7 @@ static void cnl_display_core_uninit(struct drm_i915_private *dev_priv)
gen9_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -4964,12 +5028,23 @@ static void tgl_bw_buddy_init(struct drm_i915_private *dev_priv)
break;
if (table[i].page_mask == 0) {
- DRM_DEBUG_DRIVER("Unknown memory configuration; disabling address buddy logic.\n");
- I915_WRITE(BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
- I915_WRITE(BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
+ drm_dbg(&dev_priv->drm,
+ "Unknown memory configuration; disabling address buddy logic.\n");
+ intel_de_write(dev_priv, BW_BUDDY1_CTL, BW_BUDDY_DISABLE);
+ intel_de_write(dev_priv, BW_BUDDY2_CTL, BW_BUDDY_DISABLE);
} else {
- I915_WRITE(BW_BUDDY1_PAGE_MASK, table[i].page_mask);
- I915_WRITE(BW_BUDDY2_PAGE_MASK, table[i].page_mask);
+ intel_de_write(dev_priv, BW_BUDDY1_PAGE_MASK,
+ table[i].page_mask);
+ intel_de_write(dev_priv, BW_BUDDY2_PAGE_MASK,
+ table[i].page_mask);
+
+ /* Wa_22010178259:tgl */
+ intel_de_rmw(dev_priv, BW_BUDDY1_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
+ intel_de_rmw(dev_priv, BW_BUDDY2_CTL,
+ BW_BUDDY_TLB_REQ_TIMER_MASK,
+ REG_FIELD_PREP(BW_BUDDY_TLB_REQ_TIMER_MASK, 0x8));
}
}
@@ -4997,7 +5072,7 @@ static void icl_display_core_init(struct drm_i915_private *dev_priv,
mutex_unlock(&power_domains->lock);
/* 4. Enable CDCLK. */
- intel_cdclk_init(dev_priv);
+ intel_cdclk_init_hw(dev_priv);
/* 5. Enable DBUF. */
icl_dbuf_enable(dev_priv);
@@ -5026,7 +5101,7 @@ static void icl_display_core_uninit(struct drm_i915_private *dev_priv)
icl_dbuf_disable(dev_priv);
/* 3. Disable CD clock */
- intel_cdclk_uninit(dev_priv);
+ intel_cdclk_uninit_hw(dev_priv);
/*
* 4. Disable Power Well 1 (PG1).
@@ -5071,7 +5146,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
* current lane status.
*/
if (cmn_bc->desc->ops->is_enabled(dev_priv, cmn_bc)) {
- u32 status = I915_READ(DPLL(PIPE_A));
+ u32 status = intel_de_read(dev_priv, DPLL(PIPE_A));
unsigned int mask;
mask = status & DPLL_PORTB_READY_MASK;
@@ -5102,7 +5177,7 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
}
if (cmn_d->desc->ops->is_enabled(dev_priv, cmn_d)) {
- u32 status = I915_READ(DPIO_PHY_STATUS);
+ u32 status = intel_de_read(dev_priv, DPIO_PHY_STATUS);
unsigned int mask;
mask = status & DPLL_PORTD_READY_MASK;
@@ -5123,10 +5198,10 @@ static void chv_phy_control_init(struct drm_i915_private *dev_priv)
dev_priv->chv_phy_assert[DPIO_PHY1] = true;
}
- I915_WRITE(DISPLAY_PHY_CONTROL, dev_priv->chv_phy_control);
+ drm_dbg_kms(&dev_priv->drm, "Initial PHY_CONTROL=0x%08x\n",
+ dev_priv->chv_phy_control);
- DRM_DEBUG_KMS("Initial PHY_CONTROL=0x%08x\n",
- dev_priv->chv_phy_control);
+ /* Defer applying the initial phy_control until the power well is enabled */
}
static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
@@ -5139,10 +5214,10 @@ static void vlv_cmnlane_wa(struct drm_i915_private *dev_priv)
/* If the display might be already active skip this */
if (cmn->desc->ops->is_enabled(dev_priv, cmn) &&
disp2d->desc->ops->is_enabled(dev_priv, disp2d) &&
- I915_READ(DPIO_CTL) & DPIO_CMNRST)
+ intel_de_read(dev_priv, DPIO_CTL) & DPIO_CMNRST)
return;
- DRM_DEBUG_KMS("toggling display PHY side reset\n");
+ drm_dbg_kms(&dev_priv->drm, "toggling display PHY side reset\n");
/* cmnlane needs DPLL registers */
disp2d->desc->ops->enable(dev_priv, disp2d);
@@ -5170,8 +5245,9 @@ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0
static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
{
- WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
- "VED not power gated\n");
+ drm_WARN(&dev_priv->drm,
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+ "VED not power gated\n");
}
static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
@@ -5182,9 +5258,9 @@ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
{}
};
- WARN(!pci_dev_present(isp_ids) &&
- !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
- "ISP not power gated\n");
+ drm_WARN(&dev_priv->drm, !pci_dev_present(isp_ids) &&
+ !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+ "ISP not power gated\n");
}
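Nearly every hunk in this patch is the same mechanical conversion: I915_READ/I915_WRITE/POSTING_READ become intel_de_read/intel_de_write/intel_de_posting_read, and DRM_DEBUG_KMS/DRM_DEBUG_DRIVER/DRM_ERROR/WARN become drm_dbg_kms/drm_dbg/drm_err/drm_WARN, each now taking the drm device explicitly. The device-aware forms tag every message with the originating device, which is what disambiguates logs on multi-GPU systems. A toy sketch of that tagging; the field and function names here are invented for illustration:

#include <stdio.h>

struct drm_device { const char *unique; }; /* e.g. a PCI address */

/* stand-in for drm_dbg_kms(): the message is prefixed with the device */
static void dbg_kms(const struct drm_device *drm, const char *msg)
{
	printf("i915 %s: [drm] %s\n", drm->unique, msg);
}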
static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
@@ -5211,9 +5287,6 @@ void intel_power_domains_init_hw(struct drm_i915_private *i915, bool resume)
power_domains->initializing = true;
- /* Must happen before power domain init on VLV/CHV */
- intel_update_rawclk(i915);
-
if (INTEL_GEN(i915) >= 11) {
icl_display_core_init(i915, resume);
} else if (IS_CANNONLAKE(i915)) {
@@ -5317,7 +5390,7 @@ void intel_power_domains_disable(struct drm_i915_private *i915)
{
struct i915_power_domains *power_domains = &i915->power_domains;
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
@@ -5399,7 +5472,7 @@ void intel_power_domains_resume(struct drm_i915_private *i915)
intel_power_domains_init_hw(i915, true);
power_domains->display_core_suspended = false;
} else {
- WARN_ON(power_domains->wakeref);
+ drm_WARN_ON(&i915->drm, power_domains->wakeref);
power_domains->wakeref =
intel_display_power_get(i915, POWER_DOMAIN_INIT);
}
@@ -5417,13 +5490,13 @@ static void intel_power_domains_dump_info(struct drm_i915_private *i915)
for_each_power_well(i915, power_well) {
enum intel_display_power_domain domain;
- DRM_DEBUG_DRIVER("%-25s %d\n",
- power_well->desc->name, power_well->count);
+ drm_dbg(&i915->drm, "%-25s %d\n",
+ power_well->desc->name, power_well->count);
for_each_power_domain(domain, power_well->desc->domains)
- DRM_DEBUG_DRIVER(" %-23s %d\n",
- intel_display_power_domain_str(domain),
- power_domains->domain_use_count[domain]);
+ drm_dbg(&i915->drm, " %-23s %d\n",
+ intel_display_power_domain_str(domain),
+ power_domains->domain_use_count[domain]);
}
}
@@ -5456,19 +5529,21 @@ static void intel_power_domains_verify_state(struct drm_i915_private *i915)
enabled = power_well->desc->ops->is_enabled(i915, power_well);
if ((power_well->count || power_well->desc->always_on) !=
enabled)
- DRM_ERROR("power well %s state mismatch (refcount %d/enabled %d)",
- power_well->desc->name,
- power_well->count, enabled);
+ drm_err(&i915->drm,
+ "power well %s state mismatch (refcount %d/enabled %d)",
+ power_well->desc->name,
+ power_well->count, enabled);
domains_count = 0;
for_each_power_domain(domain, power_well->desc->domains)
domains_count += power_domains->domain_use_count[domain];
if (power_well->count != domains_count) {
- DRM_ERROR("power well %s refcount/domain refcount mismatch "
- "(refcount %d/domains refcount %d)\n",
- power_well->desc->name, power_well->count,
- domains_count);
+ drm_err(&i915->drm,
+ "power well %s refcount/domain refcount mismatch "
+ "(refcount %d/domains refcount %d)\n",
+ power_well->desc->name, power_well->count,
+ domains_count);
dump_domain_info = true;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_display_power.h b/drivers/gpu/drm/i915/display/intel_display_power.h
index 2608a65af7fa..da64a5edae7a 100644
--- a/drivers/gpu/drm/i915/display/intel_display_power.h
+++ b/drivers/gpu/drm/i915/display/intel_display_power.h
@@ -100,6 +100,7 @@ enum i915_power_well_id {
SKL_DISP_PW_MISC_IO,
SKL_DISP_PW_1,
SKL_DISP_PW_2,
+ TGL_DISP_PW_3,
SKL_DISP_DC_OFF,
};
@@ -307,6 +308,11 @@ intel_display_power_put_async(struct drm_i915_private *i915,
}
#endif
+enum dbuf_slice {
+ DBUF_S1,
+ DBUF_S2,
+};
+
#define with_intel_display_power(i915, domain, wf) \
for ((wf) = intel_display_power_get((i915), (domain)); (wf); \
intel_display_power_put_async((i915), (domain), (wf)), (wf) = 0)
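The with_intel_display_power() macro defined above is a for-loop scoping trick: intel_display_power_get() returns a non-zero wakeref cookie, so the loop condition holds exactly once, and the increment expression drops the reference and zeroes wf, ending the loop. A usage sketch built only from the helpers declared in this header; POWER_DOMAIN_AUX_A is just an example domain:

intel_wakeref_t wf;

with_intel_display_power(i915, POWER_DOMAIN_AUX_A, wf) {
	/* registers behind this power domain are safe to touch here */
}

/* expands to roughly:
 *   wf = intel_display_power_get(i915, POWER_DOMAIN_AUX_A);
 *   ...body...
 *   intel_display_power_put_async(i915, POWER_DOMAIN_AUX_A, wf);
 */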
diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h
index 888ea8a170d1..5e00e611f077 100644
--- a/drivers/gpu/drm/i915/display/intel_display_types.h
+++ b/drivers/gpu/drm/i915/display/intel_display_types.h
@@ -39,13 +39,14 @@
#include <drm/drm_probe_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_vblank.h>
-#include <drm/i915_drm.h>
#include <drm/i915_mei_hdcp_interface.h>
#include <media/cec-notifier.h>
#include "i915_drv.h"
+#include "intel_de.h"
struct drm_printer;
+struct __intel_global_objs_state;
/*
* Display related stuff
@@ -139,6 +140,9 @@ struct intel_encoder {
int (*compute_config)(struct intel_encoder *,
struct intel_crtc_state *,
struct drm_connector_state *);
+ int (*compute_config_late)(struct intel_encoder *,
+ struct intel_crtc_state *,
+ struct drm_connector_state *);
void (*update_prepare)(struct intel_atomic_state *,
struct intel_encoder *,
struct intel_crtc *);
@@ -214,6 +218,9 @@ struct intel_panel {
u8 controller; /* bxt+ only */
struct pwm_device *pwm;
+ /* DPCD backlight */
+ u8 pwmgen_bit_count;
+
struct backlight_device *device;
/* Connector and platform specific backlight functions */
@@ -458,25 +465,8 @@ struct intel_atomic_state {
intel_wakeref_t wakeref;
- struct {
- /*
- * Logical state of cdclk (used for all scaling, watermark,
- * etc. calculations and checks). This is computed as if all
- * enabled crtcs were active.
- */
- struct intel_cdclk_state logical;
-
- /*
- * Actual state of cdclk, can be different from the logical
- * state only when all crtc's are DPMS off.
- */
- struct intel_cdclk_state actual;
-
- int force_min_cdclk;
- bool force_min_cdclk_changed;
- /* pipe to which cd2x update is synchronized */
- enum pipe pipe;
- } cdclk;
+ struct __intel_global_objs_state *global_objs;
+ int num_global_objs;
bool dpll_set, modeset;
@@ -491,10 +481,6 @@ struct intel_atomic_state {
u8 active_pipe_changes;
u8 active_pipes;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
struct intel_shared_dpll_state shared_dpll[I915_NUM_PLLS];
@@ -508,14 +494,11 @@ struct intel_atomic_state {
/*
* active_pipes
- * min_cdclk[]
- * min_voltage_level[]
- * cdclk.*
*/
bool global_state_changed;
- /* Gen9+ only */
- struct skl_ddb_values wm_results;
+ /* Number of enabled DBuf slices */
+ u8 enabled_dbuf_slices_mask;
struct i915_sw_fence commit_ready;
@@ -611,6 +594,7 @@ struct intel_plane_state {
struct intel_initial_plane_config {
struct intel_framebuffer *fb;
+ struct i915_vma *vma;
unsigned int tiling;
int size;
u32 base;
@@ -657,15 +641,30 @@ struct intel_crtc_scaler_state {
/* Flag to use the scanline counter instead of the pixel counter */
#define I915_MODE_FLAG_USE_SCANLINE_COUNTER (1<<2)
+struct intel_wm_level {
+ bool enable;
+ u32 pri_val;
+ u32 spr_val;
+ u32 cur_val;
+ u32 fbc_val;
+};
+
struct intel_pipe_wm {
struct intel_wm_level wm[5];
- u32 linetime;
bool fbc_wm_enabled;
bool pipe_enabled;
bool sprites_enabled;
bool sprites_scaled;
};
+struct skl_wm_level {
+ u16 min_ddb_alloc;
+ u16 plane_res_b;
+ u8 plane_res_l;
+ bool plane_en;
+ bool ignore_lines;
+};
+
struct skl_plane_wm {
struct skl_wm_level wm[8];
struct skl_wm_level uv_wm[8];
@@ -675,7 +674,6 @@ struct skl_plane_wm {
struct skl_pipe_wm {
struct skl_plane_wm planes[I915_MAX_PLANES];
- u32 linetime;
};
enum vlv_wm_level {
@@ -1046,6 +1044,10 @@ struct intel_crtc_state {
struct drm_dsc_config config;
} dsc;
+ /* HSW+ linetime watermarks */
+ u16 linetime;
+ u16 ips_linetime;
+
/* Forward Error correction State */
bool fec_enable;
@@ -1059,6 +1061,32 @@ struct intel_crtc_state {
enum transcoder mst_master_transcoder;
};
+enum intel_pipe_crc_source {
+ INTEL_PIPE_CRC_SOURCE_NONE,
+ INTEL_PIPE_CRC_SOURCE_PLANE1,
+ INTEL_PIPE_CRC_SOURCE_PLANE2,
+ INTEL_PIPE_CRC_SOURCE_PLANE3,
+ INTEL_PIPE_CRC_SOURCE_PLANE4,
+ INTEL_PIPE_CRC_SOURCE_PLANE5,
+ INTEL_PIPE_CRC_SOURCE_PLANE6,
+ INTEL_PIPE_CRC_SOURCE_PLANE7,
+ INTEL_PIPE_CRC_SOURCE_PIPE,
+ /* TV/DP on pre-gen5/vlv can't use the pipe source. */
+ INTEL_PIPE_CRC_SOURCE_TV,
+ INTEL_PIPE_CRC_SOURCE_DP_B,
+ INTEL_PIPE_CRC_SOURCE_DP_C,
+ INTEL_PIPE_CRC_SOURCE_DP_D,
+ INTEL_PIPE_CRC_SOURCE_AUTO,
+ INTEL_PIPE_CRC_SOURCE_MAX,
+};
+
+#define INTEL_PIPE_CRC_ENTRIES_NR 128
+struct intel_pipe_crc {
+ spinlock_t lock;
+ int skipped;
+ enum intel_pipe_crc_source source;
+};
+
struct intel_crtc {
struct drm_crtc base;
enum pipe pipe;
@@ -1102,6 +1130,10 @@ struct intel_crtc {
/* per pipe DSB related info */
struct intel_dsb dsb;
+
+#ifdef CONFIG_DEBUG_FS
+ struct intel_pipe_crc pipe_crc;
+#endif
};
struct intel_plane {
@@ -1181,8 +1213,6 @@ struct intel_hdmi {
};
struct intel_dp_mst_encoder;
-#define DP_MAX_DOWNSTREAM_PORTS 0x10
-
/*
* enum link_m_n_set:
* When platform provides two set of M_N registers for dp, we can
@@ -1250,6 +1280,7 @@ struct intel_dp {
int max_link_rate;
/* sink or branch descriptor */
struct drm_dp_desc desc;
+ u32 edid_quirks;
struct drm_dp_aux aux;
u32 aux_busy_last_status;
u8 train_set[4];
@@ -1422,8 +1453,17 @@ vlv_pipe_to_channel(enum pipe pipe)
}
static inline struct intel_crtc *
+intel_get_first_crtc(struct drm_i915_private *dev_priv)
+{
+ return to_intel_crtc(drm_crtc_from_index(&dev_priv->drm, 0));
+}
+
+static inline struct intel_crtc *
intel_get_crtc_for_pipe(struct drm_i915_private *dev_priv, enum pipe pipe)
{
+ /* pipe_to_crtc_mapping may have holes on systems with 3 display pipes */
+ drm_WARN_ON(&dev_priv->drm,
+ !(INTEL_INFO(dev_priv)->pipe_mask & BIT(pipe)));
return dev_priv->pipe_to_crtc_mapping[pipe];
}
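intel_get_crtc_for_pipe() now warns when asked for a pipe the device does not expose: on parts with a middle pipe fused off, pipe_to_crtc_mapping[] has a hole, and silently returning its contents would hand back a stale or NULL pointer. A toy illustration of the pipe_mask check, with made-up fusing:

#include <stdio.h>

#define BIT(n) (1u << (n))
enum pipe { PIPE_A, PIPE_B, PIPE_C };

int main(void)
{
	/* assume pipe B is fused off: only A and C are present */
	unsigned int pipe_mask = BIT(PIPE_A) | BIT(PIPE_C);

	printf("pipe B valid: %u\n", !!(pipe_mask & BIT(PIPE_B))); /* prints 0 */
	return 0;
}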
@@ -1469,7 +1509,7 @@ enc_to_dig_port(struct intel_encoder *encoder)
}
static inline struct intel_digital_port *
-conn_to_dig_port(struct intel_connector *connector)
+intel_attached_dig_port(struct intel_connector *connector)
{
return enc_to_dig_port(intel_attached_encoder(connector));
}
@@ -1486,6 +1526,11 @@ static inline struct intel_dp *enc_to_intel_dp(struct intel_encoder *encoder)
return &enc_to_dig_port(encoder)->dp;
}
+static inline struct intel_dp *intel_attached_dp(struct intel_connector *connector)
+{
+ return enc_to_intel_dp(intel_attached_encoder(connector));
+}
+
static inline bool intel_encoder_is_dp(struct intel_encoder *encoder)
{
switch (encoder->type) {
@@ -1608,11 +1653,15 @@ intel_crtc_has_dp_encoder(const struct intel_crtc_state *crtc_state)
(1 << INTEL_OUTPUT_DP_MST) |
(1 << INTEL_OUTPUT_EDP));
}
+
static inline void
intel_wait_for_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- drm_wait_one_vblank(&dev_priv->drm, pipe);
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_wait_one_vblank(&crtc->base);
}
+
static inline void
intel_wait_for_vblank_if_active(struct drm_i915_private *dev_priv, enum pipe pipe)
{
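The vblank rework above matters for the same reason: drm_wait_one_vblank() takes a numeric index into the crtc list, which drifts from the pipe number once the numbering has holes, while drm_crtc_wait_one_vblank() is keyed on the crtc object itself. Resolving pipe to crtc through intel_get_crtc_for_pipe() first keeps the wait attached to the right pipe on fused-off parts:

/* before: drm_wait_one_vblank(&dev_priv->drm, pipe);  pipe used as crtc index
 * after:  drm_crtc_wait_one_vblank(&crtc->base);      crtc from pipe_mask-aware lookup */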
diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c
index c7424e2a04a3..0a417cd2af2b 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.c
+++ b/drivers/gpu/drm/i915/display/intel_dp.c
@@ -40,7 +40,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "i915_debugfs.h"
#include "i915_drv.h"
@@ -49,6 +48,7 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dp_link_training.h"
@@ -146,11 +146,6 @@ bool intel_dp_is_edp(struct intel_dp *intel_dp)
return intel_dig_port->base.type == INTEL_OUTPUT_EDP;
}
-static struct intel_dp *intel_attached_dp(struct intel_connector *connector)
-{
- return enc_to_intel_dp(intel_attached_encoder(connector));
-}
-
static void intel_dp_link_down(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state);
static bool edp_panel_vdd_on(struct intel_dp *intel_dp);
@@ -272,7 +267,7 @@ static int cnl_max_source_rate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
enum port port = dig_port->base.port;
- u32 voltage = I915_READ(CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
+ u32 voltage = intel_de_read(dev_priv, CNL_PORT_COMP_DW3) & VOLTAGE_INFO_MASK;
/* Low voltage SKUs are limited to max of 5.4G */
if (voltage == VOLTAGE_INFO_0_85V)
@@ -323,14 +318,14 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
162000, 270000
};
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct intel_encoder *encoder = &dig_port->base;
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[dig_port->base.port];
const int *source_rates;
- int size, max_rate = 0, vbt_max_rate = info->dp_max_link_rate;
+ int size, max_rate = 0, vbt_max_rate;
/* This should only be done once */
- WARN_ON(intel_dp->source_rates || intel_dp->num_source_rates);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->source_rates || intel_dp->num_source_rates);
if (INTEL_GEN(dev_priv) >= 10) {
source_rates = cnl_rates;
@@ -354,6 +349,7 @@ intel_dp_set_source_rates(struct intel_dp *intel_dp)
size = ARRAY_SIZE(g4x_rates);
}
+ vbt_max_rate = intel_bios_dp_max_link_rate(encoder);
if (max_rate && vbt_max_rate)
max_rate = min(max_rate, vbt_max_rate);
else if (vbt_max_rate)
@@ -519,12 +515,13 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
*/
bits_per_pixel = (link_clock * lane_count * 8) /
intel_dp_mode_to_fec_clock(mode_clock);
- DRM_DEBUG_KMS("Max link bpp: %u\n", bits_per_pixel);
+ drm_dbg_kms(&i915->drm, "Max link bpp: %u\n", bits_per_pixel);
/* Small Joiner Check: output bpp <= joiner RAM (bits) / Horiz. width */
max_bpp_small_joiner_ram = small_joiner_ram_size_bits(i915) /
mode_hdisplay;
- DRM_DEBUG_KMS("Max small joiner bpp: %u\n", max_bpp_small_joiner_ram);
+ drm_dbg_kms(&i915->drm, "Max small joiner bpp: %u\n",
+ max_bpp_small_joiner_ram);
/*
* Greatest allowed DSC BPP = MIN (output BPP from available Link BW
@@ -534,8 +531,8 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915,
/* Error out if the max bpp is less than smallest allowed valid bpp */
if (bits_per_pixel < valid_dsc_bpp[0]) {
- DRM_DEBUG_KMS("Unsupported BPP %u, min %u\n",
- bits_per_pixel, valid_dsc_bpp[0]);
+ drm_dbg_kms(&i915->drm, "Unsupported BPP %u, min %u\n",
+ bits_per_pixel, valid_dsc_bpp[0]);
return 0;
}
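The bound above divides total link bandwidth by the FEC-adjusted pixel clock: with both clocks in kHz, bits_per_pixel = link_clock * lane_count * 8 / fec_clock. A worked example with illustrative numbers (HBR2 at 540000 kHz per lane, four lanes, and an assumed FEC-adjusted mode clock of 560000 kHz):

#include <stdint.h>

static uint32_t max_link_bpp(uint32_t link_clock_khz, uint32_t lane_count,
			     uint32_t fec_clock_khz)
{
	/* total link bits per second / pixels per second */
	return link_clock_khz * lane_count * 8 / fec_clock_khz;
}

/* max_link_bpp(540000, 4, 560000) == 30; the code then takes the min of
 * this and the small-joiner bound (joiner RAM bits / hdisplay). */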
@@ -760,20 +757,22 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
enum dpio_channel ch = vlv_pipe_to_channel(pipe);
u32 DP;
- if (WARN(I915_READ(intel_dp->output_reg) & DP_PORT_EN,
- "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name))
+ if (drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN,
+ "skipping pipe %c power sequencer kick due to [ENCODER:%d:%s] being active\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name))
return;
- DRM_DEBUG_KMS("kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "kicking pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
DP |= DP_PORT_WIDTH(1);
DP |= DP_LINK_TRAIN_PAT_1;
@@ -783,7 +782,7 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
else
DP |= DP_PIPE_SEL(pipe);
- pll_enabled = I915_READ(DPLL(pipe)) & DPLL_VCO_ENABLE;
+ pll_enabled = intel_de_read(dev_priv, DPLL(pipe)) & DPLL_VCO_ENABLE;
/*
* The DPLL for the pipe must be enabled for this to work.
@@ -795,8 +794,9 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
if (vlv_force_pll_on(dev_priv, pipe, IS_CHERRYVIEW(dev_priv) ?
&chv_dpll[0].dpll : &vlv_dpll[0].dpll)) {
- DRM_ERROR("Failed to force on pll for pipe %c!\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm,
+ "Failed to force on pll for pipe %c!\n",
+ pipe_name(pipe));
return;
}
}
@@ -807,14 +807,14 @@ vlv_power_sequencer_kick(struct intel_dp *intel_dp)
* to make this power sequencer lock onto the port.
* Otherwise even VDD force bit won't work.
*/
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
- I915_WRITE(intel_dp->output_reg, DP | DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP | DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
- I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP & ~DP_PORT_EN);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
if (!pll_enabled) {
vlv_force_pll_off(dev_priv, pipe);
@@ -837,13 +837,16 @@ static enum pipe vlv_find_free_pps(struct drm_i915_private *dev_priv)
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
if (encoder->type == INTEL_OUTPUT_EDP) {
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe !=
+ intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->pps_pipe);
} else {
- WARN_ON(intel_dp->pps_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->pps_pipe != INVALID_PIPE);
if (intel_dp->active_pipe != INVALID_PIPE)
pipes &= ~(1 << intel_dp->active_pipe);
@@ -866,10 +869,10 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE &&
- intel_dp->active_pipe != intel_dp->pps_pipe);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE &&
+ intel_dp->active_pipe != intel_dp->pps_pipe);
if (intel_dp->pps_pipe != INVALID_PIPE)
return intel_dp->pps_pipe;
@@ -880,16 +883,17 @@ vlv_power_sequencer_pipe(struct intel_dp *intel_dp)
* Didn't find one. This should not happen since there
* are two power sequencers and up to two eDP ports.
*/
- if (WARN_ON(pipe == INVALID_PIPE))
+ if (drm_WARN_ON(&dev_priv->drm, pipe == INVALID_PIPE))
pipe = PIPE_A;
vlv_steal_power_sequencer(dev_priv, pipe);
intel_dp->pps_pipe = pipe;
- DRM_DEBUG_KMS("picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe),
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "picked pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe),
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -913,7 +917,7 @@ bxt_power_sequencer_idx(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
/* We should never land here with regular DP ports */
- WARN_ON(!intel_dp_is_edp(intel_dp));
+ drm_WARN_ON(&dev_priv->drm, !intel_dp_is_edp(intel_dp));
if (!intel_dp->pps_reset)
return backlight_controller;
@@ -935,13 +939,13 @@ typedef bool (*vlv_pipe_check)(struct drm_i915_private *dev_priv,
static bool vlv_pipe_has_pp_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(PP_STATUS(pipe)) & PP_ON;
+ return intel_de_read(dev_priv, PP_STATUS(pipe)) & PP_ON;
}
static bool vlv_pipe_has_vdd_on(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- return I915_READ(PP_CONTROL(pipe)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, PP_CONTROL(pipe)) & EDP_FORCE_VDD;
}
static bool vlv_pipe_any(struct drm_i915_private *dev_priv,
@@ -958,7 +962,7 @@ vlv_initial_pps_pipe(struct drm_i915_private *dev_priv,
enum pipe pipe;
for (pipe = PIPE_A; pipe <= PIPE_B; pipe++) {
- u32 port_sel = I915_READ(PP_ON_DELAYS(pipe)) &
+ u32 port_sel = intel_de_read(dev_priv, PP_ON_DELAYS(pipe)) &
PANEL_PORT_SELECT_MASK;
if (port_sel != PANEL_PORT_SELECT_VLV(port))
@@ -997,16 +1001,18 @@ vlv_initial_power_sequencer_setup(struct intel_dp *intel_dp)
/* didn't find one? just let vlv_power_sequencer_pipe() pick one when needed */
if (intel_dp->pps_pipe == INVALID_PIPE) {
- DRM_DEBUG_KMS("no initial power sequencer for [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "no initial power sequencer for [ENCODER:%d:%s]\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
return;
}
- DRM_DEBUG_KMS("initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name,
- pipe_name(intel_dp->pps_pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "initial power sequencer for [ENCODER:%d:%s]: pipe %c\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name,
+ pipe_name(intel_dp->pps_pipe));
intel_dp_init_panel_power_sequencer(intel_dp);
intel_dp_init_panel_power_sequencer_registers(intel_dp, false);
@@ -1016,8 +1022,10 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
{
struct intel_encoder *encoder;
- if (WARN_ON(!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv) &&
- !IS_GEN9_LP(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !(IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv) ||
+ IS_GEN9_LP(dev_priv))))
return;
/*
@@ -1033,7 +1041,8 @@ void intel_power_sequencer_reset(struct drm_i915_private *dev_priv)
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_dp->active_pipe != INVALID_PIPE);
if (encoder->type != INTEL_OUTPUT_EDP)
continue;
@@ -1119,12 +1128,13 @@ static int edp_notify_handler(struct notifier_block *this, unsigned long code,
pp_ctrl_reg = PP_CONTROL(pipe);
pp_div_reg = PP_DIVISOR(pipe);
- pp_div = I915_READ(pp_div_reg);
+ pp_div = intel_de_read(dev_priv, pp_div_reg);
pp_div &= PP_REFERENCE_DIVIDER_MASK;
/* 0x1F write to PP_DIV_REG sets max cycle delay */
- I915_WRITE(pp_div_reg, pp_div | 0x1F);
- I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS);
+ intel_de_write(dev_priv, pp_div_reg, pp_div | 0x1F);
+ intel_de_write(dev_priv, pp_ctrl_reg,
+ PANEL_UNLOCK_REGS);
msleep(intel_dp->panel_power_cycle_delay);
}
}
@@ -1142,7 +1152,7 @@ static bool edp_have_panel_power(struct intel_dp *intel_dp)
intel_dp->pps_pipe == INVALID_PIPE)
return false;
- return (I915_READ(_pp_stat_reg(intel_dp)) & PP_ON) != 0;
+ return (intel_de_read(dev_priv, _pp_stat_reg(intel_dp)) & PP_ON) != 0;
}
static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
@@ -1155,7 +1165,7 @@ static bool edp_have_panel_vdd(struct intel_dp *intel_dp)
intel_dp->pps_pipe == INVALID_PIPE)
return false;
- return I915_READ(_pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
+ return intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)) & EDP_FORCE_VDD;
}
static void
@@ -1167,10 +1177,11 @@ intel_dp_check_edp(struct intel_dp *intel_dp)
return;
if (!edp_have_panel_power(intel_dp) && !edp_have_panel_vdd(intel_dp)) {
- WARN(1, "eDP powered off while attempting aux channel communication.\n");
- DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
- I915_READ(_pp_stat_reg(intel_dp)),
- I915_READ(_pp_ctrl_reg(intel_dp)));
+ drm_WARN(&dev_priv->drm, 1,
+ "eDP powered off while attempting aux channel communication.\n");
+ drm_dbg_kms(&dev_priv->drm, "Status 0x%08x Control 0x%08x\n",
+ intel_de_read(dev_priv, _pp_stat_reg(intel_dp)),
+ intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp)));
}
}
@@ -1191,8 +1202,9 @@ intel_dp_aux_wait_done(struct intel_dp *intel_dp)
trace_i915_reg_rw(false, ch_ctl, status, sizeof(status), true);
if (!done)
- DRM_ERROR("%s did not complete or timeout within %ums (status 0x%08x)\n",
- intel_dp->aux.name, timeout_ms, status);
+ drm_err(&i915->drm,
+ "%s: did not complete or timeout within %ums (status 0x%08x)\n",
+ intel_dp->aux.name, timeout_ms, status);
#undef C
return status;
@@ -1209,13 +1221,14 @@ static u32 g4x_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* The clock divider is based off the hrawclk, and would like to run at
* 2MHz. So, take the hrawclk value and divide by 2000 and use that
*/
- return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+ return DIV_ROUND_CLOSEST(RUNTIME_INFO(dev_priv)->rawclk_freq, 2000);
}
static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ u32 freq;
if (index)
return 0;
@@ -1226,9 +1239,10 @@ static u32 ilk_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
* divide by 2000 and use that
*/
if (dig_port->aux_ch == AUX_CH_A)
- return DIV_ROUND_CLOSEST(dev_priv->cdclk.hw.cdclk, 2000);
+ freq = dev_priv->cdclk.hw.cdclk;
else
- return DIV_ROUND_CLOSEST(dev_priv->rawclk_freq, 2000);
+ freq = RUNTIME_INFO(dev_priv)->rawclk_freq;
+ return DIV_ROUND_CLOSEST(freq, 2000);
}
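Both dividers above target a 2 MHz AUX bit clock: the source frequency (rawclk or cdclk) is kept in kHz, so dividing by 2000 yields source cycles per AUX clock period, rounded to nearest. A small worked example assuming a 100 MHz rawclk:

/* round-to-nearest division, matching the kernel macro for positive values */
#define DIV_ROUND_CLOSEST(x, d) (((x) + (d) / 2) / (d))

/* 100 MHz rawclk = 100000 kHz: DIV_ROUND_CLOSEST(100000, 2000) == 50,
 * and 100 MHz / 50 = 2 MHz, the desired AUX clock. */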
static u32 hsw_get_aux_clock_divider(struct intel_dp *intel_dp, int index)
@@ -1378,8 +1392,9 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
const u32 status = intel_uncore_read(uncore, ch_ctl);
if (status != intel_dp->aux_busy_last_status) {
- WARN(1, "dp_aux_ch not started status 0x%08x\n",
- status);
+ drm_WARN(&i915->drm, 1,
+ "%s: not started (status 0x%08x)\n",
+ intel_dp->aux.name, status);
intel_dp->aux_busy_last_status = status;
}
@@ -1388,7 +1403,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
/* Only 5 data registers! */
- if (WARN_ON(send_bytes > 20 || recv_size > 20)) {
+ if (drm_WARN_ON(&i915->drm, send_bytes > 20 || recv_size > 20)) {
ret = -E2BIG;
goto out;
}
@@ -1440,7 +1455,8 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp,
}
if ((status & DP_AUX_CH_CTL_DONE) == 0) {
- DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
+ drm_err(&i915->drm, "%s: not done (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -EBUSY;
goto out;
}
@@ -1450,7 +1466,8 @@ done:
* Timeouts occur when the sink is not connected
*/
if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
- DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
+ drm_err(&i915->drm, "%s: receive error (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -EIO;
goto out;
}
@@ -1458,7 +1475,8 @@ done:
/* Timeouts occur when the device isn't connected, so they're
* "normal" -- don't fill the kernel log with these */
if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
- DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
+ drm_dbg_kms(&i915->drm, "%s: timeout (status 0x%08x)\n",
+ intel_dp->aux.name, status);
ret = -ETIMEDOUT;
goto out;
}
@@ -1473,8 +1491,9 @@ done:
* drm layer takes care for the necessary retries.
*/
if (recv_bytes == 0 || recv_bytes > 20) {
- DRM_DEBUG_KMS("Forbidden recv_bytes = %d on aux transaction\n",
- recv_bytes);
+ drm_dbg_kms(&i915->drm,
+ "%s: Forbidden recv_bytes = %d on aux transaction\n",
+ intel_dp->aux.name, recv_bytes);
ret = -EBUSY;
goto out;
}
@@ -1742,7 +1761,8 @@ intel_dp_aux_init(struct intel_dp *intel_dp)
drm_dp_aux_init(&intel_dp->aux);
/* Failure to allocate our preferred name is not critical */
- intel_dp->aux.name = kasprintf(GFP_KERNEL, "DPDDC-%c",
+ intel_dp->aux.name = kasprintf(GFP_KERNEL, "AUX %c/port %c",
+ aux_ch_name(dig_port->aux_ch),
port_name(encoder->port));
intel_dp->aux.transfer = intel_dp_aux_transfer;
}
@@ -1918,8 +1938,9 @@ static int intel_dp_compute_bpp(struct intel_dp *intel_dp,
/* Get bpp from VBT only for panels that don't have bpp in EDID */
if (intel_connector->base.display_info.bpc == 0 &&
dev_priv->vbt.edp.bpp && dev_priv->vbt.edp.bpp < bpp) {
- DRM_DEBUG_KMS("clamping bpp for eDP panel to BIOS-provided %i\n",
- dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping bpp for eDP panel to BIOS-provided %i\n",
+ dev_priv->vbt.edp.bpp);
bpp = dev_priv->vbt.edp.bpp;
}
}
@@ -2115,7 +2136,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
/* Min Input BPC for ICL+ is 8 */
if (pipe_bpp < 8 * 3) {
- DRM_DEBUG_KMS("No DSC support for less than 8bpc\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No DSC support for less than 8bpc\n");
return -EINVAL;
}
@@ -2150,7 +2172,8 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
adjusted_mode->crtc_clock,
adjusted_mode->crtc_hdisplay);
if (!dsc_max_output_bpp || !dsc_dp_slice_count) {
- DRM_DEBUG_KMS("Compressed BPP/Slice Count not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Compressed BPP/Slice Count not supported\n");
return -EINVAL;
}
pipe_config->dsc.compressed_bpp = min_t(u16,
@@ -2167,26 +2190,28 @@ static int intel_dp_dsc_compute_config(struct intel_dp *intel_dp,
if (pipe_config->dsc.slice_count > 1) {
pipe_config->dsc.dsc_split = true;
} else {
- DRM_DEBUG_KMS("Cannot split stream to use 2 VDSC instances\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot split stream to use 2 VDSC instances\n");
return -EINVAL;
}
}
ret = intel_dp_dsc_compute_params(&dig_port->base, pipe_config);
if (ret < 0) {
- DRM_DEBUG_KMS("Cannot compute valid DSC parameters for Input Bpp = %d "
- "Compressed BPP = %d\n",
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot compute valid DSC parameters for Input Bpp = %d "
+ "Compressed BPP = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp);
return ret;
}
pipe_config->dsc.compression_enable = true;
- DRM_DEBUG_KMS("DP DSC computed with Input Bpp = %d "
- "Compressed Bpp = %d Slice Count = %d\n",
- pipe_config->pipe_bpp,
- pipe_config->dsc.compressed_bpp,
- pipe_config->dsc.slice_count);
+ drm_dbg_kms(&dev_priv->drm, "DP DSC computed with Input Bpp = %d "
+ "Compressed Bpp = %d Slice Count = %d\n",
+ pipe_config->pipe_bpp,
+ pipe_config->dsc.compressed_bpp,
+ pipe_config->dsc.slice_count);
return 0;
}
@@ -2214,7 +2239,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder,
intel_dp->max_link_rate);
/* No common link rates between source and sink */
- WARN_ON(common_len <= 0);
+ drm_WARN_ON(encoder->base.dev, common_len <= 0);
limits.min_clock = 0;
limits.max_clock = common_len - 1;
@@ -2373,7 +2398,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
struct intel_connector *intel_connector = intel_dp->attached_connector;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int ret = 0, output_bpp;
@@ -2515,7 +2540,7 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
/* Preserve the BIOS-computed detected bit. This is
* supposed to be read-only.
*/
- intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED;
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg) & DP_DETECTED;
/* Handle DP bits in common between all three register formats */
intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0;
@@ -2539,12 +2564,12 @@ static void intel_dp_prepare(struct intel_encoder *encoder,
intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT;
- trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ trans_dp = intel_de_read(dev_priv, TRANS_DP_CTL(crtc->pipe));
if (drm_dp_enhanced_frame_cap(intel_dp->dpcd))
trans_dp |= TRANS_DP_ENH_FRAMING;
else
trans_dp &= ~TRANS_DP_ENH_FRAMING;
- I915_WRITE(TRANS_DP_CTL(crtc->pipe), trans_dp);
+ intel_de_write(dev_priv, TRANS_DP_CTL(crtc->pipe), trans_dp);
} else {
if (IS_G4X(dev_priv) && pipe_config->limited_color_range)
intel_dp->DP |= DP_COLOR_RANGE_16_235;
@@ -2590,18 +2615,20 @@ static void wait_panel_status(struct intel_dp *intel_dp,
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
- mask, value,
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ drm_dbg_kms(&dev_priv->drm,
+ "mask %08x value %08x status %08x control %08x\n",
+ mask, value,
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
if (intel_de_wait_for_register(dev_priv, pp_stat_reg,
mask, value, 5000))
- DRM_ERROR("Panel status timeout: status %08x control %08x\n",
- I915_READ(pp_stat_reg),
- I915_READ(pp_ctrl_reg));
+ drm_err(&dev_priv->drm,
+ "Panel status timeout: status %08x control %08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
- DRM_DEBUG_KMS("Wait complete\n");
+ drm_dbg_kms(&dev_priv->drm, "Wait complete\n");
}
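wait_panel_status() above is a masked-register poll: log the expected mask/value and the current PP status/control registers, wait up to 5000 ms for (status & mask) == value via intel_de_wait_for_register(), and log an error with both registers on timeout. A generic sketch of that shape; the callback-based read is an assumption standing in for the MMIO access:

#include <errno.h>
#include <stdint.h>

/* poll until (read & mask) == value or the budget runs out; the real
 * helper sleeps between polls instead of spinning */
static int wait_for_masked(uint32_t (*read_reg)(void *ctx), void *ctx,
			   uint32_t mask, uint32_t value,
			   unsigned int timeout_ms)
{
	while (timeout_ms--) {
		if ((read_reg(ctx) & mask) == value)
			return 0;
		/* msleep(1) here in kernel context */
	}
	return -ETIMEDOUT; /* caller logs status/control, as above */
}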
static void wait_panel_on(struct intel_dp *intel_dp)
@@ -2660,9 +2687,9 @@ static u32 ilk_get_pp_control(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- control = I915_READ(_pp_ctrl_reg(intel_dp));
- if (WARN_ON(!HAS_DDI(dev_priv) &&
- (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
+ control = intel_de_read(dev_priv, _pp_ctrl_reg(intel_dp));
+ if (drm_WARN_ON(&dev_priv->drm, !HAS_DDI(dev_priv) &&
+ (control & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS)) {
control &= ~PANEL_UNLOCK_MASK;
control |= PANEL_UNLOCK_REGS;
}
@@ -2696,9 +2723,9 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
intel_display_power_get(dev_priv,
intel_aux_power_domain(intel_dig_port));
- DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD on\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD on\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
if (!edp_have_panel_power(intel_dp))
wait_panel_power_cycle(intel_dp);
@@ -2709,17 +2736,19 @@ static bool edp_panel_vdd_on(struct intel_dp *intel_dp)
pp_stat_reg = _pp_stat_reg(intel_dp);
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
- DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
/*
* If the panel wasn't on, delay before accessing aux channel
*/
if (!edp_have_panel_power(intel_dp)) {
- DRM_DEBUG_KMS("[ENCODER:%d:%s] panel power wasn't enabled\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[ENCODER:%d:%s] panel power wasn't enabled\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
msleep(intel_dp->panel_power_up_delay);
}
@@ -2759,14 +2788,14 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->want_panel_vdd);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->want_panel_vdd);
if (!edp_have_panel_vdd(intel_dp))
return;
- DRM_DEBUG_KMS("Turning [ENCODER:%d:%s] VDD off\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turning [ENCODER:%d:%s] VDD off\n",
+ intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_FORCE_VDD;
@@ -2774,12 +2803,13 @@ static void edp_panel_vdd_off_sync(struct intel_dp *intel_dp)
pp_ctrl_reg = _pp_ctrl_reg(intel_dp);
pp_stat_reg = _pp_stat_reg(intel_dp);
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
/* Make sure sequencer is idle before allowing subsequent activity */
- DRM_DEBUG_KMS("PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
- I915_READ(pp_stat_reg), I915_READ(pp_ctrl_reg));
+ drm_dbg_kms(&dev_priv->drm, "PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n",
+ intel_de_read(dev_priv, pp_stat_reg),
+ intel_de_read(dev_priv, pp_ctrl_reg));
if ((pp & PANEL_POWER_ON) == 0)
intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2851,14 +2881,14 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name);
- if (WARN(edp_have_panel_power(intel_dp),
- "[ENCODER:%d:%s] panel power already on\n",
- dp_to_dig_port(intel_dp)->base.base.base.id,
- dp_to_dig_port(intel_dp)->base.base.name))
+ if (drm_WARN(&dev_priv->drm, edp_have_panel_power(intel_dp),
+ "[ENCODER:%d:%s] panel power already on\n",
+ dp_to_dig_port(intel_dp)->base.base.base.id,
+ dp_to_dig_port(intel_dp)->base.base.name))
return;
wait_panel_power_cycle(intel_dp);
@@ -2868,24 +2898,24 @@ static void edp_panel_on(struct intel_dp *intel_dp)
if (IS_GEN(dev_priv, 5)) {
/* ILK workaround: disable reset around power sequence */
pp &= ~PANEL_POWER_RESET;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
pp |= PANEL_POWER_ON;
if (!IS_GEN(dev_priv, 5))
pp |= PANEL_POWER_RESET;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
wait_panel_on(intel_dp);
intel_dp->last_power_on = jiffies;
if (IS_GEN(dev_priv, 5)) {
pp |= PANEL_POWER_RESET; /* restore panel reset bit */
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
}
@@ -2913,11 +2943,12 @@ static void edp_panel_off(struct intel_dp *intel_dp)
if (!intel_dp_is_edp(intel_dp))
return;
- DRM_DEBUG_KMS("Turn [ENCODER:%d:%s] panel power off\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_dbg_kms(&dev_priv->drm, "Turn [ENCODER:%d:%s] panel power off\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
- WARN(!intel_dp->want_panel_vdd, "Need [ENCODER:%d:%s] VDD to turn off panel\n",
- dig_port->base.base.base.id, dig_port->base.base.name);
+ drm_WARN(&dev_priv->drm, !intel_dp->want_panel_vdd,
+ "Need [ENCODER:%d:%s] VDD to turn off panel\n",
+ dig_port->base.base.base.id, dig_port->base.base.name);
pp = ilk_get_pp_control(intel_dp);
/* We need to switch off panel power _and_ force vdd, for otherwise some
@@ -2929,8 +2960,8 @@ static void edp_panel_off(struct intel_dp *intel_dp)
intel_dp->want_panel_vdd = false;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
wait_panel_off(intel_dp);
intel_dp->panel_power_off_time = ktime_get_boottime();
@@ -2971,8 +3002,8 @@ static void _intel_edp_backlight_on(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp |= EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
}
@@ -3007,8 +3038,8 @@ static void _intel_edp_backlight_off(struct intel_dp *intel_dp)
pp = ilk_get_pp_control(intel_dp);
pp &= ~EDP_BLC_ENABLE;
- I915_WRITE(pp_ctrl_reg, pp);
- POSTING_READ(pp_ctrl_reg);
+ intel_de_write(dev_priv, pp_ctrl_reg, pp);
+ intel_de_posting_read(dev_priv, pp_ctrl_reg);
}
intel_dp->last_backlight_off = jiffies;
@@ -3059,7 +3090,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
{
struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
struct drm_i915_private *dev_priv = to_i915(dig_port->base.base.dev);
- bool cur_state = I915_READ(intel_dp->output_reg) & DP_PORT_EN;
+ bool cur_state = intel_de_read(dev_priv, intel_dp->output_reg) & DP_PORT_EN;
I915_STATE_WARN(cur_state != state,
"[ENCODER:%d:%s] state assertion failure (expected %s, current %s)\n",
@@ -3070,7 +3101,7 @@ static void assert_dp_port(struct intel_dp *intel_dp, bool state)
static void assert_edp_pll(struct drm_i915_private *dev_priv, bool state)
{
- bool cur_state = I915_READ(DP_A) & DP_PLL_ENABLE;
+ bool cur_state = intel_de_read(dev_priv, DP_A) & DP_PLL_ENABLE;
I915_STATE_WARN(cur_state != state,
"eDP PLL state assertion failure (expected %s, current %s)\n",
@@ -3089,8 +3120,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_disabled(dev_priv);
- DRM_DEBUG_KMS("enabling eDP PLL for clock %d\n",
- pipe_config->port_clock);
+ drm_dbg_kms(&dev_priv->drm, "enabling eDP PLL for clock %d\n",
+ pipe_config->port_clock);
intel_dp->DP &= ~DP_PLL_FREQ_MASK;
@@ -3099,8 +3130,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
else
intel_dp->DP |= DP_PLL_FREQ_270MHZ;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(500);
/*
@@ -3114,8 +3145,8 @@ static void ilk_edp_pll_on(struct intel_dp *intel_dp,
intel_dp->DP |= DP_PLL_ENABLE;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(200);
}
@@ -3129,12 +3160,12 @@ static void ilk_edp_pll_off(struct intel_dp *intel_dp,
assert_dp_port_disabled(intel_dp);
assert_edp_pll_enabled(dev_priv);
- DRM_DEBUG_KMS("disabling eDP PLL\n");
+ drm_dbg_kms(&dev_priv->drm, "disabling eDP PLL\n");
intel_dp->DP &= ~DP_PLL_ENABLE;
- I915_WRITE(DP_A, intel_dp->DP);
- POSTING_READ(DP_A);
+ intel_de_write(dev_priv, DP_A, intel_dp->DP);
+ intel_de_posting_read(dev_priv, DP_A);
udelay(200);
}
@@ -3149,7 +3180,7 @@ static bool downstream_hpd_needs_d0(struct intel_dp *intel_dp)
* FIXME should really check all downstream ports...
*/
return intel_dp->dpcd[DP_DPCD_REV] == 0x11 &&
- intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & DP_DWN_STRM_PORT_PRESENT &&
+ drm_dp_is_branch(intel_dp->dpcd) &&
intel_dp->downstream_ports[0] & DP_DS_PORT_HPD;
}
@@ -3214,7 +3245,7 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
enum pipe p;
for_each_pipe(dev_priv, p) {
- u32 val = I915_READ(TRANS_DP_CTL(p));
+ u32 val = intel_de_read(dev_priv, TRANS_DP_CTL(p));
if ((val & TRANS_DP_PORT_SEL_MASK) == TRANS_DP_PORT_SEL(port)) {
*pipe = p;
@@ -3222,7 +3253,8 @@ static bool cpt_dp_port_selected(struct drm_i915_private *dev_priv,
}
}
- DRM_DEBUG_KMS("No pipe for DP port %c found\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "No pipe for DP port %c found\n",
+ port_name(port));
/* must initialize pipe to something for the asserts */
*pipe = PIPE_A;
@@ -3237,7 +3269,7 @@ bool intel_dp_port_enabled(struct drm_i915_private *dev_priv,
bool ret;
u32 val;
- val = I915_READ(dp_reg);
+ val = intel_de_read(dev_priv, dp_reg);
ret = val & DP_PORT_EN;
@@ -3289,12 +3321,13 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
else
pipe_config->output_types |= BIT(INTEL_OUTPUT_DP);
- tmp = I915_READ(intel_dp->output_reg);
+ tmp = intel_de_read(dev_priv, intel_dp->output_reg);
pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
if (HAS_PCH_CPT(dev_priv) && port != PORT_A) {
- u32 trans_dp = I915_READ(TRANS_DP_CTL(crtc->pipe));
+ u32 trans_dp = intel_de_read(dev_priv,
+ TRANS_DP_CTL(crtc->pipe));
if (trans_dp & TRANS_DP_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -3328,7 +3361,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
intel_dp_get_m_n(crtc, pipe_config);
if (port == PORT_A) {
- if ((I915_READ(DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
+ if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ)
pipe_config->port_clock = 162000;
else
pipe_config->port_clock = 270000;
@@ -3353,8 +3386,9 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
* up by the BIOS, and thus we can't get the mode at module
* load.
*/
- DRM_DEBUG_KMS("pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
- pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "pipe has %d bpp for eDP panel, overriding BIOS-provided max %d bpp\n",
+ pipe_config->pipe_bpp, dev_priv->vbt.edp.bpp);
dev_priv->vbt.edp.bpp = pipe_config->pipe_bpp;
}
}
@@ -3447,11 +3481,12 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
u8 train_pat_mask = drm_dp_training_pattern_mask(intel_dp->dpcd);
if (dp_train_pat & train_pat_mask)
- DRM_DEBUG_KMS("Using DP training pattern TPS%d\n",
- dp_train_pat & train_pat_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "Using DP training pattern TPS%d\n",
+ dp_train_pat & train_pat_mask);
if (HAS_DDI(dev_priv)) {
- u32 temp = I915_READ(intel_dp->regs.dp_tp_ctl);
+ u32 temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
if (dp_train_pat & DP_LINK_SCRAMBLING_DISABLE)
temp |= DP_TP_CTL_SCRAMBLE_DISABLE;
@@ -3477,7 +3512,7 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
temp |= DP_TP_CTL_LINK_TRAIN_PAT4;
break;
}
- I915_WRITE(intel_dp->regs.dp_tp_ctl, temp);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, temp);
} else if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -3494,7 +3529,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
case DP_TRAINING_PATTERN_3:
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2_CPT;
break;
}
@@ -3513,7 +3549,8 @@ _intel_dp_set_link_train(struct intel_dp *intel_dp,
*DP |= DP_LINK_TRAIN_PAT_2;
break;
case DP_TRAINING_PATTERN_3:
- DRM_DEBUG_KMS("TPS3 not supported, using TPS2 instead\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "TPS3 not supported, using TPS2 instead\n");
*DP |= DP_LINK_TRAIN_PAT_2;
break;
}
@@ -3539,8 +3576,8 @@ static void intel_dp_enable_port(struct intel_dp *intel_dp,
if (old_crtc_state->has_audio)
intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE;
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
static void intel_enable_dp(struct intel_encoder *encoder,
@@ -3550,11 +3587,11 @@ static void intel_enable_dp(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- u32 dp_reg = I915_READ(intel_dp->output_reg);
+ u32 dp_reg = intel_de_read(dev_priv, intel_dp->output_reg);
enum pipe pipe = crtc->pipe;
intel_wakeref_t wakeref;
- if (WARN_ON(dp_reg & DP_PORT_EN))
+ if (drm_WARN_ON(&dev_priv->drm, dp_reg & DP_PORT_EN))
return;
with_pps_lock(intel_dp, wakeref) {
@@ -3583,8 +3620,8 @@ static void intel_enable_dp(struct intel_encoder *encoder,
intel_dp_stop_link_train(intel_dp);
if (pipe_config->has_audio) {
- DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n",
- pipe_name(pipe));
+ drm_dbg(&dev_priv->drm, "Enabling DP audio on pipe %c\n",
+ pipe_name(pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
}
}
@@ -3625,9 +3662,9 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
enum pipe pipe = intel_dp->pps_pipe;
i915_reg_t pp_on_reg = PP_ON_DELAYS(pipe);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return;
edp_panel_vdd_off_sync(intel_dp);
@@ -3641,11 +3678,12 @@ static void vlv_detach_power_sequencer(struct intel_dp *intel_dp)
* port select always when logically disconnecting a power sequencer
* from a port.
*/
- DRM_DEBUG_KMS("detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- I915_WRITE(pp_on_reg, 0);
- POSTING_READ(pp_on_reg);
+ drm_dbg_kms(&dev_priv->drm,
+ "detaching pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), intel_dig_port->base.base.base.id,
+ intel_dig_port->base.base.name);
+ intel_de_write(dev_priv, pp_on_reg, 0);
+ intel_de_posting_read(dev_priv, pp_on_reg);
intel_dp->pps_pipe = INVALID_PIPE;
}
@@ -3660,17 +3698,18 @@ static void vlv_steal_power_sequencer(struct drm_i915_private *dev_priv,
for_each_intel_dp(&dev_priv->drm, encoder) {
struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
- WARN(intel_dp->active_pipe == pipe,
- "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_WARN(&dev_priv->drm, intel_dp->active_pipe == pipe,
+ "stealing pipe %c power sequencer from active [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
if (intel_dp->pps_pipe != pipe)
continue;
- DRM_DEBUG_KMS("stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
- pipe_name(pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "stealing pipe %c power sequencer from [ENCODER:%d:%s]\n",
+ pipe_name(pipe), encoder->base.base.id,
+ encoder->base.name);
/* make sure vdd is off before we steal it */
vlv_detach_power_sequencer(intel_dp);
@@ -3686,7 +3725,7 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
lockdep_assert_held(&dev_priv->pps_mutex);
- WARN_ON(intel_dp->active_pipe != INVALID_PIPE);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->active_pipe != INVALID_PIPE);
if (intel_dp->pps_pipe != INVALID_PIPE &&
intel_dp->pps_pipe != crtc->pipe) {
@@ -3712,9 +3751,10 @@ static void vlv_init_panel_power_sequencer(struct intel_encoder *encoder,
/* now it's all ours */
intel_dp->pps_pipe = crtc->pipe;
- DRM_DEBUG_KMS("initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
- pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
- encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "initializing pipe %c power sequencer for [ENCODER:%d:%s]\n",
+ pipe_name(intel_dp->pps_pipe), encoder->base.base.id,
+ encoder->base.name);
/* init power sequencer on this pipe and port */
intel_dp_init_panel_power_sequencer(intel_dp);
@@ -4140,18 +4180,22 @@ intel_dp_set_signal_levels(struct intel_dp *intel_dp)
}
if (mask)
- DRM_DEBUG_KMS("Using signal levels %08x\n", signal_levels);
-
- DRM_DEBUG_KMS("Using vswing level %d\n",
- train_set & DP_TRAIN_VOLTAGE_SWING_MASK);
- DRM_DEBUG_KMS("Using pre-emphasis level %d\n",
- (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
- DP_TRAIN_PRE_EMPHASIS_SHIFT);
+ drm_dbg_kms(&dev_priv->drm, "Using signal levels %08x\n",
+ signal_levels);
+
+ drm_dbg_kms(&dev_priv->drm, "Using vswing level %d%s\n",
+ train_set & DP_TRAIN_VOLTAGE_SWING_MASK,
+ train_set & DP_TRAIN_MAX_SWING_REACHED ? " (max)" : "");
+ drm_dbg_kms(&dev_priv->drm, "Using pre-emphasis level %d%s\n",
+ (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) >>
+ DP_TRAIN_PRE_EMPHASIS_SHIFT,
+ train_set & DP_TRAIN_MAX_PRE_EMPHASIS_REACHED ?
+ " (max)" : "");
intel_dp->DP = (intel_dp->DP & ~mask) | signal_levels;
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void
@@ -4164,8 +4208,8 @@ intel_dp_program_link_training_pattern(struct intel_dp *intel_dp,
_intel_dp_set_link_train(intel_dp, &intel_dp->DP, dp_train_pat);
- I915_WRITE(intel_dp->output_reg, intel_dp->DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, intel_dp->DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
}
void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
@@ -4178,10 +4222,10 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (!HAS_DDI(dev_priv))
return;
- val = I915_READ(intel_dp->regs.dp_tp_ctl);
+ val = intel_de_read(dev_priv, intel_dp->regs.dp_tp_ctl);
val &= ~DP_TP_CTL_LINK_TRAIN_MASK;
val |= DP_TP_CTL_LINK_TRAIN_IDLE;
- I915_WRITE(intel_dp->regs.dp_tp_ctl, val);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_ctl, val);
/*
* Until TGL on PORT_A we can have only eDP in SST mode. There the only
@@ -4195,7 +4239,8 @@ void intel_dp_set_idle_link_train(struct intel_dp *intel_dp)
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_IDLE_DONE, 1))
- DRM_ERROR("Timed out waiting for DP idle patterns\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DP idle patterns\n");
}
static void
@@ -4208,10 +4253,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
enum port port = encoder->port;
u32 DP = intel_dp->DP;
- if (WARN_ON((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0))
+ if (drm_WARN_ON(&dev_priv->drm,
+ (intel_de_read(dev_priv, intel_dp->output_reg) &
+ DP_PORT_EN) == 0))
return;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if ((IS_IVYBRIDGE(dev_priv) && port == PORT_A) ||
(HAS_PCH_CPT(dev_priv) && port != PORT_A)) {
@@ -4221,12 +4268,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~DP_LINK_TRAIN_MASK;
DP |= DP_LINK_TRAIN_PAT_IDLE;
}
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
DP &= ~(DP_PORT_EN | DP_AUDIO_OUTPUT_ENABLE);
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -4245,12 +4292,12 @@ intel_dp_link_down(struct intel_encoder *encoder,
DP &= ~(DP_PIPE_SEL_MASK | DP_LINK_TRAIN_MASK);
DP |= DP_PORT_EN | DP_PIPE_SEL(PIPE_A) |
DP_LINK_TRAIN_PAT_1;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
DP &= ~DP_PORT_EN;
- I915_WRITE(intel_dp->output_reg, DP);
- POSTING_READ(intel_dp->output_reg);
+ intel_de_write(dev_priv, intel_dp->output_reg, DP);
+ intel_de_posting_read(dev_priv, intel_dp->output_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -4370,7 +4417,7 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
/* this function is meant to be called only once */
- WARN_ON(intel_dp->dpcd[DP_DPCD_REV] != 0);
+ drm_WARN_ON(&dev_priv->drm, intel_dp->dpcd[DP_DPCD_REV] != 0);
if (!intel_dp_read_dpcd(intel_dp))
return false;
@@ -4390,8 +4437,9 @@ intel_edp_init_dpcd(struct intel_dp *intel_dp)
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_DPCD_REV,
intel_dp->edp_dpcd, sizeof(intel_dp->edp_dpcd)) ==
sizeof(intel_dp->edp_dpcd))
- DRM_DEBUG_KMS("eDP DPCD: %*ph\n", (int) sizeof(intel_dp->edp_dpcd),
- intel_dp->edp_dpcd);
+ drm_dbg_kms(&dev_priv->drm, "eDP DPCD: %*ph\n",
+ (int)sizeof(intel_dp->edp_dpcd),
+ intel_dp->edp_dpcd);
/*
* This has to be called after intel_dp->edp_dpcd is filled, PSR checks
@@ -4466,7 +4514,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
* we don't bother reading it here or in intel_edp_init_dpcd().
*/
if (!intel_dp_is_edp(intel_dp) &&
- !drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_SINK_COUNT)) {
+ !drm_dp_has_quirk(&intel_dp->desc, 0,
+ DP_DPCD_QUIRK_NO_SINK_COUNT)) {
u8 count;
ssize_t r;
@@ -5125,7 +5174,7 @@ int intel_dp_retrain_link(struct intel_encoder *encoder,
crtc_state = to_intel_crtc_state(crtc->base.state);
- WARN_ON(!intel_crtc_has_dp_encoder(crtc_state));
+ drm_WARN_ON(&dev_priv->drm, !intel_crtc_has_dp_encoder(crtc_state));
if (!crtc_state->hw.active)
return 0;
@@ -5195,7 +5244,8 @@ intel_dp_hotplug(struct intel_encoder *encoder,
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
- WARN(ret, "Acquiring modeset locks failed with %i\n", ret);
+ drm_WARN(encoder->base.dev, ret,
+ "Acquiring modeset locks failed with %i\n", ret);
/*
* Keeping it consistent with intel_ddi_hotplug() and
@@ -5281,7 +5331,8 @@ intel_dp_short_pulse(struct intel_dp *intel_dp)
intel_psr_short_pulse(intel_dp);
if (intel_dp->compliance.test_type == DP_TEST_LINK_TRAINING) {
- DRM_DEBUG_KMS("Link Training Compliance Test requested\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Link Training Compliance Test requested\n");
/* Send a Hotplug Uevent to userspace to start modeset */
drm_kms_helper_hotplug_event(&dev_priv->drm);
}
@@ -5370,7 +5421,7 @@ static bool ibx_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool cpt_digital_port_connected(struct intel_encoder *encoder)
@@ -5393,7 +5444,7 @@ static bool cpt_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool spt_digital_port_connected(struct intel_encoder *encoder)
@@ -5412,7 +5463,7 @@ static bool spt_digital_port_connected(struct intel_encoder *encoder)
return cpt_digital_port_connected(encoder);
}
- return I915_READ(SDEISR) & bit;
+ return intel_de_read(dev_priv, SDEISR) & bit;
}
static bool g4x_digital_port_connected(struct intel_encoder *encoder)
@@ -5435,7 +5486,7 @@ static bool g4x_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
static bool gm45_digital_port_connected(struct intel_encoder *encoder)
@@ -5458,7 +5509,7 @@ static bool gm45_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(PORT_HOTPLUG_STAT) & bit;
+ return intel_de_read(dev_priv, PORT_HOTPLUG_STAT) & bit;
}
static bool ilk_digital_port_connected(struct intel_encoder *encoder)
@@ -5466,7 +5517,7 @@ static bool ilk_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
else
return ibx_digital_port_connected(encoder);
}
@@ -5476,7 +5527,7 @@ static bool snb_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(encoder);
}
@@ -5486,7 +5537,7 @@ static bool ivb_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(DEISR) & DE_DP_A_HOTPLUG_IVB;
+ return intel_de_read(dev_priv, DEISR) & DE_DP_A_HOTPLUG_IVB;
else
return cpt_digital_port_connected(encoder);
}
@@ -5496,7 +5547,7 @@ static bool bdw_digital_port_connected(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
if (encoder->hpd_pin == HPD_PORT_A)
- return I915_READ(GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & GEN8_PORT_DP_A_HOTPLUG;
else
return cpt_digital_port_connected(encoder);
}
@@ -5521,16 +5572,16 @@ static bool bxt_digital_port_connected(struct intel_encoder *encoder)
return false;
}
- return I915_READ(GEN8_DE_PORT_ISR) & bit;
+ return intel_de_read(dev_priv, GEN8_DE_PORT_ISR) & bit;
}
static bool intel_combo_phy_connected(struct drm_i915_private *dev_priv,
enum phy phy)
{
if (HAS_PCH_MCC(dev_priv) && phy == PHY_C)
- return I915_READ(SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
+ return intel_de_read(dev_priv, SDEISR) & SDE_TC_HOTPLUG_ICP(PORT_TC1);
- return I915_READ(SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
+ return intel_de_read(dev_priv, SDEISR) & SDE_DDI_HOTPLUG_ICP(phy);
}
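Every *_digital_port_connected() helper converted above reduces to the same shape: pick the live-status bit for the port, then sample it from the platform's interrupt status register (SDEISR on PCH platforms, DEISR or GEN8_DE_PORT_ISR on the CPU side, PORT_HOTPLUG_STAT on g4x/gm45). A condensed sketch with a hypothetical helper name:

    static bool port_connected(struct drm_i915_private *dev_priv, u32 bit)
    {
        /* each platform variant above hardcodes its own register */
        return intel_de_read(dev_priv, SDEISR) & bit;
    }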
static bool icp_digital_port_connected(struct intel_encoder *encoder)
@@ -5631,6 +5682,7 @@ intel_dp_set_edid(struct intel_dp *intel_dp)
intel_dp->has_audio = drm_detect_monitor_audio(edid);
drm_dp_cec_set_edid(&intel_dp->aux, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
}
static void
@@ -5643,6 +5695,7 @@ intel_dp_unset_edid(struct intel_dp *intel_dp)
intel_connector->detect_edid = NULL;
intel_dp->has_audio = false;
+ intel_dp->edid_quirks = 0;
}
static int
@@ -5656,9 +5709,10 @@ intel_dp_detect(struct drm_connector *connector,
struct intel_encoder *encoder = &dig_port->base;
enum drm_connector_status status;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
/* Can't disconnect eDP */
if (intel_dp_is_edp(intel_dp))
@@ -5673,9 +5727,10 @@ intel_dp_detect(struct drm_connector *connector,
memset(intel_dp->dsc_dpcd, 0, sizeof(intel_dp->dsc_dpcd));
if (intel_dp->is_mst) {
- DRM_DEBUG_KMS("MST device may have disappeared %d vs %d\n",
- intel_dp->is_mst,
- intel_dp->mst_mgr.mst_state);
+ drm_dbg_kms(&dev_priv->drm,
+ "MST device may have disappeared %d vs %d\n",
+ intel_dp->is_mst,
+ intel_dp->mst_mgr.mst_state);
intel_dp->is_mst = false;
drm_dp_mst_topology_mgr_set_mst(&intel_dp->mst_mgr,
intel_dp->is_mst);
@@ -5763,8 +5818,8 @@ intel_dp_force(struct drm_connector *connector)
intel_aux_power_domain(dig_port);
intel_wakeref_t wakeref;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
intel_dp_unset_edid(intel_dp);
if (connector->status != connector_status_connected)
@@ -5815,7 +5870,7 @@ intel_dp_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- i915_debugfs_connector_add(connector);
+ intel_connector_debugfs_add(connector);
DRM_DEBUG_KMS("registering %s bus for %s\n",
intel_dp->aux.name, connector->kdev->kobj.name);
@@ -6396,6 +6451,7 @@ static
int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
bool is_repeater, u8 content_type)
{
+ int ret;
struct hdcp2_dp_errata_stream_type stream_type_msg;
if (is_repeater)
@@ -6411,8 +6467,11 @@ int intel_dp_hdcp2_config_stream_type(struct intel_digital_port *intel_dig_port,
stream_type_msg.msg_id = HDCP_2_2_ERRATA_DP_STREAM_TYPE;
stream_type_msg.stream_type = content_type;
- return intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
+ ret = intel_dp_hdcp2_write_msg(intel_dig_port, &stream_type_msg,
sizeof(stream_type_msg));
+
+ return ret < 0 ? ret : 0;
}
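Beyond the message conversions, this hunk fixes a return-value wrinkle: intel_dp_hdcp2_write_msg() evidently returns the number of bytes written on success, while callers of config_stream_type() expect the usual 0/-errno convention. The added normalization, as a general sketch (transfer() is a placeholder name):

    ssize_t ret = transfer(buf, len);   /* bytes written, or -errno */

    return ret < 0 ? ret : 0;           /* discard the byte count */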
static
@@ -6492,7 +6551,8 @@ static void intel_edp_panel_vdd_sanitize(struct intel_dp *intel_dp)
* schedule a vdd off, so we don't hold on to the reference
* indefinitely.
*/
- DRM_DEBUG_KMS("VDD left on by BIOS, adjusting state tracking\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD left on by BIOS, adjusting state tracking\n");
intel_display_power_get(dev_priv, intel_aux_power_domain(dig_port));
edp_panel_vdd_schedule_off(intel_dp);
@@ -6519,7 +6579,7 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
intel_wakeref_t wakeref;
if (!HAS_DDI(dev_priv))
- intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
if (lspcon->active)
lspcon_resume(lspcon);
@@ -6545,6 +6605,140 @@ void intel_dp_encoder_reset(struct drm_encoder *encoder)
}
}
+static int intel_modeset_tile_group(struct intel_atomic_state *state,
+ int tile_group_id)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct drm_connector_list_iter conn_iter;
+ struct drm_connector *connector;
+ int ret = 0;
+
+ drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
+ struct drm_connector_state *conn_state;
+ struct intel_crtc_state *crtc_state;
+ struct intel_crtc *crtc;
+
+ if (!connector->has_tile ||
+ connector->tile_group->id != tile_group_id)
+ continue;
+
+ conn_state = drm_atomic_get_connector_state(&state->base,
+ connector);
+ if (IS_ERR(conn_state)) {
+ ret = PTR_ERR(conn_state);
+ break;
+ }
+
+ crtc = to_intel_crtc(conn_state->crtc);
+
+ if (!crtc)
+ continue;
+
+ crtc_state = intel_atomic_get_new_crtc_state(state, crtc);
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ break;
+ }
+ drm_connector_list_iter_end(&conn_iter);
+
+ return ret;
+}
+
+static int intel_modeset_affected_transcoders(struct intel_atomic_state *state,
+ u8 transcoders)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ if (transcoders == 0)
+ return 0;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+ int ret;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ if (!crtc_state->hw.enable)
+ continue;
+
+ if (!(transcoders & BIT(crtc_state->cpu_transcoder)))
+ continue;
+
+ crtc_state->uapi.mode_changed = true;
+
+ ret = drm_atomic_add_affected_connectors(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ ret = drm_atomic_add_affected_planes(&state->base, &crtc->base);
+ if (ret)
+ return ret;
+
+ transcoders &= ~BIT(crtc_state->cpu_transcoder);
+ }
+
+ drm_WARN_ON(&dev_priv->drm, transcoders != 0);
+
+ return 0;
+}
+
+static int intel_modeset_synced_crtcs(struct intel_atomic_state *state,
+ struct drm_connector *connector)
+{
+ const struct drm_connector_state *old_conn_state =
+ drm_atomic_get_old_connector_state(&state->base, connector);
+ const struct intel_crtc_state *old_crtc_state;
+ struct intel_crtc *crtc;
+ u8 transcoders;
+
+ crtc = to_intel_crtc(old_conn_state->crtc);
+ if (!crtc)
+ return 0;
+
+ old_crtc_state = intel_atomic_get_old_crtc_state(state, crtc);
+
+ if (!old_crtc_state->hw.active)
+ return 0;
+
+ transcoders = old_crtc_state->sync_mode_slaves_mask;
+ if (old_crtc_state->master_transcoder != INVALID_TRANSCODER)
+ transcoders |= BIT(old_crtc_state->master_transcoder);
+
+ return intel_modeset_affected_transcoders(state,
+ transcoders);
+}
+
+static int intel_dp_connector_atomic_check(struct drm_connector *conn,
+ struct drm_atomic_state *_state)
+{
+ struct drm_i915_private *dev_priv = to_i915(conn->dev);
+ struct intel_atomic_state *state = to_intel_atomic_state(_state);
+ int ret;
+
+ ret = intel_digital_connector_atomic_check(conn, &state->base);
+ if (ret)
+ return ret;
+
+ if (INTEL_GEN(dev_priv) < 11)
+ return 0;
+
+ if (!intel_connector_needs_modeset(state, conn))
+ return 0;
+
+ if (conn->has_tile) {
+ ret = intel_modeset_tile_group(state, conn->tile_group->id);
+ if (ret)
+ return ret;
+ }
+
+ return intel_modeset_synced_crtcs(state, conn);
+}
+
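The new atomic_check path above forces a full modeset onto every connector in the same tile group, and onto any transcoders ganged for port sync, whenever one member changes on gen11+. intel_modeset_tile_group() leans on the DRM connector iterator, whose pairing contract is easy to get wrong; the idiom in isolation:

    struct drm_connector_list_iter conn_iter;
    struct drm_connector *connector;

    drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
    drm_for_each_connector_iter(connector, &conn_iter) {
        /* the iterator holds a reference on 'connector', so the body
         * may sleep; the pointer must not be stashed past _end() */
    }
    drm_connector_list_iter_end(&conn_iter); /* required even on early break */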
static const struct drm_connector_funcs intel_dp_connector_funcs = {
.force = intel_dp_force,
.fill_modes = drm_helper_probe_single_connector_modes,
@@ -6561,7 +6755,7 @@ static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs =
.detect_ctx = intel_dp_detect,
.get_modes = intel_dp_get_modes,
.mode_valid = intel_dp_mode_valid,
- .atomic_check = intel_digital_connector_atomic_check,
+ .atomic_check = intel_dp_connector_atomic_check,
};
static const struct drm_encoder_funcs intel_dp_enc_funcs = {
@@ -6697,10 +6891,10 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
/* Ensure PPS is unlocked */
if (!HAS_DDI(dev_priv))
- I915_WRITE(regs.pp_ctrl, pp_ctl);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
- pp_on = I915_READ(regs.pp_on);
- pp_off = I915_READ(regs.pp_off);
+ pp_on = intel_de_read(dev_priv, regs.pp_on);
+ pp_off = intel_de_read(dev_priv, regs.pp_off);
/* Pull timing values out of registers */
seq->t1_t3 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, pp_on);
@@ -6711,7 +6905,7 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq)
if (i915_mmio_reg_valid(regs.pp_div)) {
u32 pp_div;
- pp_div = I915_READ(regs.pp_div);
+ pp_div = intel_de_read(dev_priv, regs.pp_div);
seq->t11_t12 = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, pp_div) * 1000;
} else {
@@ -6768,8 +6962,9 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
*/
if (dev_priv->quirks & QUIRK_INCREASE_T12_DELAY) {
vbt.t11_t12 = max_t(u16, vbt.t11_t12, 1300 * 10);
- DRM_DEBUG_KMS("Increasing T12 panel delay as per the quirk to %d\n",
- vbt.t11_t12);
+ drm_dbg_kms(&dev_priv->drm,
+ "Increasing T12 panel delay as per the quirk to %d\n",
+ vbt.t11_t12);
}
/* T11_T12 delay is special and actually in units of 100ms, but zero
* based in the hw (so we need to add 100 ms). But the sw vbt
@@ -6811,12 +7006,15 @@ intel_dp_init_panel_power_sequencer(struct intel_dp *intel_dp)
intel_dp->panel_power_cycle_delay = get_delay(t11_t12);
#undef get_delay
- DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n",
- intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay,
- intel_dp->panel_power_cycle_delay);
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power up delay %d, power down delay %d, power cycle delay %d\n",
+ intel_dp->panel_power_up_delay,
+ intel_dp->panel_power_down_delay,
+ intel_dp->panel_power_cycle_delay);
- DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n",
- intel_dp->backlight_on_delay, intel_dp->backlight_off_delay);
+ drm_dbg_kms(&dev_priv->drm, "backlight on delay %d, off delay %d\n",
+ intel_dp->backlight_on_delay,
+ intel_dp->backlight_off_delay);
/*
* We override the HW backlight delays to 1 because we do manual waits
@@ -6841,7 +7039,7 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 pp_on, pp_off, port_sel = 0;
- int div = dev_priv->rawclk_freq / 1000;
+ int div = RUNTIME_INFO(dev_priv)->rawclk_freq / 1000;
struct pps_registers regs;
enum port port = dp_to_dig_port(intel_dp)->base.port;
const struct edp_power_seq *seq = &intel_dp->pps_delays;
@@ -6865,14 +7063,16 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
if (force_disable_vdd) {
u32 pp = ilk_get_pp_control(intel_dp);
- WARN(pp & PANEL_POWER_ON, "Panel power already on\n");
+ drm_WARN(&dev_priv->drm, pp & PANEL_POWER_ON,
+ "Panel power already on\n");
if (pp & EDP_FORCE_VDD)
- DRM_DEBUG_KMS("VDD already on, disabling first\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "VDD already on, disabling first\n");
pp &= ~EDP_FORCE_VDD;
- I915_WRITE(regs.pp_ctrl, pp);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp);
}
pp_on = REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, seq->t1_t3) |
@@ -6903,31 +7103,31 @@ intel_dp_init_panel_power_sequencer_registers(struct intel_dp *intel_dp,
pp_on |= port_sel;
- I915_WRITE(regs.pp_on, pp_on);
- I915_WRITE(regs.pp_off, pp_off);
+ intel_de_write(dev_priv, regs.pp_on, pp_on);
+ intel_de_write(dev_priv, regs.pp_off, pp_off);
/*
* Compute the divisor for the pp clock, simply match the Bspec formula.
*/
if (i915_mmio_reg_valid(regs.pp_div)) {
- I915_WRITE(regs.pp_div,
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
+ intel_de_write(dev_priv, regs.pp_div,
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, (100 * div) / 2 - 1) |
+ REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000)));
} else {
u32 pp_ctl;
- pp_ctl = I915_READ(regs.pp_ctrl);
+ pp_ctl = intel_de_read(dev_priv, regs.pp_ctrl);
pp_ctl &= ~BXT_POWER_CYCLE_DELAY_MASK;
pp_ctl |= REG_FIELD_PREP(BXT_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(seq->t11_t12, 1000));
- I915_WRITE(regs.pp_ctrl, pp_ctl);
+ intel_de_write(dev_priv, regs.pp_ctrl, pp_ctl);
}
- DRM_DEBUG_KMS("panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
- I915_READ(regs.pp_on),
- I915_READ(regs.pp_off),
- i915_mmio_reg_valid(regs.pp_div) ?
- I915_READ(regs.pp_div) :
- (I915_READ(regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
+ drm_dbg_kms(&dev_priv->drm,
+ "panel power sequencer register settings: PP_ON %#x, PP_OFF %#x, PP_DIV %#x\n",
+ intel_de_read(dev_priv, regs.pp_on),
+ intel_de_read(dev_priv, regs.pp_off),
+ i915_mmio_reg_valid(regs.pp_div) ?
+ intel_de_read(dev_priv, regs.pp_div) :
+ (intel_de_read(dev_priv, regs.pp_ctrl) & BXT_POWER_CYCLE_DELAY_MASK));
}
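For reference, the Bspec divisor written to PP_REFERENCE_DIVIDER above works out as follows for a hypothetical 24 MHz raw clock (rawclk_freq is stored in kHz, so div is the clock in MHz):

    int div = 24000 / 1000;             /* 24 */
    u32 ref_div = (100 * div) / 2 - 1;  /* (100 * 24) / 2 - 1 = 1199 */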
static void intel_dp_pps_init(struct intel_dp *intel_dp)
@@ -6964,22 +7164,24 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
enum drrs_refresh_rate_type index = DRRS_HIGH_RR;
if (refresh_rate <= 0) {
- DRM_DEBUG_KMS("Refresh rate should be positive non-zero.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Refresh rate should be positive non-zero.\n");
return;
}
if (intel_dp == NULL) {
- DRM_DEBUG_KMS("DRRS not supported.\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS not supported.\n");
return;
}
if (!intel_crtc) {
- DRM_DEBUG_KMS("DRRS: intel_crtc not initialized\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS: intel_crtc not initialized\n");
return;
}
if (dev_priv->drrs.type < SEAMLESS_DRRS_SUPPORT) {
- DRM_DEBUG_KMS("Only Seamless DRRS supported.\n");
+ drm_dbg_kms(&dev_priv->drm, "Only Seamless DRRS supported.\n");
return;
}
@@ -6988,13 +7190,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
index = DRRS_LOW_RR;
if (index == dev_priv->drrs.refresh_rate_type) {
- DRM_DEBUG_KMS(
- "DRRS requested for previously set RR...ignoring\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS requested for previously set RR...ignoring\n");
return;
}
if (!crtc_state->hw.active) {
- DRM_DEBUG_KMS("eDP encoder disabled. CRTC not Active\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "eDP encoder disabled. CRTC not Active\n");
return;
}
@@ -7008,13 +7211,14 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
break;
case DRRS_MAX_RR:
default:
- DRM_ERROR("Unsupported refreshrate type\n");
+ drm_err(&dev_priv->drm,
+ "Unsupported refreshrate type\n");
}
} else if (INTEL_GEN(dev_priv) > 6) {
i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder);
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if (index > DRRS_HIGH_RR) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV;
@@ -7026,12 +7230,13 @@ static void intel_dp_set_drrs_state(struct drm_i915_private *dev_priv,
else
val &= ~PIPECONF_EDP_RR_MODE_SWITCH;
}
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
dev_priv->drrs.refresh_rate_type = index;
- DRM_DEBUG_KMS("eDP Refresh Rate set to : %dHz\n", refresh_rate);
+ drm_dbg_kms(&dev_priv->drm, "eDP Refresh Rate set to : %dHz\n",
+ refresh_rate);
}
/**
@@ -7047,18 +7252,19 @@ void intel_edp_drrs_enable(struct intel_dp *intel_dp,
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (!crtc_state->has_drrs) {
- DRM_DEBUG_KMS("Panel doesn't support DRRS\n");
+ drm_dbg_kms(&dev_priv->drm, "Panel doesn't support DRRS\n");
return;
}
if (dev_priv->psr.enabled) {
- DRM_DEBUG_KMS("PSR enabled. Not enabling DRRS.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR enabled. Not enabling DRRS.\n");
return;
}
mutex_lock(&dev_priv->drrs.mutex);
if (dev_priv->drrs.dp) {
- DRM_DEBUG_KMS("DRRS already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "DRRS already enabled\n");
goto unlock;
}
@@ -7284,25 +7490,28 @@ intel_dp_drrs_init(struct intel_connector *connector,
mutex_init(&dev_priv->drrs.mutex);
if (INTEL_GEN(dev_priv) <= 6) {
- DRM_DEBUG_KMS("DRRS supported for Gen7 and above\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "DRRS supported for Gen7 and above\n");
return NULL;
}
if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) {
- DRM_DEBUG_KMS("VBT doesn't support DRRS\n");
+ drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n");
return NULL;
}
downclock_mode = intel_panel_edid_downclock_mode(connector, fixed_mode);
if (!downclock_mode) {
- DRM_DEBUG_KMS("Downclock mode is not found. DRRS not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Downclock mode is not found. DRRS not supported\n");
return NULL;
}
dev_priv->drrs.type = dev_priv->vbt.drrs_type;
dev_priv->drrs.refresh_rate_type = DRRS_HIGH_RR;
- DRM_DEBUG_KMS("seamless DRRS supported for eDP panel.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "seamless DRRS supported for eDP panel.\n");
return downclock_mode;
}
@@ -7331,8 +7540,10 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
* with an already powered-on LVDS power sequencer.
*/
if (intel_get_lvds_encoder(dev_priv)) {
- WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- DRM_INFO("LVDS was detected, not registering eDP\n");
+ drm_WARN_ON(dev,
+ !(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
+ drm_info(&dev_priv->drm,
+ "LVDS was detected, not registering eDP\n");
return false;
}
@@ -7348,7 +7559,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (!has_dpcd) {
/* if this fails, presume the device is a ghost */
- DRM_INFO("failed to retrieve link info, disabling eDP\n");
+ drm_info(&dev_priv->drm,
+ "failed to retrieve link info, disabling eDP\n");
goto out_vdd_off;
}
@@ -7356,8 +7568,8 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
edid = drm_get_edid(connector, &intel_dp->aux.ddc);
if (edid) {
if (drm_add_edid_modes(connector, edid)) {
- drm_connector_update_edid_property(connector,
- edid);
+ drm_connector_update_edid_property(connector, edid);
+ intel_dp->edid_quirks = drm_dp_get_edid_quirks(edid);
} else {
kfree(edid);
edid = ERR_PTR(-EINVAL);
@@ -7393,17 +7605,20 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
if (pipe != PIPE_A && pipe != PIPE_B)
pipe = PIPE_A;
- DRM_DEBUG_KMS("using pipe %c for initial backlight setup\n",
- pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm,
+ "using pipe %c for initial backlight setup\n",
+ pipe_name(pipe));
}
intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
intel_connector->panel.backlight.power = intel_edp_backlight_power;
intel_panel_setup_backlight(connector, pipe);
- if (fixed_mode)
- drm_connector_init_panel_orientation_property(
- connector, fixed_mode->hdisplay, fixed_mode->vdisplay);
+ if (fixed_mode) {
+ drm_connector_set_panel_orientation_with_quirk(connector,
+ dev_priv->vbt.orientation,
+ fixed_mode->hdisplay, fixed_mode->vdisplay);
+ }
return true;
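The replacement call above feeds the VBT-reported orientation into the drm core helper rather than relying on the size-keyed quirk table alone. Paraphrased (not the exact drm core source), the helper resolves the property roughly as:

    if (orientation == DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
        orientation = drm_get_panel_orientation_quirk(width, height);

    drm_connector_set_panel_orientation(connector, orientation);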
@@ -7459,10 +7674,10 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
INIT_WORK(&intel_connector->modeset_retry_work,
intel_dp_modeset_retry_work_fn);
- if (WARN(intel_dig_port->max_lanes < 1,
- "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 1,
+ "Not enough lanes (%d) for DP on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return false;
intel_dp_set_source_rates(intel_dp);
@@ -7472,7 +7687,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_dp->active_pipe = INVALID_PIPE;
/* Preserve the current hw state. */
- intel_dp->DP = I915_READ(intel_dp->output_reg);
+ intel_dp->DP = intel_de_read(dev_priv, intel_dp->output_reg);
intel_dp->attached_connector = intel_connector;
if (intel_dp_is_port_edp(dev_priv, port)) {
@@ -7480,7 +7695,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* Currently we don't support eDP on TypeC ports, although in
* theory it could work on TypeC legacy ports.
*/
- WARN_ON(intel_phy_is_tc(dev_priv, phy));
+ drm_WARN_ON(dev, intel_phy_is_tc(dev_priv, phy));
type = DRM_MODE_CONNECTOR_eDP;
} else {
type = DRM_MODE_CONNECTOR_DisplayPort;
@@ -7498,14 +7713,16 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
intel_encoder->type = INTEL_OUTPUT_EDP;
/* eDP only on port B and/or C on vlv/chv */
- if (WARN_ON((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
- intel_dp_is_edp(intel_dp) &&
- port != PORT_B && port != PORT_C))
+ if (drm_WARN_ON(dev, (IS_VALLEYVIEW(dev_priv) ||
+ IS_CHERRYVIEW(dev_priv)) &&
+ intel_dp_is_edp(intel_dp) &&
+ port != PORT_B && port != PORT_C))
return false;
- DRM_DEBUG_KMS("Adding %s connector on [ENCODER:%d:%s]\n",
- type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
- intel_encoder->base.base.id, intel_encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Adding %s connector on [ENCODER:%d:%s]\n",
+ type == DRM_MODE_CONNECTOR_eDP ? "eDP" : "DP",
+ intel_encoder->base.base.id, intel_encoder->base.name);
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
@@ -7518,6 +7735,7 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_dp_aux_init(intel_dp);
@@ -7543,7 +7761,8 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
if (is_hdcp_supported(dev_priv, port) && !intel_dp_is_edp(intel_dp)) {
int ret = intel_hdcp_init(intel_connector, &intel_dp_hdcp_shim);
if (ret)
- DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP init failed, skipping.\n");
}
/* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
@@ -7551,8 +7770,9 @@ intel_dp_init_connector(struct intel_digital_port *intel_dig_port,
* generated on the port when a cable is not attached.
*/
if (IS_G45(dev_priv)) {
- u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
}
return true;
diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h
index 3da166054788..0c7be8ed1423 100644
--- a/drivers/gpu/drm/i915/display/intel_dp.h
+++ b/drivers/gpu/drm/i915/display/intel_dp.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
enum pipe;
diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
index 7c653f8c307f..3e706bb850a8 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c
@@ -57,10 +57,27 @@ static void set_aux_backlight_enable(struct intel_dp *intel_dp, bool enable)
*/
static u32 intel_dp_aux_get_backlight(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
u8 read_val[2] = { 0x0 };
+ u8 mode_reg;
u16 level = 0;
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER,
+ &mode_reg) != 1) {
+ DRM_DEBUG_KMS("Failed to read the DPCD register 0x%x\n",
+ DP_EDP_BACKLIGHT_MODE_SET_REGISTER);
+ return 0;
+ }
+
+ /*
+ * If we're not in DPCD control mode yet, the programmed brightness
+ * value is meaningless and we should assume max brightness
+ */
+ if ((mode_reg & DP_EDP_BACKLIGHT_CONTROL_MODE_MASK) !=
+ DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD)
+ return connector->panel.backlight.max;
+
if (drm_dp_dpcd_read(&intel_dp->aux, DP_EDP_BACKLIGHT_BRIGHTNESS_MSB,
&read_val, sizeof(read_val)) < 0) {
DRM_DEBUG_KMS("Failed to read DPCD register 0x%x\n",
@@ -82,7 +99,7 @@ static void
intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 level)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
u8 vals[2] = { 0x0 };
vals[0] = level;
@@ -110,62 +127,29 @@ intel_dp_aux_set_backlight(const struct drm_connector_state *conn_state, u32 lev
static bool intel_dp_aux_set_pwm_freq(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
- int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
- u8 pn, pn_min, pn_max;
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ const u8 pn = connector->panel.backlight.pwmgen_bit_count;
+ int freq, fxp, f, fxp_actual, fxp_min, fxp_max;
- /* Find desired value of (F x P)
- * Note that, if F x P is out of supported range, the maximum value or
- * minimum value will applied automatically. So no need to check that.
- */
freq = dev_priv->vbt.backlight.pwm_freq_hz;
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
if (!freq) {
DRM_DEBUG_KMS("Use panel default backlight frequency\n");
return false;
}
fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
- /* Use highest possible value of Pn for more granularity of brightness
- * adjustment while satifying the conditions below.
- * - Pn is in the range of Pn_min and Pn_max
- * - F is in the range of 1 and 255
- * - FxP is within 25% of desired value.
- * Note: 25% is arbitrary value and may need some tweak.
- */
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
- return false;
- }
- if (drm_dp_dpcd_readb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
- DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
- return false;
- }
- pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
- pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
-
+ /* Ensure frequency is within 25% of desired value */
fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
- if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
- DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
- return false;
- }
-
- for (pn = pn_max; pn >= pn_min; pn--) {
- f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
- fxp_actual = f << pn;
- if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
- break;
- }
- if (drm_dp_dpcd_writeb(&intel_dp->aux,
- DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
- DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ if (fxp_min > fxp_actual || fxp_actual > fxp_max) {
+ DRM_DEBUG_KMS("Actual frequency out of range\n");
return false;
}
+
if (drm_dp_dpcd_writeb(&intel_dp->aux,
DP_EDP_BACKLIGHT_FREQ_SET, (u8) f) < 0) {
DRM_DEBUG_KMS("Failed to write aux backlight freq\n");
@@ -178,7 +162,8 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
const struct drm_connector_state *conn_state)
{
struct intel_connector *connector = to_intel_connector(conn_state->connector);
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
u8 dpcd_buf, new_dpcd_buf, edp_backlight_mode;
if (drm_dp_dpcd_readb(&intel_dp->aux,
@@ -197,6 +182,12 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
case DP_EDP_BACKLIGHT_CONTROL_MODE_PRODUCT:
new_dpcd_buf &= ~DP_EDP_BACKLIGHT_CONTROL_MODE_MASK;
new_dpcd_buf |= DP_EDP_BACKLIGHT_CONTROL_MODE_DPCD;
+
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT,
+ panel->backlight.pwmgen_bit_count) < 0)
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+
break;
/* Do nothing when it is already DPCD mode */
@@ -216,8 +207,9 @@ static void intel_dp_aux_enable_backlight(const struct intel_crtc_state *crtc_st
}
}
+ intel_dp_aux_set_backlight(conn_state,
+ connector->panel.backlight.level);
set_aux_backlight_enable(intel_dp, true);
- intel_dp_aux_set_backlight(conn_state, connector->panel.backlight.level);
}
static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -226,20 +218,91 @@ static void intel_dp_aux_disable_backlight(const struct drm_connector_state *old
false);
}
+static u32 intel_dp_aux_calc_max_backlight(struct intel_connector *connector)
+{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
+ struct intel_panel *panel = &connector->panel;
+ u32 max_backlight = 0;
+ int freq, fxp, fxp_min, fxp_max, fxp_actual, f = 1;
+ u8 pn, pn_min, pn_max;
+
+ if (drm_dp_dpcd_readb(&intel_dp->aux, DP_EDP_PWMGEN_BIT_COUNT, &pn) == 1) {
+ pn &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ max_backlight = (1 << pn) - 1;
+ }
+
+ /* Find desired value of (F x P)
+ * Note that, if F x P is out of supported range, the maximum value or
+ * minimum value will be applied automatically. So no need to check that.
+ */
+ freq = i915->vbt.backlight.pwm_freq_hz;
+ DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n", freq);
+ if (!freq) {
+ DRM_DEBUG_KMS("Use panel default backlight frequency\n");
+ return max_backlight;
+ }
+
+ fxp = DIV_ROUND_CLOSEST(KHz(DP_EDP_BACKLIGHT_FREQ_BASE_KHZ), freq);
+
+ /* Use highest possible value of Pn for more granularity of brightness
+ * adjustment while satisfying the conditions below.
+ * - Pn is in the range of Pn_min and Pn_max
+ * - F is in the range of 1 and 255
+ * - FxP is within 25% of desired value.
+ * Note: 25% is an arbitrary value and may need some tweak.
+ */
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MIN, &pn_min) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap min\n");
+ return max_backlight;
+ }
+ if (drm_dp_dpcd_readb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT_CAP_MAX, &pn_max) != 1) {
+ DRM_DEBUG_KMS("Failed to read pwmgen bit count cap max\n");
+ return max_backlight;
+ }
+ pn_min &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+ pn_max &= DP_EDP_PWMGEN_BIT_COUNT_MASK;
+
+ fxp_min = DIV_ROUND_CLOSEST(fxp * 3, 4);
+ fxp_max = DIV_ROUND_CLOSEST(fxp * 5, 4);
+ if (fxp_min < (1 << pn_min) || (255 << pn_max) < fxp_max) {
+ DRM_DEBUG_KMS("VBT defined backlight frequency out of range\n");
+ return max_backlight;
+ }
+
+ for (pn = pn_max; pn >= pn_min; pn--) {
+ f = clamp(DIV_ROUND_CLOSEST(fxp, 1 << pn), 1, 255);
+ fxp_actual = f << pn;
+ if (fxp_min <= fxp_actual && fxp_actual <= fxp_max)
+ break;
+ }
+
+ DRM_DEBUG_KMS("Using eDP pwmgen bit count of %d\n", pn);
+ if (drm_dp_dpcd_writeb(&intel_dp->aux,
+ DP_EDP_PWMGEN_BIT_COUNT, pn) < 0) {
+ DRM_DEBUG_KMS("Failed to write aux pwmgen bit count\n");
+ return max_backlight;
+ }
+ panel->backlight.pwmgen_bit_count = pn;
+
+ max_backlight = (1 << pn) - 1;
+
+ return max_backlight;
+}
+
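The value returned here is the full-scale level implied by the chosen bit count; e.g. if the search above settles on pn = 10 (an assumed result), the panel is driven over a 0..1023 range:

    u8 pn = 10;                         /* assumed search result */
    u32 max_backlight = (1 << pn) - 1;  /* 1023 */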
static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
enum pipe pipe)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
struct intel_panel *panel = &connector->panel;
- if (intel_dp->edp_dpcd[2] & DP_EDP_BACKLIGHT_BRIGHTNESS_BYTE_COUNT)
- panel->backlight.max = 0xFFFF;
- else
- panel->backlight.max = 0xFF;
+ panel->backlight.max = intel_dp_aux_calc_max_backlight(connector);
+ if (!panel->backlight.max)
+ return -ENODEV;
panel->backlight.min = 0;
panel->backlight.level = intel_dp_aux_get_backlight(connector);
-
panel->backlight.enabled = panel->backlight.level != 0;
return 0;
@@ -248,7 +311,7 @@ static int intel_dp_aux_setup_backlight(struct intel_connector *connector,
static bool
intel_dp_aux_display_control_capable(struct intel_connector *connector)
{
- struct intel_dp *intel_dp = enc_to_intel_dp(connector->encoder);
+ struct intel_dp *intel_dp = intel_attached_dp(connector);
/* Check the eDP Display control capabilities registers to determine if
* the panel can support backlight control over the aux channel
@@ -265,15 +328,31 @@ intel_dp_aux_display_control_capable(struct intel_connector *connector)
int intel_dp_aux_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct intel_panel *panel = &intel_connector->panel;
- struct drm_i915_private *dev_priv = to_i915(intel_connector->base.dev);
+ struct intel_dp *intel_dp = enc_to_intel_dp(intel_connector->encoder);
+ struct drm_device *dev = intel_connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(dev);
if (i915_modparams.enable_dpcd_backlight == 0 ||
- (i915_modparams.enable_dpcd_backlight == -1 &&
- dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE))
+ !intel_dp_aux_display_control_capable(intel_connector))
return -ENODEV;
- if (!intel_dp_aux_display_control_capable(intel_connector))
+ /*
+ * There are a lot of machines that don't properly advertise the
+ * backlight control interface to use in their VBIOS, :\
+ */
+ if (dev_priv->vbt.backlight.type !=
+ INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE &&
+ !drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
+ DRM_DEV_INFO(dev->dev,
+ "Panel advertises DPCD backlight support, but "
+ "VBT disagrees. If your backlight controls "
+ "don't work try booting with "
+ "i915.enable_dpcd_backlight=1. If your machine "
+ "needs this, please file a _new_ bug report on "
+ "drm/i915, see " FDO_BUG_URL " for details.\n");
return -ENODEV;
+ }
panel->backlight.setup = intel_dp_aux_setup_backlight;
panel->backlight.enable = intel_dp_aux_enable_backlight;
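Note that drm_dp_has_quirk() is called here with the edid_quirks argument introduced by this series, so a quirk can match on either the DPCD ident or the cached EDID. The call-site shape used throughout:

    bool force = drm_dp_has_quirk(&intel_dp->desc, intel_dp->edid_quirks,
                                  DP_QUIRK_FORCE_DPCD_BACKLIGHT);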
diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
index 2a1130dd1ad0..a7defb37ab00 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c
@@ -130,6 +130,7 @@ static bool intel_dp_link_max_vswing_reached(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
u8 voltage;
int voltage_tries, cr_tries, max_cr_tries;
bool max_vswing_reached = false;
@@ -143,9 +144,11 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
&link_bw, &rate_select);
if (link_bw)
- DRM_DEBUG_KMS("Using LINK_BW_SET value %02x\n", link_bw);
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_BW_SET value %02x\n", link_bw);
else
- DRM_DEBUG_KMS("Using LINK_RATE_SET value %02x\n", rate_select);
+ drm_dbg_kms(&i915->drm,
+ "Using LINK_RATE_SET value %02x\n", rate_select);
/* Write the link configuration data */
link_config[0] = link_bw;
@@ -169,7 +172,7 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
if (!intel_dp_reset_link_train(intel_dp,
DP_TRAINING_PATTERN_1 |
DP_LINK_SCRAMBLING_DISABLE)) {
- DRM_ERROR("failed to enable link training\n");
+ drm_err(&i915->drm, "failed to enable link training\n");
return false;
}
@@ -193,22 +196,23 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
drm_dp_link_train_clock_recovery_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ drm_err(&i915->drm, "failed to get link status\n");
return false;
}
if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count)) {
- DRM_DEBUG_KMS("clock recovery OK\n");
+ drm_dbg_kms(&i915->drm, "clock recovery OK\n");
return true;
}
if (voltage_tries == 5) {
- DRM_DEBUG_KMS("Same voltage tried 5 times\n");
+ drm_dbg_kms(&i915->drm,
+ "Same voltage tried 5 times\n");
return false;
}
if (max_vswing_reached) {
- DRM_DEBUG_KMS("Max Voltage Swing reached\n");
+ drm_dbg_kms(&i915->drm, "Max Voltage Swing reached\n");
return false;
}
@@ -217,7 +221,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
- DRM_ERROR("failed to update link training\n");
+ drm_err(&i915->drm,
+ "failed to update link training\n");
return false;
}
@@ -231,7 +236,8 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp)
max_vswing_reached = true;
}
- DRM_ERROR("Failed clock recovery %d times, giving up!\n", max_cr_tries);
+ drm_err(&i915->drm,
+ "Failed clock recovery %d times, giving up!\n", max_cr_tries);
return false;
}
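Condensed, the clock-recovery loop above gives up on any of three conditions; a skeleton of the policy the converted messages report (helper calls elided):

    for (cr_tries = 0; cr_tries < max_cr_tries; cr_tries++) {
        /* ...transmit TPS1, wait, read link_status... */
        if (drm_dp_clock_recovery_ok(link_status, intel_dp->lane_count))
            return true;        /* clock recovery done */
        if (voltage_tries == 5)
            return false;       /* same vswing requested 5x in a row */
        if (max_vswing_reached)
            return false;       /* nothing stronger left to try */
        /* ...apply the sink's requested vswing/pre-emphasis... */
    }
    return false;               /* max_cr_tries exhausted */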
@@ -256,9 +262,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_4;
} else if (intel_dp->link_rate == 810000) {
if (!source_tps4)
- DRM_DEBUG_KMS("8.1 Gbps link rate without source HBR3/TPS4 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "8.1 Gbps link rate without source HBR3/TPS4 support\n");
if (!sink_tps4)
- DRM_DEBUG_KMS("8.1 Gbps link rate without sink TPS4 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "8.1 Gbps link rate without sink TPS4 support\n");
}
/*
* Intel platforms that support HBR2 also support TPS3. TPS3 support is
@@ -271,9 +279,11 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
return DP_TRAINING_PATTERN_3;
} else if (intel_dp->link_rate >= 540000) {
if (!source_tps3)
- DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ ">=5.4/6.48 Gbps link rate without source HBR2/TPS3 support\n");
if (!sink_tps3)
- DRM_DEBUG_KMS(">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ ">=5.4/6.48 Gbps link rate without sink TPS3 support\n");
}
return DP_TRAINING_PATTERN_2;
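The selection logic here is simply "highest training pattern both ends support, gated by link rate"; the drm_dbg_kms() conversions above only touch the mismatch warnings. In outline:

    if (source_tps4 && sink_tps4)
        return DP_TRAINING_PATTERN_4;   /* wanted for HBR3 (8.1 Gbps) */
    if (source_tps3 && sink_tps3)
        return DP_TRAINING_PATTERN_3;   /* wanted for HBR2 (>= 5.4 Gbps) */
    return DP_TRAINING_PATTERN_2;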
@@ -282,6 +292,7 @@ static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
static bool
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
+ struct drm_i915_private *i915 = dp_to_i915(intel_dp);
int tries;
u32 training_pattern;
u8 link_status[DP_LINK_STATUS_SIZE];
@@ -295,7 +306,7 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
training_pattern)) {
- DRM_ERROR("failed to start channel equalization\n");
+ drm_err(&i915->drm, "failed to start channel equalization\n");
return false;
}
@@ -303,7 +314,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
drm_dp_link_train_channel_eq_delay(intel_dp->dpcd);
if (!intel_dp_get_link_status(intel_dp, link_status)) {
- DRM_ERROR("failed to get link status\n");
+ drm_err(&i915->drm,
+ "failed to get link status\n");
break;
}
@@ -311,23 +323,25 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
if (!drm_dp_clock_recovery_ok(link_status,
intel_dp->lane_count)) {
intel_dp_dump_link_status(link_status);
- DRM_DEBUG_KMS("Clock recovery check failed, cannot "
- "continue channel equalization\n");
+ drm_dbg_kms(&i915->drm,
+ "Clock recovery check failed, cannot "
+ "continue channel equalization\n");
break;
}
if (drm_dp_channel_eq_ok(link_status,
intel_dp->lane_count)) {
channel_eq = true;
- DRM_DEBUG_KMS("Channel EQ done. DP Training "
- "successful\n");
+ drm_dbg_kms(&i915->drm, "Channel EQ done. DP Training "
+ "successful\n");
break;
}
/* Update training set as requested by target */
intel_get_adjust_train(intel_dp, link_status);
if (!intel_dp_update_link_train(intel_dp)) {
- DRM_ERROR("failed to update link training\n");
+ drm_err(&i915->drm,
+ "failed to update link training\n");
break;
}
}
@@ -335,7 +349,8 @@ intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
/* Try 5 times, else fail and try at lower BW */
if (tries == 5) {
intel_dp_dump_link_status(link_status);
- DRM_DEBUG_KMS("Channel equalization failed 5 times\n");
+ drm_dbg_kms(&i915->drm,
+ "Channel equalization failed 5 times\n");
}
intel_dp_set_idle_link_train(intel_dp);
@@ -362,17 +377,19 @@ intel_dp_start_link_train(struct intel_dp *intel_dp)
if (!intel_dp_link_training_channel_equalization(intel_dp))
goto failure_handling;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s] Link Training Passed at Link Rate = %d, Lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
return;
failure_handling:
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
- intel_connector->base.base.id,
- intel_connector->base.name,
- intel_dp->link_rate, intel_dp->lane_count);
+ drm_dbg_kms(&dp_to_i915(intel_dp)->drm,
+ "[CONNECTOR:%d:%s] Link Training failed at link rate = %d, lane count = %d",
+ intel_connector->base.base.id,
+ intel_connector->base.name,
+ intel_dp->link_rate, intel_dp->lane_count);
if (!intel_dp_get_link_train_fallback_values(intel_dp,
intel_dp->link_rate,
intel_dp->lane_count))
diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c
index cba68c5a80fa..44f3fd251ca1 100644
--- a/drivers/gpu/drm/i915/display/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c
@@ -50,7 +50,7 @@ static int intel_dp_mst_compute_link_config(struct intel_encoder *encoder,
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
void *port = connector->port;
- bool constant_n = drm_dp_has_quirk(&intel_dp->desc,
+ bool constant_n = drm_dp_has_quirk(&intel_dp->desc, 0,
DP_DPCD_QUIRK_CONSTANT_N);
int bpp, slots = -EINVAL;
@@ -352,8 +352,9 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
intel_dp->active_mst_links--;
last_mst_stream = intel_dp->active_mst_links == 0;
- WARN_ON(INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
- !intel_dp_mst_is_master_trans(old_crtc_state));
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 12 && last_mst_stream &&
+ !intel_dp_mst_is_master_trans(old_crtc_state));
intel_crtc_vblank_off(old_crtc_state);
@@ -361,9 +362,12 @@ static void intel_mst_post_disable_dp(struct intel_encoder *encoder,
drm_dp_update_payload_part2(&intel_dp->mst_mgr);
- val = I915_READ(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder));
val &= ~TRANS_DDI_DP_VC_PAYLOAD_ALLOC;
- I915_WRITE(TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder), val);
+ intel_de_write(dev_priv,
+ TRANS_DDI_FUNC_CTL(old_crtc_state->cpu_transcoder),
+ val);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
DP_TP_STATUS_ACT_SENT, 1))
@@ -437,8 +441,9 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
connector->encoder = encoder;
intel_mst->connector = connector;
first_mst_stream = intel_dp->active_mst_links == 0;
- WARN_ON(INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
- !intel_dp_mst_is_master_trans(pipe_config));
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 12 && first_mst_stream &&
+ !intel_dp_mst_is_master_trans(pipe_config));
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
@@ -459,8 +464,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
DRM_ERROR("failed to allocate vcpi\n");
intel_dp->active_mst_links++;
- temp = I915_READ(intel_dp->regs.dp_tp_status);
- I915_WRITE(intel_dp->regs.dp_tp_status, temp);
+ temp = intel_de_read(dev_priv, intel_dp->regs.dp_tp_status);
+ intel_de_write(dev_priv, intel_dp->regs.dp_tp_status, temp);
ret = drm_dp_update_payload_part1(&intel_dp->mst_mgr);
@@ -475,6 +480,8 @@ static void intel_mst_pre_enable_dp(struct intel_encoder *encoder,
intel_ddi_enable_pipe_clock(pipe_config);
intel_ddi_set_dp_msa(pipe_config, conn_state);
+
+ intel_dp_set_m_n(pipe_config, M1_N1);
}
static void intel_mst_enable_dp(struct intel_encoder *encoder,
@@ -486,6 +493,12 @@ static void intel_mst_enable_dp(struct intel_encoder *encoder,
struct intel_dp *intel_dp = &intel_dig_port->dp;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ drm_WARN_ON(&dev_priv->drm, pipe_config->has_pch_encoder);
+
+ intel_enable_pipe(pipe_config);
+
+ intel_crtc_vblank_on(pipe_config);
+
DRM_DEBUG_KMS("active links %d\n", intel_dp->active_mst_links);
if (intel_de_wait_for_set(dev_priv, intel_dp->regs.dp_tp_status,
@@ -535,12 +548,41 @@ static int intel_dp_mst_get_ddc_modes(struct drm_connector *connector)
return ret;
}
+static int
+intel_dp_mst_connector_late_register(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+ int ret;
+
+ ret = drm_dp_mst_connector_late_register(connector,
+ intel_connector->port);
+ if (ret < 0)
+ return ret;
+
+ ret = intel_connector_register(connector);
+ if (ret < 0)
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+
+ return ret;
+}
+
+static void
+intel_dp_mst_connector_early_unregister(struct drm_connector *connector)
+{
+ struct intel_connector *intel_connector = to_intel_connector(connector);
+
+ intel_connector_unregister(connector);
+ drm_dp_mst_connector_early_unregister(connector,
+ intel_connector->port);
+}
+
static const struct drm_connector_funcs intel_dp_mst_connector_funcs = {
.fill_modes = drm_helper_probe_single_connector_modes,
.atomic_get_property = intel_digital_connector_atomic_get_property,
.atomic_set_property = intel_digital_connector_atomic_set_property,
- .late_register = intel_connector_register,
- .early_unregister = intel_connector_unregister,
+ .late_register = intel_dp_mst_connector_late_register,
+ .early_unregister = intel_dp_mst_connector_early_unregister,
.destroy = intel_connector_destroy,
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
@@ -632,9 +674,9 @@ static const struct drm_encoder_funcs intel_dp_mst_enc_funcs = {
static bool intel_dp_mst_get_hw_state(struct intel_connector *connector)
{
- if (connector->encoder && connector->base.state->crtc) {
+ if (intel_attached_encoder(connector) && connector->base.state->crtc) {
enum pipe pipe;
- if (!connector->encoder->get_hw_state(connector->encoder, &pipe))
+ if (!intel_attached_encoder(connector)->get_hw_state(intel_attached_encoder(connector), &pipe))
return false;
return true;
}
@@ -706,36 +748,8 @@ err:
return NULL;
}
-static void intel_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- if (dev_priv->fbdev)
- drm_fb_helper_add_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_register(connector);
-}
-
-static void intel_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, connector->name);
- drm_connector_unregister(connector);
-
- if (dev_priv->fbdev)
- drm_fb_helper_remove_one_connector(&dev_priv->fbdev->helper,
- connector);
-
- drm_connector_put(connector);
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = intel_dp_add_mst_connector,
- .register_connector = intel_dp_register_mst_connector,
- .destroy_connector = intel_dp_destroy_mst_connector,
};
static struct intel_dp_mst_encoder *
diff --git a/drivers/gpu/drm/i915/display/intel_dpio_phy.c b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
index 6fb1f7a7364e..399a7edb4568 100644
--- a/drivers/gpu/drm/i915/display/intel_dpio_phy.c
+++ b/drivers/gpu/drm/i915/display/intel_dpio_phy.c
@@ -259,7 +259,8 @@ void bxt_port_to_phy_channel(struct drm_i915_private *dev_priv, enum port port,
}
}
- WARN(1, "PHY not found for PORT %c", port_name(port));
+ drm_WARN(&dev_priv->drm, 1, "PHY not found for PORT %c",
+ port_name(port));
*phy = DPIO_PHY0;
*ch = DPIO_CH0;
}
@@ -278,33 +279,34 @@ void bxt_ddi_phy_set_signal_level(struct drm_i915_private *dev_priv,
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
val &= ~(TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT);
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW2_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW2_LN0(phy, ch));
val &= ~(MARGIN_000 | UNIQ_TRANS_SCALE);
val |= margin << MARGIN_000_SHIFT | scale << UNIQ_TRANS_SCALE_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW2_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW2_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW3_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW3_LN0(phy, ch));
val &= ~SCALE_DCOMP_METHOD;
if (enable)
val |= SCALE_DCOMP_METHOD;
if ((val & UNIQUE_TRANGE_EN_METHOD) && !(val & SCALE_DCOMP_METHOD))
- DRM_ERROR("Disabled scaling while ouniqetrangenmethod was set");
+ drm_err(&dev_priv->drm,
+ "Disabled scaling while ouniqetrangenmethod was set");
- I915_WRITE(BXT_PORT_TX_DW3_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW3_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_TX_DW4_LN0(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_TX_DW4_LN0(phy, ch));
val &= ~DE_EMPHASIS;
val |= deemphasis << DEEMPH_SHIFT;
- I915_WRITE(BXT_PORT_TX_DW4_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW4_GRP(phy, ch), val);
- val = I915_READ(BXT_PORT_PCS_DW10_LN01(phy, ch));
+ val = intel_de_read(dev_priv, BXT_PORT_PCS_DW10_LN01(phy, ch));
val |= TX2_SWING_CALC_INIT | TX1_SWING_CALC_INIT;
- I915_WRITE(BXT_PORT_PCS_DW10_GRP(phy, ch), val);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW10_GRP(phy, ch), val);
}
bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
@@ -314,20 +316,20 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
phy_info = bxt_get_phy_info(dev_priv, phy);
- if (!(I915_READ(BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
+ if (!(intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON) & phy_info->pwron_mask))
return false;
- if ((I915_READ(BXT_PORT_CL1CM_DW0(phy)) &
+ if ((intel_de_read(dev_priv, BXT_PORT_CL1CM_DW0(phy)) &
(PHY_POWER_GOOD | PHY_RESERVED)) != PHY_POWER_GOOD) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but power hasn't settled\n",
- phy);
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but power hasn't settled\n", phy);
return false;
}
- if (!(I915_READ(BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
- DRM_DEBUG_DRIVER("DDI PHY %d powered, but still in reset\n",
- phy);
+ if (!(intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy)) & COMMON_RESET_DIS)) {
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d powered, but still in reset\n", phy);
return false;
}
@@ -337,7 +339,7 @@ bool bxt_ddi_phy_is_enabled(struct drm_i915_private *dev_priv,
static u32 bxt_get_grc(struct drm_i915_private *dev_priv, enum dpio_phy phy)
{
- u32 val = I915_READ(BXT_PORT_REF_DW6(phy));
+ u32 val = intel_de_read(dev_priv, BXT_PORT_REF_DW6(phy));
return (val & GRC_CODE_MASK) >> GRC_CODE_SHIFT;
}
@@ -347,7 +349,8 @@ static void bxt_phy_wait_grc_done(struct drm_i915_private *dev_priv,
{
if (intel_de_wait_for_set(dev_priv, BXT_PORT_REF_DW3(phy),
GRC_DONE, 10))
- DRM_ERROR("timeout waiting for PHY%d GRC\n", phy);
+ drm_err(&dev_priv->drm, "timeout waiting for PHY%d GRC\n",
+ phy);
}
static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
@@ -364,18 +367,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
dev_priv->bxt_phy_grc = bxt_get_grc(dev_priv, phy);
if (bxt_ddi_phy_verify_state(dev_priv, phy)) {
- DRM_DEBUG_DRIVER("DDI PHY %d already enabled, "
- "won't reprogram it\n", phy);
+ drm_dbg(&dev_priv->drm, "DDI PHY %d already enabled, "
+ "won't reprogram it\n", phy);
return;
}
- DRM_DEBUG_DRIVER("DDI PHY %d enabled with invalid state, "
- "force reprogramming it\n", phy);
+ drm_dbg(&dev_priv->drm,
+ "DDI PHY %d enabled with invalid state, "
+ "force reprogramming it\n", phy);
}
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
val |= phy_info->pwron_mask;
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
/*
* The PHY registers start out inaccessible and respond to reads with
@@ -390,29 +394,30 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
PHY_RESERVED | PHY_POWER_GOOD,
PHY_POWER_GOOD,
1))
- DRM_ERROR("timeout during PHY%d power on\n", phy);
+ drm_err(&dev_priv->drm, "timeout during PHY%d power on\n",
+ phy);
/* Program PLL Rcomp code offset */
- val = I915_READ(BXT_PORT_CL1CM_DW9(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW9(phy));
val &= ~IREF0RC_OFFSET_MASK;
val |= 0xE4 << IREF0RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW9(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW9(phy), val);
- val = I915_READ(BXT_PORT_CL1CM_DW10(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW10(phy));
val &= ~IREF1RC_OFFSET_MASK;
val |= 0xE4 << IREF1RC_OFFSET_SHIFT;
- I915_WRITE(BXT_PORT_CL1CM_DW10(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW10(phy), val);
/* Program power gating */
- val = I915_READ(BXT_PORT_CL1CM_DW28(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL1CM_DW28(phy));
val |= OCL1_POWER_DOWN_EN | DW28_OLDO_DYN_PWR_DOWN_EN |
SUS_CLK_CONFIG;
- I915_WRITE(BXT_PORT_CL1CM_DW28(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL1CM_DW28(phy), val);
if (phy_info->dual_channel) {
- val = I915_READ(BXT_PORT_CL2CM_DW6(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_CL2CM_DW6(phy));
val |= DW6_OLDO_DYN_PWR_DOWN_EN;
- I915_WRITE(BXT_PORT_CL2CM_DW6(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_CL2CM_DW6(phy), val);
}
if (phy_info->rcomp_phy != -1) {
@@ -430,19 +435,19 @@ static void _bxt_ddi_phy_init(struct drm_i915_private *dev_priv,
grc_code = val << GRC_CODE_FAST_SHIFT |
val << GRC_CODE_SLOW_SHIFT |
val;
- I915_WRITE(BXT_PORT_REF_DW6(phy), grc_code);
+ intel_de_write(dev_priv, BXT_PORT_REF_DW6(phy), grc_code);
- val = I915_READ(BXT_PORT_REF_DW8(phy));
+ val = intel_de_read(dev_priv, BXT_PORT_REF_DW8(phy));
val |= GRC_DIS | GRC_RDY_OVRD;
- I915_WRITE(BXT_PORT_REF_DW8(phy), val);
+ intel_de_write(dev_priv, BXT_PORT_REF_DW8(phy), val);
}
if (phy_info->reset_delay)
udelay(phy_info->reset_delay);
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
val |= COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
}
void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -452,13 +457,13 @@ void bxt_ddi_phy_uninit(struct drm_i915_private *dev_priv, enum dpio_phy phy)
phy_info = bxt_get_phy_info(dev_priv, phy);
- val = I915_READ(BXT_PHY_CTL_FAMILY(phy));
+ val = intel_de_read(dev_priv, BXT_PHY_CTL_FAMILY(phy));
val &= ~COMMON_RESET_DIS;
- I915_WRITE(BXT_PHY_CTL_FAMILY(phy), val);
+ intel_de_write(dev_priv, BXT_PHY_CTL_FAMILY(phy), val);
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
val &= ~phy_info->pwron_mask;
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON, val);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON, val);
}
void bxt_ddi_phy_init(struct drm_i915_private *dev_priv, enum dpio_phy phy)
@@ -496,7 +501,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
va_list args;
u32 val;
- val = I915_READ(reg);
+ val = intel_de_read(dev_priv, reg);
if ((val & mask) == expected)
return true;
@@ -504,7 +509,7 @@ __phy_reg_verify_state(struct drm_i915_private *dev_priv, enum dpio_phy phy,
vaf.fmt = reg_fmt;
vaf.va = &args;
- DRM_DEBUG_DRIVER("DDI PHY %d reg %pV [%08x] state mismatch: "
+ drm_dbg(&dev_priv->drm, "DDI PHY %d reg %pV [%08x] state mismatch: "
"current %08x, expected %08x (mask %08x)\n",
phy, &vaf, reg.reg, val, (val & ~mask) | expected,
mask);
@@ -599,7 +604,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
/*
* Note that on CHV this flag is called UPAR, but has
@@ -609,7 +615,8 @@ void bxt_ddi_phy_set_lane_optim_mask(struct intel_encoder *encoder,
if (lane_lat_optim_mask & BIT(lane))
val |= LATENCY_OPTIM;
- I915_WRITE(BXT_PORT_TX_DW14_LN(phy, ch, lane), val);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW14_LN(phy, ch, lane),
+ val);
}
}
@@ -627,7 +634,8 @@ bxt_ddi_phy_get_lane_lat_optim_mask(struct intel_encoder *encoder)
mask = 0;
for (lane = 0; lane < 4; lane++) {
- u32 val = I915_READ(BXT_PORT_TX_DW14_LN(phy, ch, lane));
+ u32 val = intel_de_read(dev_priv,
+ BXT_PORT_TX_DW14_LN(phy, ch, lane));
if (val & LATENCY_OPTIM)
mask |= BIT(lane);
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
index c75e34d87111..2d47f1f756a2 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c
@@ -45,6 +45,22 @@
* commit phase.
*/
+struct intel_dpll_mgr {
+ const struct dpll_info *dpll_info;
+
+ bool (*get_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*put_dplls)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+ void (*update_active_dpll)(struct intel_atomic_state *state,
+ struct intel_crtc *crtc,
+ struct intel_encoder *encoder);
+ void (*update_ref_clks)(struct drm_i915_private *i915);
+ void (*dump_hw_state)(struct drm_i915_private *dev_priv,
+ const struct intel_dpll_hw_state *hw_state);
+};
+
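Editor's note: the definition of struct intel_dpll_mgr moves above its users so each platform's PLL table and manager can sit next to the callbacks they reference. The shape is a plain C dispatch table with optional hooks; a minimal standalone sketch of the same pattern (all names here are illustrative, not the kernel's):

	#include <stdio.h>

	/* Hypothetical stand-ins for the kernel types; illustration only. */
	struct pll_mgr {
		const char *name;
		int (*get_freq)(int ref_khz);	/* optional hook, may be NULL */
	};

	static int demo_get_freq(int ref_khz)
	{
		return ref_khz * 2;
	}

	static const struct pll_mgr demo_mgr = {
		.name = "demo",
		.get_freq = demo_get_freq,
	};

	int main(void)
	{
		/* Callers check optional hooks before dispatching. */
		if (demo_mgr.get_freq)
			printf("%s: %d kHz\n", demo_mgr.name,
			       demo_mgr.get_freq(24000));
		return 0;
	}
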
static void
intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
struct intel_shared_dpll_state *shared_dpll)
@@ -52,8 +68,8 @@ intel_atomic_duplicate_dpll_state(struct drm_i915_private *dev_priv,
enum intel_dpll_id i;
/* Copy shared dpll state */
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
+ struct intel_shared_dpll *pll = &dev_priv->dpll.shared_dplls[i];
shared_dpll[i] = pll->state;
}
@@ -88,7 +104,7 @@ struct intel_shared_dpll *
intel_get_shared_dpll_by_id(struct drm_i915_private *dev_priv,
enum intel_dpll_id id)
{
- return &dev_priv->shared_dplls[id];
+ return &dev_priv->dpll.shared_dplls[id];
}
/**
@@ -103,11 +119,14 @@ enum intel_dpll_id
intel_get_shared_dpll_id(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- if (WARN_ON(pll < dev_priv->shared_dplls||
- pll > &dev_priv->shared_dplls[dev_priv->num_shared_dpll]))
+ long pll_idx = pll - dev_priv->dpll.shared_dplls;
+
+ if (drm_WARN_ON(&dev_priv->drm,
+ pll_idx < 0 ||
+ pll_idx >= dev_priv->dpll.num_shared_dpll))
return -1;
- return (enum intel_dpll_id) (pll - dev_priv->shared_dplls);
+ return pll_idx;
}
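Editor's note: the new bounds check derives the index by pointer difference and validates it as a signed quantity, where the old code compared raw pointers and wrongly accepted one element past the end of the array. A standalone sketch of the idiom, assuming a fixed-size array:

	#include <stdio.h>

	static int plls[4];

	/*
	 * Derive the index by pointer difference, then validate it as a
	 * signed quantity; idx == 4 (one past the end) is rejected.
	 */
	static long pll_index(const int *pll)
	{
		long idx = pll - plls;

		if (idx < 0 || idx >= 4)
			return -1;
		return idx;
	}

	int main(void)
	{
		printf("%ld %ld\n", pll_index(&plls[2]), pll_index(plls + 4));
		return 0;
	}
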
/* For ILK+ */
@@ -118,7 +137,8 @@ void assert_shared_dpll(struct drm_i915_private *dev_priv,
bool cur_state;
struct intel_dpll_hw_state hw_state;
- if (WARN(!pll, "asserting DPLL %s with no DPLL\n", onoff(state)))
+ if (drm_WARN(&dev_priv->drm, !pll,
+ "asserting DPLL %s with no DPLL\n", onoff(state)))
return;
cur_state = pll->info->funcs->get_hw_state(dev_priv, pll, &hw_state);
@@ -140,19 +160,19 @@ void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll = crtc_state->shared_dpll;
- if (WARN_ON(pll == NULL))
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
- WARN_ON(!pll->state.crtc_mask);
+ mutex_lock(&dev_priv->dpll.lock);
+ drm_WARN_ON(&dev_priv->drm, !pll->state.crtc_mask);
if (!pll->active_mask) {
- DRM_DEBUG_DRIVER("setting up %s\n", pll->info->name);
- WARN_ON(pll->on);
+ drm_dbg(&dev_priv->drm, "setting up %s\n", pll->info->name);
+ drm_WARN_ON(&dev_priv->drm, pll->on);
assert_shared_dpll_disabled(dev_priv, pll);
pll->info->funcs->prepare(dev_priv, pll);
}
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -169,35 +189,36 @@ void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state)
unsigned int crtc_mask = drm_crtc_mask(&crtc->base);
unsigned int old_mask;
- if (WARN_ON(pll == NULL))
+ if (drm_WARN_ON(&dev_priv->drm, pll == NULL))
return;
- mutex_lock(&dev_priv->dpll_lock);
+ mutex_lock(&dev_priv->dpll.lock);
old_mask = pll->active_mask;
- if (WARN_ON(!(pll->state.crtc_mask & crtc_mask)) ||
- WARN_ON(pll->active_mask & crtc_mask))
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->state.crtc_mask & crtc_mask)) ||
+ drm_WARN_ON(&dev_priv->drm, pll->active_mask & crtc_mask))
goto out;
pll->active_mask |= crtc_mask;
- DRM_DEBUG_KMS("enable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "enable %s (active %x, on? %d) for crtc %d\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
if (old_mask) {
- WARN_ON(!pll->on);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
assert_shared_dpll_enabled(dev_priv, pll);
goto out;
}
- WARN_ON(pll->on);
+ drm_WARN_ON(&dev_priv->drm, pll->on);
- DRM_DEBUG_KMS("enabling %s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "enabling %s\n", pll->info->name);
pll->info->funcs->enable(dev_priv, pll);
pll->on = true;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
/**
@@ -220,27 +241,28 @@ void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state)
if (pll == NULL)
return;
- mutex_lock(&dev_priv->dpll_lock);
- if (WARN_ON(!(pll->active_mask & crtc_mask)))
+ mutex_lock(&dev_priv->dpll.lock);
+ if (drm_WARN_ON(&dev_priv->drm, !(pll->active_mask & crtc_mask)))
goto out;
- DRM_DEBUG_KMS("disable %s (active %x, on? %d) for crtc %d\n",
- pll->info->name, pll->active_mask, pll->on,
- crtc->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "disable %s (active %x, on? %d) for crtc %d\n",
+ pll->info->name, pll->active_mask, pll->on,
+ crtc->base.base.id);
assert_shared_dpll_enabled(dev_priv, pll);
- WARN_ON(!pll->on);
+ drm_WARN_ON(&dev_priv->drm, !pll->on);
pll->active_mask &= ~crtc_mask;
if (pll->active_mask)
goto out;
- DRM_DEBUG_KMS("disabling %s\n", pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "disabling %s\n", pll->info->name);
pll->info->funcs->disable(dev_priv, pll);
pll->on = false;
out:
- mutex_unlock(&dev_priv->dpll_lock);
+ mutex_unlock(&dev_priv->dpll.lock);
}
static struct intel_shared_dpll *
@@ -256,10 +278,10 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
shared_dpll = intel_atomic_get_shared_dpll_state(&state->base);
- WARN_ON(dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
+ drm_WARN_ON(&dev_priv->drm, dpll_mask & ~(BIT(I915_NUM_PLLS) - 1));
for_each_set_bit(i, &dpll_mask, I915_NUM_PLLS) {
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
/* Only want to check enabled timings first */
if (shared_dpll[i].crtc_mask == 0) {
@@ -271,20 +293,21 @@ intel_find_shared_dpll(struct intel_atomic_state *state,
if (memcmp(pll_state,
&shared_dpll[i].hw_state,
sizeof(*pll_state)) == 0) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name,
- shared_dpll[i].crtc_mask,
- pll->active_mask);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] sharing existing %s (crtc mask 0x%08x, active %x)\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name,
+ shared_dpll[i].crtc_mask,
+ pll->active_mask);
return pll;
}
}
/* Ok no matching timings, maybe there's a free one? */
if (unused_pll) {
- DRM_DEBUG_KMS("[CRTC:%d:%s] allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- unused_pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ unused_pll->info->name);
return unused_pll;
}
@@ -297,6 +320,7 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
const struct intel_shared_dpll *pll,
const struct intel_dpll_hw_state *pll_state)
{
+ struct drm_i915_private *i915 = to_i915(state->base.dev);
struct intel_shared_dpll_state *shared_dpll;
const enum intel_dpll_id id = pll->info->id;
@@ -305,8 +329,8 @@ intel_reference_shared_dpll(struct intel_atomic_state *state,
if (shared_dpll[id].crtc_mask == 0)
shared_dpll[id].hw_state = *pll_state;
- DRM_DEBUG_DRIVER("using %s for pipe %c\n", pll->info->name,
- pipe_name(crtc->pipe));
+ drm_dbg(&i915->drm, "using %s for pipe %c\n", pll->info->name,
+ pipe_name(crtc->pipe));
shared_dpll[id].crtc_mask |= 1 << crtc->pipe;
}
@@ -357,9 +381,9 @@ void intel_shared_dpll_swap_state(struct intel_atomic_state *state)
if (!state->dpll_set)
return;
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+ for (i = 0; i < dev_priv->dpll.num_shared_dpll; i++) {
struct intel_shared_dpll *pll =
- &dev_priv->shared_dplls[i];
+ &dev_priv->dpll.shared_dplls[i];
swap(pll->state, shared_dpll[i]);
}
@@ -378,10 +402,10 @@ static bool ibx_pch_dpll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(PCH_DPLL(id));
+ val = intel_de_read(dev_priv, PCH_DPLL(id));
hw_state->dpll = val;
- hw_state->fp0 = I915_READ(PCH_FP0(id));
- hw_state->fp1 = I915_READ(PCH_FP1(id));
+ hw_state->fp0 = intel_de_read(dev_priv, PCH_FP0(id));
+ hw_state->fp1 = intel_de_read(dev_priv, PCH_FP1(id));
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -393,8 +417,8 @@ static void ibx_pch_dpll_prepare(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(PCH_FP0(id), pll->state.hw_state.fp0);
- I915_WRITE(PCH_FP1(id), pll->state.hw_state.fp1);
+ intel_de_write(dev_priv, PCH_FP0(id), pll->state.hw_state.fp0);
+ intel_de_write(dev_priv, PCH_FP1(id), pll->state.hw_state.fp1);
}
static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
@@ -404,7 +428,7 @@ static void ibx_assert_pch_refclk_enabled(struct drm_i915_private *dev_priv)
I915_STATE_WARN_ON(!(HAS_PCH_IBX(dev_priv) || HAS_PCH_CPT(dev_priv)));
- val = I915_READ(PCH_DREF_CONTROL);
+ val = intel_de_read(dev_priv, PCH_DREF_CONTROL);
enabled = !!(val & (DREF_SSC_SOURCE_MASK | DREF_NONSPREAD_SOURCE_MASK |
DREF_SUPERSPREAD_SOURCE_MASK));
I915_STATE_WARN(!enabled, "PCH refclk assertion failure, should be active but is disabled\n");
@@ -418,10 +442,10 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
/* PCH refclock must be enabled first */
ibx_assert_pch_refclk_enabled(dev_priv);
- I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
/* Wait for the clocks to stabilize. */
- POSTING_READ(PCH_DPLL(id));
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(150);
/* The pixel multiplier can only be updated once the
@@ -429,8 +453,8 @@ static void ibx_pch_dpll_enable(struct drm_i915_private *dev_priv,
*
* So write it again.
*/
- I915_WRITE(PCH_DPLL(id), pll->state.hw_state.dpll);
- POSTING_READ(PCH_DPLL(id));
+ intel_de_write(dev_priv, PCH_DPLL(id), pll->state.hw_state.dpll);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(200);
}
@@ -439,8 +463,8 @@ static void ibx_pch_dpll_disable(struct drm_i915_private *dev_priv,
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(PCH_DPLL(id), 0);
- POSTING_READ(PCH_DPLL(id));
+ intel_de_write(dev_priv, PCH_DPLL(id), 0);
+ intel_de_posting_read(dev_priv, PCH_DPLL(id));
udelay(200);
}
@@ -457,11 +481,12 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
if (HAS_PCH_IBX(dev_priv)) {
/* Ironlake PCH has a fixed PLL->PCH pipe mapping. */
i = (enum intel_dpll_id) crtc->pipe;
- pll = &dev_priv->shared_dplls[i];
+ pll = &dev_priv->dpll.shared_dplls[i];
- DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name,
- pll->info->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name,
+ pll->info->name);
} else {
pll = intel_find_shared_dpll(state, crtc,
&crtc_state->dpll_hw_state,
@@ -484,12 +509,13 @@ static bool ibx_get_dpll(struct intel_atomic_state *state,
static void ibx_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
@@ -499,21 +525,34 @@ static const struct intel_shared_dpll_funcs ibx_pch_dpll_funcs = {
.get_hw_state = ibx_pch_dpll_get_hw_state,
};
+static const struct dpll_info pch_plls[] = {
+ { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
+ { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr pch_pll_mgr = {
+ .dpll_info = pch_plls,
+ .get_dplls = ibx_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .dump_hw_state = ibx_dump_hw_state,
+};
+
static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
const enum intel_dpll_id id = pll->info->id;
- I915_WRITE(WRPLL_CTL(id), pll->state.hw_state.wrpll);
- POSTING_READ(WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), pll->state.hw_state.wrpll);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
udelay(20);
}
static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll)
{
- I915_WRITE(SPLL_CTL, pll->state.hw_state.spll);
- POSTING_READ(SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, pll->state.hw_state.spll);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
udelay(20);
}
@@ -523,9 +562,9 @@ static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(WRPLL_CTL(id));
- I915_WRITE(WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
- POSTING_READ(WRPLL_CTL(id));
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
+ intel_de_write(dev_priv, WRPLL_CTL(id), val & ~WRPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, WRPLL_CTL(id));
/*
* Try to set up the PCH reference clock once all DPLLs
@@ -541,9 +580,9 @@ static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(SPLL_CTL);
- I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
- POSTING_READ(SPLL_CTL);
+ val = intel_de_read(dev_priv, SPLL_CTL);
+ intel_de_write(dev_priv, SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, SPLL_CTL);
/*
* Try to set up the PCH reference clock once all DPLLs
@@ -566,7 +605,7 @@ static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(WRPLL_CTL(id));
+ val = intel_de_read(dev_priv, WRPLL_CTL(id));
hw_state->wrpll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -586,7 +625,7 @@ static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(SPLL_CTL);
+ val = intel_de_read(dev_priv, SPLL_CTL);
hw_state->spll = val;
intel_display_power_put(dev_priv, POWER_DOMAIN_DISPLAY_CORE, wakeref);
@@ -811,8 +850,8 @@ hsw_ddi_calculate_wrpll(int clock /* in Hz */,
}
static struct intel_shared_dpll *
-hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
+hsw_ddi_wrpll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
@@ -839,8 +878,47 @@ hsw_ddi_hdmi_get_dpll(struct intel_atomic_state *state,
return pll;
}
+static int hsw_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ int refclk;
+ int n, p, r;
+ u32 wrpll = pll->state.hw_state.wrpll;
+
+ switch (wrpll & WRPLL_REF_MASK) {
+ case WRPLL_REF_SPECIAL_HSW:
+ /* Muxed-SSC for BDW, non-SSC for non-ULT HSW. */
+ if (IS_HASWELL(dev_priv) && !IS_HSW_ULT(dev_priv)) {
+ refclk = dev_priv->dpll.ref_clks.nssc;
+ break;
+ }
+ /* fall through */
+ case WRPLL_REF_PCH_SSC:
+ /*
+ * We could calculate spread here, but our checking
+ * code only cares about 5% accuracy, and spread is a max of
+ * 0.5% downspread.
+ */
+ refclk = dev_priv->dpll.ref_clks.ssc;
+ break;
+ case WRPLL_REF_LCPLL:
+ refclk = 2700000;
+ break;
+ default:
+ MISSING_CASE(wrpll);
+ return 0;
+ }
+
+ r = wrpll & WRPLL_DIVIDER_REF_MASK;
+ p = (wrpll & WRPLL_DIVIDER_POST_MASK) >> WRPLL_DIVIDER_POST_SHIFT;
+ n = (wrpll & WRPLL_DIVIDER_FB_MASK) >> WRPLL_DIVIDER_FB_SHIFT;
+
+ /* Convert to KHz, p & r have a fixed point portion */
+ return (refclk * n / 10) / (p * r) * 2;
+}
+
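Editor's note: a minimal sketch of the readback expression above as a pure helper, handy for eyeballing WRPLL register dumps. Per the driver's own comment, p and r carry a fixed-point portion, which the /10 and trailing *2 absorb; the divider values in main() are made up, only the 2700000 kHz LCPLL reference comes from the switch above.

	#include <stdio.h>

	/* Mirrors the driver expression; refclk is in kHz. */
	static int wrpll_port_clock_khz(int refclk_khz, int n, int p, int r)
	{
		return (refclk_khz * n / 10) / (p * r) * 2;
	}

	int main(void)
	{
		/* LCPLL reference, hypothetical divider fields. */
		printf("%d kHz\n", wrpll_port_clock_khz(2700000, 20, 2, 100));
		return 0;
	}
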
static struct intel_shared_dpll *
-hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
+hsw_ddi_lcpll_get_dpll(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
@@ -858,7 +936,8 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
pll_id = DPLL_ID_LCPLL_2700;
break;
default:
- DRM_DEBUG_KMS("Invalid clock for DP: %d\n", clock);
+ drm_dbg_kms(&dev_priv->drm, "Invalid clock for DP: %d\n",
+ clock);
return NULL;
}
@@ -870,6 +949,69 @@ hsw_ddi_dp_get_dpll(struct intel_crtc_state *crtc_state)
return pll;
}
+static int hsw_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->info->id) {
+ case DPLL_ID_LCPLL_810:
+ link_clock = 81000;
+ break;
+ case DPLL_ID_LCPLL_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_ID_LCPLL_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad port clock sel\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
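Editor's note: hsw_ddi_lcpll_get_dpll selects a PLL from port_clock / 2 and hsw_ddi_lcpll_get_freq multiplies the link rate back by 2, so the two stay consistent by construction. A tiny round-trip sketch over the three fixed rates from the switches above:

	#include <stdio.h>

	/* The three fixed HSW LCPLL link rates, in kHz. */
	static const int lcpll_link_khz[] = { 81000, 135000, 270000 };

	int main(void)
	{
		int i;

		for (i = 0; i < 3; i++) {
			int port_clock = lcpll_link_khz[i] * 2;	/* get_freq */
			int link = port_clock / 2;	/* get_dpll selection */

			printf("link %d kHz <-> port %d kHz\n",
			       link, port_clock);
		}
		return 0;
	}
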
+static struct intel_shared_dpll *
+hsw_ddi_spll_get_dpll(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
+{
+ struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+
+ if (WARN_ON(crtc_state->port_clock / 2 != 135000))
+ return NULL;
+
+ crtc_state->dpll_hw_state.spll = SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz |
+ SPLL_REF_MUXED_SSC;
+
+ return intel_find_shared_dpll(state, crtc, &crtc_state->dpll_hw_state,
+ BIT(DPLL_ID_SPLL));
+}
+
+static int hsw_ddi_spll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.spll & SPLL_FREQ_MASK) {
+ case SPLL_FREQ_810MHz:
+ link_clock = 81000;
+ break;
+ case SPLL_FREQ_1350MHz:
+ link_clock = 135000;
+ break;
+ case SPLL_FREQ_2700MHz:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "bad spll freq\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool hsw_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -881,23 +1023,14 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
memset(&crtc_state->dpll_hw_state, 0,
sizeof(crtc_state->dpll_hw_state));
- if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
- pll = hsw_ddi_hdmi_get_dpll(state, crtc);
- } else if (intel_crtc_has_dp_encoder(crtc_state)) {
- pll = hsw_ddi_dp_get_dpll(crtc_state);
- } else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG)) {
- if (WARN_ON(crtc_state->port_clock / 2 != 135000))
- return false;
-
- crtc_state->dpll_hw_state.spll =
- SPLL_PLL_ENABLE | SPLL_FREQ_1350MHz | SPLL_REF_MUXED_SSC;
-
- pll = intel_find_shared_dpll(state, crtc,
- &crtc_state->dpll_hw_state,
- BIT(DPLL_ID_SPLL));
- } else {
+ if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI))
+ pll = hsw_ddi_wrpll_get_dpll(state, crtc);
+ else if (intel_crtc_has_dp_encoder(crtc_state))
+ pll = hsw_ddi_lcpll_get_dpll(crtc_state);
+ else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_ANALOG))
+ pll = hsw_ddi_spll_get_dpll(state, crtc);
+ else
return false;
- }
if (!pll)
return false;
@@ -910,23 +1043,35 @@ static bool hsw_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void hsw_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 135000;
+ /* Non-SSC is only used on non-ULT HSW. */
+ if (intel_de_read(i915, FUSE_STRAP3) & HSW_REF_CLK_SELECT)
+ i915->dpll.ref_clks.nssc = 24000;
+ else
+ i915->dpll.ref_clks.nssc = 135000;
+}
+
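Editor's note: the new update_ref_clks hook caches both reference clocks once, and the .get_freq hooks then consult dpll.ref_clks instead of re-reading FUSE_STRAP3. A standalone sketch of that cache-then-consume split, reusing the 135000/24000 kHz values from the function above (the struct and function names are illustrative):

	#include <stdbool.h>
	#include <stdio.h>

	/* Illustrative mirror of the dpll.ref_clks pair, in kHz. */
	struct ref_clks {
		int ssc;
		int nssc;
	};

	static void update_ref_clks(struct ref_clks *rc, bool strap_24mhz)
	{
		rc->ssc = 135000;
		/* Non-SSC reference depends on a fuse strap, as in the patch. */
		rc->nssc = strap_24mhz ? 24000 : 135000;
	}

	int main(void)
	{
		struct ref_clks rc;

		update_ref_clks(&rc, true);
		printf("ssc=%d kHz nssc=%d kHz\n", rc.ssc, rc.nssc);
		return 0;
	}
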
static void hsw_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
- hw_state->wrpll, hw_state->spll);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
+ hw_state->wrpll, hw_state->spll);
}
static const struct intel_shared_dpll_funcs hsw_ddi_wrpll_funcs = {
.enable = hsw_ddi_wrpll_enable,
.disable = hsw_ddi_wrpll_disable,
.get_hw_state = hsw_ddi_wrpll_get_hw_state,
+ .get_freq = hsw_ddi_wrpll_get_freq,
};
static const struct intel_shared_dpll_funcs hsw_ddi_spll_funcs = {
.enable = hsw_ddi_spll_enable,
.disable = hsw_ddi_spll_disable,
.get_hw_state = hsw_ddi_spll_get_hw_state,
+ .get_freq = hsw_ddi_spll_get_freq,
};
static void hsw_ddi_lcpll_enable(struct drm_i915_private *dev_priv,
@@ -950,6 +1095,25 @@ static const struct intel_shared_dpll_funcs hsw_ddi_lcpll_funcs = {
.enable = hsw_ddi_lcpll_enable,
.disable = hsw_ddi_lcpll_disable,
.get_hw_state = hsw_ddi_lcpll_get_hw_state,
+ .get_freq = hsw_ddi_lcpll_get_freq,
+};
+
+static const struct dpll_info hsw_plls[] = {
+ { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
+ { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
+ { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
+ { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
+ { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
+ { },
+};
+
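Editor's note: the dpll_info tables are terminated by an empty { } sentinel rather than an explicit count; the driver's init loop (not shown in this hunk) walks entries until it hits a NULL name. A minimal sketch of the idiom (the walker here is illustrative, not the driver's):

	#include <stdio.h>

	struct info {
		const char *name;
		int id;
	};

	static const struct info plls[] = {
		{ "WRPLL 1", 0 },
		{ "WRPLL 2", 1 },
		{ },	/* sentinel: name == NULL terminates the walk */
	};

	int main(void)
	{
		const struct info *i;

		for (i = plls; i->name; i++)
			printf("%d: %s\n", i->id, i->name);
		return 0;
	}
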
+static const struct intel_dpll_mgr hsw_pll_mgr = {
+ .dpll_info = hsw_plls,
+ .get_dplls = hsw_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = hsw_update_dpll_ref_clks,
+ .dump_hw_state = hsw_dump_hw_state,
};
struct skl_dpll_regs {
@@ -989,15 +1153,15 @@ static void skl_ddi_pll_write_ctrl1(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
u32 val;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
val &= ~(DPLL_CTRL1_HDMI_MODE(id) |
DPLL_CTRL1_SSC(id) |
DPLL_CTRL1_LINK_RATE_MASK(id));
val |= pll->state.hw_state.ctrl1 << (id * 6);
- I915_WRITE(DPLL_CTRL1, val);
- POSTING_READ(DPLL_CTRL1);
+ intel_de_write(dev_priv, DPLL_CTRL1, val);
+ intel_de_posting_read(dev_priv, DPLL_CTRL1);
}
static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1008,17 +1172,17 @@ static void skl_ddi_pll_enable(struct drm_i915_private *dev_priv,
skl_ddi_pll_write_ctrl1(dev_priv, pll);
- I915_WRITE(regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
- I915_WRITE(regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
- POSTING_READ(regs[id].cfgcr1);
- POSTING_READ(regs[id].cfgcr2);
+ intel_de_write(dev_priv, regs[id].cfgcr1, pll->state.hw_state.cfgcr1);
+ intel_de_write(dev_priv, regs[id].cfgcr2, pll->state.hw_state.cfgcr2);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr1);
+ intel_de_posting_read(dev_priv, regs[id].cfgcr2);
/* the enable bit is always bit 31 */
- I915_WRITE(regs[id].ctl,
- I915_READ(regs[id].ctl) | LCPLL_PLL_ENABLE);
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) | LCPLL_PLL_ENABLE);
if (intel_de_wait_for_set(dev_priv, DPLL_STATUS, DPLL_LOCK(id), 5))
- DRM_ERROR("DPLL %d not locked\n", id);
+ drm_err(&dev_priv->drm, "DPLL %d not locked\n", id);
}
static void skl_ddi_dpll0_enable(struct drm_i915_private *dev_priv,
@@ -1034,9 +1198,9 @@ static void skl_ddi_pll_disable(struct drm_i915_private *dev_priv,
const enum intel_dpll_id id = pll->info->id;
/* the enable bit is always bit 31 */
- I915_WRITE(regs[id].ctl,
- I915_READ(regs[id].ctl) & ~LCPLL_PLL_ENABLE);
- POSTING_READ(regs[id].ctl);
+ intel_de_write(dev_priv, regs[id].ctl,
+ intel_de_read(dev_priv, regs[id].ctl) & ~LCPLL_PLL_ENABLE);
+ intel_de_posting_read(dev_priv, regs[id].ctl);
}
static void skl_ddi_dpll0_disable(struct drm_i915_private *dev_priv,
@@ -1061,17 +1225,17 @@ static bool skl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(regs[id].ctl);
+ val = intel_de_read(dev_priv, regs[id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
goto out;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CTRL1_HDMI_MODE(id)) {
- hw_state->cfgcr1 = I915_READ(regs[id].cfgcr1);
- hw_state->cfgcr2 = I915_READ(regs[id].cfgcr2);
+ hw_state->cfgcr1 = intel_de_read(dev_priv, regs[id].cfgcr1);
+ hw_state->cfgcr2 = intel_de_read(dev_priv, regs[id].cfgcr2);
}
ret = true;
@@ -1099,11 +1263,11 @@ static bool skl_ddi_dpll0_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
/* DPLL0 is always enabled since it drives CDCLK */
- val = I915_READ(regs[id].ctl);
- if (WARN_ON(!(val & LCPLL_PLL_ENABLE)))
+ val = intel_de_read(dev_priv, regs[id].ctl);
+ if (drm_WARN_ON(&dev_priv->drm, !(val & LCPLL_PLL_ENABLE)))
goto out;
- val = I915_READ(DPLL_CTRL1);
+ val = intel_de_read(dev_priv, DPLL_CTRL1);
hw_state->ctrl1 = (val >> (id * 6)) & 0x3f;
ret = true;
@@ -1222,6 +1386,7 @@ struct skl_wrpll_params {
static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
u64 afe_clock,
+ int ref_clock,
u64 central_freq,
u32 p0, u32 p1, u32 p2)
{
@@ -1281,14 +1446,15 @@ static void skl_wrpll_params_populate(struct skl_wrpll_params *params,
* Intermediate values are in Hz.
* Divide by MHz to match bspec
*/
- params->dco_integer = div_u64(dco_freq, 24 * MHz(1));
+ params->dco_integer = div_u64(dco_freq, ref_clock * KHz(1));
params->dco_fraction =
- div_u64((div_u64(dco_freq, 24) -
+ div_u64((div_u64(dco_freq, ref_clock / KHz(1)) -
params->dco_integer * MHz(1)) * 0x8000, MHz(1));
}
static bool
skl_ddi_calculate_wrpll(int clock /* in Hz */,
+ int ref_clock,
struct skl_wrpll_params *wrpll_params)
{
u64 afe_clock = clock * 5; /* AFE Clock is 5x Pixel clock */
@@ -1354,14 +1520,15 @@ skip_remaining_dividers:
*/
p0 = p1 = p2 = 0;
skl_wrpll_get_multipliers(ctx.p, &p0, &p1, &p2);
- skl_wrpll_params_populate(wrpll_params, afe_clock, ctx.central_freq,
- p0, p1, p2);
+ skl_wrpll_params_populate(wrpll_params, afe_clock, ref_clock,
+ ctx.central_freq, p0, p1, p2);
return true;
}
static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
u32 ctrl1, cfgcr1, cfgcr2;
struct skl_wrpll_params wrpll_params = { 0, };
@@ -1374,6 +1541,7 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
ctrl1 |= DPLL_CTRL1_HDMI_MODE(0);
if (!skl_ddi_calculate_wrpll(crtc_state->port_clock * 1000,
+ i915->dpll.ref_clks.nssc,
&wrpll_params))
return false;
@@ -1396,6 +1564,64 @@ static bool skl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ int ref_clock = i915->dpll.ref_clks.nssc;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr2 & DPLL_CFGCR2_PDIV_MASK;
+ p2 = pll_state->cfgcr2 & DPLL_CFGCR2_KDIV_MASK;
+
+ if (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr2 & DPLL_CFGCR2_QDIV_RATIO_MASK) >> 8;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR2_PDIV_1:
+ p0 = 1;
+ break;
+ case DPLL_CFGCR2_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR2_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR2_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR2_KDIV_5:
+ p2 = 5;
+ break;
+ case DPLL_CFGCR2_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR2_KDIV_3:
+ p2 = 3;
+ break;
+ case DPLL_CFGCR2_KDIV_1:
+ p2 = 1;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr1 & DPLL_CFGCR1_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += ((pll_state->cfgcr1 & DPLL_CFGCR1_DCO_FRACTION_MASK) >> 9) *
+ ref_clock / 0x8000;
+
+ if (WARN_ON(p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
+
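Editor's note: the DCO reconstruction above is the inverse of skl_wrpll_params_populate, with the fraction field (after extraction from cfgcr1) scaled by 0x8000. A worked standalone check, assuming a 24 MHz non-SSC reference and a 594 MHz HDMI port clock, and assuming skl_wrpll_get_multipliers splits an overall divider of 3 as p0=3, p1=1, p2=1:

	#include <stdio.h>
	#include <stdint.h>

	/* Inverse of the populate step, mirroring the readback above. */
	static uint32_t skl_dco_khz(uint32_t ref_khz, uint32_t dco_int,
				    uint32_t dco_frac)
	{
		/* Fraction is a 15-bit value scaled by 0x8000 (2^15). */
		return dco_int * ref_khz + dco_frac * ref_khz / 0x8000;
	}

	int main(void)
	{
		/* 594 MHz: DCO = 594000 * 3 * 5 = 8910000 kHz = 371.25 * 24000 */
		uint32_t dco = skl_dco_khz(24000, 371, 0x2000);

		printf("dco=%u kHz port=%u kHz\n", dco, dco / (3 * 1 * 1 * 5));
		return 0;
	}

This prints dco=8910000 and port=594000, round-tripping the encode.
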
static bool
skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -1436,25 +1662,62 @@ skl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int skl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch ((pll->state.hw_state.ctrl1 &
+ DPLL_CTRL1_LINK_RATE_MASK(0)) >>
+ DPLL_CTRL1_LINK_RATE_SHIFT(0)) {
+ case DPLL_CTRL1_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CTRL1_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
+
static bool skl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc->base.dev);
struct intel_shared_dpll *pll;
bool bret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = skl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not get HDMI pll dividers.\n");
return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = skl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not set DP dpll HW state.\n");
return false;
}
} else {
@@ -1482,10 +1745,29 @@ static bool skl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int skl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+ * ctrl1 register is already shifted for each pll, just use 0 to get
+ * the internal shift for each field
+ */
+ if (pll->state.hw_state.ctrl1 & DPLL_CTRL1_HDMI_MODE(0))
+ return skl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return skl_ddi_lcpll_get_freq(i915, pll);
+}
+
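Editor's note: skl_ddi_pll_get_hw_state stores ctrl1 already shifted down to the id-0 position, which is why the dispatch above can test the index-0 HDMI-mode bit for any PLL. A sketch of the 6-bits-per-id packing, matching the (val >> (id * 6)) & 0x3f extraction used in the get_hw_state hooks (the register value here is hypothetical):

	#include <stdio.h>
	#include <stdint.h>

	/* Each PLL owns a 6-bit field in DPLL_CTRL1 at bit position id * 6. */
	static uint32_t ctrl1_for_id(uint32_t ctrl1, int id)
	{
		return (ctrl1 >> (id * 6)) & 0x3f;
	}

	int main(void)
	{
		uint32_t ctrl1 = 0x2a << 6;	/* only DPLL 1 programmed */

		printf("dpll1 field = 0x%02x\n", ctrl1_for_id(ctrl1, 1));
		return 0;
	}
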
+static void skl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void skl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: "
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
"ctrl1: 0x%x, cfgcr1: 0x%x, cfgcr2: 0x%x\n",
hw_state->ctrl1,
hw_state->cfgcr1,
@@ -1496,12 +1778,30 @@ static const struct intel_shared_dpll_funcs skl_ddi_pll_funcs = {
.enable = skl_ddi_pll_enable,
.disable = skl_ddi_pll_disable,
.get_hw_state = skl_ddi_pll_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
};
static const struct intel_shared_dpll_funcs skl_ddi_dpll0_funcs = {
.enable = skl_ddi_dpll0_enable,
.disable = skl_ddi_dpll0_disable,
.get_hw_state = skl_ddi_dpll0_get_hw_state,
+ .get_freq = skl_ddi_pll_get_freq,
+};
+
+static const struct dpll_info skl_plls[] = {
+ { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
+ { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
+ { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
+ { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
+ { },
+};
+
+static const struct intel_dpll_mgr skl_pll_mgr = {
+ .dpll_info = skl_plls,
+ .get_dplls = skl_get_dpll,
+ .put_dplls = intel_put_dpll,
+ .update_ref_clks = skl_update_dpll_ref_clks,
+ .dump_hw_state = skl_dump_hw_state,
};
static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
@@ -1515,113 +1815,114 @@ static void bxt_ddi_pll_enable(struct drm_i915_private *dev_priv,
bxt_port_to_phy_channel(dev_priv, port, &phy, &ch);
/* Non-SSC reference */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_REF_SEL;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_POWER_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
- if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) &
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
PORT_PLL_POWER_STATE), 200))
- DRM_ERROR("Power state not set for PLL:%d\n", port);
+ drm_err(&dev_priv->drm,
+ "Power state not set for PLL:%d\n", port);
}
/* Disable 10 bit clock */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Write P1 & P2 */
- temp = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
temp &= ~(PORT_PLL_P1_MASK | PORT_PLL_P2_MASK);
temp |= pll->state.hw_state.ebb0;
- I915_WRITE(BXT_PORT_PLL_EBB_0(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch), temp);
/* Write M2 integer */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
temp &= ~PORT_PLL_M2_MASK;
temp |= pll->state.hw_state.pll0;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 0), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 0), temp);
/* Write N */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
temp &= ~PORT_PLL_N_MASK;
temp |= pll->state.hw_state.pll1;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 1), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 1), temp);
/* Write M2 fraction */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
temp &= ~PORT_PLL_M2_FRAC_MASK;
temp |= pll->state.hw_state.pll2;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 2), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 2), temp);
/* Write M2 fraction enable */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
temp &= ~PORT_PLL_M2_FRAC_ENABLE;
temp |= pll->state.hw_state.pll3;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 3), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 3), temp);
/* Write coeff */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
temp &= ~PORT_PLL_PROP_COEFF_MASK;
temp &= ~PORT_PLL_INT_COEFF_MASK;
temp &= ~PORT_PLL_GAIN_CTL_MASK;
temp |= pll->state.hw_state.pll6;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 6), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 6), temp);
/* Write calibration val */
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
temp &= ~PORT_PLL_TARGET_CNT_MASK;
temp |= pll->state.hw_state.pll8;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 8), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 8), temp);
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
temp &= ~PORT_PLL_LOCK_THRESHOLD_MASK;
temp |= pll->state.hw_state.pll9;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 9), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 9), temp);
- temp = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
temp &= ~PORT_PLL_DCO_AMP_OVR_EN_H;
temp &= ~PORT_PLL_DCO_AMP_MASK;
temp |= pll->state.hw_state.pll10;
- I915_WRITE(BXT_PORT_PLL(phy, ch, 10), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL(phy, ch, 10), temp);
/* Recalibrate with new settings */
- temp = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
temp |= PORT_PLL_RECALIBRATE;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
temp &= ~PORT_PLL_10BIT_CLK_ENABLE;
temp |= pll->state.hw_state.ebb4;
- I915_WRITE(BXT_PORT_PLL_EBB_4(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch), temp);
/* Enable PLL */
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp |= PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
- if (wait_for_us((I915_READ(BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
+ if (wait_for_us((intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK),
200))
- DRM_ERROR("PLL %d not locked\n", port);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", port);
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_TX_DW5_LN0(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_TX_DW5_LN0(phy, ch));
temp |= DCC_DELAY_RANGE_2;
- I915_WRITE(BXT_PORT_TX_DW5_GRP(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_TX_DW5_GRP(phy, ch), temp);
}
/*
* While we write to the group register to program all lanes at once we
* can read only lane registers and we pick lanes 0/1 for that.
*/
- temp = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
+ temp = intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN01(phy, ch));
temp &= ~LANE_STAGGER_MASK;
temp &= ~LANESTAGGER_STRAP_OVRD;
temp |= pll->state.hw_state.pcsdw12;
- I915_WRITE(BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
+ intel_de_write(dev_priv, BXT_PORT_PCS_DW12_GRP(phy, ch), temp);
}
static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
@@ -1630,19 +1931,20 @@ static void bxt_ddi_pll_disable(struct drm_i915_private *dev_priv,
enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */
u32 temp;
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
- POSTING_READ(BXT_PORT_PLL_ENABLE(port));
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_posting_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (IS_GEMINILAKE(dev_priv)) {
- temp = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ temp = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
temp &= ~PORT_PLL_POWER_ENABLE;
- I915_WRITE(BXT_PORT_PLL_ENABLE(port), temp);
+ intel_de_write(dev_priv, BXT_PORT_PLL_ENABLE(port), temp);
- if (wait_for_us(!(I915_READ(BXT_PORT_PLL_ENABLE(port)) &
- PORT_PLL_POWER_STATE), 200))
- DRM_ERROR("Power state not reset for PLL:%d\n", port);
+ if (wait_for_us(!(intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port)) &
+ PORT_PLL_POWER_STATE), 200))
+ drm_err(&dev_priv->drm,
+ "Power state not reset for PLL:%d\n", port);
}
}
@@ -1666,40 +1968,40 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(BXT_PORT_PLL_ENABLE(port));
+ val = intel_de_read(dev_priv, BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
goto out;
- hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(phy, ch));
+ hw_state->ebb0 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_0(phy, ch));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
- hw_state->ebb4 = I915_READ(BXT_PORT_PLL_EBB_4(phy, ch));
+ hw_state->ebb4 = intel_de_read(dev_priv, BXT_PORT_PLL_EBB_4(phy, ch));
hw_state->ebb4 &= PORT_PLL_10BIT_CLK_ENABLE;
- hw_state->pll0 = I915_READ(BXT_PORT_PLL(phy, ch, 0));
+ hw_state->pll0 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 0));
hw_state->pll0 &= PORT_PLL_M2_MASK;
- hw_state->pll1 = I915_READ(BXT_PORT_PLL(phy, ch, 1));
+ hw_state->pll1 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 1));
hw_state->pll1 &= PORT_PLL_N_MASK;
- hw_state->pll2 = I915_READ(BXT_PORT_PLL(phy, ch, 2));
+ hw_state->pll2 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 2));
hw_state->pll2 &= PORT_PLL_M2_FRAC_MASK;
- hw_state->pll3 = I915_READ(BXT_PORT_PLL(phy, ch, 3));
+ hw_state->pll3 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 3));
hw_state->pll3 &= PORT_PLL_M2_FRAC_ENABLE;
- hw_state->pll6 = I915_READ(BXT_PORT_PLL(phy, ch, 6));
+ hw_state->pll6 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 6));
hw_state->pll6 &= PORT_PLL_PROP_COEFF_MASK |
PORT_PLL_INT_COEFF_MASK |
PORT_PLL_GAIN_CTL_MASK;
- hw_state->pll8 = I915_READ(BXT_PORT_PLL(phy, ch, 8));
+ hw_state->pll8 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 8));
hw_state->pll8 &= PORT_PLL_TARGET_CNT_MASK;
- hw_state->pll9 = I915_READ(BXT_PORT_PLL(phy, ch, 9));
+ hw_state->pll9 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 9));
hw_state->pll9 &= PORT_PLL_LOCK_THRESHOLD_MASK;
- hw_state->pll10 = I915_READ(BXT_PORT_PLL(phy, ch, 10));
+ hw_state->pll10 = intel_de_read(dev_priv, BXT_PORT_PLL(phy, ch, 10));
hw_state->pll10 &= PORT_PLL_DCO_AMP_OVR_EN_H |
PORT_PLL_DCO_AMP_MASK;
@@ -1708,11 +2010,14 @@ static bool bxt_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
* can read only lane registers. We configure all lanes the same way, so
* here just read out lanes 0/1 and output a note if lanes 2/3 differ.
*/
- hw_state->pcsdw12 = I915_READ(BXT_PORT_PCS_DW12_LN01(phy, ch));
- if (I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
- DRM_DEBUG_DRIVER("lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
- hw_state->pcsdw12,
- I915_READ(BXT_PORT_PCS_DW12_LN23(phy, ch)));
+ hw_state->pcsdw12 = intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN01(phy, ch));
+ if (intel_de_read(dev_priv, BXT_PORT_PCS_DW12_LN23(phy, ch)) != hw_state->pcsdw12)
+ drm_dbg(&dev_priv->drm,
+ "lane stagger config different for lane 01 (%08x) and 23 (%08x)\n",
+ hw_state->pcsdw12,
+ intel_de_read(dev_priv,
+ BXT_PORT_PCS_DW12_LN23(phy, ch)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
ret = true;
@@ -1751,6 +2056,7 @@ static bool
bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
struct bxt_clk_div *clk_div)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct dpll best_clock;
@@ -1760,9 +2066,9 @@ bxt_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state,
* i9xx_crtc_compute_clock
*/
if (!bxt_find_best_dpll(crtc_state, &best_clock)) {
- DRM_DEBUG_DRIVER("no PLL dividers found for clock %d pipe %c\n",
- crtc_state->port_clock,
- pipe_name(crtc->pipe));
+ drm_dbg(&i915->drm, "no PLL dividers found for clock %d pipe %c\n",
+ crtc_state->port_clock,
+ pipe_name(crtc->pipe));
return false;
}
@@ -1799,6 +2105,7 @@ static void bxt_ddi_dp_pll_dividers(struct intel_crtc_state *crtc_state,
static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
const struct bxt_clk_div *clk_div)
{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_dpll_hw_state *dpll_hw_state = &crtc_state->dpll_hw_state;
int clock = crtc_state->port_clock;
int vco = clk_div->vco;
@@ -1824,7 +2131,7 @@ static bool bxt_ddi_set_dpll_hw_state(struct intel_crtc_state *crtc_state,
gain_ctl = 1;
targ_cnt = 9;
} else {
- DRM_ERROR("Invalid VCO\n");
+ drm_err(&i915->drm, "Invalid VCO\n");
return false;
}
@@ -1885,6 +2192,23 @@ bxt_ddi_hdmi_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return bxt_ddi_set_dpll_hw_state(crtc_state, &clk_div);
}
+static int bxt_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ struct dpll clock;
+
+ clock.m1 = 2;
+ clock.m2 = (pll_state->pll0 & PORT_PLL_M2_MASK) << 22;
+ if (pll_state->pll3 & PORT_PLL_M2_FRAC_ENABLE)
+ clock.m2 |= pll_state->pll2 & PORT_PLL_M2_FRAC_MASK;
+ clock.n = (pll_state->pll1 & PORT_PLL_N_MASK) >> PORT_PLL_N_SHIFT;
+ clock.p1 = (pll_state->ebb0 & PORT_PLL_P1_MASK) >> PORT_PLL_P1_SHIFT;
+ clock.p2 = (pll_state->ebb0 & PORT_PLL_P2_MASK) >> PORT_PLL_P2_SHIFT;
+
+ return chv_calc_dpll_params(i915->dpll.ref_clks.nssc, &clock);
+}
+
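Editor's note: bxt_ddi_pll_get_freq rebuilds m2 as a fixed-point value, integer part shifted up by 22 bits with a 22-bit fraction in the low bits, before handing the dividers to chv_calc_dpll_params. A standalone sketch of that representation, using the 100 MHz non-SSC reference the patch installs below and made-up dividers; the kernel helper rounds to closest where this sketch truncates, which is exact for these values:

	#include <stdio.h>
	#include <stdint.h>

	/* m2 is fixed point: (integer << 22) | 22-bit fraction. */
	static uint64_t bxt_vco_khz(uint32_t ref_khz, uint32_t m1,
				    uint64_t m2_fp, uint32_t n)
	{
		return (uint64_t)ref_khz * m1 * m2_fp / ((uint64_t)n << 22);
	}

	int main(void)
	{
		/* Hypothetical: m2 = 27.5, i.e. (27 << 22) | (1 << 21). */
		uint64_t m2 = (27ULL << 22) | (1ULL << 21);

		/* 100000 * 2 * 27.5 = 5500000 kHz VCO */
		printf("vco = %llu kHz\n",
		       (unsigned long long)bxt_vco_khz(100000, 2, m2, 1));
		return 0;
	}
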
static bool bxt_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
@@ -1907,8 +2231,8 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
id = (enum intel_dpll_id) encoder->port;
pll = intel_get_shared_dpll_by_id(dev_priv, id);
- DRM_DEBUG_KMS("[CRTC:%d:%s] using pre-allocated %s\n",
- crtc->base.base.id, crtc->base.name, pll->info->name);
+ drm_dbg_kms(&dev_priv->drm, "[CRTC:%d:%s] using pre-allocated %s\n",
+ crtc->base.base.id, crtc->base.name, pll->info->name);
intel_reference_shared_dpll(state, crtc,
pll, &crtc_state->dpll_hw_state);
@@ -1918,89 +2242,37 @@ static bool bxt_get_dpll(struct intel_atomic_state *state,
return true;
}
+static void bxt_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ i915->dpll.ref_clks.ssc = 100000;
+ i915->dpll.ref_clks.nssc = 100000;
+ /* DSI non-SSC ref 19.2MHz */
+}
+
static void bxt_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
- "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
- "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
- hw_state->ebb0,
- hw_state->ebb4,
- hw_state->pll0,
- hw_state->pll1,
- hw_state->pll2,
- hw_state->pll3,
- hw_state->pll6,
- hw_state->pll8,
- hw_state->pll9,
- hw_state->pll10,
- hw_state->pcsdw12);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: ebb0: 0x%x, ebb4: 0x%x,"
+ "pll0: 0x%x, pll1: 0x%x, pll2: 0x%x, pll3: 0x%x, "
+ "pll6: 0x%x, pll8: 0x%x, pll9: 0x%x, pll10: 0x%x, pcsdw12: 0x%x\n",
+ hw_state->ebb0,
+ hw_state->ebb4,
+ hw_state->pll0,
+ hw_state->pll1,
+ hw_state->pll2,
+ hw_state->pll3,
+ hw_state->pll6,
+ hw_state->pll8,
+ hw_state->pll9,
+ hw_state->pll10,
+ hw_state->pcsdw12);
}
static const struct intel_shared_dpll_funcs bxt_ddi_pll_funcs = {
.enable = bxt_ddi_pll_enable,
.disable = bxt_ddi_pll_disable,
.get_hw_state = bxt_ddi_pll_get_hw_state,
-};
-
-struct intel_dpll_mgr {
- const struct dpll_info *dpll_info;
-
- bool (*get_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*put_dplls)(struct intel_atomic_state *state,
- struct intel_crtc *crtc);
- void (*update_active_dpll)(struct intel_atomic_state *state,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder);
- void (*dump_hw_state)(struct drm_i915_private *dev_priv,
- const struct intel_dpll_hw_state *hw_state);
-};
-
-static const struct dpll_info pch_plls[] = {
- { "PCH DPLL A", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_A, 0 },
- { "PCH DPLL B", &ibx_pch_dpll_funcs, DPLL_ID_PCH_PLL_B, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr pch_pll_mgr = {
- .dpll_info = pch_plls,
- .get_dplls = ibx_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = ibx_dump_hw_state,
-};
-
-static const struct dpll_info hsw_plls[] = {
- { "WRPLL 1", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL1, 0 },
- { "WRPLL 2", &hsw_ddi_wrpll_funcs, DPLL_ID_WRPLL2, 0 },
- { "SPLL", &hsw_ddi_spll_funcs, DPLL_ID_SPLL, 0 },
- { "LCPLL 810", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_810, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 1350", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_1350, INTEL_DPLL_ALWAYS_ON },
- { "LCPLL 2700", &hsw_ddi_lcpll_funcs, DPLL_ID_LCPLL_2700, INTEL_DPLL_ALWAYS_ON },
- { },
-};
-
-static const struct intel_dpll_mgr hsw_pll_mgr = {
- .dpll_info = hsw_plls,
- .get_dplls = hsw_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = hsw_dump_hw_state,
-};
-
-static const struct dpll_info skl_plls[] = {
- { "DPLL 0", &skl_ddi_dpll0_funcs, DPLL_ID_SKL_DPLL0, INTEL_DPLL_ALWAYS_ON },
- { "DPLL 1", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL1, 0 },
- { "DPLL 2", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL2, 0 },
- { "DPLL 3", &skl_ddi_pll_funcs, DPLL_ID_SKL_DPLL3, 0 },
- { },
-};
-
-static const struct intel_dpll_mgr skl_pll_mgr = {
- .dpll_info = skl_plls,
- .get_dplls = skl_get_dpll,
- .put_dplls = intel_put_dpll,
- .dump_hw_state = skl_dump_hw_state,
+ .get_freq = bxt_ddi_pll_get_freq,
};
static const struct dpll_info bxt_plls[] = {
@@ -2014,6 +2286,7 @@ static const struct intel_dpll_mgr bxt_pll_mgr = {
.dpll_info = bxt_plls,
.get_dplls = bxt_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = bxt_update_dpll_ref_clks,
.dump_hw_state = bxt_dump_hw_state,
};
@@ -2024,32 +2297,32 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
u32 val;
/* 1. Enable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val |= PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 2. Wait for DPLL power state enabled in DPLL_ENABLE. */
if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id),
PLL_POWER_STATE, 5))
- DRM_ERROR("PLL %d Power not enabled\n", id);
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n", id);
/*
* 3. Configure DPLL_CFGCR0 to set SSC enable/disable,
* select DP mode, and set DP link rate.
*/
val = pll->state.hw_state.cfgcr0;
- I915_WRITE(CNL_DPLL_CFGCR0(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_CFGCR0(id), val);
/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR0(id));
+ intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR0(id));
/* 3. Configure DPLL_CFGCR1 */
/* Avoid touching CFGCR1 if HDMI mode is not enabled */
if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE) {
val = pll->state.hw_state.cfgcr1;
- I915_WRITE(CNL_DPLL_CFGCR1(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_CFGCR1(id), val);
/* 4. Read back to ensure writes completed */
- POSTING_READ(CNL_DPLL_CFGCR1(id));
+ intel_de_posting_read(dev_priv, CNL_DPLL_CFGCR1(id));
}
/*
@@ -2062,13 +2335,13 @@ static void cnl_ddi_pll_enable(struct drm_i915_private *dev_priv,
*/
/* 6. Enable DPLL in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val |= PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 7. Wait for PLL lock status in DPLL_ENABLE. */
if (intel_de_wait_for_set(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
- DRM_ERROR("PLL %d not locked\n", id);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", id);
/*
* 8. If the frequency will result in a change to the voltage
@@ -2106,13 +2379,13 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 3. Disable DPLL through DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val &= ~PLL_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 4. Wait for PLL not locked status in DPLL_ENABLE. */
if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id), PLL_LOCK, 5))
- DRM_ERROR("PLL %d locked\n", id);
+ drm_err(&dev_priv->drm, "PLL %d locked\n", id);
/*
* 5. If the frequency will result in a change to the voltage
@@ -2124,14 +2397,14 @@ static void cnl_ddi_pll_disable(struct drm_i915_private *dev_priv,
*/
/* 6. Disable DPLL power in DPLL_ENABLE. */
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(CNL_DPLL_ENABLE(id), val);
+ intel_de_write(dev_priv, CNL_DPLL_ENABLE(id), val);
/* 7. Wait for DPLL power state disabled in DPLL_ENABLE. */
if (intel_de_wait_for_clear(dev_priv, CNL_DPLL_ENABLE(id),
PLL_POWER_STATE, 5))
- DRM_ERROR("PLL %d Power not disabled\n", id);
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n", id);
}
static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
@@ -2150,16 +2423,17 @@ static bool cnl_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
ret = false;
- val = I915_READ(CNL_DPLL_ENABLE(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_ENABLE(id));
if (!(val & PLL_ENABLE))
goto out;
- val = I915_READ(CNL_DPLL_CFGCR0(id));
+ val = intel_de_read(dev_priv, CNL_DPLL_CFGCR0(id));
hw_state->cfgcr0 = val;
/* avoid reading back stale values if HDMI mode is not enabled */
if (val & DPLL_CFGCR0_HDMI_MODE) {
- hw_state->cfgcr1 = I915_READ(CNL_DPLL_CFGCR1(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ CNL_DPLL_CFGCR1(id));
}
ret = true;
@@ -2256,27 +2530,12 @@ static void cnl_wrpll_params_populate(struct skl_wrpll_params *params,
params->dco_fraction = dco & 0x7fff;
}
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv)
-{
- int ref_clock = dev_priv->cdclk.hw.ref;
-
- /*
- * For ICL+, the spec states: if reference frequency is 38.4,
- * use 19.2 because the DPLL automatically divides that by 2.
- */
- if (INTEL_GEN(dev_priv) >= 11 && ref_clock == 38400)
- ref_clock = 19200;
-
- return ref_clock;
-}
-
static bool
-cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
- struct skl_wrpll_params *wrpll_params)
+__cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params,
+ int ref_clock)
{
- struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
u32 afe_clock = crtc_state->port_clock * 5;
- u32 ref_clock;
u32 dco_min = 7998000;
u32 dco_max = 10000000;
u32 dco_mid = (dco_min + dco_max) / 2;
@@ -2308,15 +2567,22 @@ cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
return false;
cnl_wrpll_get_multipliers(best_div, &pdiv, &qdiv, &kdiv);
-
- ref_clock = cnl_hdmi_pll_ref_clock(dev_priv);
-
cnl_wrpll_params_populate(wrpll_params, best_dco, ref_clock,
pdiv, qdiv, kdiv);
return true;
}
+static bool
+cnl_ddi_calculate_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ i915->dpll.ref_clks.nssc);
+}
+
static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
{
u32 cfgcr0, cfgcr1;
@@ -2344,6 +2610,68 @@ static bool cnl_ddi_hdmi_pll_dividers(struct intel_crtc_state *crtc_state)
return true;
}
+static int __cnl_ddi_wrpll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll,
+ int ref_clock)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 p0, p1, p2, dco_freq;
+
+ p0 = pll_state->cfgcr1 & DPLL_CFGCR1_PDIV_MASK;
+ p2 = pll_state->cfgcr1 & DPLL_CFGCR1_KDIV_MASK;
+
+ if (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_MODE(1))
+ p1 = (pll_state->cfgcr1 & DPLL_CFGCR1_QDIV_RATIO_MASK) >>
+ DPLL_CFGCR1_QDIV_RATIO_SHIFT;
+ else
+ p1 = 1;
+
+ switch (p0) {
+ case DPLL_CFGCR1_PDIV_2:
+ p0 = 2;
+ break;
+ case DPLL_CFGCR1_PDIV_3:
+ p0 = 3;
+ break;
+ case DPLL_CFGCR1_PDIV_5:
+ p0 = 5;
+ break;
+ case DPLL_CFGCR1_PDIV_7:
+ p0 = 7;
+ break;
+ }
+
+ switch (p2) {
+ case DPLL_CFGCR1_KDIV_1:
+ p2 = 1;
+ break;
+ case DPLL_CFGCR1_KDIV_2:
+ p2 = 2;
+ break;
+ case DPLL_CFGCR1_KDIV_3:
+ p2 = 3;
+ break;
+ }
+
+ dco_freq = (pll_state->cfgcr0 & DPLL_CFGCR0_DCO_INTEGER_MASK) *
+ ref_clock;
+
+ dco_freq += (((pll_state->cfgcr0 & DPLL_CFGCR0_DCO_FRACTION_MASK) >>
+ DPLL_CFGCR0_DCO_FRACTION_SHIFT) * ref_clock) / 0x8000;
+
+ if (drm_WARN_ON(&dev_priv->drm, p0 == 0 || p1 == 0 || p2 == 0))
+ return 0;
+
+ return dco_freq / (p0 * p1 * p2 * 5);
+}
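
The DCO readback above is fixed-point arithmetic: bits 14:0 of CFGCR0 carry the fractional feedback divider, so the fraction contributes (frac * ref) / 2^15, and the port clock is the DCO divided by p0 * p1 * p2 and the fixed AFE factor of 5. A minimal standalone sketch of the same math, using illustrative register values that are not taken from this patch:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values only: 19.2 MHz reference, DCO integer 421,
	 * DCO fraction 0x3800 (= 0.4375), pdiv 2, qdiv 1, kdiv 1. */
	uint32_t ref_khz = 19200;
	uint32_t dco_int = 421, dco_frac = 0x3800;
	uint32_t p0 = 2, p1 = 1, p2 = 1;

	/* Same shape as above: integer part plus (frac * ref) / 2^15 */
	uint32_t dco_khz = dco_int * ref_khz + (dco_frac * ref_khz) / 0x8000;

	/* 8083200 + 8400 = 8091600 kHz, i.e. inside the 7.998-10 GHz DCO
	 * range searched by __cnl_ddi_calculate_wrpll() */
	printf("dco %u kHz, port clock %u kHz\n",
	       dco_khz, dco_khz / (p0 * p1 * p2 * 5));
	return 0;
}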
+
+static int cnl_ddi_wrpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll, i915->dpll.ref_clks.nssc);
+}
+
static bool
cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
{
@@ -2389,30 +2717,72 @@ cnl_ddi_dp_set_dpll_hw_state(struct intel_crtc_state *crtc_state)
return true;
}
+static int cnl_ddi_lcpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ int link_clock = 0;
+
+ switch (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_LINK_RATE_MASK) {
+ case DPLL_CFGCR0_LINK_RATE_810:
+ link_clock = 81000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1080:
+ link_clock = 108000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1350:
+ link_clock = 135000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_1620:
+ link_clock = 162000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2160:
+ link_clock = 216000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_2700:
+ link_clock = 270000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_3240:
+ link_clock = 324000;
+ break;
+ case DPLL_CFGCR0_LINK_RATE_4050:
+ link_clock = 405000;
+ break;
+ default:
+ drm_WARN(&i915->drm, 1, "Unsupported link rate\n");
+ break;
+ }
+
+ return link_clock * 2;
+}
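
The switch maps each CFGCR0 link-rate encoding, named after the PLL frequency (DPLL_CFGCR0_LINK_RATE_1350 for 1.35 GHz), to a tabulated value that is doubled on return because the DDI moves data on both clock edges. A one-assert illustration under those assumptions:

#include <assert.h>

int main(void)
{
	/* DPLL_CFGCR0_LINK_RATE_1350: 1.35 GHz PLL -> 2.7 GHz (HBR) DP link */
	int link_clock = 135000;		/* table entry, kHz */

	assert(link_clock * 2 == 270000);	/* returned port clock, kHz */
	return 0;
}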
+
static bool cnl_get_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder)
{
struct intel_crtc_state *crtc_state =
intel_atomic_get_new_crtc_state(state, crtc);
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
struct intel_shared_dpll *pll;
bool bret;
if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) {
bret = cnl_ddi_hdmi_pll_dividers(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not get HDMI pll dividers.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not get HDMI pll dividers.\n");
return false;
}
} else if (intel_crtc_has_dp_encoder(crtc_state)) {
bret = cnl_ddi_dp_set_dpll_hw_state(crtc_state);
if (!bret) {
- DRM_DEBUG_KMS("Could not set DP dpll HW state.\n");
+ drm_dbg_kms(&i915->drm,
+ "Could not set DP dpll HW state.\n");
return false;
}
} else {
- DRM_DEBUG_KMS("Skip DPLL setup for output_types 0x%x\n",
- crtc_state->output_types);
+ drm_dbg_kms(&i915->drm,
+ "Skip DPLL setup for output_types 0x%x\n",
+ crtc_state->output_types);
return false;
}
@@ -2422,7 +2792,7 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
BIT(DPLL_ID_SKL_DPLL1) |
BIT(DPLL_ID_SKL_DPLL0));
if (!pll) {
- DRM_DEBUG_KMS("No PLL selected\n");
+ drm_dbg_kms(&i915->drm, "No PLL selected\n");
return false;
}
@@ -2434,19 +2804,35 @@ static bool cnl_get_dpll(struct intel_atomic_state *state,
return true;
}
+static int cnl_ddi_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (pll->state.hw_state.cfgcr0 & DPLL_CFGCR0_HDMI_MODE)
+ return cnl_ddi_wrpll_get_freq(i915, pll);
+ else
+ return cnl_ddi_lcpll_get_freq(i915, pll);
+}
+
+static void cnl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC reference */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void cnl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: "
- "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
- hw_state->cfgcr0,
- hw_state->cfgcr1);
+ drm_dbg_kms(&dev_priv->drm, "dpll_hw_state: "
+ "cfgcr0: 0x%x, cfgcr1: 0x%x\n",
+ hw_state->cfgcr0,
+ hw_state->cfgcr1);
}
static const struct intel_shared_dpll_funcs cnl_ddi_pll_funcs = {
.enable = cnl_ddi_pll_enable,
.disable = cnl_ddi_pll_disable,
.get_hw_state = cnl_ddi_pll_get_hw_state,
+ .get_freq = cnl_ddi_pll_get_freq,
};
static const struct dpll_info cnl_plls[] = {
@@ -2460,6 +2846,7 @@ static const struct intel_dpll_mgr cnl_pll_mgr = {
.dpll_info = cnl_plls,
.get_dplls = cnl_get_dpll,
.put_dplls = intel_put_dpll,
+ .update_ref_clks = cnl_update_dpll_ref_clks,
.dump_hw_state = cnl_dump_hw_state,
};
@@ -2555,7 +2942,7 @@ static bool icl_calc_dp_combo_pll(struct intel_crtc_state *crtc_state,
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
const struct icl_combo_pll_params *params =
- dev_priv->cdclk.hw.ref == 24000 ?
+ dev_priv->dpll.ref_clks.nssc == 24000 ?
icl_dp_combo_pll_24MHz_values :
icl_dp_combo_pll_19_2MHz_values;
int clock = crtc_state->port_clock;
@@ -2578,9 +2965,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
if (INTEL_GEN(dev_priv) >= 12) {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2591,9 +2978,9 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
break;
}
} else {
- switch (dev_priv->cdclk.hw.ref) {
+ switch (dev_priv->dpll.ref_clks.nssc) {
default:
- MISSING_CASE(dev_priv->cdclk.hw.ref);
+ MISSING_CASE(dev_priv->dpll.ref_clks.nssc);
/* fall-through */
case 19200:
case 38400:
@@ -2608,6 +2995,49 @@ static bool icl_calc_tbt_pll(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_tbt_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ /*
+	 * The PLL outputs multiple frequencies at the same time; the
+	 * selection is made at the DDI clock mux level.
+ */
+ drm_WARN_ON(&i915->drm, 1);
+
+ return 0;
+}
+
+static int icl_wrpll_ref_clock(struct drm_i915_private *i915)
+{
+ int ref_clock = i915->dpll.ref_clks.nssc;
+
+ /*
+ * For ICL+, the spec states: if reference frequency is 38.4,
+ * use 19.2 because the DPLL automatically divides that by 2.
+ */
+ if (ref_clock == 38400)
+ ref_clock = 19200;
+
+ return ref_clock;
+}
+
+static bool
+icl_calc_wrpll(struct intel_crtc_state *crtc_state,
+ struct skl_wrpll_params *wrpll_params)
+{
+ struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev);
+
+ return __cnl_ddi_calculate_wrpll(crtc_state, wrpll_params,
+ icl_wrpll_ref_clock(i915));
+}
+
+static int icl_ddi_combo_pll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ return __cnl_ddi_wrpll_get_freq(i915, pll,
+ icl_wrpll_ref_clock(i915));
+}
+
static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
struct intel_encoder *encoder,
struct intel_dpll_hw_state *pll_state)
@@ -2622,7 +3052,7 @@ static bool icl_calc_dpll_state(struct intel_crtc_state *crtc_state,
ret = icl_calc_tbt_pll(crtc_state, &pll_params);
else if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI) ||
intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DSI))
- ret = cnl_ddi_calculate_wrpll(crtc_state, &pll_params);
+ ret = icl_calc_wrpll(crtc_state, &pll_params);
else
ret = icl_calc_dp_combo_pll(crtc_state, &pll_params);
@@ -2745,7 +3175,7 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
struct intel_dpll_hw_state *pll_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- int refclk_khz = dev_priv->cdclk.hw.ref;
+ int refclk_khz = dev_priv->dpll.ref_clks.nssc;
int clock = crtc_state->port_clock;
u32 dco_khz, m1div, m2div_int, m2div_rem, m2div_frac;
u32 iref_ndiv, iref_trim, iref_pulse_w;
@@ -2761,7 +3191,8 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
if (!icl_mg_pll_find_divisors(clock, is_dp, use_ssc, &dco_khz,
pll_state, is_dkl)) {
- DRM_DEBUG_KMS("Failed to find divisors for clock %d\n", clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to find divisors for clock %d\n", clock);
return false;
}
@@ -2774,8 +3205,9 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
}
if (m2div_int > 255) {
- DRM_DEBUG_KMS("Failed to find mdiv for clock %d\n",
- clock);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to find mdiv for clock %d\n",
+ clock);
return false;
}
}
@@ -2944,6 +3376,78 @@ static bool icl_calc_mg_pll_state(struct intel_crtc_state *crtc_state,
return true;
}
+static int icl_ddi_mg_pll_get_freq(struct drm_i915_private *dev_priv,
+ const struct intel_shared_dpll *pll)
+{
+ const struct intel_dpll_hw_state *pll_state = &pll->state.hw_state;
+ u32 m1, m2_int, m2_frac, div1, div2, ref_clock;
+ u64 tmp;
+
+ ref_clock = dev_priv->dpll.ref_clks.nssc;
+
+ if (INTEL_GEN(dev_priv) >= 12) {
+ m1 = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBPREDIV_MASK;
+ m1 = m1 >> DKL_PLL_DIV0_FBPREDIV_SHIFT;
+ m2_int = pll_state->mg_pll_div0 & DKL_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_bias & DKL_PLL_BIAS_FRAC_EN_H) {
+ m2_frac = pll_state->mg_pll_bias &
+ DKL_PLL_BIAS_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> DKL_PLL_BIAS_FBDIV_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ } else {
+ m1 = pll_state->mg_pll_div1 & MG_PLL_DIV1_FBPREDIV_MASK;
+ m2_int = pll_state->mg_pll_div0 & MG_PLL_DIV0_FBDIV_INT_MASK;
+
+ if (pll_state->mg_pll_div0 & MG_PLL_DIV0_FRACNEN_H) {
+ m2_frac = pll_state->mg_pll_div0 &
+ MG_PLL_DIV0_FBDIV_FRAC_MASK;
+ m2_frac = m2_frac >> MG_PLL_DIV0_FBDIV_FRAC_SHIFT;
+ } else {
+ m2_frac = 0;
+ }
+ }
+
+ switch (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK) {
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2:
+ div1 = 2;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3:
+ div1 = 3;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5:
+ div1 = 5;
+ break;
+ case MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7:
+ div1 = 7;
+ break;
+ default:
+ MISSING_CASE(pll_state->mg_clktop2_hsclkctl);
+ return 0;
+ }
+
+ div2 = (pll_state->mg_clktop2_hsclkctl &
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK) >>
+ MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT;
+
+	/* div2 value of 0 is the same as 1, i.e. no divider */
+ if (div2 == 0)
+ div2 = 1;
+
+ /*
+ * Adjust the original formula to delay the division by 2^22 in order to
+ * minimize possible rounding errors.
+ */
+ tmp = (u64)m1 * m2_int * ref_clock +
+ (((u64)m1 * m2_frac * ref_clock) >> 22);
+ tmp = div_u64(tmp, 5 * div1 * div2);
+
+ return tmp;
+}
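
The feedback fraction on the MG/DKL PLLs is 22 bits wide, so "delay the division by 2^22" in the comment above means multiplying everything out in 64-bit first, shifting once, and leaving a single final division to div_u64(). The same arithmetic as a standalone sketch with made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative only: 38.4 MHz ref, m1 = 2, m2 = 97 + 0x2c0000/2^22 */
	uint64_t ref = 38400, m1 = 2, m2_int = 97, m2_frac = 0x2c0000;
	uint32_t div1 = 2, div2 = 1;

	/* Multiply first, shift once: no precision lost on the fraction */
	uint64_t tmp = m1 * m2_int * ref + ((m1 * m2_frac * ref) >> 22);

	/* (7449600 + 52800) / (5 * 2 * 1) = 750240 kHz */
	printf("port clock %llu kHz\n",
	       (unsigned long long)(tmp / (5 * div1 * div2)));
	return 0;
}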
+
/**
* icl_set_active_port_dpll - select the active port DPLL for a given CRTC
* @crtc_state: state for the CRTC to select the DPLL for
@@ -2996,7 +3500,8 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
unsigned long dpll_mask;
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate combo PHY PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate combo PHY PLL state.\n");
return false;
}
@@ -3013,8 +3518,9 @@ static bool icl_get_combo_phy_dpll(struct intel_atomic_state *state,
&port_dpll->hw_state,
dpll_mask);
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No combo PHY PLL found for [ENCODER:%d:%s]\n",
- encoder->base.base.id, encoder->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "No combo PHY PLL found for [ENCODER:%d:%s]\n",
+ encoder->base.base.id, encoder->base.name);
return false;
}
@@ -3038,7 +3544,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_DEFAULT];
if (!icl_calc_dpll_state(crtc_state, encoder, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate TBT PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate TBT PLL state.\n");
return false;
}
@@ -3046,7 +3553,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(DPLL_ID_ICL_TBTPLL));
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No TBT-ALT PLL found\n");
+ drm_dbg_kms(&dev_priv->drm, "No TBT-ALT PLL found\n");
return false;
}
intel_reference_shared_dpll(state, crtc,
@@ -3055,7 +3562,8 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
port_dpll = &crtc_state->icl_port_dplls[ICL_PORT_DPLL_MG_PHY];
if (!icl_calc_mg_pll_state(crtc_state, &port_dpll->hw_state)) {
- DRM_DEBUG_KMS("Could not calculate MG PHY PLL state.\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Could not calculate MG PHY PLL state.\n");
goto err_unreference_tbt_pll;
}
@@ -3065,7 +3573,7 @@ static bool icl_get_tc_phy_dplls(struct intel_atomic_state *state,
&port_dpll->hw_state,
BIT(dpll_id));
if (!port_dpll->pll) {
- DRM_DEBUG_KMS("No MG PHY PLL found\n");
+ drm_dbg_kms(&dev_priv->drm, "No MG PHY PLL found\n");
goto err_unreference_tbt_pll;
}
intel_reference_shared_dpll(state, crtc,
@@ -3140,37 +3648,39 @@ static bool mg_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
- hw_state->mg_refclkin_ctl = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ MG_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(MG_PLL_DIV0(tc_port));
- hw_state->mg_pll_div1 = I915_READ(MG_PLL_DIV1(tc_port));
- hw_state->mg_pll_lf = I915_READ(MG_PLL_LF(tc_port));
- hw_state->mg_pll_frac_lock = I915_READ(MG_PLL_FRAC_LOCK(tc_port));
- hw_state->mg_pll_ssc = I915_READ(MG_PLL_SSC(tc_port));
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, MG_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, MG_PLL_DIV1(tc_port));
+ hw_state->mg_pll_lf = intel_de_read(dev_priv, MG_PLL_LF(tc_port));
+ hw_state->mg_pll_frac_lock = intel_de_read(dev_priv,
+ MG_PLL_FRAC_LOCK(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, MG_PLL_SSC(tc_port));
- hw_state->mg_pll_bias = I915_READ(MG_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
- if (dev_priv->cdclk.hw.ref == 38400) {
+ if (dev_priv->dpll.ref_clks.nssc == 38400) {
hw_state->mg_pll_tdc_coldst_bias_mask = MG_PLL_TDC_COLDST_COLDSTART;
hw_state->mg_pll_bias_mask = 0;
} else {
@@ -3202,7 +3712,7 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(MG_PLL_ENABLE(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_ENABLE(tc_port));
if (!(val & PLL_ENABLE))
goto out;
@@ -3210,13 +3720,15 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
* All registers read here have the same HIP_INDEX_REG even though
* they are on different building blocks
*/
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
- hw_state->mg_refclkin_ctl = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ hw_state->mg_refclkin_ctl = intel_de_read(dev_priv,
+ DKL_REFCLKIN_CTL(tc_port));
hw_state->mg_refclkin_ctl &= MG_REFCLKIN_CTL_OD_2_MUX_MASK;
hw_state->mg_clktop2_hsclkctl =
- I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
hw_state->mg_clktop2_hsclkctl &=
MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
@@ -3224,32 +3736,32 @@ static bool dkl_pll_get_hw_state(struct drm_i915_private *dev_priv,
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK;
hw_state->mg_clktop2_coreclkctl1 =
- I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
hw_state->mg_clktop2_coreclkctl1 &=
MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
- hw_state->mg_pll_div0 = I915_READ(DKL_PLL_DIV0(tc_port));
+ hw_state->mg_pll_div0 = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
hw_state->mg_pll_div0 &= (DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
- hw_state->mg_pll_div1 = I915_READ(DKL_PLL_DIV1(tc_port));
+ hw_state->mg_pll_div1 = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
hw_state->mg_pll_div1 &= (DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
- hw_state->mg_pll_ssc = I915_READ(DKL_PLL_SSC(tc_port));
+ hw_state->mg_pll_ssc = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
hw_state->mg_pll_ssc &= (DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
- hw_state->mg_pll_bias = I915_READ(DKL_PLL_BIAS(tc_port));
+ hw_state->mg_pll_bias = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
hw_state->mg_pll_bias &= (DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
hw_state->mg_pll_tdc_coldst_bias =
- I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
hw_state->mg_pll_tdc_coldst_bias &= (DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
@@ -3274,20 +3786,26 @@ static bool icl_pll_get_hw_state(struct drm_i915_private *dev_priv,
if (!wakeref)
return false;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
if (!(val & PLL_ENABLE))
goto out;
if (INTEL_GEN(dev_priv) >= 12) {
- hw_state->cfgcr0 = I915_READ(TGL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(TGL_DPLL_CFGCR1(id));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ TGL_DPLL_CFGCR1(id));
} else {
if (IS_ELKHARTLAKE(dev_priv) && id == DPLL_ID_EHL_DPLL4) {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(4));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(4));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(4));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(4));
} else {
- hw_state->cfgcr0 = I915_READ(ICL_DPLL_CFGCR0(id));
- hw_state->cfgcr1 = I915_READ(ICL_DPLL_CFGCR1(id));
+ hw_state->cfgcr0 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR0(id));
+ hw_state->cfgcr1 = intel_de_read(dev_priv,
+ ICL_DPLL_CFGCR1(id));
}
}
@@ -3338,9 +3856,9 @@ static void icl_dpll_write(struct drm_i915_private *dev_priv,
}
}
- I915_WRITE(cfgcr0_reg, hw_state->cfgcr0);
- I915_WRITE(cfgcr1_reg, hw_state->cfgcr1);
- POSTING_READ(cfgcr1_reg);
+ intel_de_write(dev_priv, cfgcr0_reg, hw_state->cfgcr0);
+ intel_de_write(dev_priv, cfgcr1_reg, hw_state->cfgcr1);
+ intel_de_posting_read(dev_priv, cfgcr1_reg);
}
static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
@@ -3356,41 +3874,42 @@ static void icl_mg_pll_write(struct drm_i915_private *dev_priv,
* during the calc/readout phase if the mask depends on some other HW
* state like refclk, see icl_calc_mg_pll_state().
*/
- val = I915_READ(MG_REFCLKIN_CTL(tc_port));
+ val = intel_de_read(dev_priv, MG_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(MG_REFCLKIN_CTL(tc_port), val);
+ intel_de_write(dev_priv, MG_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(MG_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_de_read(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(MG_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_write(dev_priv, MG_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(MG_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_de_read(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(MG_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_write(dev_priv, MG_CLKTOP2_HSCLKCTL(tc_port), val);
- I915_WRITE(MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
- I915_WRITE(MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
- I915_WRITE(MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
- I915_WRITE(MG_PLL_FRAC_LOCK(tc_port), hw_state->mg_pll_frac_lock);
- I915_WRITE(MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
+ intel_de_write(dev_priv, MG_PLL_DIV0(tc_port), hw_state->mg_pll_div0);
+ intel_de_write(dev_priv, MG_PLL_DIV1(tc_port), hw_state->mg_pll_div1);
+ intel_de_write(dev_priv, MG_PLL_LF(tc_port), hw_state->mg_pll_lf);
+ intel_de_write(dev_priv, MG_PLL_FRAC_LOCK(tc_port),
+ hw_state->mg_pll_frac_lock);
+ intel_de_write(dev_priv, MG_PLL_SSC(tc_port), hw_state->mg_pll_ssc);
- val = I915_READ(MG_PLL_BIAS(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_BIAS(tc_port));
val &= ~hw_state->mg_pll_bias_mask;
val |= hw_state->mg_pll_bias;
- I915_WRITE(MG_PLL_BIAS(tc_port), val);
+ intel_de_write(dev_priv, MG_PLL_BIAS(tc_port), val);
- val = I915_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_de_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~hw_state->mg_pll_tdc_coldst_bias_mask;
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(MG_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_write(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(MG_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(dev_priv, MG_PLL_TDC_COLDST_BIAS(tc_port));
}
static void dkl_pll_write(struct drm_i915_private *dev_priv,
@@ -3404,62 +3923,63 @@ static void dkl_pll_write(struct drm_i915_private *dev_priv,
* All registers programmed here have the same HIP_INDEX_REG even
* though they are on different building blocks
*/
- I915_WRITE(HIP_INDEX_REG(tc_port), HIP_INDEX_VAL(tc_port, 0x2));
+ intel_de_write(dev_priv, HIP_INDEX_REG(tc_port),
+ HIP_INDEX_VAL(tc_port, 0x2));
/* All the registers are RMW */
- val = I915_READ(DKL_REFCLKIN_CTL(tc_port));
+ val = intel_de_read(dev_priv, DKL_REFCLKIN_CTL(tc_port));
val &= ~MG_REFCLKIN_CTL_OD_2_MUX_MASK;
val |= hw_state->mg_refclkin_ctl;
- I915_WRITE(DKL_REFCLKIN_CTL(tc_port), val);
+ intel_de_write(dev_priv, DKL_REFCLKIN_CTL(tc_port), val);
- val = I915_READ(DKL_CLKTOP2_CORECLKCTL1(tc_port));
+ val = intel_de_read(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port));
val &= ~MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK;
val |= hw_state->mg_clktop2_coreclkctl1;
- I915_WRITE(DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
+ intel_de_write(dev_priv, DKL_CLKTOP2_CORECLKCTL1(tc_port), val);
- val = I915_READ(DKL_CLKTOP2_HSCLKCTL(tc_port));
+ val = intel_de_read(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port));
val &= ~(MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK |
MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK |
MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK |
MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK);
val |= hw_state->mg_clktop2_hsclkctl;
- I915_WRITE(DKL_CLKTOP2_HSCLKCTL(tc_port), val);
+ intel_de_write(dev_priv, DKL_CLKTOP2_HSCLKCTL(tc_port), val);
- val = I915_READ(DKL_PLL_DIV0(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_DIV0(tc_port));
val &= ~(DKL_PLL_DIV0_INTEG_COEFF_MASK |
DKL_PLL_DIV0_PROP_COEFF_MASK |
DKL_PLL_DIV0_FBPREDIV_MASK |
DKL_PLL_DIV0_FBDIV_INT_MASK);
val |= hw_state->mg_pll_div0;
- I915_WRITE(DKL_PLL_DIV0(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_DIV0(tc_port), val);
- val = I915_READ(DKL_PLL_DIV1(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_DIV1(tc_port));
val &= ~(DKL_PLL_DIV1_IREF_TRIM_MASK |
DKL_PLL_DIV1_TDC_TARGET_CNT_MASK);
val |= hw_state->mg_pll_div1;
- I915_WRITE(DKL_PLL_DIV1(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_DIV1(tc_port), val);
- val = I915_READ(DKL_PLL_SSC(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_SSC(tc_port));
val &= ~(DKL_PLL_SSC_IREF_NDIV_RATIO_MASK |
DKL_PLL_SSC_STEP_LEN_MASK |
DKL_PLL_SSC_STEP_NUM_MASK |
DKL_PLL_SSC_EN);
val |= hw_state->mg_pll_ssc;
- I915_WRITE(DKL_PLL_SSC(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_SSC(tc_port), val);
- val = I915_READ(DKL_PLL_BIAS(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_BIAS(tc_port));
val &= ~(DKL_PLL_BIAS_FRAC_EN_H |
DKL_PLL_BIAS_FBDIV_FRAC_MASK);
val |= hw_state->mg_pll_bias;
- I915_WRITE(DKL_PLL_BIAS(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_BIAS(tc_port), val);
- val = I915_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ val = intel_de_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
val &= ~(DKL_PLL_TDC_SSC_STEP_SIZE_MASK |
DKL_PLL_TDC_FEED_FWD_GAIN_MASK);
val |= hw_state->mg_pll_tdc_coldst_bias;
- I915_WRITE(DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
+ intel_de_write(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port), val);
- POSTING_READ(DKL_PLL_TDC_COLDST_BIAS(tc_port));
+ intel_de_posting_read(dev_priv, DKL_PLL_TDC_COLDST_BIAS(tc_port));
}
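
Every register programmed in dkl_pll_write() and icl_mg_pll_write() follows the same masked read-modify-write shape, now spelled with the intel_de_*() accessors. Reduced to a sketch (the helper name is illustrative, not something this patch adds):

/* Illustrative helper only; the patch open-codes this pattern. */
static void dpll_rmw(struct drm_i915_private *dev_priv, i915_reg_t reg,
		     u32 clear, u32 set)
{
	u32 val = intel_de_read(dev_priv, reg);

	val &= ~clear;
	val |= set;
	intel_de_write(dev_priv, reg, val);
}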
static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
@@ -3468,16 +3988,17 @@ static void icl_pll_power_enable(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val |= PLL_POWER_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not enabled\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d Power not enabled\n",
+ pll->info->id);
}
static void icl_pll_enable(struct drm_i915_private *dev_priv,
@@ -3486,13 +4007,13 @@ static void icl_pll_enable(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val |= PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/* Timeout is actually 600us. */
if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 1))
- DRM_ERROR("PLL %d not locked\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d not locked\n", pll->info->id);
}
static void combo_pll_enable(struct drm_i915_private *dev_priv,
@@ -3584,26 +4105,27 @@ static void icl_pll_disable(struct drm_i915_private *dev_priv,
* nothing here.
*/
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val &= ~PLL_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/* Timeout is actually 1us. */
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 1))
- DRM_ERROR("PLL %d locked\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d locked\n", pll->info->id);
/* DVFS post sequence would be here. See the comment above. */
- val = I915_READ(enable_reg);
+ val = intel_de_read(dev_priv, enable_reg);
val &= ~PLL_POWER_ENABLE;
- I915_WRITE(enable_reg, val);
+ intel_de_write(dev_priv, enable_reg, val);
/*
* The spec says we need to "wait" but it also says it should be
* immediate.
*/
if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_POWER_STATE, 1))
- DRM_ERROR("PLL %d Power not disabled\n", pll->info->id);
+ drm_err(&dev_priv->drm, "PLL %d Power not disabled\n",
+ pll->info->id);
}
static void combo_pll_disable(struct drm_i915_private *dev_priv,
@@ -3639,44 +4161,54 @@ static void mg_pll_disable(struct drm_i915_private *dev_priv,
icl_pll_disable(dev_priv, pll, enable_reg);
}
+static void icl_update_dpll_ref_clks(struct drm_i915_private *i915)
+{
+ /* No SSC ref */
+ i915->dpll.ref_clks.nssc = i915->cdclk.hw.ref;
+}
+
static void icl_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- DRM_DEBUG_KMS("dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
- "mg_refclkin_ctl: 0x%x, hg_clktop2_coreclkctl1: 0x%x, "
- "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
- "mg_pll_div2: 0x%x, mg_pll_lf: 0x%x, "
- "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
- "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
- hw_state->cfgcr0, hw_state->cfgcr1,
- hw_state->mg_refclkin_ctl,
- hw_state->mg_clktop2_coreclkctl1,
- hw_state->mg_clktop2_hsclkctl,
- hw_state->mg_pll_div0,
- hw_state->mg_pll_div1,
- hw_state->mg_pll_lf,
- hw_state->mg_pll_frac_lock,
- hw_state->mg_pll_ssc,
- hw_state->mg_pll_bias,
- hw_state->mg_pll_tdc_coldst_bias);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: cfgcr0: 0x%x, cfgcr1: 0x%x, "
+		    "mg_refclkin_ctl: 0x%x, mg_clktop2_coreclkctl1: 0x%x, "
+ "mg_clktop2_hsclkctl: 0x%x, mg_pll_div0: 0x%x, "
+		    "mg_pll_div1: 0x%x, mg_pll_lf: 0x%x, "
+ "mg_pll_frac_lock: 0x%x, mg_pll_ssc: 0x%x, "
+ "mg_pll_bias: 0x%x, mg_pll_tdc_coldst_bias: 0x%x\n",
+ hw_state->cfgcr0, hw_state->cfgcr1,
+ hw_state->mg_refclkin_ctl,
+ hw_state->mg_clktop2_coreclkctl1,
+ hw_state->mg_clktop2_hsclkctl,
+ hw_state->mg_pll_div0,
+ hw_state->mg_pll_div1,
+ hw_state->mg_pll_lf,
+ hw_state->mg_pll_frac_lock,
+ hw_state->mg_pll_ssc,
+ hw_state->mg_pll_bias,
+ hw_state->mg_pll_tdc_coldst_bias);
}
static const struct intel_shared_dpll_funcs combo_pll_funcs = {
.enable = combo_pll_enable,
.disable = combo_pll_disable,
.get_hw_state = combo_pll_get_hw_state,
+ .get_freq = icl_ddi_combo_pll_get_freq,
};
static const struct intel_shared_dpll_funcs tbt_pll_funcs = {
.enable = tbt_pll_enable,
.disable = tbt_pll_disable,
.get_hw_state = tbt_pll_get_hw_state,
+ .get_freq = icl_ddi_tbt_pll_get_freq,
};
static const struct intel_shared_dpll_funcs mg_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = mg_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info icl_plls[] = {
@@ -3695,6 +4227,7 @@ static const struct intel_dpll_mgr icl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3709,6 +4242,7 @@ static const struct intel_dpll_mgr ehl_pll_mgr = {
.dpll_info = ehl_plls,
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3716,6 +4250,7 @@ static const struct intel_shared_dpll_funcs dkl_pll_funcs = {
.enable = mg_pll_enable,
.disable = mg_pll_disable,
.get_hw_state = dkl_pll_get_hw_state,
+ .get_freq = icl_ddi_mg_pll_get_freq,
};
static const struct dpll_info tgl_plls[] = {
@@ -3736,6 +4271,7 @@ static const struct intel_dpll_mgr tgl_pll_mgr = {
.get_dplls = icl_get_dplls,
.put_dplls = icl_put_dplls,
.update_active_dpll = icl_update_active_dpll,
+ .update_ref_clks = icl_update_dpll_ref_clks,
.dump_hw_state = icl_dump_hw_state,
};
@@ -3770,22 +4306,22 @@ void intel_shared_dpll_init(struct drm_device *dev)
dpll_mgr = &pch_pll_mgr;
if (!dpll_mgr) {
- dev_priv->num_shared_dpll = 0;
+ dev_priv->dpll.num_shared_dpll = 0;
return;
}
dpll_info = dpll_mgr->dpll_info;
for (i = 0; dpll_info[i].name; i++) {
- WARN_ON(i != dpll_info[i].id);
- dev_priv->shared_dplls[i].info = &dpll_info[i];
+ drm_WARN_ON(dev, i != dpll_info[i].id);
+ dev_priv->dpll.shared_dplls[i].info = &dpll_info[i];
}
- dev_priv->dpll_mgr = dpll_mgr;
- dev_priv->num_shared_dpll = i;
- mutex_init(&dev_priv->dpll_lock);
+ dev_priv->dpll.mgr = dpll_mgr;
+ dev_priv->dpll.num_shared_dpll = i;
+ mutex_init(&dev_priv->dpll.lock);
- BUG_ON(dev_priv->num_shared_dpll > I915_NUM_PLLS);
+ BUG_ON(dev_priv->dpll.num_shared_dpll > I915_NUM_PLLS);
}
/**
@@ -3812,9 +4348,9 @@ bool intel_reserve_shared_dplls(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
- if (WARN_ON(!dpll_mgr))
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return false;
return dpll_mgr->get_dplls(state, crtc, encoder);
@@ -3835,7 +4371,7 @@ void intel_release_shared_dplls(struct intel_atomic_state *state,
struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
/*
* FIXME: this function is called for every platform having a
@@ -3864,35 +4400,114 @@ void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll_mgr;
+ const struct intel_dpll_mgr *dpll_mgr = dev_priv->dpll.mgr;
- if (WARN_ON(!dpll_mgr))
+ if (drm_WARN_ON(&dev_priv->drm, !dpll_mgr))
return;
dpll_mgr->update_active_dpll(state, crtc, encoder);
}
/**
+ * intel_dpll_get_freq - calculate the DPLL's output frequency
+ * @i915: i915 device
+ * @pll: DPLL for which to calculate the output frequency
+ *
+ * Return the output frequency corresponding to @pll's current state.
+ */
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll)
+{
+ if (drm_WARN_ON(&i915->drm, !pll->info->funcs->get_freq))
+ return 0;
+
+ return pll->info->funcs->get_freq(i915, pll);
+}
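
Callers are expected to go through this wrapper rather than the hook itself, so a platform without a .get_freq implementation warns and degrades to 0. A hypothetical call site, with the surrounding names assumed for illustration:

/* Hypothetical readout snippet (crtc_state and i915 are assumed): ask
 * the PLL the CRTC ended up with for its current output frequency. */
int port_clock = intel_dpll_get_freq(i915, crtc_state->shared_dpll);

if (port_clock == 0) {
	/* No .get_freq hook on this platform; the wrapper already WARNed. */
}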
+
+static void readout_dpll_hw_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ struct intel_crtc *crtc;
+
+ pll->on = pll->info->funcs->get_hw_state(i915, pll,
+ &pll->state.hw_state);
+
+ if (IS_ELKHARTLAKE(i915) && pll->on &&
+ pll->info->id == DPLL_ID_EHL_DPLL4) {
+ pll->wakeref = intel_display_power_get(i915,
+ POWER_DOMAIN_DPLL_DC_OFF);
+ }
+
+ pll->state.crtc_mask = 0;
+ for_each_intel_crtc(&i915->drm, crtc) {
+ struct intel_crtc_state *crtc_state =
+ to_intel_crtc_state(crtc->base.state);
+
+ if (crtc_state->hw.active && crtc_state->shared_dpll == pll)
+ pll->state.crtc_mask |= 1 << crtc->pipe;
+ }
+ pll->active_mask = pll->state.crtc_mask;
+
+ drm_dbg_kms(&i915->drm,
+ "%s hw state readout: crtc_mask 0x%08x, on %i\n",
+ pll->info->name, pll->state.crtc_mask, pll->on);
+}
+
+void intel_dpll_readout_hw_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ if (i915->dpll.mgr && i915->dpll.mgr->update_ref_clks)
+ i915->dpll.mgr->update_ref_clks(i915);
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ readout_dpll_hw_state(i915, &i915->dpll.shared_dplls[i]);
+}
+
+static void sanitize_dpll_state(struct drm_i915_private *i915,
+ struct intel_shared_dpll *pll)
+{
+ if (!pll->on || pll->active_mask)
+ return;
+
+ drm_dbg_kms(&i915->drm,
+ "%s enabled but not in use, disabling\n",
+ pll->info->name);
+
+ pll->info->funcs->disable(i915, pll);
+ pll->on = false;
+}
+
+void intel_dpll_sanitize_state(struct drm_i915_private *i915)
+{
+ int i;
+
+ for (i = 0; i < i915->dpll.num_shared_dpll; i++)
+ sanitize_dpll_state(i915, &i915->dpll.shared_dplls[i]);
+}
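
Together the two new entry points bracket hardware state takeover: refresh the reference clocks and per-PLL state, let per-CRTC readout populate crtc_mask, then switch off whatever the firmware left enabled but unused. A hedged ordering sketch; the middle step is an assumption about the caller, not part of this hunk:

/* Hypothetical takeover-from-firmware ordering: */
intel_dpll_readout_hw_state(i915);	/* ref clocks + per-PLL state */
/* ... per-CRTC and per-encoder state readout runs here ... */
intel_dpll_sanitize_state(i915);	/* disable enabled-but-unused PLLs */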
+
+/**
* intel_shared_dpll_dump_hw_state - write hw_state to dmesg
* @dev_priv: i915 drm device
* @hw_state: hw state to be written to the log
*
- * Write the relevant values in @hw_state to dmesg using DRM_DEBUG_KMS.
+ * Write the relevant values in @hw_state to dmesg using drm_dbg_kms.
*/
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state)
{
- if (dev_priv->dpll_mgr) {
- dev_priv->dpll_mgr->dump_hw_state(dev_priv, hw_state);
+ if (dev_priv->dpll.mgr) {
+ dev_priv->dpll.mgr->dump_hw_state(dev_priv, hw_state);
} else {
/* fallback for platforms that don't use the shared dpll
* infrastructure
*/
- DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
- "fp0: 0x%x, fp1: 0x%x\n",
- hw_state->dpll,
- hw_state->dpll_md,
- hw_state->fp0,
- hw_state->fp1);
+ drm_dbg_kms(&dev_priv->drm,
+ "dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
+ "fp0: 0x%x, fp1: 0x%x\n",
+ hw_state->dpll,
+ hw_state->dpll_md,
+ hw_state->fp0,
+ hw_state->fp1);
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
index 2a104c64291d..5d9a2bc371e7 100644
--- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
+++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h
@@ -278,6 +278,15 @@ struct intel_shared_dpll_funcs {
bool (*get_hw_state)(struct drm_i915_private *dev_priv,
struct intel_shared_dpll *pll,
struct intel_dpll_hw_state *hw_state);
+
+ /**
+ * @get_freq:
+ *
+ * Hook for calculating the pll's output frequency based on its
+ * current state.
+ */
+ int (*get_freq)(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
};
/**
@@ -372,15 +381,18 @@ void icl_set_active_port_dpll(struct intel_crtc_state *crtc_state,
void intel_update_active_dpll(struct intel_atomic_state *state,
struct intel_crtc *crtc,
struct intel_encoder *encoder);
+int intel_dpll_get_freq(struct drm_i915_private *i915,
+ const struct intel_shared_dpll *pll);
void intel_prepare_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_enable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_disable_shared_dpll(const struct intel_crtc_state *crtc_state);
void intel_shared_dpll_swap_state(struct intel_atomic_state *state);
void intel_shared_dpll_init(struct drm_device *dev);
+void intel_dpll_readout_hw_state(struct drm_i915_private *dev_priv);
+void intel_dpll_sanitize_state(struct drm_i915_private *dev_priv);
void intel_dpll_dump_hw_state(struct drm_i915_private *dev_priv,
const struct intel_dpll_hw_state *hw_state);
-int cnl_hdmi_pll_ref_clock(struct drm_i915_private *dev_priv);
enum intel_dpll_id icl_tc_port_to_pll_id(enum tc_port tc_port);
bool intel_dpll_is_combophy(enum intel_dpll_id id);
diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c
index ada006a690df..d7a6bf2277df 100644
--- a/drivers/gpu/drm/i915/display/intel_dsb.c
+++ b/drivers/gpu/drm/i915/display/intel_dsb.c
@@ -40,7 +40,7 @@ static inline bool is_dsb_busy(struct intel_dsb *dsb)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- return DSB_STATUS & I915_READ(DSB_CTRL(pipe, dsb->id));
+ return DSB_STATUS & intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
}
static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
@@ -50,16 +50,16 @@ static inline bool intel_dsb_enable_engine(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl |= DSB_ENABLE;
- I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
- POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return true;
}
@@ -70,16 +70,16 @@ static inline bool intel_dsb_disable_engine(struct intel_dsb *dsb)
enum pipe pipe = crtc->pipe;
u32 dsb_ctrl;
- dsb_ctrl = I915_READ(DSB_CTRL(pipe, dsb->id));
+ dsb_ctrl = intel_de_read(dev_priv, DSB_CTRL(pipe, dsb->id));
if (DSB_STATUS & dsb_ctrl) {
- DRM_DEBUG_KMS("DSB engine is busy.\n");
+ drm_dbg_kms(&dev_priv->drm, "DSB engine is busy.\n");
return false;
}
dsb_ctrl &= ~DSB_ENABLE;
- I915_WRITE(DSB_CTRL(pipe, dsb->id), dsb_ctrl);
+ intel_de_write(dev_priv, DSB_CTRL(pipe, dsb->id), dsb_ctrl);
- POSTING_READ(DSB_CTRL(pipe, dsb->id));
+ intel_de_posting_read(dev_priv, DSB_CTRL(pipe, dsb->id));
return true;
}
@@ -115,20 +115,20 @@ intel_dsb_get(struct intel_crtc *crtc)
obj = i915_gem_object_create_internal(i915, DSB_BUF_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Gem object creation failed\n");
+ drm_err(&i915->drm, "Gem object creation failed\n");
goto out;
}
vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
- DRM_ERROR("Vma creation failed\n");
+ drm_err(&i915->drm, "Vma creation failed\n");
i915_gem_object_put(obj);
goto out;
}
buf = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
if (IS_ERR(buf)) {
- DRM_ERROR("Command buffer creation failed\n");
+ drm_err(&i915->drm, "Command buffer creation failed\n");
goto out;
}
@@ -165,7 +165,7 @@ void intel_dsb_put(struct intel_dsb *dsb)
if (!HAS_DSB(i915))
return;
- if (WARN_ON(dsb->refcount == 0))
+ if (drm_WARN_ON(&i915->drm, dsb->refcount == 0))
return;
if (--dsb->refcount == 0) {
@@ -198,12 +198,12 @@ void intel_dsb_indexed_reg_write(struct intel_dsb *dsb, i915_reg_t reg,
u32 reg_val;
if (!buf) {
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
return;
}
- if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -272,12 +272,12 @@ void intel_dsb_reg_write(struct intel_dsb *dsb, i915_reg_t reg, u32 val)
u32 *buf = dsb->cmd_buf;
if (!buf) {
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
return;
}
- if (WARN_ON(dsb->free_pos >= DSB_BUF_SIZE)) {
- DRM_DEBUG_KMS("DSB buffer overflow\n");
+ if (drm_WARN_ON(&dev_priv->drm, dsb->free_pos >= DSB_BUF_SIZE)) {
+ drm_dbg_kms(&dev_priv->drm, "DSB buffer overflow\n");
return;
}
@@ -310,10 +310,12 @@ void intel_dsb_commit(struct intel_dsb *dsb)
goto reset;
if (is_dsb_busy(dsb)) {
- DRM_ERROR("HEAD_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "HEAD_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- I915_WRITE(DSB_HEAD(pipe, dsb->id), i915_ggtt_offset(dsb->vma));
+ intel_de_write(dev_priv, DSB_HEAD(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma));
tail = ALIGN(dsb->free_pos * 4, CACHELINE_BYTES);
if (tail > dsb->free_pos * 4)
@@ -321,14 +323,18 @@ void intel_dsb_commit(struct intel_dsb *dsb)
(tail - dsb->free_pos * 4));
if (is_dsb_busy(dsb)) {
- DRM_ERROR("TAIL_PTR write failed - dsb engine is busy.\n");
+ drm_err(&dev_priv->drm,
+ "TAIL_PTR write failed - dsb engine is busy.\n");
goto reset;
}
- DRM_DEBUG_KMS("DSB execution started - head 0x%x, tail 0x%x\n",
- i915_ggtt_offset(dsb->vma), tail);
- I915_WRITE(DSB_TAIL(pipe, dsb->id), i915_ggtt_offset(dsb->vma) + tail);
+ drm_dbg_kms(&dev_priv->drm,
+ "DSB execution started - head 0x%x, tail 0x%x\n",
+ i915_ggtt_offset(dsb->vma), tail);
+ intel_de_write(dev_priv, DSB_TAIL(pipe, dsb->id),
+ i915_ggtt_offset(dsb->vma) + tail);
if (wait_for(!is_dsb_busy(dsb), 1)) {
- DRM_ERROR("Timed out waiting for DSB workload completion.\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSB workload completion.\n");
goto reset;
}
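
The DSB tail pointer must land on a cacheline boundary; free_pos counts dwords, so the byte tail is rounded up and the gap zero-filled (the memset call is split across the hunk boundary above). A worked illustration of the alignment math, with an assumed 64-byte cacheline and buffer position:

#include <stdio.h>

#define CACHELINE_BYTES 64	/* assumed value for illustration */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned int free_pos = 25;	/* dwords written, assumed */
	unsigned int tail = ALIGN_UP(free_pos * 4, CACHELINE_BYTES);

	/* 100 bytes rounds up to 128; the 28-byte gap is zero-filled */
	printf("tail %u, pad %u bytes\n", tail, tail - free_pos * 4);
	return 0;
}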
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
index c87838843d0b..b53c50372918 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_dcs_backlight.c
@@ -45,7 +45,7 @@
static u32 dcs_get_backlight(struct intel_connector *connector)
{
- struct intel_encoder *encoder = connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
struct mipi_dsi_device *dsi_device;
u8 data = 0;
@@ -160,13 +160,13 @@ int intel_dsi_dcs_init_backlight_funcs(struct intel_connector *intel_connector)
{
struct drm_device *dev = intel_connector->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_encoder *encoder = intel_connector->encoder;
+ struct intel_encoder *encoder = intel_attached_encoder(intel_connector);
struct intel_panel *panel = &intel_connector->panel;
if (dev_priv->vbt.backlight.type != INTEL_BACKLIGHT_DSI_DCS)
return -ENODEV;
- if (WARN_ON(encoder->type != INTEL_OUTPUT_DSI))
+ if (drm_WARN_ON(dev, encoder->type != INTEL_OUTPUT_DSI))
return -EINVAL;
panel->backlight.setup = dcs_setup_backlight;
diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
index 89fb0d90b694..574dcfec9577 100644
--- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
+++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c
@@ -36,7 +36,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include <video/mipi_display.h>
@@ -136,7 +135,7 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
u16 len;
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
flags = *data++;
type = *data++;
@@ -158,7 +157,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
dsi_device = intel_dsi->dsi_hosts[port]->device;
if (!dsi_device) {
- DRM_DEBUG_KMS("no dsi device for port %c\n", port_name(port));
+ drm_dbg_kms(&dev_priv->drm, "no dsi device for port %c\n",
+ port_name(port));
goto out;
}
@@ -182,7 +182,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
case MIPI_DSI_GENERIC_READ_REQUEST_0_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_1_PARAM:
case MIPI_DSI_GENERIC_READ_REQUEST_2_PARAM:
- DRM_DEBUG_DRIVER("Generic Read not yet implemented or used\n");
+ drm_dbg(&dev_priv->drm,
+ "Generic Read not yet implemented or used\n");
break;
case MIPI_DSI_GENERIC_LONG_WRITE:
mipi_dsi_generic_write(dsi_device, data, len);
@@ -194,7 +195,8 @@ static const u8 *mipi_exec_send_packet(struct intel_dsi *intel_dsi,
mipi_dsi_dcs_write_buffer(dsi_device, data, 2);
break;
case MIPI_DSI_DCS_READ:
- DRM_DEBUG_DRIVER("DCS Read not yet implemented or used\n");
+ drm_dbg(&dev_priv->drm,
+ "DCS Read not yet implemented or used\n");
break;
case MIPI_DSI_DCS_LONG_WRITE:
mipi_dsi_dcs_write_buffer(dsi_device, data, len);
@@ -212,9 +214,10 @@ out:
static const u8 *mipi_exec_delay(struct intel_dsi *intel_dsi, const u8 *data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
u32 delay = *((const u32 *) data);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&i915->drm, "\n");
usleep_range(delay, delay + 10);
data += 4;
@@ -231,7 +234,8 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
u8 port;
if (gpio_index >= ARRAY_SIZE(vlv_gpio_table)) {
- DRM_DEBUG_KMS("unknown gpio index %u\n", gpio_index);
+ drm_dbg_kms(&dev_priv->drm, "unknown gpio index %u\n",
+ gpio_index);
return;
}
@@ -244,10 +248,11 @@ static void vlv_exec_gpio(struct drm_i915_private *dev_priv,
if (gpio_source == 0) {
port = IOSF_PORT_GPIO_NC;
} else if (gpio_source == 1) {
- DRM_DEBUG_KMS("SC gpio not supported\n");
+ drm_dbg_kms(&dev_priv->drm, "SC gpio not supported\n");
return;
} else {
- DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
return;
}
}
@@ -291,13 +296,15 @@ static void chv_exec_gpio(struct drm_i915_private *dev_priv,
} else {
/* XXX: The spec is unclear about CHV GPIO on seq v2 */
if (gpio_source != 0) {
- DRM_DEBUG_KMS("unknown gpio source %u\n", gpio_source);
+ drm_dbg_kms(&dev_priv->drm,
+ "unknown gpio source %u\n", gpio_source);
return;
}
if (gpio_index >= CHV_GPIO_IDX_START_E) {
- DRM_DEBUG_KMS("invalid gpio index %u for GPIO N\n",
- gpio_index);
+ drm_dbg_kms(&dev_priv->drm,
+ "invalid gpio index %u for GPIO N\n",
+ gpio_index);
return;
}
@@ -332,8 +339,9 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
GPIOD_OUT_HIGH);
if (IS_ERR_OR_NULL(gpio_desc)) {
- DRM_ERROR("GPIO index %u request failed (%ld)\n",
- gpio_index, PTR_ERR(gpio_desc));
+ drm_err(&dev_priv->drm,
+ "GPIO index %u request failed (%ld)\n",
+ gpio_index, PTR_ERR(gpio_desc));
return;
}
@@ -346,7 +354,7 @@ static void bxt_exec_gpio(struct drm_i915_private *dev_priv,
static void icl_exec_gpio(struct drm_i915_private *dev_priv,
u8 gpio_source, u8 gpio_index, bool value)
{
- DRM_DEBUG_KMS("Skipping ICL GPIO element execution\n");
+ drm_dbg_kms(&dev_priv->drm, "Skipping ICL GPIO element execution\n");
}
static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
@@ -356,7 +364,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
u8 gpio_source, gpio_index = 0, gpio_number;
bool value;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (dev_priv->vbt.dsi.seq_version >= 3)
gpio_index = *data++;
@@ -384,6 +392,7 @@ static const u8 *mipi_exec_gpio(struct intel_dsi *intel_dsi, const u8 *data)
return data;
}
+#ifdef CONFIG_ACPI
static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
{
struct i2c_adapter_lookup *lookup = data;
@@ -393,8 +402,7 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
acpi_handle adapter_handle;
acpi_status status;
- if (intel_dsi->i2c_bus_num >= 0 ||
- !i2c_acpi_get_i2c_resource(ares, &sb))
+ if (!i2c_acpi_get_i2c_resource(ares, &sb))
return 1;
if (lookup->slave_addr != sb->slave_address)
@@ -413,14 +421,41 @@ static int i2c_adapter_lookup(struct acpi_resource *ares, void *data)
return 1;
}
-static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
+static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
{
struct drm_device *drm_dev = intel_dsi->base.base.dev;
struct device *dev = &drm_dev->pdev->dev;
- struct i2c_adapter *adapter;
struct acpi_device *acpi_dev;
struct list_head resource_list;
struct i2c_adapter_lookup lookup;
+
+ acpi_dev = ACPI_COMPANION(dev);
+ if (acpi_dev) {
+ memset(&lookup, 0, sizeof(lookup));
+ lookup.slave_addr = slave_addr;
+ lookup.intel_dsi = intel_dsi;
+ lookup.dev_handle = acpi_device_handle(acpi_dev);
+
+ INIT_LIST_HEAD(&resource_list);
+ acpi_dev_get_resources(acpi_dev, &resource_list,
+ i2c_adapter_lookup,
+ &lookup);
+ acpi_dev_free_resource_list(&resource_list);
+ }
+}
+#else
+static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi,
+ const u16 slave_addr)
+{
+}
+#endif
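
Hiding the ACPI walk behind i2c_acpi_find_adapter() with an empty inline stub keeps mipi_exec_i2c() free of preprocessor conditionals. The same pattern in miniature, with hypothetical names:

/* Hypothetical illustration of the stub pattern used above: */
#ifdef CONFIG_FOO
void find_foo_adapter(struct foo_dev *foo);		/* real lookup elsewhere */
#else
static inline void find_foo_adapter(struct foo_dev *foo) { }	/* no-op */
#endif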
+
+static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
+{
+ struct drm_device *drm_dev = intel_dsi->base.base.dev;
+ struct device *dev = &drm_dev->pdev->dev;
+ struct i2c_adapter *adapter;
struct i2c_msg msg;
int ret;
u8 vbt_i2c_bus_num = *(data + 2);
@@ -431,20 +466,7 @@ static const u8 *mipi_exec_i2c(struct intel_dsi *intel_dsi, const u8 *data)
if (intel_dsi->i2c_bus_num < 0) {
intel_dsi->i2c_bus_num = vbt_i2c_bus_num;
-
- acpi_dev = ACPI_COMPANION(dev);
- if (acpi_dev) {
- memset(&lookup, 0, sizeof(lookup));
- lookup.slave_addr = slave_addr;
- lookup.intel_dsi = intel_dsi;
- lookup.dev_handle = acpi_device_handle(acpi_dev);
-
- INIT_LIST_HEAD(&resource_list);
- acpi_dev_get_resources(acpi_dev, &resource_list,
- i2c_adapter_lookup,
- &lookup);
- acpi_dev_free_resource_list(&resource_list);
- }
+ i2c_acpi_find_adapter(intel_dsi, slave_addr);
}
adapter = i2c_get_adapter(intel_dsi->i2c_bus_num);
@@ -480,13 +502,16 @@ err_bus:
static const u8 *mipi_exec_spi(struct intel_dsi *intel_dsi, const u8 *data)
{
- DRM_DEBUG_KMS("Skipping SPI element execution\n");
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Skipping SPI element execution\n");
return data + *(data + 5) + 6;
}
static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
{
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
#ifdef CONFIG_PMIC_OPREGION
u32 value, mask, reg_address;
u16 i2c_address;
@@ -502,9 +527,10 @@ static const u8 *mipi_exec_pmic(struct intel_dsi *intel_dsi, const u8 *data)
reg_address,
value, mask);
if (ret)
- DRM_ERROR("%s failed, error: %d\n", __func__, ret);
+ drm_err(&i915->drm, "%s failed, error: %d\n", __func__, ret);
#else
- DRM_ERROR("Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
+ drm_err(&i915->drm,
+ "Your hardware requires CONFIG_PMIC_OPREGION and it is not set\n");
#endif
return data + 15;
@@ -556,17 +582,18 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
const u8 *data;
fn_mipi_elem_exec mipi_elem_exec;
- if (WARN_ON(seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ seq_id >= ARRAY_SIZE(dev_priv->vbt.dsi.sequence)))
return;
data = dev_priv->vbt.dsi.sequence[seq_id];
if (!data)
return;
- WARN_ON(*data != seq_id);
+ drm_WARN_ON(&dev_priv->drm, *data != seq_id);
- DRM_DEBUG_KMS("Starting MIPI sequence %d - %s\n",
- seq_id, sequence_name(seq_id));
+ drm_dbg_kms(&dev_priv->drm, "Starting MIPI sequence %d - %s\n",
+ seq_id, sequence_name(seq_id));
/* Skip Sequence Byte. */
data++;
@@ -598,18 +625,21 @@ static void intel_dsi_vbt_exec(struct intel_dsi *intel_dsi,
/* Consistency check if we have size. */
if (operation_size && data != next) {
- DRM_ERROR("Inconsistent operation size\n");
+ drm_err(&dev_priv->drm,
+ "Inconsistent operation size\n");
return;
}
} else if (operation_size) {
/* We have size, skip. */
- DRM_DEBUG_KMS("Unsupported MIPI operation byte %u\n",
- operation_byte);
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
data += operation_size;
} else {
/* No size, can't skip without parsing. */
- DRM_ERROR("Unsupported MIPI operation byte %u\n",
- operation_byte);
+ drm_err(&dev_priv->drm,
+ "Unsupported MIPI operation byte %u\n",
+ operation_byte);
return;
}
}
@@ -644,40 +674,54 @@ void intel_dsi_msleep(struct intel_dsi *intel_dsi, int msec)
void intel_dsi_log_params(struct intel_dsi *intel_dsi)
{
- DRM_DEBUG_KMS("Pclk %d\n", intel_dsi->pclk);
- DRM_DEBUG_KMS("Pixel overlap %d\n", intel_dsi->pixel_overlap);
- DRM_DEBUG_KMS("Lane count %d\n", intel_dsi->lane_count);
- DRM_DEBUG_KMS("DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
- DRM_DEBUG_KMS("Video mode format %s\n",
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
- "non-burst with sync pulse" :
- intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
- "non-burst with sync events" :
- intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
- "burst" : "<unknown>");
- DRM_DEBUG_KMS("Burst mode ratio %d\n", intel_dsi->burst_mode_ratio);
- DRM_DEBUG_KMS("Reset timer %d\n", intel_dsi->rst_timer_val);
- DRM_DEBUG_KMS("Eot %s\n", enableddisabled(intel_dsi->eotp_pkt));
- DRM_DEBUG_KMS("Clockstop %s\n", enableddisabled(!intel_dsi->clock_stop));
- DRM_DEBUG_KMS("Mode %s\n", intel_dsi->operation_mode ? "command" : "video");
+ struct drm_i915_private *i915 = to_i915(intel_dsi->base.base.dev);
+
+ drm_dbg_kms(&i915->drm, "Pclk %d\n", intel_dsi->pclk);
+ drm_dbg_kms(&i915->drm, "Pixel overlap %d\n",
+ intel_dsi->pixel_overlap);
+ drm_dbg_kms(&i915->drm, "Lane count %d\n", intel_dsi->lane_count);
+ drm_dbg_kms(&i915->drm, "DPHY param reg 0x%x\n", intel_dsi->dphy_reg);
+ drm_dbg_kms(&i915->drm, "Video mode format %s\n",
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_PULSE ?
+ "non-burst with sync pulse" :
+ intel_dsi->video_mode_format == VIDEO_MODE_NON_BURST_WITH_SYNC_EVENTS ?
+ "non-burst with sync events" :
+ intel_dsi->video_mode_format == VIDEO_MODE_BURST ?
+ "burst" : "<unknown>");
+ drm_dbg_kms(&i915->drm, "Burst mode ratio %d\n",
+ intel_dsi->burst_mode_ratio);
+ drm_dbg_kms(&i915->drm, "Reset timer %d\n", intel_dsi->rst_timer_val);
+ drm_dbg_kms(&i915->drm, "Eot %s\n",
+ enableddisabled(intel_dsi->eotp_pkt));
+ drm_dbg_kms(&i915->drm, "Clockstop %s\n",
+ enableddisabled(!intel_dsi->clock_stop));
+ drm_dbg_kms(&i915->drm, "Mode %s\n",
+ intel_dsi->operation_mode ? "command" : "video");
if (intel_dsi->dual_link == DSI_DUAL_LINK_FRONT_BACK)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_FRONT_BACK\n");
else if (intel_dsi->dual_link == DSI_DUAL_LINK_PIXEL_ALT)
- DRM_DEBUG_KMS("Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
+ drm_dbg_kms(&i915->drm,
+ "Dual link: DSI_DUAL_LINK_PIXEL_ALT\n");
else
- DRM_DEBUG_KMS("Dual link: NONE\n");
- DRM_DEBUG_KMS("Pixel Format %d\n", intel_dsi->pixel_format);
- DRM_DEBUG_KMS("TLPX %d\n", intel_dsi->escape_clk_div);
- DRM_DEBUG_KMS("LP RX Timeout 0x%x\n", intel_dsi->lp_rx_timeout);
- DRM_DEBUG_KMS("Turnaround Timeout 0x%x\n", intel_dsi->turn_arnd_val);
- DRM_DEBUG_KMS("Init Count 0x%x\n", intel_dsi->init_count);
- DRM_DEBUG_KMS("HS to LP Count 0x%x\n", intel_dsi->hs_to_lp_count);
- DRM_DEBUG_KMS("LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
- DRM_DEBUG_KMS("DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
- DRM_DEBUG_KMS("LP to HS Clock Count 0x%x\n", intel_dsi->clk_lp_to_hs_count);
- DRM_DEBUG_KMS("HS to LP Clock Count 0x%x\n", intel_dsi->clk_hs_to_lp_count);
- DRM_DEBUG_KMS("BTA %s\n",
- enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
+ drm_dbg_kms(&i915->drm, "Dual link: NONE\n");
+ drm_dbg_kms(&i915->drm, "Pixel Format %d\n", intel_dsi->pixel_format);
+ drm_dbg_kms(&i915->drm, "TLPX %d\n", intel_dsi->escape_clk_div);
+ drm_dbg_kms(&i915->drm, "LP RX Timeout 0x%x\n",
+ intel_dsi->lp_rx_timeout);
+ drm_dbg_kms(&i915->drm, "Turnaround Timeout 0x%x\n",
+ intel_dsi->turn_arnd_val);
+ drm_dbg_kms(&i915->drm, "Init Count 0x%x\n", intel_dsi->init_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Count 0x%x\n",
+ intel_dsi->hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "LP Byte Clock %d\n", intel_dsi->lp_byte_clk);
+ drm_dbg_kms(&i915->drm, "DBI BW Timer 0x%x\n", intel_dsi->bw_timer);
+ drm_dbg_kms(&i915->drm, "LP to HS Clock Count 0x%x\n",
+ intel_dsi->clk_lp_to_hs_count);
+ drm_dbg_kms(&i915->drm, "HS to LP Clock Count 0x%x\n",
+ intel_dsi->clk_hs_to_lp_count);
+ drm_dbg_kms(&i915->drm, "BTA %s\n",
+ enableddisabled(!(intel_dsi->video_frmt_cfg_bits & DISABLE_VIDEO_BTA)));
}
bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
@@ -690,7 +734,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
u16 burst_mode_ratio;
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
intel_dsi->eotp_pkt = mipi_config->eot_pkt_disabled ? 0 : 1;
intel_dsi->clock_stop = mipi_config->enable_clk_stop ? 1 : 0;
@@ -749,7 +793,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
mipi_config->target_burst_mode_freq = bitrate;
if (mipi_config->target_burst_mode_freq < bitrate) {
- DRM_ERROR("Burst mode freq is less than computed\n");
+ drm_err(&dev_priv->drm,
+ "Burst mode freq is less than computed\n");
return false;
}
@@ -759,7 +804,8 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id)
intel_dsi->pclk = DIV_ROUND_UP(intel_dsi->pclk * burst_mode_ratio, 100);
} else {
- DRM_ERROR("Burst mode target is not set\n");
+ drm_err(&dev_priv->drm,
+ "Burst mode target is not set\n");
return false;
}
} else
@@ -842,17 +888,20 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
ret = pinctrl_register_mappings(soc_pwm_pinctrl_map,
ARRAY_SIZE(soc_pwm_pinctrl_map));
if (ret)
- DRM_ERROR("Failed to register pwm0 pinmux mapping\n");
+ drm_err(&dev_priv->drm,
+ "Failed to register pwm0 pinmux mapping\n");
pinctrl = devm_pinctrl_get_select(dev->dev, "soc_pwm0");
if (IS_ERR(pinctrl))
- DRM_ERROR("Failed to set pinmux to PWM\n");
+ drm_err(&dev_priv->drm,
+ "Failed to set pinmux to PWM\n");
}
if (want_panel_gpio) {
intel_dsi->gpio_panel = gpiod_get(dev->dev, "panel", flags);
if (IS_ERR(intel_dsi->gpio_panel)) {
- DRM_ERROR("Failed to own gpio for panel control\n");
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for panel control\n");
intel_dsi->gpio_panel = NULL;
}
}
@@ -861,7 +910,8 @@ void intel_dsi_vbt_gpio_init(struct intel_dsi *intel_dsi, bool panel_is_on)
intel_dsi->gpio_backlight =
gpiod_get(dev->dev, "backlight", flags);
if (IS_ERR(intel_dsi->gpio_backlight)) {
- DRM_ERROR("Failed to own gpio for backlight control\n");
+ drm_err(&dev_priv->drm,
+ "Failed to own gpio for backlight control\n");
intel_dsi->gpio_backlight = NULL;
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_dvo.c b/drivers/gpu/drm/i915/display/intel_dvo.c
index 86a337c9d85d..341d5ce8b062 100644
--- a/drivers/gpu/drm/i915/display/intel_dvo.c
+++ b/drivers/gpu/drm/i915/display/intel_dvo.c
@@ -30,7 +30,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -44,6 +43,7 @@
#define INTEL_DVO_CHIP_LVDS 1
#define INTEL_DVO_CHIP_TMDS 2
#define INTEL_DVO_CHIP_TVOUT 4
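+/* LVDS, but without a fixed panel mode */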
+#define INTEL_DVO_CHIP_LVDS_NO_FIXED 5
#define SIL164_ADDR 0x38
#define CH7xxx_ADDR 0x76
@@ -101,13 +101,13 @@ static const struct intel_dvo_device intel_dvo_devices[] = {
.dev_ops = &ch7017_ops,
},
{
- .type = INTEL_DVO_CHIP_TMDS,
+ .type = INTEL_DVO_CHIP_LVDS_NO_FIXED,
.name = "ns2501",
.dvo_reg = DVOB,
.dvo_srcdim_reg = DVOB_SRCDIM,
.slave_addr = NS2501_ADDR,
.dev_ops = &ns2501_ops,
- }
+ },
};
struct intel_dvo {
@@ -137,7 +137,7 @@ static bool intel_dvo_connector_get_hw_state(struct intel_connector *connector)
struct intel_dvo *intel_dvo = intel_attached_dvo(connector);
u32 tmp;
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
if (!(tmp & DVO_ENABLE))
return false;
@@ -152,7 +152,7 @@ static bool intel_dvo_get_hw_state(struct intel_encoder *encoder,
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
u32 tmp;
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
*pipe = (tmp & DVO_PIPE_SEL_MASK) >> DVO_PIPE_SEL_SHIFT;
@@ -168,7 +168,7 @@ static void intel_dvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_DVO);
- tmp = I915_READ(intel_dvo->dev.dvo_reg);
+ tmp = intel_de_read(dev_priv, intel_dvo->dev.dvo_reg);
if (tmp & DVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
else
@@ -190,11 +190,11 @@ static void intel_disable_dvo(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = I915_READ(dvo_reg);
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, false);
- I915_WRITE(dvo_reg, temp & ~DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_de_write(dev_priv, dvo_reg, temp & ~DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
}
static void intel_enable_dvo(struct intel_encoder *encoder,
@@ -204,14 +204,14 @@ static void intel_enable_dvo(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dvo *intel_dvo = enc_to_dvo(encoder);
i915_reg_t dvo_reg = intel_dvo->dev.dvo_reg;
- u32 temp = I915_READ(dvo_reg);
+ u32 temp = intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->mode_set(&intel_dvo->dev,
&pipe_config->hw.mode,
&pipe_config->hw.adjusted_mode);
- I915_WRITE(dvo_reg, temp | DVO_ENABLE);
- I915_READ(dvo_reg);
+ intel_de_write(dev_priv, dvo_reg, temp | DVO_ENABLE);
+ intel_de_read(dev_priv, dvo_reg);
intel_dvo->dev.dev_ops->dpms(&intel_dvo->dev, true);
}
@@ -286,7 +286,7 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
i915_reg_t dvo_srcdim_reg = intel_dvo->dev.dvo_srcdim_reg;
/* Save the data order, since I don't know what it should be set to. */
- dvo_val = I915_READ(dvo_reg) &
+ dvo_val = intel_de_read(dev_priv, dvo_reg) &
(DVO_PRESERVE_MASK | DVO_DATA_ORDER_GBRG);
dvo_val |= DVO_DATA_ORDER_FP | DVO_BORDER_ENABLE |
DVO_BLANK_ACTIVE_HIGH;
@@ -301,11 +301,10 @@ static void intel_dvo_pre_enable(struct intel_encoder *encoder,
/*I915_WRITE(DVOB_SRCDIM,
(adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
(adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));*/
- I915_WRITE(dvo_srcdim_reg,
- (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) |
- (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
+ intel_de_write(dev_priv, dvo_srcdim_reg,
+ (adjusted_mode->crtc_hdisplay << DVO_SRCDIM_HORIZONTAL_SHIFT) | (adjusted_mode->crtc_vdisplay << DVO_SRCDIM_VERTICAL_SHIFT));
/*I915_WRITE(DVOB, dvo_val);*/
- I915_WRITE(dvo_reg, dvo_val);
+ intel_de_write(dev_priv, dvo_reg, dvo_val);
}
static enum drm_connector_status
@@ -481,15 +480,16 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
* initialize the device.
*/
for_each_pipe(dev_priv, pipe) {
- dpll[pipe] = I915_READ(DPLL(pipe));
- I915_WRITE(DPLL(pipe), dpll[pipe] | DPLL_DVO_2X_MODE);
+ dpll[pipe] = intel_de_read(dev_priv, DPLL(pipe));
+ intel_de_write(dev_priv, DPLL(pipe),
+ dpll[pipe] | DPLL_DVO_2X_MODE);
}
dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c);
/* restore the DVO 2x clock state to original */
for_each_pipe(dev_priv, pipe) {
- I915_WRITE(DPLL(pipe), dpll[pipe]);
+ intel_de_write(dev_priv, DPLL(pipe), dpll[pipe]);
}
intel_gmbus_force_bit(i2c, false);
@@ -507,17 +507,21 @@ void intel_dvo_init(struct drm_i915_private *dev_priv)
intel_encoder->port = port;
intel_encoder->pipe_mask = ~0;
- switch (dvo->type) {
- case INTEL_DVO_CHIP_TMDS:
+ if (dvo->type != INTEL_DVO_CHIP_LVDS)
intel_encoder->cloneable = (1 << INTEL_OUTPUT_ANALOG) |
(1 << INTEL_OUTPUT_DVO);
+
+ switch (dvo->type) {
+ case INTEL_DVO_CHIP_TMDS:
+ intel_connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_DVII);
encoder_type = DRM_MODE_ENCODER_TMDS;
break;
+ case INTEL_DVO_CHIP_LVDS_NO_FIXED:
case INTEL_DVO_CHIP_LVDS:
- intel_encoder->cloneable = 0;
drm_connector_init(&dev_priv->drm, connector,
&intel_dvo_connector_funcs,
DRM_MODE_CONNECTOR_LVDS);
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c
index a1048ece541e..2e5d835a9eaa 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.c
+++ b/drivers/gpu/drm/i915/display/intel_fbc.c
@@ -41,15 +41,12 @@
#include <drm/drm_fourcc.h>
#include "i915_drv.h"
+#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_display_types.h"
#include "intel_fbc.h"
#include "intel_frontbuffer.h"
-static inline bool fbc_supported(struct drm_i915_private *dev_priv)
-{
- return HAS_FBC(dev_priv);
-}
-
/*
* On some platforms where the CRTC's x:0/y:0 coordinates don't match the
* frontbuffer's x:0/y:0 coordinates we lie to the hardware about the plane's
@@ -97,12 +94,12 @@ static void i8xx_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 fbc_ctl;
/* Disable compression */
- fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
if ((fbc_ctl & FBC_CTL_EN) == 0)
return;
fbc_ctl &= ~FBC_CTL_EN;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
/* Wait for compressing bit to clear */
if (intel_de_wait_for_clear(dev_priv, FBC_STATUS,
@@ -132,7 +129,7 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
/* Clear old tags */
for (i = 0; i < (FBC_LL_SIZE / 32) + 1; i++)
- I915_WRITE(FBC_TAG(i), 0);
+ intel_de_write(dev_priv, FBC_TAG(i), 0);
if (IS_GEN(dev_priv, 4)) {
u32 fbc_ctl2;
@@ -142,12 +139,13 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl2 |= FBC_CTL_PLANE(params->crtc.i9xx_plane);
if (params->fence_id >= 0)
fbc_ctl2 |= FBC_CTL_CPU_FENCE;
- I915_WRITE(FBC_CONTROL2, fbc_ctl2);
- I915_WRITE(FBC_FENCE_OFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, FBC_CONTROL2, fbc_ctl2);
+ intel_de_write(dev_priv, FBC_FENCE_OFF,
+ params->crtc.fence_y_offset);
}
/* enable it... */
- fbc_ctl = I915_READ(FBC_CONTROL);
+ fbc_ctl = intel_de_read(dev_priv, FBC_CONTROL);
fbc_ctl &= 0x3fff << FBC_CTL_INTERVAL_SHIFT;
fbc_ctl |= FBC_CTL_EN | FBC_CTL_PERIODIC;
if (IS_I945GM(dev_priv))
@@ -155,12 +153,12 @@ static void i8xx_fbc_activate(struct drm_i915_private *dev_priv)
fbc_ctl |= (cfb_pitch & 0xff) << FBC_CTL_STRIDE_SHIFT;
if (params->fence_id >= 0)
fbc_ctl |= params->fence_id;
- I915_WRITE(FBC_CONTROL, fbc_ctl);
+ intel_de_write(dev_priv, FBC_CONTROL, fbc_ctl);
}
static bool i8xx_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(FBC_CONTROL) & FBC_CTL_EN;
+ return intel_de_read(dev_priv, FBC_CONTROL) & FBC_CTL_EN;
}
static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
@@ -176,13 +174,14 @@ static void g4x_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= DPFC_CTL_FENCE_EN | params->fence_id;
- I915_WRITE(DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, DPFC_FENCE_YOFF,
+ params->crtc.fence_y_offset);
} else {
- I915_WRITE(DPFC_FENCE_YOFF, 0);
+ intel_de_write(dev_priv, DPFC_FENCE_YOFF, 0);
}
/* enable it... */
- I915_WRITE(DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
}
static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
@@ -190,23 +189,27 @@ static void g4x_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = I915_READ(DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(dev_priv, DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(dev_priv, DPFC_CONTROL, dpfc_ctl);
}
}
static bool g4x_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(dev_priv, DPFC_CONTROL) & DPFC_CTL_EN;
}
/* This function forces a CFB recompression through the nuke operation. */
static void intel_fbc_recompress(struct drm_i915_private *dev_priv)
{
- I915_WRITE(MSG_FBC_REND_STATE, FBC_REND_NUKE);
- POSTING_READ(MSG_FBC_REND_STATE);
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ trace_intel_fbc_nuke(fbc->crtc);
+
+ intel_de_write(dev_priv, MSG_FBC_REND_STATE, FBC_REND_NUKE);
+ intel_de_posting_read(dev_priv, MSG_FBC_REND_STATE);
}
static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
@@ -237,22 +240,22 @@ static void ilk_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 5))
dpfc_ctl |= params->fence_id;
if (IS_GEN(dev_priv, 6)) {
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE |
- params->fence_id);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET,
- params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fence_id);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
}
} else {
if (IS_GEN(dev_priv, 6)) {
- I915_WRITE(SNB_DPFC_CTL_SA, 0);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
}
- I915_WRITE(ILK_DPFC_FENCE_YOFF, params->crtc.fence_y_offset);
+ intel_de_write(dev_priv, ILK_DPFC_FENCE_YOFF,
+ params->crtc.fence_y_offset);
/* enable it... */
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
}
@@ -262,16 +265,16 @@ static void ilk_fbc_deactivate(struct drm_i915_private *dev_priv)
u32 dpfc_ctl;
/* Disable compression */
- dpfc_ctl = I915_READ(ILK_DPFC_CONTROL);
+ dpfc_ctl = intel_de_read(dev_priv, ILK_DPFC_CONTROL);
if (dpfc_ctl & DPFC_CTL_EN) {
dpfc_ctl &= ~DPFC_CTL_EN;
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl);
}
}
static bool ilk_fbc_is_active(struct drm_i915_private *dev_priv)
{
- return I915_READ(ILK_DPFC_CONTROL) & DPFC_CTL_EN;
+ return intel_de_read(dev_priv, ILK_DPFC_CONTROL) & DPFC_CTL_EN;
}
static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
@@ -282,14 +285,14 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
/* Display WA #0529: skl, kbl, bxt. */
if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) {
- u32 val = I915_READ(CHICKEN_MISC_4);
+ u32 val = intel_de_read(dev_priv, CHICKEN_MISC_4);
val &= ~(FBC_STRIDE_OVERRIDE | FBC_STRIDE_MASK);
if (params->gen9_wa_cfb_stride)
val |= FBC_STRIDE_OVERRIDE | params->gen9_wa_cfb_stride;
- I915_WRITE(CHICKEN_MISC_4, val);
+ intel_de_write(dev_priv, CHICKEN_MISC_4, val);
}
dpfc_ctl = 0;
@@ -314,13 +317,13 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (params->fence_id >= 0) {
dpfc_ctl |= IVB_DPFC_CTL_FENCE_EN;
- I915_WRITE(SNB_DPFC_CTL_SA,
- SNB_CPU_FENCE_ENABLE |
- params->fence_id);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, params->crtc.fence_y_offset);
- } else {
- I915_WRITE(SNB_DPFC_CTL_SA,0);
- I915_WRITE(DPFC_CPU_FENCE_OFFSET, 0);
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA,
+ SNB_CPU_FENCE_ENABLE | params->fence_id);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET,
+ params->crtc.fence_y_offset);
+ } else if (dev_priv->ggtt.num_fences) {
+ intel_de_write(dev_priv, SNB_DPFC_CTL_SA, 0);
+ intel_de_write(dev_priv, DPFC_CPU_FENCE_OFFSET, 0);
}
if (dev_priv->fbc.false_color)
@@ -328,21 +331,20 @@ static void gen7_fbc_activate(struct drm_i915_private *dev_priv)
if (IS_IVYBRIDGE(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:ivb */
- I915_WRITE(ILK_DISPLAY_CHICKEN1,
- I915_READ(ILK_DISPLAY_CHICKEN1) |
- ILK_FBCQ_DIS);
+ intel_de_write(dev_priv, ILK_DISPLAY_CHICKEN1,
+ intel_de_read(dev_priv, ILK_DISPLAY_CHICKEN1) | ILK_FBCQ_DIS);
} else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
/* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */
- I915_WRITE(CHICKEN_PIPESL_1(params->crtc.pipe),
- I915_READ(CHICKEN_PIPESL_1(params->crtc.pipe)) |
- HSW_FBCQ_DIS);
+ intel_de_write(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe),
+ intel_de_read(dev_priv, CHICKEN_PIPESL_1(params->crtc.pipe)) | HSW_FBCQ_DIS);
}
if (INTEL_GEN(dev_priv) >= 11)
/* Wa_1409120013:icl,ehl,tgl */
- I915_WRITE(ILK_DPFC_CHICKEN, ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
+ intel_de_write(dev_priv, ILK_DPFC_CHICKEN,
+ ILK_DPFC_CHICKEN_COMP_DUMMY_PIXEL);
- I915_WRITE(ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
+ intel_de_write(dev_priv, ILK_DPFC_CONTROL, dpfc_ctl | DPFC_CTL_EN);
intel_fbc_recompress(dev_priv);
}
@@ -361,6 +363,8 @@ static void intel_fbc_hw_activate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ trace_intel_fbc_activate(fbc->crtc);
+
fbc->active = true;
fbc->activated = true;
@@ -378,6 +382,8 @@ static void intel_fbc_hw_deactivate(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
+ trace_intel_fbc_deactivate(fbc->crtc);
+
fbc->active = false;
if (INTEL_GEN(dev_priv) >= 5)
@@ -407,7 +413,7 @@ static void intel_fbc_deactivate(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
if (fbc->active)
intel_fbc_hw_deactivate(dev_priv);
@@ -471,7 +477,8 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
struct drm_mm_node *uninitialized_var(compressed_llb);
int ret;
- WARN_ON(drm_mm_node_allocated(&fbc->compressed_fb));
+ drm_WARN_ON(&dev_priv->drm,
+ drm_mm_node_allocated(&fbc->compressed_fb));
ret = find_compression_threshold(dev_priv, &fbc->compressed_fb,
size, fb_cpp);
@@ -485,9 +492,11 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->threshold = ret;
if (INTEL_GEN(dev_priv) >= 5)
- I915_WRITE(ILK_DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(dev_priv, ILK_DPFC_CB_BASE,
+ fbc->compressed_fb.start);
else if (IS_GM45(dev_priv)) {
- I915_WRITE(DPFC_CB_BASE, fbc->compressed_fb.start);
+ intel_de_write(dev_priv, DPFC_CB_BASE,
+ fbc->compressed_fb.start);
} else {
compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
if (!compressed_llb)
@@ -500,16 +509,16 @@ static int intel_fbc_alloc_cfb(struct drm_i915_private *dev_priv,
fbc->compressed_llb = compressed_llb;
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_fb.start,
- U32_MAX));
- GEM_BUG_ON(range_overflows_t(u64, dev_priv->dsm.start,
- fbc->compressed_llb->start,
- U32_MAX));
- I915_WRITE(FBC_CFB_BASE,
- dev_priv->dsm.start + fbc->compressed_fb.start);
- I915_WRITE(FBC_LL_BASE,
- dev_priv->dsm.start + compressed_llb->start);
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_fb.start,
+ U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64, dev_priv->dsm.start,
+ fbc->compressed_llb->start,
+ U32_MAX));
+ intel_de_write(dev_priv, FBC_CFB_BASE,
+ dev_priv->dsm.start + fbc->compressed_fb.start);
+ intel_de_write(dev_priv, FBC_LL_BASE,
+ dev_priv->dsm.start + compressed_llb->start);
}
DRM_DEBUG_KMS("reserved %llu bytes of contiguous stolen space for FBC, threshold: %d\n",
@@ -530,20 +539,22 @@ static void __intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (drm_mm_node_allocated(&fbc->compressed_fb))
- i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
+ if (!drm_mm_node_allocated(&fbc->compressed_fb))
+ return;
if (fbc->compressed_llb) {
i915_gem_stolen_remove_node(dev_priv, fbc->compressed_llb);
kfree(fbc->compressed_llb);
}
+
+ i915_gem_stolen_remove_node(dev_priv, &fbc->compressed_fb);
}
void intel_fbc_cleanup_cfb(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
@@ -555,7 +566,7 @@ static bool stride_is_valid(struct drm_i915_private *dev_priv,
unsigned int stride)
{
/* This should have been caught earlier. */
- if (WARN_ON_ONCE((stride & (64 - 1)) != 0))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, (stride & (64 - 1)) != 0))
return false;
/* Below are the additional FBC restrictions. */
@@ -663,8 +674,8 @@ static void intel_fbc_update_state_cache(struct intel_crtc *crtc,
cache->fb.format = fb->format;
cache->fb.stride = fb->pitches[0];
- WARN_ON(plane_state->flags & PLANE_HAS_FENCE &&
- !plane_state->vma->fence);
+ drm_WARN_ON(&dev_priv->drm, plane_state->flags & PLANE_HAS_FENCE &&
+ !plane_state->vma->fence);
if (plane_state->flags & PLANE_HAS_FENCE &&
plane_state->vma->fence)
@@ -681,12 +692,37 @@ static bool intel_fbc_cfb_size_changed(struct drm_i915_private *dev_priv)
fbc->compressed_fb.size * fbc->threshold;
}
+static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
+{
+ struct intel_fbc *fbc = &dev_priv->fbc;
+
+ if (intel_vgpu_active(dev_priv)) {
+ fbc->no_fbc_reason = "VGPU is active";
+ return false;
+ }
+
+ if (!i915_modparams.enable_fbc) {
+ fbc->no_fbc_reason = "disabled per module param or by default";
+ return false;
+ }
+
+ if (fbc->underrun_detected) {
+ fbc->no_fbc_reason = "underrun detected";
+ return false;
+ }
+
+ return true;
+}
+
static bool intel_fbc_can_activate(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
+ if (!intel_fbc_can_enable(dev_priv))
+ return false;
+
if (!cache->plane.visible) {
fbc->no_fbc_reason = "primary plane not visible";
return false;
@@ -785,28 +821,6 @@ static bool intel_fbc_can_activate(struct intel_crtc *crtc)
return true;
}
-static bool intel_fbc_can_enable(struct drm_i915_private *dev_priv)
-{
- struct intel_fbc *fbc = &dev_priv->fbc;
-
- if (intel_vgpu_active(dev_priv)) {
- fbc->no_fbc_reason = "VGPU is active";
- return false;
- }
-
- if (!i915_modparams.enable_fbc) {
- fbc->no_fbc_reason = "disabled per module param or by default";
- return false;
- }
-
- if (fbc->underrun_detected) {
- fbc->no_fbc_reason = "underrun detected";
- return false;
- }
-
- return true;
-}
-
static void intel_fbc_get_reg_params(struct intel_crtc *crtc,
struct intel_fbc_reg_params *params)
{
@@ -867,16 +881,20 @@ static bool intel_fbc_can_flip_nuke(const struct intel_crtc_state *crtc_state)
return true;
}
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
const char *reason = "update pending";
bool need_vblank_wait = false;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return need_vblank_wait;
mutex_lock(&fbc->lock);
@@ -926,9 +944,9 @@ static void __intel_fbc_disable(struct drm_i915_private *dev_priv)
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_crtc *crtc = fbc->crtc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
- WARN_ON(!fbc->crtc);
- WARN_ON(fbc->active);
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !fbc->crtc);
+ drm_WARN_ON(&dev_priv->drm, fbc->active);
DRM_DEBUG_KMS("Disabling FBC on pipe %c\n", pipe_name(crtc->pipe));
@@ -942,7 +960,7 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct intel_fbc *fbc = &dev_priv->fbc;
- WARN_ON(!mutex_is_locked(&fbc->lock));
+ drm_WARN_ON(&dev_priv->drm, !mutex_is_locked(&fbc->lock));
if (fbc->crtc != crtc)
return;
@@ -967,12 +985,16 @@ static void __intel_fbc_post_update(struct intel_crtc *crtc)
intel_fbc_deactivate(dev_priv, "frontbuffer write");
}
-void intel_fbc_post_update(struct intel_crtc *crtc)
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return;
mutex_lock(&fbc->lock);
@@ -994,7 +1016,7 @@ void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
if (origin == ORIGIN_GTT || origin == ORIGIN_FLIP)
@@ -1015,7 +1037,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv,
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
@@ -1099,24 +1121,26 @@ out:
/**
* intel_fbc_enable: tries to enable FBC on the CRTC
* @crtc: the CRTC
- * @crtc_state: corresponding &drm_crtc_state for @crtc
- * @plane_state: corresponding &drm_plane_state for the primary plane of @crtc
+ * @state: corresponding &intel_atomic_state for the commit
*
* This function checks if the given CRTC was chosen for FBC, then enables it if
* possible. Notice that it doesn't activate FBC. It is valid to call
* intel_fbc_enable multiple times for the same pipe without an
* intel_fbc_disable in the middle, as long as it is deactivated.
*/
-void intel_fbc_enable(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state)
+void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
+ const struct intel_crtc_state *crtc_state =
+ intel_atomic_get_new_crtc_state(state, crtc);
+ const struct intel_plane_state *plane_state =
+ intel_atomic_get_new_plane_state(state, plane);
struct intel_fbc *fbc = &dev_priv->fbc;
struct intel_fbc_state_cache *cache = &fbc->state_cache;
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc || !plane_state)
return;
mutex_lock(&fbc->lock);
@@ -1129,7 +1153,7 @@ void intel_fbc_enable(struct intel_crtc *crtc,
__intel_fbc_disable(dev_priv);
}
- WARN_ON(fbc->active);
+ drm_WARN_ON(&dev_priv->drm, fbc->active);
intel_fbc_update_state_cache(crtc, crtc_state, plane_state);
@@ -1139,14 +1163,14 @@ void intel_fbc_enable(struct intel_crtc *crtc,
if (intel_fbc_alloc_cfb(dev_priv,
intel_fbc_calculate_cfb_size(dev_priv, cache),
- fb->format->cpp[0])) {
+ plane_state->hw.fb->format->cpp[0])) {
cache->plane.visible = false;
fbc->no_fbc_reason = "not enough stolen memory";
goto out;
}
if ((IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv)) &&
- fb->modifier != I915_FORMAT_MOD_X_TILED)
+ plane_state->hw.fb->modifier != I915_FORMAT_MOD_X_TILED)
cache->gen9_wa_cfb_stride =
DIV_ROUND_UP(cache->plane.src_w, 32 * fbc->threshold) * 8;
else
@@ -1169,9 +1193,10 @@ out:
void intel_fbc_disable(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ struct intel_plane *plane = to_intel_plane(crtc->base.primary);
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!plane->has_fbc)
return;
mutex_lock(&fbc->lock);
@@ -1190,12 +1215,12 @@ void intel_fbc_global_disable(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
mutex_lock(&fbc->lock);
if (fbc->crtc) {
- WARN_ON(fbc->crtc->active);
+ drm_WARN_ON(&dev_priv->drm, fbc->crtc->active);
__intel_fbc_disable(dev_priv);
}
mutex_unlock(&fbc->lock);
@@ -1267,7 +1292,7 @@ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *dev_priv)
{
struct intel_fbc *fbc = &dev_priv->fbc;
- if (!fbc_supported(dev_priv))
+ if (!HAS_FBC(dev_priv))
return;
/* There's no guarantee that underrun_detected won't be set to true
@@ -1348,7 +1373,8 @@ void intel_fbc_init(struct drm_i915_private *dev_priv)
/* This value was pulled out of someone's hat */
if (INTEL_GEN(dev_priv) <= 4 && !IS_GM45(dev_priv))
- I915_WRITE(FBC_CONTROL, 500 << FBC_CTL_INTERVAL_SHIFT);
+ intel_de_write(dev_priv, FBC_CONTROL,
+ 500 << FBC_CTL_INTERVAL_SHIFT);
/* We still don't have any sort of hardware state readout for FBC, so
* deactivate it in case the BIOS activated it to make sure software
diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h
index c8a5e5098687..6dc1edefe81b 100644
--- a/drivers/gpu/drm/i915/display/intel_fbc.h
+++ b/drivers/gpu/drm/i915/display/intel_fbc.h
@@ -19,14 +19,13 @@ struct intel_plane_state;
void intel_fbc_choose_crtc(struct drm_i915_private *dev_priv,
struct intel_atomic_state *state);
bool intel_fbc_is_active(struct drm_i915_private *dev_priv);
-bool intel_fbc_pre_update(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
-void intel_fbc_post_update(struct intel_crtc *crtc);
+bool intel_fbc_pre_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
+void intel_fbc_post_update(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_fbc_init(struct drm_i915_private *dev_priv);
-void intel_fbc_enable(struct intel_crtc *crtc,
- const struct intel_crtc_state *crtc_state,
- const struct intel_plane_state *plane_state);
+void intel_fbc_enable(struct intel_atomic_state *state,
+ struct intel_crtc *crtc);
void intel_fbc_disable(struct intel_crtc *crtc);
void intel_fbc_global_disable(struct drm_i915_private *dev_priv);
void intel_fbc_invalidate(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_fbdev.c b/drivers/gpu/drm/i915/display/intel_fbdev.c
index 1e98e432c9fa..3bc804212a99 100644
--- a/drivers/gpu/drm/i915/display/intel_fbdev.c
+++ b/drivers/gpu/drm/i915/display/intel_fbdev.c
@@ -40,7 +40,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -191,7 +190,7 @@ static int intelfb_create(struct drm_fb_helper *helper,
drm_framebuffer_put(&intel_fb->base);
intel_fb = ifbdev->fb = NULL;
}
- if (!intel_fb || WARN_ON(!intel_fb_obj(&intel_fb->base))) {
+ if (!intel_fb || drm_WARN_ON(dev, !intel_fb_obj(&intel_fb->base))) {
DRM_DEBUG_KMS("no BIOS fb, allocating a new one\n");
ret = intelfb_alloc(helper, sizes);
if (ret)
@@ -410,9 +409,9 @@ static bool intel_fbdev_init_bios(struct drm_device *dev,
if (!crtc->state->active)
continue;
- WARN(!crtc->primary->state->fb,
- "re-used BIOS config but lost an fb on crtc %d\n",
- crtc->base.id);
+ drm_WARN(dev, !crtc->primary->state->fb,
+ "re-used BIOS config but lost an fb on crtc %d\n",
+ crtc->base.id);
}
@@ -439,7 +438,8 @@ int intel_fbdev_init(struct drm_device *dev)
struct intel_fbdev *ifbdev;
int ret;
- if (WARN_ON(!HAS_DISPLAY(dev_priv) || !INTEL_DISPLAY_ENABLED(dev_priv)))
+ if (drm_WARN_ON(dev, !HAS_DISPLAY(dev_priv) ||
+ !INTEL_DISPLAY_ENABLED(dev_priv)))
return -ENODEV;
ifbdev = kzalloc(sizeof(struct intel_fbdev), GFP_KERNEL);
@@ -452,7 +452,7 @@ int intel_fbdev_init(struct drm_device *dev)
if (!intel_fbdev_init_bios(dev, ifbdev))
ifbdev->preferred_bpp = 32;
- ret = drm_fb_helper_init(dev, &ifbdev->helper, 4);
+ ret = drm_fb_helper_init(dev, &ifbdev->helper);
if (ret) {
kfree(ifbdev);
return ret;
@@ -461,8 +461,6 @@ int intel_fbdev_init(struct drm_device *dev)
dev_priv->fbdev = ifbdev;
INIT_WORK(&dev_priv->fbdev_suspend_work, intel_fbdev_suspend_worker);
- drm_fb_helper_single_add_all_connectors(&ifbdev->helper);
-
return 0;
}
@@ -569,7 +567,7 @@ void intel_fbdev_set_suspend(struct drm_device *dev, int state, bool synchronous
* to all the printk activity. Try to keep it out of the hot
* path of resume if possible.
*/
- WARN_ON(state != FBINFO_STATE_RUNNING);
+ drm_WARN_ON(dev, state != FBINFO_STATE_RUNNING);
if (!console_trylock()) {
/* Don't block our own workqueue as this can
* be run in parallel with other i915.ko tasks.
diff --git a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
index 6c83b350525d..813a4f7033e1 100644
--- a/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
+++ b/drivers/gpu/drm/i915/display/intel_fifo_underrun.c
@@ -95,15 +95,15 @@ static void i9xx_check_fifo_underruns(struct intel_crtc *crtc)
lockdep_assert_held(&dev_priv->irq_lock);
- if ((I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
+ if ((intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS) == 0)
return;
enable_mask = i915_pipestat_enable_mask(dev_priv, crtc->pipe);
- I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
trace_intel_cpu_fifo_underrun(dev_priv, crtc->pipe);
- DRM_ERROR("pipe %c underrun\n", pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm, "pipe %c underrun\n", pipe_name(crtc->pipe));
}
static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -118,11 +118,13 @@ static void i9xx_set_fifo_underrun_reporting(struct drm_device *dev,
if (enable) {
u32 enable_mask = i915_pipestat_enable_mask(dev_priv, pipe);
- I915_WRITE(reg, enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg,
+ enable_mask | PIPE_FIFO_UNDERRUN_STATUS);
+ intel_de_posting_read(dev_priv, reg);
} else {
- if (old && I915_READ(reg) & PIPE_FIFO_UNDERRUN_STATUS)
- DRM_ERROR("pipe %c underrun\n", pipe_name(pipe));
+ if (old && intel_de_read(dev_priv, reg) & PIPE_FIFO_UNDERRUN_STATUS)
+ drm_err(&dev_priv->drm, "pipe %c underrun\n",
+ pipe_name(pipe));
}
}
@@ -143,18 +145,18 @@ static void ivb_check_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pipe = crtc->pipe;
- u32 err_int = I915_READ(GEN7_ERR_INT);
+ u32 err_int = intel_de_read(dev_priv, GEN7_ERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((err_int & ERR_INT_FIFO_UNDERRUN(pipe)) == 0)
return;
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
- POSTING_READ(GEN7_ERR_INT);
+ intel_de_write(dev_priv, GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_posting_read(dev_priv, GEN7_ERR_INT);
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("fifo underrun on pipe %c\n", pipe_name(pipe));
+ drm_err(&dev_priv->drm, "fifo underrun on pipe %c\n", pipe_name(pipe));
}
static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -163,7 +165,8 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
{
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
- I915_WRITE(GEN7_ERR_INT, ERR_INT_FIFO_UNDERRUN(pipe));
+ intel_de_write(dev_priv, GEN7_ERR_INT,
+ ERR_INT_FIFO_UNDERRUN(pipe));
if (!ivb_can_enable_err_int(dev))
return;
@@ -173,9 +176,10 @@ static void ivb_set_fifo_underrun_reporting(struct drm_device *dev,
ilk_disable_display_irq(dev_priv, DE_ERR_INT_IVB);
if (old &&
- I915_READ(GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
- DRM_ERROR("uncleared fifo underrun on pipe %c\n",
- pipe_name(pipe));
+ intel_de_read(dev_priv, GEN7_ERR_INT) & ERR_INT_FIFO_UNDERRUN(pipe)) {
+ drm_err(&dev_priv->drm,
+ "uncleared fifo underrun on pipe %c\n",
+ pipe_name(pipe));
}
}
}
@@ -209,19 +213,20 @@ static void cpt_check_pch_fifo_underruns(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
enum pipe pch_transcoder = crtc->pipe;
- u32 serr_int = I915_READ(SERR_INT);
+ u32 serr_int = intel_de_read(dev_priv, SERR_INT);
lockdep_assert_held(&dev_priv->irq_lock);
if ((serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) == 0)
return;
- I915_WRITE(SERR_INT, SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
- POSTING_READ(SERR_INT);
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_posting_read(dev_priv, SERR_INT);
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
@@ -231,8 +236,8 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
struct drm_i915_private *dev_priv = to_i915(dev);
if (enable) {
- I915_WRITE(SERR_INT,
- SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
+ intel_de_write(dev_priv, SERR_INT,
+ SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder));
if (!cpt_can_enable_serr_int(dev))
return;
@@ -241,10 +246,11 @@ static void cpt_set_fifo_underrun_reporting(struct drm_device *dev,
} else {
ibx_disable_display_interrupt(dev_priv, SDE_ERROR_CPT);
- if (old && I915_READ(SERR_INT) &
+ if (old && intel_de_read(dev_priv, SERR_INT) &
SERR_INT_TRANS_FIFO_UNDERRUN(pch_transcoder)) {
- DRM_ERROR("uncleared pch fifo underrun on pch transcoder %c\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm,
+ "uncleared pch fifo underrun on pch transcoder %c\n",
+ pipe_name(pch_transcoder));
}
}
}
@@ -378,8 +384,8 @@ void intel_cpu_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false)) {
trace_intel_cpu_fifo_underrun(dev_priv, pipe);
- DRM_ERROR("CPU pipe %c FIFO underrun\n",
- pipe_name(pipe));
+ drm_err(&dev_priv->drm, "CPU pipe %c FIFO underrun\n",
+ pipe_name(pipe));
}
intel_fbc_handle_fifo_underrun_irq(dev_priv);
@@ -400,8 +406,8 @@ void intel_pch_fifo_underrun_irq_handler(struct drm_i915_private *dev_priv,
if (intel_set_pch_fifo_underrun_reporting(dev_priv, pch_transcoder,
false)) {
trace_intel_pch_fifo_underrun(dev_priv, pch_transcoder);
- DRM_ERROR("PCH transcoder %c FIFO underrun\n",
- pipe_name(pch_transcoder));
+ drm_err(&dev_priv->drm, "PCH transcoder %c FIFO underrun\n",
+ pipe_name(pch_transcoder));
}
}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c
new file mode 100644
index 000000000000..a0cc894c3868
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.c
@@ -0,0 +1,223 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/string.h>
+
+#include "i915_drv.h"
+#include "intel_atomic.h"
+#include "intel_display_types.h"
+#include "intel_global_state.h"
+
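+/*
+ * Register @obj in the device-wide list of global objects. @state
+ * becomes the object's current state and is released through the
+ * funcs' atomic_destroy_state() hook at cleanup time.
+ */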
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs)
+{
+ memset(obj, 0, sizeof(*obj));
+
+ obj->state = state;
+ obj->funcs = funcs;
+ list_add_tail(&obj->head, &dev_priv->global_obj_list);
+}
+
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv)
+{
+ struct intel_global_obj *obj, *next;
+
+ list_for_each_entry_safe(obj, next, &dev_priv->global_obj_list, head) {
+ list_del(&obj->head);
+ obj->funcs->atomic_destroy_state(obj, obj->state);
+ }
+}
+
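+/*
+ * Writing global state requires holding every CRTC lock, while reading
+ * it only requires holding at least one of them.
+ */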
+static void assert_global_state_write_locked(struct drm_i915_private *dev_priv)
+{
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc)
+ drm_modeset_lock_assert_held(&crtc->base.mutex);
+}
+
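+/* Check whether @lock is among the locks already held in @ctx. */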
+static bool modeset_lock_is_held(struct drm_modeset_acquire_ctx *ctx,
+ struct drm_modeset_lock *lock)
+{
+ struct drm_modeset_lock *l;
+
+ list_for_each_entry(l, &ctx->locked, head) {
+ if (lock == l)
+ return true;
+ }
+
+ return false;
+}
+
+static void assert_global_state_read_locked(struct intel_atomic_state *state)
+{
+ struct drm_modeset_acquire_ctx *ctx = state->base.acquire_ctx;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ if (modeset_lock_is_held(ctx, &crtc->base.mutex))
+ return;
+ }
+
+ WARN(1, "Global state not read locked\n");
+}
+
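+/*
+ * Return the atomic state's copy of @obj's state, duplicating the
+ * current object state on first use. At least one CRTC lock (the read
+ * lock) must already be held when the duplicate is made.
+ */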
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int index, num_objs, i;
+ size_t size;
+ struct __intel_global_objs_state *arr;
+ struct intel_global_state *obj_state;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].state;
+
+ assert_global_state_read_locked(state);
+
+ num_objs = state->num_global_objs + 1;
+ size = sizeof(*state->global_objs) * num_objs;
+ arr = krealloc(state->global_objs, size, GFP_KERNEL);
+ if (!arr)
+ return ERR_PTR(-ENOMEM);
+
+ state->global_objs = arr;
+ index = state->num_global_objs;
+ memset(&state->global_objs[index], 0, sizeof(*state->global_objs));
+
+ obj_state = obj->funcs->atomic_duplicate_state(obj);
+ if (!obj_state)
+ return ERR_PTR(-ENOMEM);
+
+ obj_state->changed = false;
+
+ state->global_objs[index].state = obj_state;
+ state->global_objs[index].old_state = obj->state;
+ state->global_objs[index].new_state = obj_state;
+ state->global_objs[index].ptr = obj;
+ obj_state->state = state;
+
+ state->num_global_objs = num_objs;
+
+ DRM_DEBUG_ATOMIC("Added new global object %p state %p to %p\n",
+ obj, obj_state, state);
+
+ return obj_state;
+}
+
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].old_state;
+
+ return NULL;
+}
+
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++)
+ if (obj == state->global_objs[i].ptr)
+ return state->global_objs[i].new_state;
+
+ return NULL;
+}
+
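+/*
+ * At commit time, swap every state that was actually modified (and
+ * hence write locked) into place as the new obj->state; unmodified
+ * duplicates are simply thrown away.
+ */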
+void intel_atomic_swap_global_state(struct intel_atomic_state *state)
+{
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_global_state *old_obj_state, *new_obj_state;
+ struct intel_global_obj *obj;
+ int i;
+
+ for_each_oldnew_global_obj_in_state(state, obj, old_obj_state,
+ new_obj_state, i) {
+ WARN_ON(obj->state != old_obj_state);
+
+ /*
+ * If the new state wasn't modified (and properly
+ * locked for write access) we throw it away.
+ */
+ if (!new_obj_state->changed)
+ continue;
+
+ assert_global_state_write_locked(dev_priv);
+
+ old_obj_state->state = state;
+ new_obj_state->state = NULL;
+
+ state->global_objs[i].state = old_obj_state;
+ obj->state = new_obj_state;
+ }
+}
+
+void intel_atomic_clear_global_state(struct intel_atomic_state *state)
+{
+ int i;
+
+ for (i = 0; i < state->num_global_objs; i++) {
+ struct intel_global_obj *obj = state->global_objs[i].ptr;
+
+ obj->funcs->atomic_destroy_state(obj,
+ state->global_objs[i].state);
+ state->global_objs[i].ptr = NULL;
+ state->global_objs[i].state = NULL;
+ state->global_objs[i].old_state = NULL;
+ state->global_objs[i].new_state = NULL;
+ }
+ state->num_global_objs = 0;
+}
+
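+/*
+ * Take all the CRTC locks so that @obj_state may be written, and mark
+ * it as changed so the swap at commit time picks it up.
+ */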
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ int ret;
+
+ ret = drm_modeset_lock(&crtc->base.mutex,
+ state->base.acquire_ctx);
+ if (ret)
+ return ret;
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
+
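+/*
+ * Pull every CRTC state into the commit so that it serializes against
+ * any other commit that could touch the global state.
+ */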
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state)
+{
+ struct intel_atomic_state *state = obj_state->state;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
+ struct intel_crtc *crtc;
+
+ for_each_intel_crtc(&dev_priv->drm, crtc) {
+ struct intel_crtc_state *crtc_state;
+
+ crtc_state = intel_atomic_get_crtc_state(&state->base, crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+ }
+
+ obj_state->changed = true;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h
new file mode 100644
index 000000000000..e6163a469029
--- /dev/null
+++ b/drivers/gpu/drm/i915/display/intel_global_state.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_GLOBAL_STATE_H__
+#define __INTEL_GLOBAL_STATE_H__
+
+#include <linux/list.h>
+
+struct drm_i915_private;
+struct intel_atomic_state;
+struct intel_global_obj;
+struct intel_global_state;
+
+struct intel_global_state_funcs {
+ struct intel_global_state *(*atomic_duplicate_state)(struct intel_global_obj *obj);
+ void (*atomic_destroy_state)(struct intel_global_obj *obj,
+ struct intel_global_state *state);
+};
+
+struct intel_global_obj {
+ struct list_head head;
+ struct intel_global_state *state;
+ const struct intel_global_state_funcs *funcs;
+};
+
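+/*
+ * Iterate the global objects registered with the device, and the
+ * object states captured in an atomic state, respectively.
+ */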
+#define intel_for_each_global_obj(obj, dev_priv) \
+ list_for_each_entry(obj, &(dev_priv)->global_obj_list, head)
+
+#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
+#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_global_objs && \
+ ((obj) = (__state)->global_objs[__i].ptr, \
+ (old_obj_state) = (__state)->global_objs[__i].old_state, \
+ (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if(obj)
+
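+/* Base type meant to be embedded in the actual global state object. */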
+struct intel_global_state {
+ struct intel_atomic_state *state;
+ bool changed;
+};
+
+struct __intel_global_objs_state {
+ struct intel_global_obj *ptr;
+ struct intel_global_state *state, *old_state, *new_state;
+};
+
+void intel_atomic_global_obj_init(struct drm_i915_private *dev_priv,
+ struct intel_global_obj *obj,
+ struct intel_global_state *state,
+ const struct intel_global_state_funcs *funcs);
+void intel_atomic_global_obj_cleanup(struct drm_i915_private *dev_priv);
+
+struct intel_global_state *
+intel_atomic_get_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_old_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+struct intel_global_state *
+intel_atomic_get_new_global_obj_state(struct intel_atomic_state *state,
+ struct intel_global_obj *obj);
+
+void intel_atomic_swap_global_state(struct intel_atomic_state *state);
+void intel_atomic_clear_global_state(struct intel_atomic_state *state);
+int intel_atomic_lock_global_state(struct intel_global_state *obj_state);
+int intel_atomic_serialize_global_state(struct intel_global_state *obj_state);
+
+#endif
diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c
index 3d4d19ac1d14..1fd3a5a6296b 100644
--- a/drivers/gpu/drm/i915/display/intel_gmbus.c
+++ b/drivers/gpu/drm/i915/display/intel_gmbus.c
@@ -32,7 +32,6 @@
#include <linux/i2c.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_display_types.h"
@@ -143,8 +142,8 @@ to_intel_gmbus(struct i2c_adapter *i2c)
void
intel_gmbus_reset(struct drm_i915_private *dev_priv)
{
- I915_WRITE(GMBUS0, 0);
- I915_WRITE(GMBUS4, 0);
+ intel_de_write(dev_priv, GMBUS0, 0);
+ intel_de_write(dev_priv, GMBUS4, 0);
}
static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -153,12 +152,12 @@ static void pnv_gmbus_clock_gating(struct drm_i915_private *dev_priv,
u32 val;
/* When using bit bashing for I2C, this bit needs to be set to 1 */
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
if (!enable)
val |= PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PNV_GMBUSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -166,12 +165,12 @@ static void pch_gmbus_clock_gating(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(SOUTH_DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, SOUTH_DSPCLK_GATE_D);
if (!enable)
val |= PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
else
val &= ~PCH_GMBUSUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(SOUTH_DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, SOUTH_DSPCLK_GATE_D, val);
}
static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
@@ -179,12 +178,12 @@ static void bxt_gmbus_clock_gating(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(GEN9_CLKGATE_DIS_4);
+ val = intel_de_read(dev_priv, GEN9_CLKGATE_DIS_4);
if (!enable)
val |= BXT_GMBUS_GATING_DIS;
else
val &= ~BXT_GMBUS_GATING_DIS;
- I915_WRITE(GEN9_CLKGATE_DIS_4, val);
+ intel_de_write(dev_priv, GEN9_CLKGATE_DIS_4, val);
}
static u32 get_reserved(struct intel_gmbus *bus)
@@ -337,14 +336,16 @@ static int gmbus_wait(struct drm_i915_private *dev_priv, u32 status, u32 irq_en)
irq_en = 0;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- I915_WRITE_FW(GMBUS4, irq_en);
+ intel_de_write_fw(dev_priv, GMBUS4, irq_en);
status |= GMBUS_SATOER;
- ret = wait_for_us((gmbus2 = I915_READ_FW(GMBUS2)) & status, 2);
+ ret = wait_for_us((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ 2);
if (ret)
- ret = wait_for((gmbus2 = I915_READ_FW(GMBUS2)) & status, 50);
+ ret = wait_for((gmbus2 = intel_de_read_fw(dev_priv, GMBUS2)) & status,
+ 50);
- I915_WRITE_FW(GMBUS4, 0);
+ intel_de_write_fw(dev_priv, GMBUS4, 0);
remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
if (gmbus2 & GMBUS_SATOER)
@@ -366,13 +367,13 @@ gmbus_wait_idle(struct drm_i915_private *dev_priv)
irq_enable = GMBUS_IDLE_EN;
add_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
- I915_WRITE_FW(GMBUS4, irq_enable);
+ intel_de_write_fw(dev_priv, GMBUS4, irq_enable);
ret = intel_wait_for_register_fw(&dev_priv->uncore,
GMBUS2, GMBUS_ACTIVE, 0,
10);
- I915_WRITE_FW(GMBUS4, 0);
+ intel_de_write_fw(dev_priv, GMBUS4, 0);
remove_wait_queue(&dev_priv->gmbus_wait_queue, &wait);
return ret;
@@ -404,15 +405,12 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
len++;
}
size = len % 256 + 256;
- I915_WRITE_FW(GMBUS0, gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
+ intel_de_write_fw(dev_priv, GMBUS0,
+ gmbus0_reg | GMBUS_BYTE_CNT_OVERRIDE);
}
- I915_WRITE_FW(GMBUS1,
- gmbus1_index |
- GMBUS_CYCLE_WAIT |
- (size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_READ | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS1,
+ gmbus1_index | GMBUS_CYCLE_WAIT | (size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY);
while (len) {
int ret;
u32 val, loop = 0;
@@ -421,7 +419,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (ret)
return ret;
- val = I915_READ_FW(GMBUS3);
+ val = intel_de_read_fw(dev_priv, GMBUS3);
do {
if (extra_byte_added && len == 1)
break;
@@ -432,7 +430,7 @@ gmbus_xfer_read_chunk(struct drm_i915_private *dev_priv,
if (burst_read && len == size - 4)
/* Reset the override bit */
- I915_WRITE_FW(GMBUS0, gmbus0_reg);
+ intel_de_write_fw(dev_priv, GMBUS0, gmbus0_reg);
}
return 0;
@@ -489,12 +487,9 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
len -= 1;
}
- I915_WRITE_FW(GMBUS3, val);
- I915_WRITE_FW(GMBUS1,
- gmbus1_index | GMBUS_CYCLE_WAIT |
- (chunk_size << GMBUS_BYTE_COUNT_SHIFT) |
- (addr << GMBUS_SLAVE_ADDR_SHIFT) |
- GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS3, val);
+ intel_de_write_fw(dev_priv, GMBUS1,
+ gmbus1_index | GMBUS_CYCLE_WAIT | (chunk_size << GMBUS_BYTE_COUNT_SHIFT) | (addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY);
while (len) {
int ret;
@@ -503,7 +498,7 @@ gmbus_xfer_write_chunk(struct drm_i915_private *dev_priv,
val |= *buf++ << (8 * loop);
} while (--len && ++loop < 4);
- I915_WRITE_FW(GMBUS3, val);
+ intel_de_write_fw(dev_priv, GMBUS3, val);
ret = gmbus_wait(dev_priv, GMBUS_HW_RDY, GMBUS_HW_RDY_EN);
if (ret)
@@ -568,7 +563,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* GMBUS5 holds 16-bit index */
if (gmbus5)
- I915_WRITE_FW(GMBUS5, gmbus5);
+ intel_de_write_fw(dev_priv, GMBUS5, gmbus5);
if (msgs[1].flags & I2C_M_RD)
ret = gmbus_xfer_read(dev_priv, &msgs[1], gmbus0_reg,
@@ -578,7 +573,7 @@ gmbus_index_xfer(struct drm_i915_private *dev_priv, struct i2c_msg *msgs,
/* Clear GMBUS5 after each index transfer */
if (gmbus5)
- I915_WRITE_FW(GMBUS5, 0);
+ intel_de_write_fw(dev_priv, GMBUS5, 0);
return ret;
}
@@ -601,7 +596,7 @@ do_gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num,
pch_gmbus_clock_gating(dev_priv, false);
retry:
- I915_WRITE_FW(GMBUS0, gmbus0_source | bus->reg0);
+ intel_de_write_fw(dev_priv, GMBUS0, gmbus0_source | bus->reg0);
for (; i < num; i += inc) {
inc = 1;
@@ -629,18 +624,19 @@ retry:
* a STOP on the very first cycle. To simplify the code we
* unconditionally generate the STOP condition with an additional gmbus
* cycle. */
- I915_WRITE_FW(GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
+ intel_de_write_fw(dev_priv, GMBUS1, GMBUS_CYCLE_STOP | GMBUS_SW_RDY);
/* Mark the GMBUS interface as disabled after waiting for idle.
* We will re-enable it at the start of the next xfer,
* till then let it sleep.
*/
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out waiting for idle\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out waiting for idle\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
- I915_WRITE_FW(GMBUS0, 0);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
ret = ret ?: i;
goto out;
@@ -660,8 +656,9 @@ clear_err:
*/
ret = -ENXIO;
if (gmbus_wait_idle(dev_priv)) {
- DRM_DEBUG_KMS("GMBUS [%s] timed out after NAK\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out after NAK\n",
+ adapter->name);
ret = -ETIMEDOUT;
}
@@ -669,13 +666,13 @@ clear_err:
* of resetting the GMBUS controller and so clearing the
* BUS_ERROR raised by the slave's NAK.
*/
- I915_WRITE_FW(GMBUS1, GMBUS_SW_CLR_INT);
- I915_WRITE_FW(GMBUS1, 0);
- I915_WRITE_FW(GMBUS0, 0);
+ intel_de_write_fw(dev_priv, GMBUS1, GMBUS_SW_CLR_INT);
+ intel_de_write_fw(dev_priv, GMBUS1, 0);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
- DRM_DEBUG_KMS("GMBUS [%s] NAK for addr: %04x %c(%d)\n",
- adapter->name, msgs[i].addr,
- (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
+ drm_dbg_kms(&dev_priv->drm, "GMBUS [%s] NAK for addr: %04x %c(%d)\n",
+ adapter->name, msgs[i].addr,
+ (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
/*
* Passive adapters sometimes NAK the first probe. Retry the first
@@ -684,17 +681,19 @@ clear_err:
* drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
*/
if (ret == -ENXIO && i == 0 && try++ == 0) {
- DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
- adapter->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] NAK on first message, retry\n",
+ adapter->name);
goto retry;
}
goto out;
timeout:
- DRM_DEBUG_KMS("GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
- bus->adapter.name, bus->reg0 & 0xff);
- I915_WRITE_FW(GMBUS0, 0);
+ drm_dbg_kms(&dev_priv->drm,
+ "GMBUS [%s] timed out, falling back to bit banging on pin %d\n",
+ bus->adapter.name, bus->reg0 & 0xff);
+ intel_de_write_fw(dev_priv, GMBUS0, 0);
/*
* Hardware may not support GMBUS over these pins? Try GPIO bitbanging
@@ -908,7 +907,8 @@ err:
struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv,
unsigned int pin)
{
- if (WARN_ON(!intel_gmbus_is_valid_pin(dev_priv, pin)))
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_gmbus_is_valid_pin(dev_priv, pin)))
return NULL;
return &dev_priv->gmbus[pin].adapter;
@@ -929,9 +929,10 @@ void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit)
mutex_lock(&dev_priv->gmbus_mutex);
bus->force_bit += force_bit ? 1 : -1;
- DRM_DEBUG_KMS("%sabling bit-banging on %s. force bit now %d\n",
- force_bit ? "en" : "dis", adapter->name,
- bus->force_bit);
+ drm_dbg_kms(&dev_priv->drm,
+ "%sabling bit-banging on %s. force bit now %d\n",
+ force_bit ? "en" : "dis", adapter->name,
+ bus->force_bit);
mutex_unlock(&dev_priv->gmbus_mutex);
}
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c
index 0fdbd39f6641..ee0f27ea2810 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.c
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.c
@@ -43,6 +43,7 @@ static
int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
const struct intel_hdcp_shim *shim, u8 *bksv)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
int ret, i, tries = 2;
/* HDCP spec states that we must retry the bksv if it is invalid */
@@ -54,7 +55,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Bksv is invalid\n");
+ drm_dbg_kms(&i915->drm, "Bksv is invalid\n");
return -ENODEV;
}
@@ -64,7 +65,7 @@ int intel_hdcp_read_valid_bksv(struct intel_digital_port *intel_dig_port,
/* Is HDCP1.4 capable on Platform and Sink */
bool intel_hdcp_capable(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
bool capable = false;
u8 bksv[5];
@@ -85,8 +86,8 @@ bool intel_hdcp_capable(struct intel_connector *connector)
/* Is HDCP2.2 capable on Platform and Sink */
bool intel_hdcp2_capable(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
bool capable = false;
@@ -112,7 +113,8 @@ static inline
bool intel_hdcp_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- return I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ return intel_de_read(dev_priv,
+ HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
HDCP_STATUS_ENC;
}
@@ -120,7 +122,8 @@ static inline
bool intel_hdcp2_in_use(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder, enum port port)
{
- return I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ return intel_de_read(dev_priv,
+ HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_ENCRYPTION_STATUS;
}
@@ -184,9 +187,9 @@ static bool hdcp_key_loadable(struct drm_i915_private *dev_priv)
static void intel_hdcp_clear_keys(struct drm_i915_private *dev_priv)
{
- I915_WRITE(HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
- I915_WRITE(HDCP_KEY_STATUS, HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS |
- HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_CLEAR_KEYS_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_STATUS,
+ HDCP_KEY_LOAD_DONE | HDCP_KEY_LOAD_STATUS | HDCP_FUSE_IN_PROGRESS | HDCP_FUSE_ERROR | HDCP_FUSE_DONE);
}
static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
@@ -194,7 +197,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
int ret;
u32 val;
- val = I915_READ(HDCP_KEY_STATUS);
+ val = intel_de_read(dev_priv, HDCP_KEY_STATUS);
if ((val & HDCP_KEY_LOAD_DONE) && (val & HDCP_KEY_LOAD_STATUS))
return 0;
@@ -203,7 +206,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
	 * out of reset. So if Key is not already loaded, it's an error state.
*/
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- if (!(I915_READ(HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
+ if (!(intel_de_read(dev_priv, HDCP_KEY_STATUS) & HDCP_KEY_LOAD_DONE))
return -ENXIO;
/*
@@ -217,12 +220,13 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
ret = sandybridge_pcode_write(dev_priv,
SKL_PCODE_LOAD_HDCP_KEYS, 1);
if (ret) {
- DRM_ERROR("Failed to initiate HDCP key load (%d)\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to initiate HDCP key load (%d)\n",
+ ret);
return ret;
}
} else {
- I915_WRITE(HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_KEY_LOAD_TRIGGER);
}
/* Wait for the keys to load (500us) */
@@ -235,7 +239,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
return -ENXIO;
/* Send Aksv over to PCH display for use in authentication */
- I915_WRITE(HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
+ intel_de_write(dev_priv, HDCP_KEY_CONF, HDCP_AKSV_SEND_TRIGGER);
return 0;
}
@@ -243,9 +247,9 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv)
/* Returns updated SHA-1 index */
static int intel_write_sha_text(struct drm_i915_private *dev_priv, u32 sha_text)
{
- I915_WRITE(HDCP_SHA_TEXT, sha_text);
+ intel_de_write(dev_priv, HDCP_SHA_TEXT, sha_text);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL, HDCP_SHA1_READY, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 ready\n");
return -ETIMEDOUT;
}
return 0;
@@ -270,7 +274,8 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
return HDCP_TRANSD_REP_PRESENT |
HDCP_TRANSD_SHA1_M0;
default:
- DRM_ERROR("Unknown transcoder %d\n", cpu_transcoder);
+ drm_err(&dev_priv->drm, "Unknown transcoder %d\n",
+ cpu_transcoder);
return -EINVAL;
}
}
@@ -287,7 +292,7 @@ u32 intel_hdcp_get_repeater_ctl(struct drm_i915_private *dev_priv,
case PORT_E:
return HDCP_DDIE_REP_PRESENT | HDCP_DDIE_SHA1_M0;
default:
- DRM_ERROR("Unknown port %d\n", port);
+ drm_err(&dev_priv->drm, "Unknown port %d\n", port);
return -EINVAL;
}
}
@@ -297,21 +302,19 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
const struct intel_hdcp_shim *shim,
u8 *ksv_fifo, u8 num_downstream, u8 *bstatus)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
- struct drm_i915_private *dev_priv;
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
enum port port = intel_dig_port->base.port;
u32 vprime, sha_text, sha_leftovers, rep_ctl;
int ret, i, j, sha_idx;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
-
/* Process V' values from the receiver */
for (i = 0; i < DRM_HDCP_V_PRIME_NUM_PARTS; i++) {
ret = shim->read_v_prime_part(intel_dig_port, i, &vprime);
if (ret)
return ret;
- I915_WRITE(HDCP_SHA_V_PRIME(i), vprime);
+ intel_de_write(dev_priv, HDCP_SHA_V_PRIME(i), vprime);
}
/*
@@ -328,7 +331,7 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_text = 0;
sha_leftovers = 0;
rep_ctl = intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port);
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
for (i = 0; i < num_downstream; i++) {
unsigned int sha_empty;
u8 *ksv = &ksv_fifo[i * DRM_HDCP_KSV_LEN];
@@ -345,7 +348,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
/* Programming guide writes this every 64 bytes */
sha_idx += sizeof(sha_text);
if (!(sha_idx % 64))
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
/* Store the leftover bytes from the ksv in sha_text */
sha_leftovers = DRM_HDCP_KSV_LEN - sha_empty;
@@ -377,7 +381,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
*/
if (sha_leftovers == 0) {
/* Write 16 bits of text, 16 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
ret = intel_write_sha_text(dev_priv,
bstatus[0] << 8 | bstatus[1]);
if (ret < 0)
@@ -385,14 +390,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 16 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_16);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_16);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
@@ -400,7 +407,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
} else if (sha_leftovers == 1) {
/* Write 24 bits of text, 8 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
sha_text |= bstatus[0] << 16 | bstatus[1] << 8;
/* Only 24-bits of data, must be in the LSB */
sha_text = (sha_text & 0xffffff00) >> 8;
@@ -410,14 +418,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 24 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
@@ -425,7 +435,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
} else if (sha_leftovers == 2) {
/* Write 32 bits of text */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 24 | bstatus[1] << 16;
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -433,7 +444,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 64 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
for (i = 0; i < 2; i++) {
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
@@ -442,7 +454,8 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
}
} else if (sha_leftovers == 3) {
/* Write 32 bits of text */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_32);
sha_text |= bstatus[0] << 24;
ret = intel_write_sha_text(dev_priv, sha_text);
if (ret < 0)
@@ -450,32 +463,35 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
sha_idx += sizeof(sha_text);
/* Write 8 bits of text, 24 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_8);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_8);
ret = intel_write_sha_text(dev_priv, bstatus[1]);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 32 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_0);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_0);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
/* Write 8 bits of M0 */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_24);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_TEXT_24);
ret = intel_write_sha_text(dev_priv, 0);
if (ret < 0)
return ret;
sha_idx += sizeof(sha_text);
} else {
- DRM_DEBUG_KMS("Invalid number of leftovers %d\n",
- sha_leftovers);
+ drm_dbg_kms(&dev_priv->drm, "Invalid number of leftovers %d\n",
+ sha_leftovers);
return -EINVAL;
}
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
+ intel_de_write(dev_priv, HDCP_REP_CTL, rep_ctl | HDCP_SHA1_TEXT_32);
/* Fill up to 64-4 bytes with zeros (leave the last write for length) */
while ((sha_idx % 64) < (64 - sizeof(sha_text))) {
ret = intel_write_sha_text(dev_priv, 0);
@@ -495,14 +511,15 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
return ret;
/* Tell the HW we're done with the hash and wait for it to ACK */
- I915_WRITE(HDCP_REP_CTL, rep_ctl | HDCP_SHA1_COMPLETE_HASH);
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ rep_ctl | HDCP_SHA1_COMPLETE_HASH);
if (intel_de_wait_for_set(dev_priv, HDCP_REP_CTL,
HDCP_SHA1_COMPLETE, 1)) {
- DRM_ERROR("Timed out waiting for SHA1 complete\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for SHA1 complete\n");
return -ETIMEDOUT;
}
- if (!(I915_READ(HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
- DRM_DEBUG_KMS("SHA-1 mismatch, HDCP failed\n");
+ if (!(intel_de_read(dev_priv, HDCP_REP_CTL) & HDCP_SHA1_V_MATCH)) {
+ drm_dbg_kms(&dev_priv->drm, "SHA-1 mismatch, HDCP failed\n");
return -ENXIO;
}
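
The four sha_leftovers branches above fall out of simple byte arithmetic: KSVs are DRM_HDCP_KSV_LEN = 5 bytes, while the SHA text register is fed 4-byte words, so after hashing n downstream KSVs the partial word holds (5 * n) mod 4 bytes — only ever 0, 1, 2 or 3, exactly the cases handled when the 16-bit bstatus and the 64-bit M0 are appended. A back-of-envelope model of that intended accounting (plain C, not driver code):

#include <stdio.h>

#define KSV_LEN  5	/* models DRM_HDCP_KSV_LEN */
#define WORD_LEN 4	/* the SHA text register takes 32-bit words */

int main(void)
{
	/* leftover bytes in the partial word after hashing n KSVs */
	for (unsigned int n = 1; n <= 8; n++)
		printf("%u downstream device(s) -> %u leftover byte(s)\n",
		       n, (n * KSV_LEN) % WORD_LEN);
	return 0;
}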
@@ -513,15 +530,16 @@ int intel_hdcp_validate_v_prime(struct intel_connector *connector,
static
int intel_hdcp_auth_downstream(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
const struct intel_hdcp_shim *shim = connector->hdcp.shim;
- struct drm_device *dev = connector->base.dev;
u8 bstatus[2], num_downstream, *ksv_fifo;
int ret, i, tries = 3;
ret = intel_hdcp_poll_ksv_fifo(intel_dig_port, shim);
if (ret) {
- DRM_DEBUG_KMS("KSV list failed to become ready (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "KSV list failed to become ready (%d)\n", ret);
return ret;
}
@@ -531,7 +549,7 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (DRM_HDCP_MAX_DEVICE_EXCEEDED(bstatus[0]) ||
DRM_HDCP_MAX_CASCADE_EXCEEDED(bstatus[1])) {
- DRM_DEBUG_KMS("Max Topology Limit Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Max Topology Limit Exceeded\n");
return -EPERM;
}
@@ -544,13 +562,14 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
*/
num_downstream = DRM_HDCP_NUM_DOWNSTREAM(bstatus[0]);
if (num_downstream == 0) {
- DRM_DEBUG_KMS("Repeater with zero downstream devices\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Repeater with zero downstream devices\n");
return -EINVAL;
}
ksv_fifo = kcalloc(DRM_HDCP_KSV_LEN, num_downstream, GFP_KERNEL);
if (!ksv_fifo) {
- DRM_DEBUG_KMS("Out of mem: ksv_fifo\n");
+ drm_dbg_kms(&dev_priv->drm, "Out of mem: ksv_fifo\n");
return -ENOMEM;
}
@@ -558,8 +577,9 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
if (ret)
goto err;
- if (drm_hdcp_check_ksvs_revoked(dev, ksv_fifo, num_downstream)) {
- DRM_ERROR("Revoked Ksv(s) in ksv_fifo\n");
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, ksv_fifo,
+ num_downstream)) {
+ drm_err(&dev_priv->drm, "Revoked Ksv(s) in ksv_fifo\n");
ret = -EPERM;
goto err;
}
@@ -577,12 +597,13 @@ int intel_hdcp_auth_downstream(struct intel_connector *connector)
}
if (i == tries) {
- DRM_DEBUG_KMS("V Prime validation failed.(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "V Prime validation failed.(%d)\n", ret);
goto err;
}
- DRM_DEBUG_KMS("HDCP is enabled (%d downstream devices)\n",
- num_downstream);
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (%d downstream devices)\n",
+ num_downstream);
ret = 0;
err:
kfree(ksv_fifo);
@@ -592,13 +613,12 @@ err:
/* Implements Part 1 of the HDCP authorization procedure */
static int intel_hdcp_auth(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
const struct intel_hdcp_shim *shim = hdcp->shim;
- struct drm_i915_private *dev_priv;
enum transcoder cpu_transcoder = connector->hdcp.cpu_transcoder;
- enum port port;
+ enum port port = intel_dig_port->base.port;
unsigned long r0_prime_gen_start;
int ret, i, tries = 2;
union {
@@ -615,10 +635,6 @@ static int intel_hdcp_auth(struct intel_connector *connector)
} ri;
bool repeater_present, hdcp_capable;
- dev_priv = intel_dig_port->base.base.dev->dev_private;
-
- port = intel_dig_port->base.port;
-
/*
* Detects whether the display is HDCP capable. Although we check for
* valid Bksv below, the HDCP over DP spec requires that we check
@@ -630,28 +646,32 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret)
return ret;
if (!hdcp_capable) {
- DRM_DEBUG_KMS("Panel is not HDCP capable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel is not HDCP capable\n");
return -EINVAL;
}
}
/* Initialize An with 2 random values and acquire it */
for (i = 0; i < 2; i++)
- I915_WRITE(HDCP_ANINIT(dev_priv, cpu_transcoder, port),
- get_random_u32());
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
- HDCP_CONF_CAPTURE_AN);
+ intel_de_write(dev_priv,
+ HDCP_ANINIT(dev_priv, cpu_transcoder, port),
+ get_random_u32());
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_CAPTURE_AN);
/* Wait for An to be acquired */
if (intel_de_wait_for_set(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_AN_READY, 1)) {
- DRM_ERROR("Timed out waiting for An\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for An\n");
return -ETIMEDOUT;
}
- an.reg[0] = I915_READ(HDCP_ANLO(dev_priv, cpu_transcoder, port));
- an.reg[1] = I915_READ(HDCP_ANHI(dev_priv, cpu_transcoder, port));
+ an.reg[0] = intel_de_read(dev_priv,
+ HDCP_ANLO(dev_priv, cpu_transcoder, port));
+ an.reg[1] = intel_de_read(dev_priv,
+ HDCP_ANHI(dev_priv, cpu_transcoder, port));
ret = shim->write_an_aksv(intel_dig_port, an.shim);
if (ret)
return ret;
@@ -664,33 +684,34 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (ret < 0)
return ret;
- if (drm_hdcp_check_ksvs_revoked(dev, bksv.shim, 1)) {
- DRM_ERROR("BKSV is revoked\n");
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm, bksv.shim, 1)) {
+ drm_err(&dev_priv->drm, "BKSV is revoked\n");
return -EPERM;
}
- I915_WRITE(HDCP_BKSVLO(dev_priv, cpu_transcoder, port), bksv.reg[0]);
- I915_WRITE(HDCP_BKSVHI(dev_priv, cpu_transcoder, port), bksv.reg[1]);
+ intel_de_write(dev_priv, HDCP_BKSVLO(dev_priv, cpu_transcoder, port),
+ bksv.reg[0]);
+ intel_de_write(dev_priv, HDCP_BKSVHI(dev_priv, cpu_transcoder, port),
+ bksv.reg[1]);
ret = shim->repeater_present(intel_dig_port, &repeater_present);
if (ret)
return ret;
if (repeater_present)
- I915_WRITE(HDCP_REP_CTL,
- intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder,
- port));
+ intel_de_write(dev_priv, HDCP_REP_CTL,
+ intel_hdcp_get_repeater_ctl(dev_priv, cpu_transcoder, port));
ret = shim->toggle_signalling(intel_dig_port, true);
if (ret)
return ret;
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port),
- HDCP_CONF_AUTH_AND_ENC);
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port),
+ HDCP_CONF_AUTH_AND_ENC);
/* Wait for R0 ready */
- if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
(HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) {
- DRM_ERROR("Timed out waiting for R0 ready\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for R0 ready\n");
return -ETIMEDOUT;
}
@@ -716,19 +737,21 @@ static int intel_hdcp_auth(struct intel_connector *connector)
ret = shim->read_ri_prime(intel_dig_port, ri.shim);
if (ret)
return ret;
- I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+ intel_de_write(dev_priv,
+ HDCP_RPRIME(dev_priv, cpu_transcoder, port),
+ ri.reg);
/* Wait for Ri prime match */
- if (!wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)) &
- (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
+ if (!wait_for(intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1))
break;
}
if (i == tries) {
- DRM_DEBUG_KMS("Timed out waiting for Ri prime match (%x)\n",
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ drm_dbg_kms(&dev_priv->drm,
+ "Timed out waiting for Ri prime match (%x)\n",
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv,
+ cpu_transcoder, port)));
return -ETIMEDOUT;
}
@@ -737,7 +760,7 @@ static int intel_hdcp_auth(struct intel_connector *connector)
HDCP_STATUS(dev_priv, cpu_transcoder, port),
HDCP_STATUS_ENC,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Timed out waiting for encryption\n");
+ drm_err(&dev_priv->drm, "Timed out waiting for encryption\n");
return -ETIMEDOUT;
}
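
Both the R0 and Ri checks above lean on wait_for(COND, ms), i915's poll-with-timeout helper: it re-evaluates the condition until it holds or the timeout expires, returning 0 on success and non-zero on timeout. A stand-alone approximation of that contract (a sketch only — the real macro sleeps adaptively and uses kernel time):

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Rough stand-in for i915's wait_for(COND, MS): poll COND roughly once
 * per millisecond, return 0 when it holds, -ETIMEDOUT when time runs out.
 */
#define wait_for_ms(COND, MS) ({				\
	int __left = (MS), __ret = -ETIMEDOUT;			\
	while (__left-- > 0) {					\
		if (COND) { __ret = 0; break; }			\
		usleep(1000);					\
	}							\
	__ret;							\
})

static int counter;
static int status_ready(void) { return ++counter > 3; }

int main(void)
{
	/* models: wait_for(intel_de_read(...) & HDCP_STATUS_R0_READY, 1) */
	if (wait_for_ms(status_ready(), 10))
		fprintf(stderr, "timed out waiting for R0 ready\n");
	else
		printf("R0 ready after %d polls\n", counter);
	return 0;
}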
@@ -749,52 +772,53 @@ static int intel_hdcp_auth(struct intel_connector *connector)
if (repeater_present)
return intel_hdcp_auth_downstream(connector);
- DRM_DEBUG_KMS("HDCP is enabled (no repeater present)\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is enabled (no repeater present)\n");
return 0;
}
static int _intel_hdcp_disable(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP is being disabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being disabled...\n",
+ connector->base.name, connector->base.base.id);
hdcp->hdcp_encrypted = false;
- I915_WRITE(HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
+ intel_de_write(dev_priv, HDCP_CONF(dev_priv, cpu_transcoder, port), 0);
if (intel_de_wait_for_clear(dev_priv,
HDCP_STATUS(dev_priv, cpu_transcoder, port),
~0, ENCRYPT_STATUS_CHANGE_TIMEOUT_MS)) {
- DRM_ERROR("Failed to disable HDCP, timeout clearing status\n");
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP, timeout clearing status\n");
return -ETIMEDOUT;
}
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
- DRM_ERROR("Failed to disable HDCP signalling\n");
+ drm_err(&dev_priv->drm, "Failed to disable HDCP signalling\n");
return ret;
}
- DRM_DEBUG_KMS("HDCP is disabled\n");
+ drm_dbg_kms(&dev_priv->drm, "HDCP is disabled\n");
return 0;
}
static int _intel_hdcp_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
int i, ret, tries = 3;
- DRM_DEBUG_KMS("[%s:%d] HDCP is being enabled...\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm, "[%s:%d] HDCP is being enabled...\n",
+ connector->base.name, connector->base.base.id);
if (!hdcp_key_loadable(dev_priv)) {
- DRM_ERROR("HDCP key Load is not possible\n");
+ drm_err(&dev_priv->drm, "HDCP key Load is not possible\n");
return -ENXIO;
}
@@ -805,7 +829,8 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
intel_hdcp_clear_keys(dev_priv);
}
if (ret) {
- DRM_ERROR("Could not load HDCP keys, (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Could not load HDCP keys, (%d)\n",
+ ret);
return ret;
}
@@ -817,13 +842,14 @@ static int _intel_hdcp_enable(struct intel_connector *connector)
return 0;
}
- DRM_DEBUG_KMS("HDCP Auth failure (%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "HDCP Auth failure (%d)\n", ret);
/* Ensuring HDCP encryption and signalling are stopped. */
_intel_hdcp_disable(connector);
}
- DRM_DEBUG_KMS("HDCP authentication failed (%d tries/%d)\n", tries, ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP authentication failed (%d tries/%d)\n", tries, ret);
return ret;
}
@@ -836,9 +862,9 @@ struct intel_connector *intel_hdcp_to_connector(struct intel_hdcp *hdcp)
/* Implements Part 3 of the HDCP authorization procedure */
static int intel_hdcp_check_link(struct intel_connector *connector)
{
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_i915_private *dev_priv = connector->base.dev->dev_private;
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -853,11 +879,12 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
- DRM_ERROR("%s:%d HDCP link stopped encryption,%x\n",
- connector->base.name, connector->base.base.id,
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "%s:%d HDCP link stopped encryption,%x\n",
+ connector->base.name, connector->base.base.id,
+ intel_de_read(dev_priv, HDCP_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -872,12 +899,13 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
goto out;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP link failed, retrying authentication\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP link failed, retrying authentication\n",
+ connector->base.name, connector->base.base.id);
ret = _intel_hdcp_disable(connector);
if (ret) {
- DRM_ERROR("Failed to disable hdcp (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to disable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -885,7 +913,7 @@ static int intel_hdcp_check_link(struct intel_connector *connector)
ret = _intel_hdcp_enable(connector);
if (ret) {
- DRM_ERROR("Failed to enable hdcp (%d)\n", ret);
+ drm_err(&dev_priv->drm, "Failed to enable hdcp (%d)\n", ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -901,9 +929,9 @@ static void intel_hdcp_prop_work(struct work_struct *work)
struct intel_hdcp *hdcp = container_of(work, struct intel_hdcp,
prop_work);
struct intel_connector *connector = intel_hdcp_to_connector(hdcp);
- struct drm_device *dev = connector->base.dev;
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
+ drm_modeset_lock(&dev_priv->drm.mode_config.connection_mutex, NULL);
mutex_lock(&hdcp->mutex);
/*
@@ -916,13 +944,13 @@ static void intel_hdcp_prop_work(struct work_struct *work)
hdcp->value);
mutex_unlock(&hdcp->mutex);
- drm_modeset_unlock(&dev->mode_config.connection_mutex);
+ drm_modeset_unlock(&dev_priv->drm.mode_config.connection_mutex);
}
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port)
{
- /* PORT E doesn't have HDCP, and PORT F is disabled */
- return INTEL_INFO(dev_priv)->display.has_hdcp && port < PORT_E;
+ return INTEL_INFO(dev_priv)->display.has_hdcp &&
+ (INTEL_GEN(dev_priv) >= 12 || port < PORT_E);
}
static int
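
Note that the is_hdcp_supported() hunk above changes behaviour rather than just converting macros: before gen12 the hardware has no HDCP on PORT_E (and PORT_F falls under the same comparison), while gen12+ exposes HDCP on every port. A small stand-in model of the new predicate, with enum values assumed for illustration:

#include <stdbool.h>
#include <stdio.h>

enum port { PORT_A, PORT_B, PORT_C, PORT_D, PORT_E, PORT_F };

/* models the reworked is_hdcp_supported(dev_priv, port) */
static bool hdcp_supported(bool has_hdcp, int gen, enum port port)
{
	return has_hdcp && (gen >= 12 || port < PORT_E);
}

int main(void)
{
	printf("gen11 PORT_E: %d\n", hdcp_supported(true, 11, PORT_E)); /* 0 */
	printf("gen12 PORT_E: %d\n", hdcp_supported(true, 12, PORT_E)); /* 1 */
	printf("gen11 PORT_D: %d\n", hdcp_supported(true, 11, PORT_D)); /* 1 */
	return 0;
}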
@@ -944,7 +972,8 @@ hdcp2_prepare_ake_init(struct intel_connector *connector,
ret = comp->ops->initiate_hdcp2_session(comp->mei_dev, data, ake_data);
if (ret)
- DRM_DEBUG_KMS("Prepare_ake_init failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Prepare_ake_init failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -974,7 +1003,8 @@ hdcp2_verify_rx_cert_prepare_km(struct intel_connector *connector,
rx_cert, paired,
ek_pub_km, msg_sz);
if (ret < 0)
- DRM_DEBUG_KMS("Verify rx_cert failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify rx_cert failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -998,7 +1028,7 @@ static int hdcp2_verify_hprime(struct intel_connector *connector,
ret = comp->ops->verify_hprime(comp->mei_dev, data, rx_hprime);
if (ret < 0)
- DRM_DEBUG_KMS("Verify hprime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify hprime failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1023,7 +1053,8 @@ hdcp2_store_pairing_info(struct intel_connector *connector,
ret = comp->ops->store_pairing_info(comp->mei_dev, data, pairing_info);
if (ret < 0)
- DRM_DEBUG_KMS("Store pairing info failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Store pairing info failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1048,7 +1079,8 @@ hdcp2_prepare_lc_init(struct intel_connector *connector,
ret = comp->ops->initiate_locality_check(comp->mei_dev, data, lc_init);
if (ret < 0)
- DRM_DEBUG_KMS("Prepare lc_init failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Prepare lc_init failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1073,7 +1105,8 @@ hdcp2_verify_lprime(struct intel_connector *connector,
ret = comp->ops->verify_lprime(comp->mei_dev, data, rx_lprime);
if (ret < 0)
- DRM_DEBUG_KMS("Verify L_Prime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify L_Prime failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1097,7 +1130,8 @@ static int hdcp2_prepare_skey(struct intel_connector *connector,
ret = comp->ops->get_session_key(comp->mei_dev, data, ske_data);
if (ret < 0)
- DRM_DEBUG_KMS("Get session key failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Get session key failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1126,7 +1160,8 @@ hdcp2_verify_rep_topology_prepare_ack(struct intel_connector *connector,
rep_topology,
rep_send_ack);
if (ret < 0)
- DRM_DEBUG_KMS("Verify rep topology failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Verify rep topology failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1151,7 +1186,7 @@ hdcp2_verify_mprime(struct intel_connector *connector,
ret = comp->ops->verify_mprime(comp->mei_dev, data, stream_ready);
if (ret < 0)
- DRM_DEBUG_KMS("Verify mprime failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Verify mprime failed. %d\n", ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1174,7 +1209,8 @@ static int hdcp2_authenticate_port(struct intel_connector *connector)
ret = comp->ops->enable_hdcp_authentication(comp->mei_dev, data);
if (ret < 0)
- DRM_DEBUG_KMS("Enable hdcp auth failed. %d\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Enable hdcp auth failed. %d\n",
+ ret);
mutex_unlock(&dev_priv->hdcp_comp_mutex);
return ret;
@@ -1209,9 +1245,9 @@ static int hdcp2_deauthenticate_port(struct intel_connector *connector)
/* Authentication flow starts from here */
static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_ake_init ake_init;
struct hdcp2_ake_send_cert send_cert;
@@ -1242,15 +1278,16 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
return ret;
if (msgs.send_cert.rx_caps[0] != HDCP_2_2_RX_CAPS_VERSION_VAL) {
- DRM_DEBUG_KMS("cert.rx_caps dont claim HDCP2.2\n");
+ drm_dbg_kms(&dev_priv->drm, "cert.rx_caps don't claim HDCP2.2\n");
return -EINVAL;
}
hdcp->is_repeater = HDCP_2_2_RX_REPEATER(msgs.send_cert.rx_caps[2]);
- if (drm_hdcp_check_ksvs_revoked(dev, msgs.send_cert.cert_rx.receiver_id,
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.send_cert.cert_rx.receiver_id,
1)) {
- DRM_ERROR("Receiver ID is revoked\n");
+ drm_err(&dev_priv->drm, "Receiver ID is revoked\n");
return -EPERM;
}
@@ -1297,7 +1334,7 @@ static int hdcp2_authentication_key_exchange(struct intel_connector *connector)
static int hdcp2_locality_check(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_lc_init lc_init;
@@ -1333,7 +1370,7 @@ static int hdcp2_locality_check(struct intel_connector *connector)
static int hdcp2_session_key_exchange(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
struct hdcp2_ske_send_eks send_eks;
int ret;
@@ -1353,7 +1390,7 @@ static int hdcp2_session_key_exchange(struct intel_connector *connector)
static
int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct intel_hdcp *hdcp = &connector->hdcp;
union {
struct hdcp2_rep_stream_manage stream_manage;
@@ -1404,9 +1441,9 @@ int hdcp2_propagate_stream_management_info(struct intel_connector *connector)
static
int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- struct drm_device *dev = connector->base.dev;
union {
struct hdcp2_rep_send_receiverid_list recvid_list;
struct hdcp2_rep_send_ack rep_ack;
@@ -1425,7 +1462,7 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
if (HDCP_2_2_MAX_CASCADE_EXCEEDED(rx_info[1]) ||
HDCP_2_2_MAX_DEVS_EXCEEDED(rx_info[1])) {
- DRM_DEBUG_KMS("Topology Max Size Exceeded\n");
+ drm_dbg_kms(&dev_priv->drm, "Topology Max Size Exceeded\n");
return -EINVAL;
}
@@ -1433,17 +1470,24 @@ int hdcp2_authenticate_repeater_topology(struct intel_connector *connector)
seq_num_v =
drm_hdcp_be24_to_cpu((const u8 *)msgs.recvid_list.seq_num_v);
+ if (!hdcp->hdcp2_encrypted && seq_num_v) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Non-zero Seq_num_v at first RecvId_List msg\n");
+ return -EINVAL;
+ }
+
if (seq_num_v < hdcp->seq_num_v) {
/* Roll over of the seq_num_v from repeater. Reauthenticate. */
- DRM_DEBUG_KMS("Seq_num_v roll over.\n");
+ drm_dbg_kms(&dev_priv->drm, "Seq_num_v roll over.\n");
return -EINVAL;
}
device_cnt = (HDCP_2_2_DEV_COUNT_HI(rx_info[0]) << 4 |
HDCP_2_2_DEV_COUNT_LO(rx_info[1]));
- if (drm_hdcp_check_ksvs_revoked(dev, msgs.recvid_list.receiver_ids,
+ if (drm_hdcp_check_ksvs_revoked(&dev_priv->drm,
+ msgs.recvid_list.receiver_ids,
device_cnt)) {
- DRM_ERROR("Revoked receiver ID(s) is in list\n");
+ drm_err(&dev_priv->drm, "Revoked receiver ID(s) is in list\n");
return -EPERM;
}
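
drm_hdcp_be24_to_cpu() above unpacks seq_num_v, which HDCP 2.2 transmits as a 3-byte big-endian counter; the first-message and roll-over checks then compare it as a plain integer. The equivalent arithmetic, assuming the usual big-endian 24-bit layout, in stand-alone C:

#include <stdint.h>
#include <stdio.h>

/* big-endian 24-bit field -> host u32, as drm_hdcp_be24_to_cpu() does */
static uint32_t be24_to_cpu(const uint8_t *p)
{
	return ((uint32_t)p[0] << 16) | ((uint32_t)p[1] << 8) | p[2];
}

int main(void)
{
	const uint8_t seq_num_v[3] = { 0x00, 0x01, 0x02 };

	printf("seq_num_v = %u\n", be24_to_cpu(seq_num_v)); /* 258 */
	return 0;
}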
@@ -1475,26 +1519,28 @@ static int hdcp2_authenticate_repeater(struct intel_connector *connector)
static int hdcp2_authenticate_sink(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
const struct intel_hdcp_shim *shim = hdcp->shim;
int ret;
ret = hdcp2_authentication_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("AKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "AKE Failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_locality_check(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Locality Check failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Locality Check failed. Err : %d\n", ret);
return ret;
}
ret = hdcp2_session_key_exchange(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("SKE Failed. Err : %d\n", ret);
+ drm_dbg_kms(&i915->drm, "SKE Failed. Err : %d\n", ret);
return ret;
}
@@ -1509,7 +1555,8 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
if (hdcp->is_repeater) {
ret = hdcp2_authenticate_repeater(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Repeater Auth Failed. Err: %d\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Repeater Auth Failed. Err: %d\n", ret);
return ret;
}
}
@@ -1524,31 +1571,32 @@ static int hdcp2_authenticate_sink(struct intel_connector *connector)
static int hdcp2_enable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS);
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, true);
if (ret) {
- DRM_ERROR("Failed to enable HDCP signalling. %d\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to enable HDCP signalling. %d\n",
+ ret);
return ret;
}
}
- if (I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
LINK_AUTH_STATUS) {
/* Link is Authenticated. Now set for Encryption */
- I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
- I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder,
- port)) |
- CTL_LINK_ENCRYPTION_REQ);
+ intel_de_write(dev_priv,
+ HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) | CTL_LINK_ENCRYPTION_REQ);
}
ret = intel_de_wait_for_set(dev_priv,
@@ -1562,19 +1610,18 @@ static int hdcp2_enable_encryption(struct intel_connector *connector)
static int hdcp2_disable_encryption(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder = hdcp->cpu_transcoder;
int ret;
- WARN_ON(!(I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
- LINK_ENCRYPTION_STATUS));
+ drm_WARN_ON(&dev_priv->drm, !(intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)) &
+ LINK_ENCRYPTION_STATUS));
- I915_WRITE(HDCP2_CTL(dev_priv, cpu_transcoder, port),
- I915_READ(HDCP2_CTL(dev_priv, cpu_transcoder, port)) &
- ~CTL_LINK_ENCRYPTION_REQ);
+ intel_de_write(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port),
+ intel_de_read(dev_priv, HDCP2_CTL(dev_priv, cpu_transcoder, port)) & ~CTL_LINK_ENCRYPTION_REQ);
ret = intel_de_wait_for_clear(dev_priv,
HDCP2_STATUS(dev_priv, cpu_transcoder,
@@ -1582,13 +1629,14 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
LINK_ENCRYPTION_STATUS,
ENCRYPT_STATUS_CHANGE_TIMEOUT_MS);
if (ret == -ETIMEDOUT)
- DRM_DEBUG_KMS("Disable Encryption Timedout");
+ drm_dbg_kms(&dev_priv->drm, "Disable encryption timed out\n");
if (hdcp->shim->toggle_signalling) {
ret = hdcp->shim->toggle_signalling(intel_dig_port, false);
if (ret) {
- DRM_ERROR("Failed to disable HDCP signalling. %d\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Failed to disable HDCP signalling. %d\n",
+ ret);
return ret;
}
}
@@ -1598,6 +1646,7 @@ static int hdcp2_disable_encryption(struct intel_connector *connector)
static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret, i, tries = 3;
for (i = 0; i < tries; i++) {
@@ -1606,10 +1655,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
break;
/* Clearing the mei hdcp session */
- DRM_DEBUG_KMS("HDCP2.2 Auth %d of %d Failed.(%d)\n",
- i + 1, tries, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2.2 Auth %d of %d Failed.(%d)\n",
+ i + 1, tries, ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
if (i != tries) {
@@ -1620,9 +1669,10 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
msleep(HDCP_2_2_DELAY_BEFORE_ENCRYPTION_EN);
ret = hdcp2_enable_encryption(connector);
if (ret < 0) {
- DRM_DEBUG_KMS("Encryption Enable Failed.(%d)\n", ret);
+ drm_dbg_kms(&i915->drm,
+ "Encryption Enable Failed.(%d)\n", ret);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
}
}
@@ -1631,23 +1681,24 @@ static int hdcp2_authenticate_and_encrypt(struct intel_connector *connector)
static int _intel_hdcp2_enable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being enabled. Type: %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
ret = hdcp2_authenticate_and_encrypt(connector);
if (ret) {
- DRM_DEBUG_KMS("HDCP2 Type%d Enabling Failed. (%d)\n",
- hdcp->content_type, ret);
+ drm_dbg_kms(&i915->drm, "HDCP2 Type%d Enabling Failed. (%d)\n",
+ hdcp->content_type, ret);
return ret;
}
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is enabled. Type %d\n",
- connector->base.name, connector->base.base.id,
- hdcp->content_type);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is enabled. Type %d\n",
+ connector->base.name, connector->base.base.id,
+ hdcp->content_type);
hdcp->hdcp2_encrypted = true;
return 0;
@@ -1655,15 +1706,16 @@ static int _intel_hdcp2_enable(struct intel_connector *connector)
static int _intel_hdcp2_disable(struct intel_connector *connector)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
int ret;
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 is being Disabled\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&i915->drm, "[%s:%d] HDCP2.2 is being Disabled\n",
+ connector->base.name, connector->base.base.id);
ret = hdcp2_disable_encryption(connector);
if (hdcp2_deauthenticate_port(connector) < 0)
- DRM_DEBUG_KMS("Port deauth failed.\n");
+ drm_dbg_kms(&i915->drm, "Port deauth failed.\n");
connector->hdcp.hdcp2_encrypted = false;
@@ -1673,10 +1725,10 @@ static int _intel_hdcp2_disable(struct intel_connector *connector)
/* Implements the Link Integrity Check for HDCP2.2 */
static int intel_hdcp2_check_link(struct intel_connector *connector)
{
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
- enum port port = connector->encoder->port;
+ enum port port = intel_dig_port->base.port;
enum transcoder cpu_transcoder;
int ret = 0;
@@ -1690,10 +1742,11 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
goto out;
}
- if (WARN_ON(!intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
- DRM_ERROR("HDCP2.2 link stopped the encryption, %x\n",
- I915_READ(HDCP2_STATUS(dev_priv, cpu_transcoder,
- port)));
+ if (drm_WARN_ON(&dev_priv->drm,
+ !intel_hdcp2_in_use(dev_priv, cpu_transcoder, port))) {
+ drm_err(&dev_priv->drm,
+ "HDCP2.2 link stopped the encryption, %x\n",
+ intel_de_read(dev_priv, HDCP2_STATUS(dev_priv, cpu_transcoder, port)));
ret = -ENXIO;
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
@@ -1713,25 +1766,29 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
if (hdcp->value == DRM_MODE_CONTENT_PROTECTION_UNDESIRED)
goto out;
- DRM_DEBUG_KMS("HDCP2.2 Downstream topology change\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "HDCP2.2 Downstream topology change\n");
ret = hdcp2_authenticate_repeater_topology(connector);
if (!ret) {
hdcp->value = DRM_MODE_CONTENT_PROTECTION_ENABLED;
schedule_work(&hdcp->prop_work);
goto out;
}
- DRM_DEBUG_KMS("[%s:%d] Repeater topology auth failed.(%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Repeater topology auth failed.(%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
} else {
- DRM_DEBUG_KMS("[%s:%d] HDCP2.2 link failed, retrying auth\n",
- connector->base.name, connector->base.base.id);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] HDCP2.2 link failed, retrying auth\n",
+ connector->base.name, connector->base.base.id);
}
ret = _intel_hdcp2_disable(connector);
if (ret) {
- DRM_ERROR("[%s:%d] Failed to disable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id, ret);
+ drm_err(&dev_priv->drm,
+ "[%s:%d] Failed to disable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id, ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -1739,9 +1796,10 @@ static int intel_hdcp2_check_link(struct intel_connector *connector)
ret = _intel_hdcp2_enable(connector);
if (ret) {
- DRM_DEBUG_KMS("[%s:%d] Failed to enable hdcp2.2 (%d)\n",
- connector->base.name, connector->base.base.id,
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "[%s:%d] Failed to enable hdcp2.2 (%d)\n",
+ connector->base.name, connector->base.base.id,
+ ret);
hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
schedule_work(&hdcp->prop_work);
goto out;
@@ -1772,7 +1830,7 @@ static int i915_hdcp_component_bind(struct device *i915_kdev,
{
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
- DRM_DEBUG("I915 HDCP comp bind\n");
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp bind\n");
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_master = (struct i915_hdcp_comp_master *)data;
dev_priv->hdcp_master->mei_dev = mei_kdev;
@@ -1786,7 +1844,7 @@ static void i915_hdcp_component_unbind(struct device *i915_kdev,
{
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
- DRM_DEBUG("I915 HDCP comp unbind\n");
+ drm_dbg(&dev_priv->drm, "I915 HDCP comp unbind\n");
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_master = NULL;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1830,7 +1888,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
if (INTEL_GEN(dev_priv) < 12)
data->fw_ddi =
- intel_get_mei_fw_ddi_index(connector->encoder->port);
+ intel_get_mei_fw_ddi_index(intel_attached_encoder(connector)->port);
else
/*
* As per ME FW API expectation, for GEN 12+, fw_ddi is filled
@@ -1854,7 +1912,7 @@ static inline int initialize_hdcp_port_data(struct intel_connector *connector,
sizeof(struct hdcp2_streamid_type),
GFP_KERNEL);
if (!data->streams) {
- DRM_ERROR("Out of Memory\n");
+ drm_err(&dev_priv->drm, "Out of Memory\n");
return -ENOMEM;
}
@@ -1881,14 +1939,15 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
return;
mutex_lock(&dev_priv->hdcp_comp_mutex);
- WARN_ON(dev_priv->hdcp_comp_added);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->hdcp_comp_added);
dev_priv->hdcp_comp_added = true;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
ret = component_add_typed(dev_priv->drm.dev, &i915_hdcp_component_ops,
I915_COMPONENT_HDCP);
if (ret < 0) {
- DRM_DEBUG_KMS("Failed at component add(%d)\n", ret);
+ drm_dbg_kms(&dev_priv->drm, "Failed at component add(%d)\n",
+ ret);
mutex_lock(&dev_priv->hdcp_comp_mutex);
dev_priv->hdcp_comp_added = false;
mutex_unlock(&dev_priv->hdcp_comp_mutex);
@@ -1899,12 +1958,13 @@ void intel_hdcp_component_init(struct drm_i915_private *dev_priv)
static void intel_hdcp2_init(struct intel_connector *connector,
const struct intel_hdcp_shim *shim)
{
+ struct drm_i915_private *i915 = to_i915(connector->base.dev);
struct intel_hdcp *hdcp = &connector->hdcp;
int ret;
ret = initialize_hdcp_port_data(connector, shim);
if (ret) {
- DRM_DEBUG_KMS("Mei hdcp data init failed\n");
+ drm_dbg_kms(&i915->drm, "Mei hdcp data init failed\n");
return;
}
@@ -1954,7 +2014,8 @@ int intel_hdcp_enable(struct intel_connector *connector,
return -ENOENT;
mutex_lock(&hdcp->mutex);
- WARN_ON(hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
+ drm_WARN_ON(&dev_priv->drm,
+ hdcp->value == DRM_MODE_CONTENT_PROTECTION_ENABLED);
hdcp->content_type = content_type;
if (INTEL_GEN(dev_priv) >= 12) {
@@ -2014,6 +2075,46 @@ int intel_hdcp_disable(struct intel_connector *connector)
return ret;
}
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ struct intel_connector *connector =
+ to_intel_connector(conn_state->connector);
+ struct intel_hdcp *hdcp = &connector->hdcp;
+ bool content_protection_type_changed =
+ (conn_state->hdcp_content_type != hdcp->content_type &&
+ conn_state->content_protection !=
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED);
+
+ /*
+ * If a Type change is requested during an active HDCP encryption
+ * session, disable HDCP and re-enable it with the new Type value.
+ */
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_UNDESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_disable(connector);
+
+ /*
+ * Mark the HDCP state as DESIRED after HDCP has been disabled as
+ * part of the Type change procedure.
+ */
+ if (content_protection_type_changed) {
+ mutex_lock(&hdcp->mutex);
+ hdcp->value = DRM_MODE_CONTENT_PROTECTION_DESIRED;
+ schedule_work(&hdcp->prop_work);
+ mutex_unlock(&hdcp->mutex);
+ }
+
+ if (conn_state->content_protection ==
+ DRM_MODE_CONTENT_PROTECTION_DESIRED ||
+ content_protection_type_changed)
+ intel_hdcp_enable(connector,
+ crtc_state->cpu_transcoder,
+ (u8)conn_state->hdcp_content_type);
+}
+
void intel_hdcp_component_fini(struct drm_i915_private *dev_priv)
{
mutex_lock(&dev_priv->hdcp_comp_mutex);
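
The new intel_hdcp_update_pipe() added above boils down to three decisions driven by the connector state: disable when userspace requested UNDESIRED or changed the content type mid-session, re-arm the property to DESIRED after a type-change disable, and (re-)enable when the state is DESIRED or the type changed. A compact stand-in model of that decision table (enum values assumed for illustration):

#include <stdbool.h>
#include <stdio.h>

enum cp_state { CP_UNDESIRED, CP_DESIRED, CP_ENABLED };

static void update_pipe(enum cp_state requested, int requested_type,
			int current_type)
{
	bool type_changed = requested_type != current_type &&
			    requested != CP_UNDESIRED;

	if (requested == CP_UNDESIRED || type_changed)
		printf("disable HDCP\n");
	if (type_changed)
		printf("mark property DESIRED\n");
	if (requested == CP_DESIRED || type_changed)
		printf("enable HDCP (type %d)\n", requested_type);
}

int main(void)
{
	update_pipe(CP_ENABLED, 1, 0);	/* type change during a session */
	update_pipe(CP_DESIRED, 0, 0);	/* plain enable request */
	return 0;
}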
diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.h b/drivers/gpu/drm/i915/display/intel_hdcp.h
index f3c3272e712a..7c12ad609b1f 100644
--- a/drivers/gpu/drm/i915/display/intel_hdcp.h
+++ b/drivers/gpu/drm/i915/display/intel_hdcp.h
@@ -8,12 +8,12 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_connector;
struct drm_connector_state;
struct drm_i915_private;
struct intel_connector;
+struct intel_crtc_state;
+struct intel_encoder;
struct intel_hdcp_shim;
enum port;
enum transcoder;
@@ -26,6 +26,9 @@ int intel_hdcp_init(struct intel_connector *connector,
int intel_hdcp_enable(struct intel_connector *connector,
enum transcoder cpu_transcoder, u8 content_type);
int intel_hdcp_disable(struct intel_connector *connector);
+void intel_hdcp_update_pipe(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state);
bool is_hdcp_supported(struct drm_i915_private *dev_priv, enum port port);
bool intel_hdcp_capable(struct intel_connector *connector);
bool intel_hdcp2_capable(struct intel_connector *connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c
index 93ac0f296852..39930232b253 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.c
@@ -36,7 +36,6 @@
#include <drm/drm_edid.h>
#include <drm/drm_hdcp.h>
#include <drm/drm_scdc_helper.h>
-#include <drm/i915_drm.h>
#include <drm/intel_lpe_audio.h>
#include "i915_debugfs.h"
@@ -45,6 +44,7 @@
#include "intel_audio.h"
#include "intel_connector.h"
#include "intel_ddi.h"
+#include "intel_display_debugfs.h"
#include "intel_display_types.h"
#include "intel_dp.h"
#include "intel_dpio_phy.h"
@@ -72,17 +72,19 @@ assert_hdmi_port_disabled(struct intel_hdmi *intel_hdmi)
enabled_bits = HAS_DDI(dev_priv) ? DDI_BUF_CTL_ENABLE : SDVO_ENABLE;
- WARN(I915_READ(intel_hdmi->hdmi_reg) & enabled_bits,
- "HDMI port enabled, expecting disabled\n");
+ drm_WARN(dev,
+ intel_de_read(dev_priv, intel_hdmi->hdmi_reg) & enabled_bits,
+ "HDMI port enabled, expecting disabled\n");
}
static void
assert_hdmi_transcoder_func_disabled(struct drm_i915_private *dev_priv,
enum transcoder cpu_transcoder)
{
- WARN(I915_READ(TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
- TRANS_DDI_FUNC_ENABLE,
- "HDMI transcoder function enabled, expecting disabled\n");
+ drm_WARN(&dev_priv->drm,
+ intel_de_read(dev_priv, TRANS_DDI_FUNC_CTL(cpu_transcoder)) &
+ TRANS_DDI_FUNC_ENABLE,
+ "HDMI transcoder function enabled, expecting disabled\n");
}
struct intel_hdmi *enc_to_intel_hdmi(struct intel_encoder *encoder)
@@ -215,32 +217,33 @@ static void g4x_write_infoframe(struct intel_encoder *encoder,
{
const u32 *data = frame;
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(VIDEO_DIP_CTL, val);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(VIDEO_DIP_DATA, *data);
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(VIDEO_DIP_DATA, 0);
+ intel_de_write(dev_priv, VIDEO_DIP_DATA, 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(VIDEO_DIP_CTL, val);
- POSTING_READ(VIDEO_DIP_CTL);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
+ intel_de_posting_read(dev_priv, VIDEO_DIP_CTL);
}
static void g4x_read_infoframe(struct intel_encoder *encoder,
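
Each of the infoframe writers converted in this file follows the DIP sequence the g4x variant shows above: select the buffer index, disable that frame type, write the payload in 32-bit words, keep writing zeros up to the full buffer size so the hardware computes its ECC over deterministic bytes, then re-enable the frame. A stand-alone model of the payload-plus-zero-pad walk (buffer size assumed for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define DIP_DATA_SIZE 32	/* stand-in for VIDEO_DIP_DATA_SIZE */

/* models the two write loops: payload words first, then zero padding */
static void write_dip_payload(uint32_t *dip, const uint8_t *frame, size_t len)
{
	size_t i;

	for (i = 0; i < len; i += 4) {
		uint32_t word = 0;

		memcpy(&word, frame + i, len - i < 4 ? len - i : 4);
		dip[i / 4] = word;
	}
	for (; i < DIP_DATA_SIZE; i += 4)
		dip[i / 4] = 0;	/* force a deterministic ECC input */
}

int main(void)
{
	uint32_t dip[DIP_DATA_SIZE / 4];
	const uint8_t frame[13] = { 0x82, 0x02, 0x0d };	/* AVI-like header */

	write_dip_payload(dip, frame, sizeof(frame));
	printf("words written: %zu\n", sizeof(dip) / sizeof(dip[0]));
	return 0;
}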
@@ -252,22 +255,22 @@ static void g4x_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(VIDEO_DIP_CTL);
+ val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(VIDEO_DIP_CTL, val);
+ intel_de_write(dev_priv, VIDEO_DIP_CTL, val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(VIDEO_DIP_DATA);
+ *data++ = intel_de_read(dev_priv, VIDEO_DIP_DATA);
}
static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(VIDEO_DIP_CTL);
+ u32 val = intel_de_read(dev_priv, VIDEO_DIP_CTL);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -288,32 +291,34 @@ static void ibx_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void ibx_read_infoframe(struct intel_encoder *encoder,
@@ -326,15 +331,15 @@ static void ibx_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
@@ -343,7 +348,7 @@ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
i915_reg_t reg = TVIDEO_DIP_CTL(pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -365,10 +370,11 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
@@ -378,22 +384,23 @@ static void cpt_write_infoframe(struct intel_encoder *encoder,
if (type != HDMI_INFOFRAME_TYPE_AVI)
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv, TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void cpt_read_infoframe(struct intel_encoder *encoder,
@@ -406,15 +413,15 @@ static void cpt_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv, TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
@@ -422,7 +429,7 @@ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -441,32 +448,35 @@ static void vlv_write_infoframe(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
int i;
- WARN(!(val & VIDEO_DIP_ENABLE), "Writing DIP with CTL reg disabled\n");
+ drm_WARN(&dev_priv->drm, !(val & VIDEO_DIP_ENABLE),
+ "Writing DIP with CTL reg disabled\n");
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
val &= ~g4x_infoframe_enable(type);
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
- I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
+ intel_de_write(dev_priv,
+ VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
val |= g4x_infoframe_enable(type);
val &= ~VIDEO_DIP_FREQ_MASK;
val |= VIDEO_DIP_FREQ_VSYNC;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
}
static void vlv_read_infoframe(struct intel_encoder *encoder,
@@ -479,15 +489,16 @@ static void vlv_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+ val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe));
val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
val |= g4x_infoframe_index(type);
- I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ intel_de_write(dev_priv, VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+ *data++ = intel_de_read(dev_priv,
+ VLV_TVIDEO_DIP_DATA(crtc->pipe));
}
static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
@@ -495,7 +506,7 @@ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
enum pipe pipe = to_intel_crtc(pipe_config->uapi.crtc)->pipe;
- u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
+ u32 val = intel_de_read(dev_priv, VLV_TVIDEO_DIP_CTL(pipe));
if ((val & VIDEO_DIP_ENABLE) == 0)
return 0;
@@ -519,28 +530,30 @@ static void hsw_write_infoframe(struct intel_encoder *encoder,
i915_reg_t ctl_reg = HSW_TVIDEO_DIP_CTL(cpu_transcoder);
int data_size;
int i;
- u32 val = I915_READ(ctl_reg);
+ u32 val = intel_de_read(dev_priv, ctl_reg);
data_size = hsw_dip_data_size(dev_priv, type);
- WARN_ON(len > data_size);
+ drm_WARN_ON(&dev_priv->drm, len > data_size);
val &= ~hsw_infoframe_enable(type);
- I915_WRITE(ctl_reg, val);
+ intel_de_write(dev_priv, ctl_reg, val);
for (i = 0; i < len; i += 4) {
- I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2), *data);
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ *data);
data++;
}
/* Write every possible data byte to force correct ECC calculation. */
for (; i < data_size; i += 4)
- I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2), 0);
+ intel_de_write(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2),
+ 0);
val |= hsw_infoframe_enable(type);
- I915_WRITE(ctl_reg, val);
- POSTING_READ(ctl_reg);
+ intel_de_write(dev_priv, ctl_reg, val);
+ intel_de_posting_read(dev_priv, ctl_reg);
}
static void hsw_read_infoframe(struct intel_encoder *encoder,
@@ -553,18 +566,19 @@ static void hsw_read_infoframe(struct intel_encoder *encoder,
u32 val, *data = frame;
int i;
- val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+ val = intel_de_read(dev_priv, HSW_TVIDEO_DIP_CTL(cpu_transcoder));
for (i = 0; i < len; i += 4)
- *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
- type, i >> 2));
+ *data++ = intel_de_read(dev_priv,
+ hsw_dip_data_reg(dev_priv, cpu_transcoder, type, i >> 2));
}
static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
const struct intel_crtc_state *pipe_config)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 val = I915_READ(HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ HSW_TVIDEO_DIP_CTL(pipe_config->cpu_transcoder));
u32 mask;
mask = (VIDEO_DIP_ENABLE_VSC_HSW | VIDEO_DIP_ENABLE_AVI_HSW |
@@ -655,12 +669,12 @@ static void intel_write_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(type)) == 0)
return;
- if (WARN_ON(frame->any.type != type))
+ if (drm_WARN_ON(encoder->base.dev, frame->any.type != type))
return;
/* see comment above for the reason for this offset */
len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
- if (WARN_ON(len < 0))
+ if (drm_WARN_ON(encoder->base.dev, len < 0))
return;
/* Insert the 'hole' (see big comment above) at position 3 */
@@ -734,8 +748,8 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
/* nonsense combination */
- WARN_ON(crtc_state->limited_color_range &&
- crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
+ drm_WARN_ON(encoder->base.dev, crtc_state->limited_color_range &&
+ crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB);
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_RGB) {
drm_hdmi_avi_infoframe_quant_range(frame, connector,
@@ -753,7 +767,7 @@ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
/* TODO: handle pixel repetition for YCBCR420 outputs */
ret = hdmi_avi_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -774,13 +788,13 @@ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
frame->sdi = HDMI_SPD_SDI_PC;
ret = hdmi_spd_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -806,11 +820,11 @@ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
conn_state->connector,
&crtc_state->hw.adjusted_mode);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
ret = hdmi_vendor_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(encoder->base.dev, ret))
return false;
return true;
@@ -844,7 +858,7 @@ intel_hdmi_compute_drm_infoframe(struct intel_encoder *encoder,
}
ret = hdmi_drm_infoframe_check(frame);
- if (WARN_ON(ret))
+ if (drm_WARN_ON(&dev_priv->drm, ret))
return false;
return true;
@@ -859,7 +873,7 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = VIDEO_DIP_CTL;
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -885,8 +899,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
}
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
@@ -904,8 +918,8 @@ static void g4x_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -982,7 +996,7 @@ static bool intel_hdmi_set_gcp_infoframe(struct intel_encoder *encoder,
else
return false;
- I915_WRITE(reg, crtc_state->infoframes.gcp);
+ intel_de_write(dev_priv, reg, crtc_state->infoframes.gcp);
return true;
}
@@ -1007,7 +1021,7 @@ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
else
return;
- crtc_state->infoframes.gcp = I915_READ(reg);
+ crtc_state->infoframes.gcp = intel_de_read(dev_priv, reg);
}
static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
@@ -1042,7 +1056,7 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
struct intel_digital_port *intel_dig_port = enc_to_dig_port(encoder);
struct intel_hdmi *intel_hdmi = &intel_dig_port->hdmi;
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1056,15 +1070,15 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- WARN(val & VIDEO_DIP_ENABLE,
- "DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
@@ -1077,8 +1091,8 @@ static void ibx_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1100,7 +1114,7 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1113,8 +1127,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
@@ -1126,8 +1140,8 @@ static void cpt_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1149,7 +1163,7 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
i915_reg_t reg = VLV_TVIDEO_DIP_CTL(intel_crtc->pipe);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
u32 port = VIDEO_DIP_PORT(encoder->port);
assert_hdmi_port_disabled(intel_hdmi);
@@ -1163,15 +1177,15 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
val &= ~(VIDEO_DIP_ENABLE | VIDEO_DIP_ENABLE_AVI |
VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
VIDEO_DIP_ENABLE_SPD | VIDEO_DIP_ENABLE_GCP);
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (port != (val & VIDEO_DIP_PORT_MASK)) {
- WARN(val & VIDEO_DIP_ENABLE,
- "DIP already enabled on port %c\n",
- (val & VIDEO_DIP_PORT_MASK) >> 29);
+ drm_WARN(&dev_priv->drm, val & VIDEO_DIP_ENABLE,
+ "DIP already enabled on port %c\n",
+ (val & VIDEO_DIP_PORT_MASK) >> 29);
val &= ~VIDEO_DIP_PORT_MASK;
val |= port;
}
@@ -1184,8 +1198,8 @@ static void vlv_set_infoframes(struct intel_encoder *encoder,
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1205,7 +1219,7 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
i915_reg_t reg = HSW_TVIDEO_DIP_CTL(crtc_state->cpu_transcoder);
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
assert_hdmi_transcoder_func_disabled(dev_priv,
crtc_state->cpu_transcoder);
@@ -1216,16 +1230,16 @@ static void hsw_set_infoframes(struct intel_encoder *encoder,
VIDEO_DIP_ENABLE_DRM_GLK);
if (!enable) {
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
return;
}
if (intel_hdmi_set_gcp_infoframe(encoder, crtc_state, conn_state))
val |= VIDEO_DIP_ENABLE_GCP_HSW;
- I915_WRITE(reg, val);
- POSTING_READ(reg);
+ intel_de_write(dev_priv, reg, val);
+ intel_de_posting_read(dev_priv, reg);
intel_write_infoframe(encoder, crtc_state,
HDMI_INFOFRAME_TYPE_AVI,
@@ -1260,10 +1274,9 @@ void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
unsigned int offset, void *buffer, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
u8 start = offset & 0xff;
@@ -1290,10 +1303,9 @@ static int intel_hdmi_hdcp_read(struct intel_digital_port *intel_dig_port,
static int intel_hdmi_hdcp_write(struct intel_digital_port *intel_dig_port,
unsigned int offset, void *buffer, size_t size)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
u8 *write_buf;
@@ -1325,10 +1337,9 @@ static
int intel_hdmi_hdcp_write_an_aksv(struct intel_digital_port *intel_dig_port,
u8 *an)
{
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
- struct i2c_adapter *adapter = intel_gmbus_get_adapter(dev_priv,
+ struct i2c_adapter *adapter = intel_gmbus_get_adapter(i915,
hdmi->ddc_bus);
int ret;
@@ -1447,7 +1458,7 @@ int intel_hdmi_hdcp_read_v_prime_part(struct intel_digital_port *intel_dig_port,
static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+ struct intel_digital_port *intel_dig_port = intel_attached_dig_port(connector);
struct drm_crtc *crtc = connector->base.state->crtc;
struct intel_crtc *intel_crtc = container_of(crtc,
struct intel_crtc, base);
@@ -1455,7 +1466,7 @@ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
int ret;
for (;;) {
- scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+ scanline = intel_de_read(dev_priv, PIPEDSL(intel_crtc->pipe));
if (scanline > 100 && scanline < 200)
break;
usleep_range(25, 50);
@@ -1507,8 +1518,7 @@ int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
static
bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
{
- struct drm_i915_private *dev_priv =
- intel_dig_port->base.base.dev->dev_private;
+ struct drm_i915_private *i915 = to_i915(intel_dig_port->base.base.dev);
struct intel_connector *connector =
intel_dig_port->hdmi.attached_connector;
enum port port = intel_dig_port->base.port;
@@ -1523,14 +1533,13 @@ bool intel_hdmi_hdcp_check_link(struct intel_digital_port *intel_dig_port)
if (ret)
return false;
- I915_WRITE(HDCP_RPRIME(dev_priv, cpu_transcoder, port), ri.reg);
+ intel_de_write(i915, HDCP_RPRIME(i915, cpu_transcoder, port), ri.reg);
/* Wait for Ri prime match */
- if (wait_for(I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder, port)) &
+ if (wait_for(intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)) &
(HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) {
DRM_ERROR("Ri' mismatch detected, link check failed (%x)\n",
- I915_READ(HDCP_STATUS(dev_priv, cpu_transcoder,
- port)));
+ intel_de_read(i915, HDCP_STATUS(i915, cpu_transcoder, port)));
return false;
}
return true;
@@ -1767,8 +1776,8 @@ static void intel_hdmi_prepare(struct intel_encoder *encoder,
else
hdmi_val |= SDVO_PIPE_SEL(crtc->pipe);
- I915_WRITE(intel_hdmi->hdmi_reg, hdmi_val);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, hdmi_val);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
static bool intel_hdmi_get_hw_state(struct intel_encoder *encoder,
@@ -1802,7 +1811,7 @@ static void intel_hdmi_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_HDMI);
- tmp = I915_READ(intel_hdmi->hdmi_reg);
+ tmp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
if (tmp & SDVO_HSYNC_ACTIVE_HIGH)
flags |= DRM_MODE_FLAG_PHSYNC;
@@ -1863,7 +1872,7 @@ static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
{
struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc);
- WARN_ON(!pipe_config->has_hdmi_sink);
+ drm_WARN_ON(encoder->base.dev, !pipe_config->has_hdmi_sink);
DRM_DEBUG_DRIVER("Enabling HDMI audio on pipe %c\n",
pipe_name(crtc->pipe));
intel_audio_codec_enable(encoder, pipe_config, conn_state);
@@ -1878,14 +1887,14 @@ static void g4x_enable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
temp |= HDMI_AUDIO_ENABLE;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
if (pipe_config->has_audio)
intel_enable_hdmi_audio(encoder, pipe_config, conn_state);
@@ -1900,7 +1909,7 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
@@ -1910,10 +1919,10 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to toggle enable bit off and on
@@ -1924,17 +1933,18 @@ static void ibx_enable_hdmi(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 24 &&
pipe_config->pixel_multiplier > 1) {
- I915_WRITE(intel_hdmi->hdmi_reg, temp & ~SDVO_ENABLE);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg,
+ temp & ~SDVO_ENABLE);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
}
if (pipe_config->has_audio)
@@ -1952,7 +1962,7 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
enum pipe pipe = crtc->pipe;
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp |= SDVO_ENABLE;
if (pipe_config->has_audio)
@@ -1969,27 +1979,25 @@ static void cpt_enable_hdmi(struct intel_encoder *encoder,
*/
if (pipe_config->pipe_bpp > 24) {
- I915_WRITE(TRANS_CHICKEN1(pipe),
- I915_READ(TRANS_CHICKEN1(pipe)) |
- TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) | TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= SDVO_COLOR_FORMAT_8bpc;
}
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
if (pipe_config->pipe_bpp > 24) {
temp &= ~SDVO_COLOR_FORMAT_MASK;
temp |= HDMI_COLOR_FORMAT_12bpc;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
- I915_WRITE(TRANS_CHICKEN1(pipe),
- I915_READ(TRANS_CHICKEN1(pipe)) &
- ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
+ intel_de_write(dev_priv, TRANS_CHICKEN1(pipe),
+ intel_de_read(dev_priv, TRANS_CHICKEN1(pipe)) & ~TRANS_CHICKEN1_HDMIUNIT_GC_DISABLE);
}
if (pipe_config->has_audio)
@@ -2014,11 +2022,11 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
struct intel_crtc *crtc = to_intel_crtc(old_crtc_state->uapi.crtc);
u32 temp;
- temp = I915_READ(intel_hdmi->hdmi_reg);
+ temp = intel_de_read(dev_priv, intel_hdmi->hdmi_reg);
temp &= ~(SDVO_ENABLE | HDMI_AUDIO_ENABLE);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
/*
* HW workaround for IBX, we need to move the port
@@ -2039,14 +2047,14 @@ static void intel_disable_hdmi(struct intel_encoder *encoder,
* HW workaround, need to write this twice for issue
* that may result in first write getting masked.
*/
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
temp &= ~SDVO_ENABLE;
- I915_WRITE(intel_hdmi->hdmi_reg, temp);
- POSTING_READ(intel_hdmi->hdmi_reg);
+ intel_de_write(dev_priv, intel_hdmi->hdmi_reg, temp);
+ intel_de_posting_read(dev_priv, intel_hdmi->hdmi_reg);
intel_wait_for_vblank_if_active(dev_priv, PIPE_A);
intel_set_cpu_fifo_underrun_reporting(dev_priv, PIPE_A, true);
@@ -2090,9 +2098,7 @@ static void pch_post_disable_hdmi(struct intel_encoder *encoder,
static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[encoder->port];
- int max_tmds_clock;
+ int max_tmds_clock, vbt_max_tmds_clock;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
max_tmds_clock = 594000;
@@ -2103,15 +2109,23 @@ static int intel_hdmi_source_max_tmds_clock(struct intel_encoder *encoder)
else
max_tmds_clock = 165000;
- if (info->max_tmds_clock)
- max_tmds_clock = min(max_tmds_clock, info->max_tmds_clock);
+ vbt_max_tmds_clock = intel_bios_max_tmds_clock(encoder);
+ if (vbt_max_tmds_clock)
+ max_tmds_clock = min(max_tmds_clock, vbt_max_tmds_clock);
return max_tmds_clock;
}
+static bool intel_has_hdmi_sink(struct intel_hdmi *hdmi,
+ const struct drm_connector_state *conn_state)
+{
+ return hdmi->has_hdmi_sink &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
+
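
intel_has_hdmi_sink() folds the "force audio off (DVI)" connector property into the sink check itself, so the callers below stop threading a separate force_dvi flag. The call-site shape after the refactor, as seen in the hunks that follow:

	bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);

	status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
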
static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
bool respect_downstream_limits,
- bool force_dvi)
+ bool has_hdmi_sink)
{
struct intel_encoder *encoder = &hdmi_to_dig_port(hdmi)->base;
int max_tmds_clock = intel_hdmi_source_max_tmds_clock(encoder);
@@ -2127,7 +2141,7 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
if (info->max_tmds_clock)
max_tmds_clock = min(max_tmds_clock,
info->max_tmds_clock);
- else if (!hdmi->has_hdmi_sink || force_dvi)
+ else if (!has_hdmi_sink)
max_tmds_clock = min(max_tmds_clock, 165000);
}
@@ -2137,13 +2151,14 @@ static int hdmi_port_clock_limit(struct intel_hdmi *hdmi,
static enum drm_mode_status
hdmi_port_clock_valid(struct intel_hdmi *hdmi,
int clock, bool respect_downstream_limits,
- bool force_dvi)
+ bool has_hdmi_sink)
{
struct drm_i915_private *dev_priv = to_i915(intel_hdmi_to_dev(hdmi));
if (clock < 25000)
return MODE_CLOCK_LOW;
- if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits, force_dvi))
+ if (clock > hdmi_port_clock_limit(hdmi, respect_downstream_limits,
+ has_hdmi_sink))
return MODE_CLOCK_HIGH;
/* BXT DPLL can't generate 223-240 MHz */
@@ -2165,16 +2180,13 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
struct drm_device *dev = intel_hdmi_to_dev(hdmi);
struct drm_i915_private *dev_priv = to_i915(dev);
enum drm_mode_status status;
- int clock;
+ int clock = mode->clock;
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
- bool force_dvi =
- READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+ bool has_hdmi_sink = intel_has_hdmi_sink(hdmi, connector->state);
if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
return MODE_NO_DBLESCAN;
- clock = mode->clock;
-
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
clock *= 2;
@@ -2188,18 +2200,18 @@ intel_hdmi_mode_valid(struct drm_connector *connector,
clock /= 2;
/* check if we can do 8bpc */
- status = hdmi_port_clock_valid(hdmi, clock, true, force_dvi);
+ status = hdmi_port_clock_valid(hdmi, clock, true, has_hdmi_sink);
- if (hdmi->has_hdmi_sink && !force_dvi) {
+ if (has_hdmi_sink) {
/* if we can't do 8bpc we may still be able to do 12bpc */
if (status != MODE_OK && !HAS_GMCH(dev_priv))
status = hdmi_port_clock_valid(hdmi, clock * 3 / 2,
- true, force_dvi);
+ true, has_hdmi_sink);
/* if we can't do 8,12bpc we may still be able to do 10bpc */
if (status != MODE_OK && INTEL_GEN(dev_priv) >= 11)
status = hdmi_port_clock_valid(hdmi, clock * 5 / 4,
- true, force_dvi);
+ true, has_hdmi_sink);
}
if (status != MODE_OK)
return status;
@@ -2263,14 +2275,9 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state,
}
}
- /* Display WA #1139: glk */
- if (bpc == 12 && IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1) &&
- adjusted_mode->htotal > 5460)
- return false;
-
- /* Display Wa_1405510057:icl */
+ /* Display Wa_1405510057:icl,ehl */
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
- bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
+ bpc == 10 && IS_GEN(dev_priv, 11) &&
(adjusted_mode->crtc_hblank_end -
adjusted_mode->crtc_hblank_start) % 8 == 2)
return false;
@@ -2315,7 +2322,7 @@ static int intel_hdmi_port_clock(int clock, int bpc)
static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
struct intel_crtc_state *crtc_state,
- int clock, bool force_dvi)
+ int clock)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
int bpc;
@@ -2324,7 +2331,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
if (hdmi_deep_color_possible(crtc_state, bpc) &&
hdmi_port_clock_valid(intel_hdmi,
intel_hdmi_port_clock(clock, bpc),
- true, force_dvi) == MODE_OK)
+ true, crtc_state->has_hdmi_sink) == MODE_OK)
return bpc;
}
@@ -2332,8 +2339,7 @@ static int intel_hdmi_compute_bpc(struct intel_encoder *encoder,
}
static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
- struct intel_crtc_state *crtc_state,
- bool force_dvi)
+ struct intel_crtc_state *crtc_state)
{
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder);
const struct drm_display_mode *adjusted_mode =
@@ -2347,8 +2353,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
clock /= 2;
- bpc = intel_hdmi_compute_bpc(encoder, crtc_state,
- clock, force_dvi);
+ bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock);
crtc_state->port_clock = intel_hdmi_port_clock(clock, bpc);
@@ -2364,7 +2369,7 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder,
bpc, crtc_state->pipe_bpp);
if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock,
- false, force_dvi) != MODE_OK) {
+ false, crtc_state->has_hdmi_sink) != MODE_OK) {
DRM_DEBUG_KMS("unsupported HDMI clock (%d kHz), rejecting mode\n",
crtc_state->port_clock);
return -EINVAL;
@@ -2412,14 +2417,14 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
struct drm_scdc *scdc = &connector->display_info.hdmi.scdc;
struct intel_digital_connector_state *intel_conn_state =
to_intel_digital_connector_state(conn_state);
- bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
int ret;
if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
return -EINVAL;
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_hdmi,
+ conn_state);
if (pipe_config->has_hdmi_sink)
pipe_config->has_infoframe = true;
@@ -2448,7 +2453,7 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder,
intel_conn_state->force_audio == HDMI_AUDIO_ON;
}
- ret = intel_hdmi_compute_clock(encoder, pipe_config, force_dvi);
+ ret = intel_hdmi_compute_clock(encoder, pipe_config);
if (ret)
return ret;
@@ -2808,7 +2813,7 @@ intel_hdmi_connector_register(struct drm_connector *connector)
if (ret)
return ret;
- i915_debugfs_connector_add(connector);
+ intel_connector_debugfs_add(connector);
intel_hdmi_create_i2c_symlink(connector);
@@ -3002,7 +3007,7 @@ static u8 icl_port_to_ddc_pin(struct drm_i915_private *dev_priv, enum port port)
else if (intel_phy_is_tc(dev_priv, phy))
return GMBUS_PIN_9_TC1_ICP + intel_port_to_tc(dev_priv, port);
- WARN(1, "Unknown port:%c\n", port_name(port));
+ drm_WARN(&dev_priv->drm, 1, "Unknown port:%c\n", port_name(port));
return GMBUS_PIN_2_BXT;
}
@@ -3052,17 +3057,17 @@ static u8 g4x_port_to_ddc_pin(struct drm_i915_private *dev_priv,
return ddc_pin;
}
-static u8 intel_hdmi_ddc_pin(struct drm_i915_private *dev_priv,
- enum port port)
+static u8 intel_hdmi_ddc_pin(struct intel_encoder *encoder)
{
- const struct ddi_vbt_port_info *info =
- &dev_priv->vbt.ddi_port_info[port];
+ struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ enum port port = encoder->port;
u8 ddc_pin;
- if (info->alternate_ddc_pin) {
+ ddc_pin = intel_bios_alternate_ddc_pin(encoder);
+ if (ddc_pin) {
DRM_DEBUG_KMS("Using DDC pin 0x%x for port %c (VBT)\n",
- info->alternate_ddc_pin, port_name(port));
- return info->alternate_ddc_pin;
+ ddc_pin, port_name(port));
+ return ddc_pin;
}
if (HAS_PCH_MCC(dev_priv))
@@ -3139,16 +3144,16 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
DRM_DEBUG_KMS("Adding HDMI connector on [ENCODER:%d:%s]\n",
intel_encoder->base.base.id, intel_encoder->base.name);
- if (INTEL_GEN(dev_priv) < 12 && WARN_ON(port == PORT_A))
+ if (INTEL_GEN(dev_priv) < 12 && drm_WARN_ON(dev, port == PORT_A))
return;
- if (WARN(intel_dig_port->max_lanes < 4,
- "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
- intel_dig_port->max_lanes, intel_encoder->base.base.id,
- intel_encoder->base.name))
+ if (drm_WARN(dev, intel_dig_port->max_lanes < 4,
+ "Not enough lanes (%d) for HDMI on [ENCODER:%d:%s]\n",
+ intel_dig_port->max_lanes, intel_encoder->base.base.id,
+ intel_encoder->base.name))
return;
- intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(dev_priv, port);
+ intel_hdmi->ddc_bus = intel_hdmi_ddc_pin(intel_encoder);
ddc = intel_gmbus_get_adapter(dev_priv, intel_hdmi->ddc_bus);
drm_connector_init_with_ddc(dev, connector,
@@ -3165,6 +3170,7 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
connector->ycbcr_420_allowed = true;
intel_encoder->hpd_pin = intel_hpd_pin_default(dev_priv, port);
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
if (HAS_DDI(dev_priv))
intel_connector->get_hw_state = intel_ddi_connector_get_hw_state;
@@ -3188,8 +3194,9 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
* generated on the port when a cable is not attached.
*/
if (IS_G45(dev_priv)) {
- u32 temp = I915_READ(PEG_BAND_GAP_DATA);
- I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
+ u32 temp = intel_de_read(dev_priv, PEG_BAND_GAP_DATA);
+ intel_de_write(dev_priv, PEG_BAND_GAP_DATA,
+ (temp & ~0xf) | 0xd);
}
cec_fill_conn_info_from_drm(&conn_info, connector);
diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h
index d3659d0b408b..8ff1f76a63df 100644
--- a/drivers/gpu/drm/i915/display/intel_hdmi.h
+++ b/drivers/gpu/drm/i915/display/intel_hdmi.h
@@ -9,8 +9,6 @@
#include <linux/hdmi.h>
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_connector;
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c
index 99d3a3c7989e..a091442efba4 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.c
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.c
@@ -23,8 +23,6 @@
#include <linux/kernel.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_hotplug.h"
@@ -89,29 +87,16 @@
enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
enum port port)
{
- switch (port) {
- case PORT_A:
- return HPD_PORT_A;
- case PORT_B:
- return HPD_PORT_B;
- case PORT_C:
- return HPD_PORT_C;
- case PORT_D:
- return HPD_PORT_D;
- case PORT_E:
- return HPD_PORT_E;
- case PORT_F:
- if (IS_CNL_WITH_PORT_F(dev_priv))
- return HPD_PORT_E;
- return HPD_PORT_F;
- case PORT_G:
- return HPD_PORT_G;
- case PORT_H:
- return HPD_PORT_H;
- case PORT_I:
- return HPD_PORT_I;
+ enum phy phy = intel_port_to_phy(dev_priv, port);
+
+ switch (phy) {
+ case PHY_F:
+ return IS_CNL_WITH_PORT_F(dev_priv) ? HPD_PORT_E : HPD_PORT_F;
+ case PHY_A ... PHY_E:
+ case PHY_G ... PHY_I:
+ return HPD_PORT_A + phy - PHY_A;
default:
- MISSING_CASE(port);
+ MISSING_CASE(phy);
return HPD_NONE;
}
}
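
The rewritten intel_hpd_pin_default() trades the per-port switch for arithmetic on the PHY, which only works because the hpd_pin and phy enumerators are declared in matching, gap-free order. A minimal illustration of that assumption:

	/* PHY_A..PHY_I and HPD_PORT_A..HPD_PORT_I are parallel enums, so the
	 * offset from PHY_A selects the corresponding pin directly. */
	enum phy phy = PHY_D;
	enum hpd_pin pin = HPD_PORT_A + phy - PHY_A;	/* == HPD_PORT_D */
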
@@ -120,6 +105,20 @@ enum hpd_pin intel_hpd_pin_default(struct drm_i915_private *dev_priv,
#define HPD_STORM_REENABLE_DELAY (2 * 60 * 1000)
#define HPD_RETRY_DELAY 1000
+static enum hpd_pin
+intel_connector_hpd_pin(struct intel_connector *connector)
+{
+ struct intel_encoder *encoder = intel_attached_encoder(connector);
+
+ /*
+ * MST connectors get their encoder attached dynamically
+ * so need to make sure we have an encoder here. But since
+ * MST encoders have their hpd_pin set to HPD_NONE we don't
+ * have to special case them beyond that.
+ */
+ return encoder ? encoder->hpd_pin : HPD_NONE;
+}
+
/**
* intel_hpd_irq_storm_detect - gather stats and detect HPD IRQ storm on a pin
* @dev_priv: private driver data pointer
@@ -171,10 +170,13 @@ static bool intel_hpd_irq_storm_detect(struct drm_i915_private *dev_priv,
hpd->stats[pin].count += increment;
if (hpd->stats[pin].count > threshold) {
hpd->stats[pin].state = HPD_MARK_DISABLED;
- DRM_DEBUG_KMS("HPD interrupt storm detected on PIN %d\n", pin);
+ drm_dbg_kms(&dev_priv->drm,
+ "HPD interrupt storm detected on PIN %d\n", pin);
storm = true;
} else {
- DRM_DEBUG_KMS("Received HPD interrupt on PIN %d - cnt: %d\n", pin,
+ drm_dbg_kms(&dev_priv->drm,
+ "Received HPD interrupt on PIN %d - cnt: %d\n",
+ pin,
hpd->stats[pin].count);
}
@@ -185,37 +187,32 @@ static void
intel_hpd_irq_storm_switch_to_polling(struct drm_i915_private *dev_priv)
{
struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
- enum hpd_pin pin;
+ struct intel_connector *connector;
bool hpd_disabled = false;
lockdep_assert_held(&dev_priv->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->polled != DRM_CONNECTOR_POLL_HPD)
- continue;
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
- intel_connector = to_intel_connector(connector);
- intel_encoder = intel_connector->encoder;
- if (!intel_encoder)
+ if (connector->base.polled != DRM_CONNECTOR_POLL_HPD)
continue;
- pin = intel_encoder->hpd_pin;
+ pin = intel_connector_hpd_pin(connector);
if (pin == HPD_NONE ||
dev_priv->hotplug.stats[pin].state != HPD_MARK_DISABLED)
continue;
- DRM_INFO("HPD interrupt storm detected on connector %s: "
+ drm_info(&dev_priv->drm,
+ "HPD interrupt storm detected on connector %s: "
"switching from hotplug detection to polling\n",
- connector->name);
+ connector->base.name);
dev_priv->hotplug.stats[pin].state = HPD_DISABLED;
- connector->polled = DRM_CONNECTOR_POLL_CONNECT
- | DRM_CONNECTOR_POLL_DISCONNECT;
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
hpd_disabled = true;
}
drm_connector_list_iter_end(&conn_iter);
@@ -234,40 +231,38 @@ static void intel_hpd_irq_storm_reenable_work(struct work_struct *work)
container_of(work, typeof(*dev_priv),
hotplug.reenable_work.work);
struct drm_device *dev = &dev_priv->drm;
+ struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
intel_wakeref_t wakeref;
enum hpd_pin pin;
wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_irq(&dev_priv->irq_lock);
- for_each_hpd_pin(pin) {
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- if (dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE ||
+ dev_priv->hotplug.stats[pin].state != HPD_DISABLED)
continue;
- dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- /* Don't check MST ports, they don't have pins */
- if (!intel_connector->mst_port &&
- intel_connector->encoder->hpd_pin == pin) {
- if (connector->polled != intel_connector->polled)
- DRM_DEBUG_DRIVER("Reenabling HPD on connector %s\n",
- connector->name);
- connector->polled = intel_connector->polled;
- if (!connector->polled)
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
+ if (connector->base.polled != connector->polled)
+ drm_dbg(&dev_priv->drm,
+ "Reenabling HPD on connector %s\n",
+ connector->base.name);
+ connector->base.polled = connector->polled;
}
+ drm_connector_list_iter_end(&conn_iter);
+
+ for_each_hpd_pin(pin) {
+ if (dev_priv->hotplug.stats[pin].state == HPD_DISABLED)
+ dev_priv->hotplug.stats[pin].state = HPD_ENABLED;
+ }
+
if (dev_priv->display_irqs_enabled && dev_priv->display.hpd_irq_setup)
dev_priv->display.hpd_irq_setup(dev_priv);
+
spin_unlock_irq(&dev_priv->irq_lock);
intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
@@ -281,7 +276,7 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
struct drm_device *dev = connector->base.dev;
enum drm_connector_status old_status;
- WARN_ON(!mutex_is_locked(&dev->mode_config.mutex));
+ drm_WARN_ON(dev, !mutex_is_locked(&dev->mode_config.mutex));
old_status = connector->base.status;
connector->base.status =
@@ -290,11 +285,12 @@ intel_encoder_hotplug(struct intel_encoder *encoder,
if (old_status == connector->base.status)
return INTEL_HOTPLUG_UNCHANGED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s\n",
- connector->base.base.id,
- connector->base.name,
- drm_get_connector_status_name(old_status),
- drm_get_connector_status_name(connector->base.status));
+ drm_dbg_kms(&to_i915(dev)->drm,
+ "[CONNECTOR:%d:%s] status updated from %s to %s\n",
+ connector->base.base.id,
+ connector->base.name,
+ drm_get_connector_status_name(old_status),
+ drm_get_connector_status_name(connector->base.status));
return INTEL_HOTPLUG_CHANGED;
}
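
The logging conversion follows the same rule as the register accessors: the drm_dbg_kms()/drm_info()/drm_err() variants take the struct drm_device, so messages are tagged with the specific device instead of being anonymous, which matters on multi-GPU systems. A representative before/after pair (the exact log prefix is decided by the DRM core):

	DRM_DEBUG_KMS("status updated\n");			/* old: no device */
	drm_dbg_kms(&dev_priv->drm, "status updated\n");	/* new: per-device */
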
@@ -361,16 +357,14 @@ static void i915_hotplug_work_func(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.hotplug_work.work);
struct drm_device *dev = &dev_priv->drm;
- struct intel_connector *intel_connector;
- struct intel_encoder *intel_encoder;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
u32 changed = 0, retry = 0;
u32 hpd_event_bits;
u32 hpd_retry_bits;
mutex_lock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("running encoder hotplug functions\n");
+ drm_dbg_kms(&dev_priv->drm, "running encoder hotplug functions\n");
spin_lock_irq(&dev_priv->irq_lock);
@@ -385,21 +379,25 @@ static void i915_hotplug_work_func(struct work_struct *work)
spin_unlock_irq(&dev_priv->irq_lock);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
u32 hpd_bit;
- intel_connector = to_intel_connector(connector);
- if (!intel_connector->encoder)
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
continue;
- intel_encoder = intel_connector->encoder;
- hpd_bit = BIT(intel_encoder->hpd_pin);
+
+ hpd_bit = BIT(pin);
if ((hpd_event_bits | hpd_retry_bits) & hpd_bit) {
- DRM_DEBUG_KMS("Connector %s (pin %i) received hotplug event.\n",
- connector->name, intel_encoder->hpd_pin);
+ struct intel_encoder *encoder =
+ intel_attached_encoder(connector);
+
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s (pin %i) received hotplug event.\n",
+ connector->base.name, pin);
- switch (intel_encoder->hotplug(intel_encoder,
- intel_connector,
- hpd_event_bits & hpd_bit)) {
+ switch (encoder->hotplug(encoder, connector,
+ hpd_event_bits & hpd_bit)) {
case INTEL_HOTPLUG_UNCHANGED:
break;
case INTEL_HOTPLUG_CHANGED:
@@ -481,9 +479,10 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
long_hpd = long_mask & BIT(pin);
- DRM_DEBUG_DRIVER("digital hpd on [ENCODER:%d:%s] - %s\n",
- encoder->base.base.id, encoder->base.name,
- long_hpd ? "long" : "short");
+ drm_dbg(&dev_priv->drm,
+ "digital hpd on [ENCODER:%d:%s] - %s\n",
+ encoder->base.base.id, encoder->base.name,
+ long_hpd ? "long" : "short");
queue_dig = true;
if (long_hpd) {
@@ -509,8 +508,9 @@ void intel_hpd_irq_handler(struct drm_i915_private *dev_priv,
* hotplug bits itself. So only WARN about unexpected
* interrupts on saner platforms.
*/
- WARN_ONCE(!HAS_GMCH(dev_priv),
- "Received HPD interrupt on pin %d although disabled\n", pin);
+ drm_WARN_ONCE(&dev_priv->drm, !HAS_GMCH(dev_priv),
+ "Received HPD interrupt on pin %d although disabled\n",
+ pin);
continue;
}
@@ -601,8 +601,8 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
container_of(work, struct drm_i915_private,
hotplug.poll_init_work);
struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
struct drm_connector_list_iter conn_iter;
+ struct intel_connector *connector;
bool enabled;
mutex_lock(&dev->mode_config.mutex);
@@ -610,23 +610,18 @@ static void i915_hpd_poll_init_work(struct work_struct *work)
enabled = READ_ONCE(dev_priv->hotplug.poll_enabled);
drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_connector *intel_connector =
- to_intel_connector(connector);
- connector->polled = intel_connector->polled;
-
- /* MST has a dynamic intel_connector->encoder and it's reprobing
- * is all handled by the MST helpers. */
- if (intel_connector->mst_port)
+ for_each_intel_connector_iter(connector, &conn_iter) {
+ enum hpd_pin pin;
+
+ pin = intel_connector_hpd_pin(connector);
+ if (pin == HPD_NONE)
continue;
- if (!connector->polled && I915_HAS_HOTPLUG(dev_priv) &&
- intel_connector->encoder->hpd_pin > HPD_NONE) {
- connector->polled = enabled ?
- DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT :
- DRM_CONNECTOR_POLL_HPD;
- }
+ connector->base.polled = connector->polled;
+
+ if (enabled && connector->base.polled == DRM_CONNECTOR_POLL_HPD)
+ connector->base.polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
}
drm_connector_list_iter_end(&conn_iter);
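
The hotplug rework also standardizes connector iteration: loops now walk intel_connectors directly with for_each_intel_connector_iter() instead of walking drm_connectors and converting each one with to_intel_connector(). The skeleton, as used throughout the hunks above:

	struct drm_connector_list_iter conn_iter;
	struct intel_connector *connector;

	drm_connector_list_iter_begin(dev, &conn_iter);
	for_each_intel_connector_iter(connector, &conn_iter) {
		/* per-connector work; intel_connector_hpd_pin() returns
		 * HPD_NONE for MST connectors without an attached encoder */
	}
	drm_connector_list_iter_end(&conn_iter);
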
diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.h b/drivers/gpu/drm/i915/display/intel_hotplug.h
index 087b5f57b321..1e6b4fda2900 100644
--- a/drivers/gpu/drm/i915/display/intel_hotplug.h
+++ b/drivers/gpu/drm/i915/display/intel_hotplug.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
struct drm_i915_private;
struct intel_connector;
struct intel_encoder;
diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
index 0b67f7887cd0..ad5cc13037ae 100644
--- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c
@@ -71,6 +71,7 @@
#include <drm/intel_lpe_audio.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_lpe_audio.h"
#define HAS_LPE_AUDIO(dev_priv) ((dev_priv)->lpe_audio.platdev != NULL)
@@ -126,7 +127,8 @@ lpe_audio_platdev_create(struct drm_i915_private *dev_priv)
kfree(pdata);
if (IS_ERR(platdev)) {
- DRM_ERROR("Failed to allocate LPE audio platform device\n");
+ drm_err(&dev_priv->drm,
+ "Failed to allocate LPE audio platform device\n");
return platdev;
}
@@ -166,7 +168,7 @@ static int lpe_audio_irq_init(struct drm_i915_private *dev_priv)
{
int irq = dev_priv->lpe_audio.irq;
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
irq_set_chip_and_handler_name(irq,
&lpe_audio_irqchip,
handle_simple_irq,
@@ -189,7 +191,8 @@ static bool lpe_audio_detect(struct drm_i915_private *dev_priv)
};
if (!pci_dev_present(atom_hdaudio_ids)) {
- DRM_INFO("HDaudio controller not detected, using LPE audio instead\n");
+ drm_info(&dev_priv->drm,
+ "HDaudio controller not detected, using LPE audio instead\n");
lpe_present = true;
}
}
@@ -202,18 +205,19 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
dev_priv->lpe_audio.irq = irq_alloc_desc(0);
if (dev_priv->lpe_audio.irq < 0) {
- DRM_ERROR("Failed to allocate IRQ desc: %d\n",
+ drm_err(&dev_priv->drm, "Failed to allocate IRQ desc: %d\n",
dev_priv->lpe_audio.irq);
ret = dev_priv->lpe_audio.irq;
goto err;
}
- DRM_DEBUG("irq = %d\n", dev_priv->lpe_audio.irq);
+ drm_dbg(&dev_priv->drm, "irq = %d\n", dev_priv->lpe_audio.irq);
ret = lpe_audio_irq_init(dev_priv);
if (ret) {
- DRM_ERROR("Failed to initialize irqchip for lpe audio: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to initialize irqchip for lpe audio: %d\n",
ret);
goto err_free_irq;
}
@@ -222,7 +226,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
if (IS_ERR(dev_priv->lpe_audio.platdev)) {
ret = PTR_ERR(dev_priv->lpe_audio.platdev);
- DRM_ERROR("Failed to create lpe audio platform device: %d\n",
+ drm_err(&dev_priv->drm,
+ "Failed to create lpe audio platform device: %d\n",
ret);
goto err_free_irq;
}
@@ -230,7 +235,8 @@ static int lpe_audio_setup(struct drm_i915_private *dev_priv)
/* enable chicken bit; at least this is required for Dell Wyse 3040
* with DP outputs (but only sometimes by some reason!)
*/
- I915_WRITE(VLV_AUD_CHICKEN_BIT_REG, VLV_CHICKEN_BIT_DBG_ENABLE);
+ intel_de_write(dev_priv, VLV_AUD_CHICKEN_BIT_REG,
+ VLV_CHICKEN_BIT_DBG_ENABLE);
return 0;
err_free_irq:
@@ -257,8 +263,8 @@ void intel_lpe_audio_irq_handler(struct drm_i915_private *dev_priv)
ret = generic_handle_irq(dev_priv->lpe_audio.irq);
if (ret)
- DRM_ERROR_RATELIMITED("error handling LPE audio irq: %d\n",
- ret);
+ drm_err_ratelimited(&dev_priv->drm,
+ "error handling LPE audio irq: %d\n", ret);
}
/**
@@ -276,7 +282,8 @@ int intel_lpe_audio_init(struct drm_i915_private *dev_priv)
if (lpe_audio_detect(dev_priv)) {
ret = lpe_audio_setup(dev_priv);
if (ret < 0)
- DRM_ERROR("failed to setup LPE Audio bridge\n");
+ drm_err(&dev_priv->drm,
+ "failed to setup LPE Audio bridge\n");
}
return ret;
}
@@ -334,7 +341,7 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
spin_lock_irqsave(&pdata->lpe_audio_slock, irqflags);
- audio_enable = I915_READ(VLV_AUD_PORT_EN_DBG(port));
+ audio_enable = intel_de_read(dev_priv, VLV_AUD_PORT_EN_DBG(port));
if (eld != NULL) {
memcpy(ppdata->eld, eld, HDMI_MAX_ELD_BYTES);
@@ -343,8 +350,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = dp_output;
/* Unmute the amp for both DP and HDMI */
- I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
- audio_enable & ~VLV_AMP_MUTE);
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable & ~VLV_AMP_MUTE);
} else {
memset(ppdata->eld, 0, HDMI_MAX_ELD_BYTES);
ppdata->pipe = -1;
@@ -352,8 +359,8 @@ void intel_lpe_audio_notify(struct drm_i915_private *dev_priv,
ppdata->dp_output = false;
/* Mute the amp for both DP and HDMI */
- I915_WRITE(VLV_AUD_PORT_EN_DBG(port),
- audio_enable | VLV_AMP_MUTE);
+ intel_de_write(dev_priv, VLV_AUD_PORT_EN_DBG(port),
+ audio_enable | VLV_AMP_MUTE);
}
if (pdata->notify_audio_lpe)
diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c
index 10696bb99dcf..9a067effcfa0 100644
--- a/drivers/gpu/drm/i915/display/intel_lvds.c
+++ b/drivers/gpu/drm/i915/display/intel_lvds.c
@@ -37,7 +37,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -85,7 +84,7 @@ bool intel_lvds_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(lvds_reg);
+ val = intel_de_read(dev_priv, lvds_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -125,7 +124,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_LVDS);
- tmp = I915_READ(lvds_encoder->reg);
+ tmp = intel_de_read(dev_priv, lvds_encoder->reg);
if (tmp & LVDS_HSYNC_POLARITY)
flags |= DRM_MODE_FLAG_NHSYNC;
else
@@ -143,7 +142,7 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
/* gen2/3 store dither state in pfit control, needs to match */
if (INTEL_GEN(dev_priv) < 4) {
- tmp = I915_READ(PFIT_CONTROL);
+ tmp = intel_de_read(dev_priv, PFIT_CONTROL);
pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
}
@@ -156,18 +155,18 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
{
u32 val;
- pps->powerdown_on_reset = I915_READ(PP_CONTROL(0)) & PANEL_POWER_RESET;
+ pps->powerdown_on_reset = intel_de_read(dev_priv, PP_CONTROL(0)) & PANEL_POWER_RESET;
- val = I915_READ(PP_ON_DELAYS(0));
+ val = intel_de_read(dev_priv, PP_ON_DELAYS(0));
pps->port = REG_FIELD_GET(PANEL_PORT_SELECT_MASK, val);
pps->t1_t2 = REG_FIELD_GET(PANEL_POWER_UP_DELAY_MASK, val);
pps->t5 = REG_FIELD_GET(PANEL_LIGHT_ON_DELAY_MASK, val);
- val = I915_READ(PP_OFF_DELAYS(0));
+ val = intel_de_read(dev_priv, PP_OFF_DELAYS(0));
pps->t3 = REG_FIELD_GET(PANEL_POWER_DOWN_DELAY_MASK, val);
pps->tx = REG_FIELD_GET(PANEL_LIGHT_OFF_DELAY_MASK, val);
- val = I915_READ(PP_DIVISOR(0));
+ val = intel_de_read(dev_priv, PP_DIVISOR(0));
pps->divider = REG_FIELD_GET(PP_REFERENCE_DIVIDER_MASK, val);
val = REG_FIELD_GET(PANEL_POWER_CYCLE_DELAY_MASK, val);
/*
@@ -182,8 +181,9 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
if (INTEL_GEN(dev_priv) <= 4 &&
pps->t1_t2 == 0 && pps->t5 == 0 && pps->t3 == 0 && pps->tx == 0) {
- DRM_DEBUG_KMS("Panel power timings uninitialized, "
- "setting defaults\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel power timings uninitialized, "
+ "setting defaults\n");
/* Set T2 to 40ms and T5 to 200ms in 100 usec units */
pps->t1_t2 = 40 * 10;
pps->t5 = 200 * 10;
@@ -192,10 +192,10 @@ static void intel_lvds_pps_get_hw_state(struct drm_i915_private *dev_priv,
pps->tx = 200 * 10;
}
- DRM_DEBUG_DRIVER("LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
- "divider %d port %d powerdown_on_reset %d\n",
- pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
- pps->divider, pps->port, pps->powerdown_on_reset);
+ drm_dbg(&dev_priv->drm, "LVDS PPS:t1+t2 %d t3 %d t4 %d t5 %d tx %d "
+ "divider %d port %d powerdown_on_reset %d\n",
+ pps->t1_t2, pps->t3, pps->t4, pps->t5, pps->tx,
+ pps->divider, pps->port, pps->powerdown_on_reset);
}
static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
@@ -203,25 +203,21 @@ static void intel_lvds_pps_init_hw(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(PP_CONTROL(0));
- WARN_ON((val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
+ val = intel_de_read(dev_priv, PP_CONTROL(0));
+ drm_WARN_ON(&dev_priv->drm,
+ (val & PANEL_UNLOCK_MASK) != PANEL_UNLOCK_REGS);
if (pps->powerdown_on_reset)
val |= PANEL_POWER_RESET;
- I915_WRITE(PP_CONTROL(0), val);
+ intel_de_write(dev_priv, PP_CONTROL(0), val);
- I915_WRITE(PP_ON_DELAYS(0),
- REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) |
- REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) |
- REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
+ intel_de_write(dev_priv, PP_ON_DELAYS(0),
+ REG_FIELD_PREP(PANEL_PORT_SELECT_MASK, pps->port) | REG_FIELD_PREP(PANEL_POWER_UP_DELAY_MASK, pps->t1_t2) | REG_FIELD_PREP(PANEL_LIGHT_ON_DELAY_MASK, pps->t5));
- I915_WRITE(PP_OFF_DELAYS(0),
- REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) |
- REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
+ intel_de_write(dev_priv, PP_OFF_DELAYS(0),
+ REG_FIELD_PREP(PANEL_POWER_DOWN_DELAY_MASK, pps->t3) | REG_FIELD_PREP(PANEL_LIGHT_OFF_DELAY_MASK, pps->tx));
- I915_WRITE(PP_DIVISOR(0),
- REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) |
- REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK,
- DIV_ROUND_UP(pps->t4, 1000) + 1));
+ intel_de_write(dev_priv, PP_DIVISOR(0),
+ REG_FIELD_PREP(PP_REFERENCE_DIVIDER_MASK, pps->divider) | REG_FIELD_PREP(PANEL_POWER_CYCLE_DELAY_MASK, DIV_ROUND_UP(pps->t4, 1000) + 1));
}
static void intel_pre_enable_lvds(struct intel_encoder *encoder,
@@ -299,7 +295,7 @@ static void intel_pre_enable_lvds(struct intel_encoder *encoder,
if (adjusted_mode->flags & DRM_MODE_FLAG_NVSYNC)
temp |= LVDS_VSYNC_POLARITY;
- I915_WRITE(lvds_encoder->reg, temp);
+ intel_de_write(dev_priv, lvds_encoder->reg, temp);
}
/*
@@ -313,13 +309,16 @@ static void intel_enable_lvds(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) | LVDS_PORT_EN);
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) | LVDS_PORT_EN);
- I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) | PANEL_POWER_ON);
- POSTING_READ(lvds_encoder->reg);
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) | PANEL_POWER_ON);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
if (intel_de_wait_for_set(dev_priv, PP_STATUS(0), PP_ON, 5000))
- DRM_ERROR("timed out waiting for panel to power on\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power on\n");
intel_panel_enable_backlight(pipe_config, conn_state);
}
@@ -331,12 +330,15 @@ static void intel_disable_lvds(struct intel_encoder *encoder,
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- I915_WRITE(PP_CONTROL(0), I915_READ(PP_CONTROL(0)) & ~PANEL_POWER_ON);
+ intel_de_write(dev_priv, PP_CONTROL(0),
+ intel_de_read(dev_priv, PP_CONTROL(0)) & ~PANEL_POWER_ON);
if (intel_de_wait_for_clear(dev_priv, PP_STATUS(0), PP_ON, 1000))
- DRM_ERROR("timed out waiting for panel to power off\n");
+ drm_err(&dev_priv->drm,
+ "timed out waiting for panel to power off\n");
- I915_WRITE(lvds_encoder->reg, I915_READ(lvds_encoder->reg) & ~LVDS_PORT_EN);
- POSTING_READ(lvds_encoder->reg);
+ intel_de_write(dev_priv, lvds_encoder->reg,
+ intel_de_read(dev_priv, lvds_encoder->reg) & ~LVDS_PORT_EN);
+ intel_de_posting_read(dev_priv, lvds_encoder->reg);
}
static void gmch_disable_lvds(struct intel_encoder *encoder,
@@ -398,7 +400,7 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
/* Should never happen!! */
if (INTEL_GEN(dev_priv) < 4 && intel_crtc->pipe == 0) {
- DRM_ERROR("Can't support LVDS on pipe A\n");
+ drm_err(&dev_priv->drm, "Can't support LVDS on pipe A\n");
return -EINVAL;
}
@@ -408,8 +410,9 @@ static int intel_lvds_compute_config(struct intel_encoder *intel_encoder,
lvds_bpp = 6*3;
if (lvds_bpp != pipe_config->pipe_bpp && !pipe_config->bw_constrained) {
- DRM_DEBUG_KMS("forcing display bpp (was %d) to LVDS (%d)\n",
- pipe_config->pipe_bpp, lvds_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "forcing display bpp (was %d) to LVDS (%d)\n",
+ pipe_config->pipe_bpp, lvds_bpp);
pipe_config->pipe_bpp = lvds_bpp;
}
@@ -791,7 +794,7 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
* we need to check "the value to be set" in VBT when LVDS
* register is uninitialized.
*/
- val = I915_READ(lvds_encoder->reg);
+ val = intel_de_read(dev_priv, lvds_encoder->reg);
if (HAS_PCH_CPT(dev_priv))
val &= ~(LVDS_DETECTED | LVDS_PIPE_SEL_MASK_CPT);
else
@@ -827,13 +830,14 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
/* Skip init on machines we know falsely report LVDS */
if (dmi_check_system(intel_no_lvds)) {
- WARN(!dev_priv->vbt.int_lvds_support,
- "Useless DMI match. Internal LVDS support disabled by VBT\n");
+ drm_WARN(dev, !dev_priv->vbt.int_lvds_support,
+ "Useless DMI match. Internal LVDS support disabled by VBT\n");
return;
}
if (!dev_priv->vbt.int_lvds_support) {
- DRM_DEBUG_KMS("Internal LVDS support disabled by VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Internal LVDS support disabled by VBT\n");
return;
}
@@ -842,7 +846,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
else
lvds_reg = LVDS;
- lvds = I915_READ(lvds_reg);
+ lvds = intel_de_read(dev_priv, lvds_reg);
if (HAS_PCH_SPLIT(dev_priv)) {
if ((lvds & LVDS_DETECTED) == 0)
@@ -852,10 +856,12 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
pin = GMBUS_PIN_PANEL;
if (!intel_bios_is_lvds_present(dev_priv, &pin)) {
if ((lvds & LVDS_PORT_EN) == 0) {
- DRM_DEBUG_KMS("LVDS is not present in VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT\n");
return;
}
- DRM_DEBUG_KMS("LVDS is not present in VBT, but enabled anyway\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "LVDS is not present in VBT, but enabled anyway\n");
}
lvds_encoder = kzalloc(sizeof(*lvds_encoder), GFP_KERNEL);
@@ -969,7 +975,7 @@ void intel_lvds_init(struct drm_i915_private *dev_priv)
*/
fixed_mode = intel_encoder_current_mode(intel_encoder);
if (fixed_mode) {
- DRM_DEBUG_KMS("using current (BIOS) mode: ");
+ drm_dbg_kms(&dev_priv->drm, "using current (BIOS) mode: ");
drm_mode_debug_printmodeline(fixed_mode);
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
}
@@ -985,8 +991,8 @@ out:
intel_panel_setup_backlight(connector, INVALID_PIPE);
lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
- DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
- lvds_encoder->is_dual_link ? "dual" : "single");
+ drm_dbg_kms(&dev_priv->drm, "detected %s-link lvds configuration\n",
+ lvds_encoder->is_dual_link ? "dual" : "single");
lvds_encoder->a3_power = lvds & LVDS_A3_POWER_MASK;
@@ -995,7 +1001,7 @@ out:
failed:
mutex_unlock(&dev->mode_config.mutex);
- DRM_DEBUG_KMS("No LVDS modes found, disabling.\n");
+ drm_dbg_kms(&dev_priv->drm, "No LVDS modes found, disabling.\n");
drm_connector_cleanup(connector);
drm_encoder_cleanup(encoder);
kfree(lvds_encoder);
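
The intel_lvds.c hunks above are a mechanical conversion: every I915_READ()/I915_WRITE()/POSTING_READ(), which picked up a dev_priv from the enclosing scope implicitly, becomes an intel_de_read()/intel_de_write()/intel_de_posting_read() call that takes the device as an explicit first argument, and the DRM_DEBUG_*/DRM_ERROR messages gain a struct drm_device pointer so each log line identifies the GPU it came from. The following is a minimal user-space sketch of the accessor pattern only; fake_device, de_read() and de_write() are invented names standing in for the i915 helpers, not the real API.

	#include <stdint.h>
	#include <stdio.h>

	struct fake_device {
		uint32_t regs[16];	/* stand-in for the MMIO BAR */
	};

	static uint32_t de_read(struct fake_device *dev, unsigned int reg)
	{
		return dev->regs[reg];
	}

	static void de_write(struct fake_device *dev, unsigned int reg, uint32_t val)
	{
		dev->regs[reg] = val;
	}

	#define PP_CONTROL	0		/* toy register index, not the real offset */
	#define PANEL_POWER_ON	(1u << 0)

	int main(void)
	{
		struct fake_device dev = { .regs = { 0 } };

		/* read-modify-write with the device explicit, as in intel_enable_lvds() */
		de_write(&dev, PP_CONTROL,
			 de_read(&dev, PP_CONTROL) | PANEL_POWER_ON);
		printf("PP_CONTROL = 0x%x\n", (unsigned int)de_read(&dev, PP_CONTROL));
		return 0;
	}

Threading the device through explicitly is what lets one driver serve several GPUs in a system and lets the accessors be used outside functions that happen to define a dev_priv local.
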
diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c
index e59b4992ba1b..cc6b00959586 100644
--- a/drivers/gpu/drm/i915/display/intel_opregion.c
+++ b/drivers/gpu/drm/i915/display/intel_opregion.c
@@ -30,11 +30,10 @@
#include <linux/firmware.h>
#include <acpi/video.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_panel.h"
#include "i915_drv.h"
+#include "intel_acpi.h"
#include "intel_display_types.h"
#include "intel_opregion.h"
@@ -242,29 +241,6 @@ struct opregion_asle_ext {
#define SWSCI_SBCB_POST_VBE_PM SWSCI_FUNCTION_CODE(SWSCI_SBCB, 19)
#define SWSCI_SBCB_ENABLE_DISABLE_AUDIO SWSCI_FUNCTION_CODE(SWSCI_SBCB, 21)
-/*
- * ACPI Specification, Revision 5.0, Appendix B.3.2 _DOD (Enumerate All Devices
- * Attached to the Display Adapter).
- */
-#define ACPI_DISPLAY_INDEX_SHIFT 0
-#define ACPI_DISPLAY_INDEX_MASK (0xf << 0)
-#define ACPI_DISPLAY_PORT_ATTACHMENT_SHIFT 4
-#define ACPI_DISPLAY_PORT_ATTACHMENT_MASK (0xf << 4)
-#define ACPI_DISPLAY_TYPE_SHIFT 8
-#define ACPI_DISPLAY_TYPE_MASK (0xf << 8)
-#define ACPI_DISPLAY_TYPE_OTHER (0 << 8)
-#define ACPI_DISPLAY_TYPE_VGA (1 << 8)
-#define ACPI_DISPLAY_TYPE_TV (2 << 8)
-#define ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL (3 << 8)
-#define ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL (4 << 8)
-#define ACPI_VENDOR_SPECIFIC_SHIFT 12
-#define ACPI_VENDOR_SPECIFIC_MASK (0xf << 12)
-#define ACPI_BIOS_CAN_DETECT (1 << 16)
-#define ACPI_DEPENDS_ON_VGA (1 << 17)
-#define ACPI_PIPE_ID_SHIFT 18
-#define ACPI_PIPE_ID_MASK (7 << 18)
-#define ACPI_DEVICE_ID_SCHEME (1 << 31)
-
#define MAX_DSLP 1500
static int swsci(struct drm_i915_private *dev_priv,
@@ -311,7 +287,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* The spec tells us to do this, but we are the only user... */
scic = swsci->scic;
if (scic & SWSCI_SCIC_INDICATOR) {
- DRM_DEBUG_DRIVER("SWSCI request already in progress\n");
+ drm_dbg(&dev_priv->drm, "SWSCI request already in progress\n");
return -EBUSY;
}
@@ -335,7 +311,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Poll for the result. */
#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0)
if (wait_for(C, dslp)) {
- DRM_DEBUG_DRIVER("SWSCI request timed out\n");
+ drm_dbg(&dev_priv->drm, "SWSCI request timed out\n");
return -ETIMEDOUT;
}
@@ -344,7 +320,7 @@ static int swsci(struct drm_i915_private *dev_priv,
/* Note: scic == 0 is an error! */
if (scic != SWSCI_SCIC_EXIT_STATUS_SUCCESS) {
- DRM_DEBUG_DRIVER("SWSCI request error %u\n", scic);
+ drm_dbg(&dev_priv->drm, "SWSCI request error %u\n", scic);
return -EIO;
}
@@ -403,8 +379,9 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder,
type = DISPLAY_TYPE_INTERNAL_FLAT_PANEL;
break;
default:
- WARN_ONCE(1, "unsupported intel_encoder type %d\n",
- intel_encoder->type);
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "unsupported intel_encoder type %d\n",
+ intel_encoder->type);
return -EINVAL;
}
@@ -448,10 +425,11 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
struct opregion_asle *asle = dev_priv->opregion.asle;
struct drm_device *dev = &dev_priv->drm;
- DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
+ drm_dbg(&dev_priv->drm, "bclp = 0x%08x\n", bclp);
if (acpi_video_get_backlight_type() == acpi_backlight_native) {
- DRM_DEBUG_KMS("opregion backlight request ignored\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "opregion backlight request ignored\n");
return 0;
}
@@ -468,7 +446,8 @@ static u32 asle_set_backlight(struct drm_i915_private *dev_priv, u32 bclp)
* Update backlight on all connectors that support backlight (usually
* only one).
*/
- DRM_DEBUG_KMS("updating opregion backlight %d/255\n", bclp);
+ drm_dbg_kms(&dev_priv->drm, "updating opregion backlight %d/255\n",
+ bclp);
drm_connector_list_iter_begin(dev, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter)
intel_panel_set_backlight_acpi(connector->base.state, bclp, 255);
@@ -485,13 +464,13 @@ static u32 asle_set_als_illum(struct drm_i915_private *dev_priv, u32 alsi)
{
/* alsi is the current ALS reading in lux. 0 indicates below sensor
range, 0xffff indicates above sensor range. 1-0xfffe are valid */
- DRM_DEBUG_DRIVER("Illum is not supported\n");
+ drm_dbg(&dev_priv->drm, "Illum is not supported\n");
return ASLC_ALS_ILLUM_FAILED;
}
static u32 asle_set_pwm_freq(struct drm_i915_private *dev_priv, u32 pfmb)
{
- DRM_DEBUG_DRIVER("PWM freq is not supported\n");
+ drm_dbg(&dev_priv->drm, "PWM freq is not supported\n");
return ASLC_PWM_FREQ_FAILED;
}
@@ -499,30 +478,36 @@ static u32 asle_set_pfit(struct drm_i915_private *dev_priv, u32 pfit)
{
/* Panel fitting is currently controlled by the X code, so this is a
noop until modesetting support works fully */
- DRM_DEBUG_DRIVER("Pfit is not supported\n");
+ drm_dbg(&dev_priv->drm, "Pfit is not supported\n");
return ASLC_PFIT_FAILED;
}
static u32 asle_set_supported_rotation_angles(struct drm_i915_private *dev_priv, u32 srot)
{
- DRM_DEBUG_DRIVER("SROT is not supported\n");
+ drm_dbg(&dev_priv->drm, "SROT is not supported\n");
return ASLC_ROTATION_ANGLES_FAILED;
}
static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
{
if (!iuer)
- DRM_DEBUG_DRIVER("Button array event is not supported (nothing)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (nothing)\n");
if (iuer & ASLE_IUER_ROTATION_LOCK_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (rotation lock)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (rotation lock)\n");
if (iuer & ASLE_IUER_VOLUME_DOWN_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (volume down)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume down)\n");
if (iuer & ASLE_IUER_VOLUME_UP_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (volume up)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (volume up)\n");
if (iuer & ASLE_IUER_WINDOWS_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (windows)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (windows)\n");
if (iuer & ASLE_IUER_POWER_BTN)
- DRM_DEBUG_DRIVER("Button array event is not supported (power)\n");
+ drm_dbg(&dev_priv->drm,
+ "Button array event is not supported (power)\n");
return ASLC_BUTTON_ARRAY_FAILED;
}
@@ -530,9 +515,11 @@ static u32 asle_set_button_array(struct drm_i915_private *dev_priv, u32 iuer)
static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_CONVERTIBLE)
- DRM_DEBUG_DRIVER("Convertible is not supported (clamshell)\n");
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (clamshell)\n");
else
- DRM_DEBUG_DRIVER("Convertible is not supported (slate)\n");
+ drm_dbg(&dev_priv->drm,
+ "Convertible is not supported (slate)\n");
return ASLC_CONVERTIBLE_FAILED;
}
@@ -540,16 +527,17 @@ static u32 asle_set_convertible(struct drm_i915_private *dev_priv, u32 iuer)
static u32 asle_set_docking(struct drm_i915_private *dev_priv, u32 iuer)
{
if (iuer & ASLE_IUER_DOCKING)
- DRM_DEBUG_DRIVER("Docking is not supported (docked)\n");
+ drm_dbg(&dev_priv->drm, "Docking is not supported (docked)\n");
else
- DRM_DEBUG_DRIVER("Docking is not supported (undocked)\n");
+ drm_dbg(&dev_priv->drm,
+ "Docking is not supported (undocked)\n");
return ASLC_DOCKING_FAILED;
}
static u32 asle_isct_state(struct drm_i915_private *dev_priv)
{
- DRM_DEBUG_DRIVER("ISCT is not supported\n");
+ drm_dbg(&dev_priv->drm, "ISCT is not supported\n");
return ASLC_ISCT_STATE_FAILED;
}
@@ -569,8 +557,8 @@ static void asle_work(struct work_struct *work)
aslc_req = asle->aslc;
if (!(aslc_req & ASLC_REQ_MSK)) {
- DRM_DEBUG_DRIVER("No request on ASLC interrupt 0x%08x\n",
- aslc_req);
+ drm_dbg(&dev_priv->drm,
+ "No request on ASLC interrupt 0x%08x\n", aslc_req);
return;
}
@@ -662,54 +650,12 @@ static void set_did(struct intel_opregion *opregion, int i, u32 val)
}
}
-static u32 acpi_display_type(struct intel_connector *connector)
-{
- u32 display_type;
-
- switch (connector->base.connector_type) {
- case DRM_MODE_CONNECTOR_VGA:
- case DRM_MODE_CONNECTOR_DVIA:
- display_type = ACPI_DISPLAY_TYPE_VGA;
- break;
- case DRM_MODE_CONNECTOR_Composite:
- case DRM_MODE_CONNECTOR_SVIDEO:
- case DRM_MODE_CONNECTOR_Component:
- case DRM_MODE_CONNECTOR_9PinDIN:
- case DRM_MODE_CONNECTOR_TV:
- display_type = ACPI_DISPLAY_TYPE_TV;
- break;
- case DRM_MODE_CONNECTOR_DVII:
- case DRM_MODE_CONNECTOR_DVID:
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_HDMIA:
- case DRM_MODE_CONNECTOR_HDMIB:
- display_type = ACPI_DISPLAY_TYPE_EXTERNAL_DIGITAL;
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_eDP:
- case DRM_MODE_CONNECTOR_DSI:
- display_type = ACPI_DISPLAY_TYPE_INTERNAL_DIGITAL;
- break;
- case DRM_MODE_CONNECTOR_Unknown:
- case DRM_MODE_CONNECTOR_VIRTUAL:
- display_type = ACPI_DISPLAY_TYPE_OTHER;
- break;
- default:
- MISSING_CASE(connector->base.connector_type);
- display_type = ACPI_DISPLAY_TYPE_OTHER;
- break;
- }
-
- return display_type;
-}
-
static void intel_didl_outputs(struct drm_i915_private *dev_priv)
{
struct intel_opregion *opregion = &dev_priv->opregion;
struct intel_connector *connector;
struct drm_connector_list_iter conn_iter;
int i = 0, max_outputs;
- int display_index[16] = {};
/*
* In theory, did2, the extended didl, gets added at opregion version
@@ -721,29 +667,22 @@ static void intel_didl_outputs(struct drm_i915_private *dev_priv)
max_outputs = ARRAY_SIZE(opregion->acpi->didl) +
ARRAY_SIZE(opregion->acpi->did2);
+ intel_acpi_device_id_update(dev_priv);
+
drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
for_each_intel_connector_iter(connector, &conn_iter) {
- u32 device_id, type;
-
- device_id = acpi_display_type(connector);
-
- /* Use display type specific display index. */
- type = (device_id & ACPI_DISPLAY_TYPE_MASK)
- >> ACPI_DISPLAY_TYPE_SHIFT;
- device_id |= display_index[type]++ << ACPI_DISPLAY_INDEX_SHIFT;
-
- connector->acpi_device_id = device_id;
if (i < max_outputs)
- set_did(opregion, i, device_id);
+ set_did(opregion, i, connector->acpi_device_id);
i++;
}
drm_connector_list_iter_end(&conn_iter);
- DRM_DEBUG_KMS("%d outputs detected\n", i);
+ drm_dbg_kms(&dev_priv->drm, "%d outputs detected\n", i);
if (i > max_outputs)
- DRM_ERROR("More than %d outputs in connector list\n",
- max_outputs);
+ drm_err(&dev_priv->drm,
+ "More than %d outputs in connector list\n",
+ max_outputs);
/* If fewer than max outputs, the list must be null terminated */
if (i < max_outputs)
@@ -823,7 +762,9 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
if (requested_callbacks) {
u32 req = opregion->swsci_sbcb_sub_functions;
if ((req & tmp) != req)
- DRM_DEBUG_DRIVER("SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n", req, tmp);
+ drm_dbg(&dev_priv->drm,
+ "SWSCI BIOS requested (%08x) SBCB callbacks that are not supported (%08x)\n",
+ req, tmp);
/* XXX: for now, trust the requested callbacks */
/* opregion->swsci_sbcb_sub_functions &= tmp; */
} else {
@@ -831,9 +772,10 @@ static void swsci_setup(struct drm_i915_private *dev_priv)
}
}
- DRM_DEBUG_DRIVER("SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
- opregion->swsci_gbda_sub_functions,
- opregion->swsci_sbcb_sub_functions);
+ drm_dbg(&dev_priv->drm,
+ "SWSCI GBDA callbacks %08x, SBCB callbacks %08x\n",
+ opregion->swsci_gbda_sub_functions,
+ opregion->swsci_sbcb_sub_functions);
}
static int intel_no_opregion_vbt_callback(const struct dmi_system_id *id)
@@ -867,15 +809,17 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
ret = request_firmware(&fw, name, &dev_priv->drm.pdev->dev);
if (ret) {
- DRM_ERROR("Requesting VBT firmware \"%s\" failed (%d)\n",
- name, ret);
+ drm_err(&dev_priv->drm,
+ "Requesting VBT firmware \"%s\" failed (%d)\n",
+ name, ret);
return ret;
}
if (intel_bios_is_valid_vbt(fw->data, fw->size)) {
opregion->vbt_firmware = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (opregion->vbt_firmware) {
- DRM_DEBUG_KMS("Found valid VBT firmware \"%s\"\n", name);
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT firmware \"%s\"\n", name);
opregion->vbt = opregion->vbt_firmware;
opregion->vbt_size = fw->size;
ret = 0;
@@ -883,7 +827,8 @@ static int intel_load_vbt_firmware(struct drm_i915_private *dev_priv)
ret = -ENOMEM;
}
} else {
- DRM_DEBUG_KMS("Invalid VBT firmware \"%s\"\n", name);
+ drm_dbg_kms(&dev_priv->drm, "Invalid VBT firmware \"%s\"\n",
+ name);
ret = -EINVAL;
}
@@ -910,9 +855,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
BUILD_BUG_ON(sizeof(struct opregion_asle_ext) != 0x400);
pci_read_config_dword(pdev, ASLS, &asls);
- DRM_DEBUG_DRIVER("graphic opregion physical addr: 0x%x\n", asls);
+ drm_dbg(&dev_priv->drm, "graphic opregion physical addr: 0x%x\n",
+ asls);
if (asls == 0) {
- DRM_DEBUG_DRIVER("ACPI OpRegion not supported!\n");
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion not supported!\n");
return -ENOTSUPP;
}
@@ -925,21 +871,21 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
memcpy(buf, base, sizeof(buf));
if (memcmp(buf, OPREGION_SIGNATURE, 16)) {
- DRM_DEBUG_DRIVER("opregion signature mismatch\n");
+ drm_dbg(&dev_priv->drm, "opregion signature mismatch\n");
err = -EINVAL;
goto err_out;
}
opregion->header = base;
opregion->lid_state = base + ACPI_CLID;
- DRM_DEBUG_DRIVER("ACPI OpRegion version %u.%u.%u\n",
- opregion->header->over.major,
- opregion->header->over.minor,
- opregion->header->over.revision);
+ drm_dbg(&dev_priv->drm, "ACPI OpRegion version %u.%u.%u\n",
+ opregion->header->over.major,
+ opregion->header->over.minor,
+ opregion->header->over.revision);
mboxes = opregion->header->mboxes;
if (mboxes & MBOX_ACPI) {
- DRM_DEBUG_DRIVER("Public ACPI methods supported\n");
+ drm_dbg(&dev_priv->drm, "Public ACPI methods supported\n");
opregion->acpi = base + OPREGION_ACPI_OFFSET;
/*
* Indicate we handle monitor hotplug events ourselves so we do
@@ -951,20 +897,20 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
}
if (mboxes & MBOX_SWSCI) {
- DRM_DEBUG_DRIVER("SWSCI supported\n");
+ drm_dbg(&dev_priv->drm, "SWSCI supported\n");
opregion->swsci = base + OPREGION_SWSCI_OFFSET;
swsci_setup(dev_priv);
}
if (mboxes & MBOX_ASLE) {
- DRM_DEBUG_DRIVER("ASLE supported\n");
+ drm_dbg(&dev_priv->drm, "ASLE supported\n");
opregion->asle = base + OPREGION_ASLE_OFFSET;
opregion->asle->ardy = ASLE_ARDY_NOT_READY;
}
if (mboxes & MBOX_ASLE_EXT)
- DRM_DEBUG_DRIVER("ASLE extension supported\n");
+ drm_dbg(&dev_priv->drm, "ASLE extension supported\n");
if (intel_load_vbt_firmware(dev_priv) == 0)
goto out;
@@ -984,7 +930,7 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
*/
if (opregion->header->over.major > 2 ||
opregion->header->over.minor >= 1) {
- WARN_ON(rvda < OPREGION_SIZE);
+ drm_WARN_ON(&dev_priv->drm, rvda < OPREGION_SIZE);
rvda += asls;
}
@@ -995,12 +941,14 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
vbt = opregion->rvda;
vbt_size = opregion->asle->rvds;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (RVDA)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (RVDA)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
goto out;
} else {
- DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (RVDA)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (RVDA)\n");
memunmap(opregion->rvda);
opregion->rvda = NULL;
}
@@ -1018,11 +966,13 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv)
OPREGION_ASLE_EXT_OFFSET : OPREGION_SIZE;
vbt_size -= OPREGION_VBT_OFFSET;
if (intel_bios_is_valid_vbt(vbt, vbt_size)) {
- DRM_DEBUG_KMS("Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Found valid VBT in ACPI OpRegion (Mailbox #4)\n");
opregion->vbt = vbt;
opregion->vbt_size = vbt_size;
} else {
- DRM_DEBUG_KMS("Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid VBT in ACPI OpRegion (Mailbox #4)\n");
}
out:
@@ -1058,20 +1008,22 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
ret = swsci(dev_priv, SWSCI_GBDA_PANEL_DETAILS, 0x0, &panel_details);
if (ret) {
- DRM_DEBUG_KMS("Failed to get panel details from OpRegion (%d)\n",
- ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Failed to get panel details from OpRegion (%d)\n",
+ ret);
return ret;
}
ret = (panel_details >> 8) & 0xff;
if (ret > 0x10) {
- DRM_DEBUG_KMS("Invalid OpRegion panel type 0x%x\n", ret);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invalid OpRegion panel type 0x%x\n", ret);
return -EINVAL;
}
/* fall back to VBT panel type? */
if (ret == 0x0) {
- DRM_DEBUG_KMS("No panel type in OpRegion\n");
+ drm_dbg_kms(&dev_priv->drm, "No panel type in OpRegion\n");
return -ENODEV;
}
@@ -1081,7 +1033,8 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv)
* via a quirk list :(
*/
if (!dmi_check_system(intel_use_opregion_panel_type)) {
- DRM_DEBUG_KMS("Ignoring OpRegion panel type (%d)\n", ret - 1);
+ drm_dbg_kms(&dev_priv->drm,
+ "Ignoring OpRegion panel type (%d)\n", ret - 1);
return -ENODEV;
}
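
intel_opregion.c gets the same treatment on the logging side: DRM_DEBUG_DRIVER()/DRM_DEBUG_KMS() become drm_dbg()/drm_dbg_kms() with &dev_priv->drm as the first argument (the hunks also move the ACPI _DOD device-id computation out to intel_acpi_device_id_update(), so the connector walk only copies the precomputed connector->acpi_device_id into the DIDL slots). Below is a toy model of device-tagged logging, assuming only that the macro's job is to prefix each message with its originating device; toy_dev and toy_dbg are invented names, not the DRM API.

	#include <stdio.h>

	struct toy_dev {
		const char *name;
	};

	/* toy device-tagged debug print, modelled on the idea behind drm_dbg() */
	#define toy_dbg(dev, fmt, ...) \
		fprintf(stderr, "[drm:%s] " fmt, (dev)->name, ##__VA_ARGS__)

	int main(void)
	{
		struct toy_dev card0 = { .name = "card0" };
		struct toy_dev card1 = { .name = "card1" };

		/* on a multi-GPU box the prefix says which device spoke */
		toy_dbg(&card0, "ACPI OpRegion version %u.%u.%u\n", 2, 0, 0);
		toy_dbg(&card1, "SWSCI supported\n");
		return 0;
	}

##__VA_ARGS__ is the GNU C idiom the kernel itself relies on to swallow the comma when the macro is called with no variadic arguments.
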
diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c
index e40c3a0e2cd7..481187223101 100644
--- a/drivers/gpu/drm/i915/display/intel_overlay.c
+++ b/drivers/gpu/drm/i915/display/intel_overlay.c
@@ -27,7 +27,6 @@
*/
#include <drm/drm_fourcc.h>
-#include <drm/i915_drm.h>
#include "gem/i915_gem_pm.h"
#include "gt/intel_ring.h"
@@ -204,9 +203,10 @@ static void i830_overlay_clock_gating(struct drm_i915_private *dev_priv,
/* WA_OVERLAY_CLKGATE:alm */
if (enable)
- I915_WRITE(DSPCLK_GATE_D, 0);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, 0);
else
- I915_WRITE(DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE);
+ intel_de_write(dev_priv, DSPCLK_GATE_D,
+ OVRUNIT_CLOCK_GATE_DISABLE);
/* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */
pci_bus_read_config_byte(pdev->bus,
@@ -247,7 +247,7 @@ static int intel_overlay_on(struct intel_overlay *overlay)
struct i915_request *rq;
u32 *cs;
- WARN_ON(overlay->active);
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -315,15 +315,15 @@ static int intel_overlay_continue(struct intel_overlay *overlay,
u32 flip_addr = overlay->flip_addr;
u32 tmp, *cs;
- WARN_ON(!overlay->active);
+ drm_WARN_ON(&dev_priv->drm, !overlay->active);
if (load_polyphase_filter)
flip_addr |= OFC_UPDATE;
/* check for underruns */
- tmp = I915_READ(DOVSTA);
+ tmp = intel_de_read(dev_priv, DOVSTA);
if (tmp & (1 << 17))
- DRM_DEBUG("overlay underrun, DOVSTA: %x\n", tmp);
+ drm_dbg(&dev_priv->drm, "overlay underrun, DOVSTA: %x\n", tmp);
rq = alloc_request(overlay, NULL);
if (IS_ERR(rq))
@@ -456,7 +456,7 @@ static int intel_overlay_release_old_vid(struct intel_overlay *overlay)
if (!overlay->old_vma)
return 0;
- if (!(I915_READ(GEN2_ISR) & I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
+ if (!(intel_de_read(dev_priv, GEN2_ISR) &
+ I915_OVERLAY_PLANE_FLIP_PENDING_INTERRUPT)) {
intel_overlay_release_old_vid_tail(overlay);
return 0;
}
@@ -759,7 +759,8 @@ static int intel_overlay_do_put_image(struct intel_overlay *overlay,
struct i915_vma *vma;
int ret, tmp_width;
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_release_old_vid(overlay);
if (ret != 0)
@@ -857,7 +858,8 @@ int intel_overlay_switch_off(struct intel_overlay *overlay)
struct drm_i915_private *dev_priv = overlay->i915;
int ret;
- WARN_ON(!drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
+ drm_WARN_ON(&dev_priv->drm,
+ !drm_modeset_is_locked(&dev_priv->drm.mode_config.connection_mutex));
ret = intel_overlay_recover_from_interrupt(overlay);
if (ret != 0)
@@ -891,7 +893,7 @@ static int check_overlay_possible_on_crtc(struct intel_overlay *overlay,
static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
{
struct drm_i915_private *dev_priv = overlay->i915;
- u32 pfit_control = I915_READ(PFIT_CONTROL);
+ u32 pfit_control = intel_de_read(dev_priv, PFIT_CONTROL);
u32 ratio;
/* XXX: This is not the same logic as in the xorg driver, but more in
@@ -899,12 +901,12 @@ static void update_pfit_vscale_ratio(struct intel_overlay *overlay)
*/
if (INTEL_GEN(dev_priv) >= 4) {
/* on i965 use the PGM reg to read out the autoscaler values */
- ratio = I915_READ(PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS) >> PFIT_VERT_SCALE_SHIFT_965;
} else {
if (pfit_control & VERT_AUTO_SCALE)
- ratio = I915_READ(PFIT_AUTO_RATIOS);
+ ratio = intel_de_read(dev_priv, PFIT_AUTO_RATIOS);
else
- ratio = I915_READ(PFIT_PGM_RATIOS);
+ ratio = intel_de_read(dev_priv, PFIT_PGM_RATIOS);
ratio >>= PFIT_VERT_SCALE_SHIFT;
}
@@ -1066,7 +1068,7 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1090,7 +1092,8 @@ int intel_overlay_put_image_ioctl(struct drm_device *dev, void *data,
drm_modeset_lock_all(dev);
if (i915_gem_object_is_tiled(new_bo)) {
- DRM_DEBUG_KMS("buffer used for overlay image can not be tiled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "buffer used for overlay image can not be tiled\n");
ret = -EINVAL;
goto out_unlock;
}
@@ -1225,7 +1228,7 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
overlay = dev_priv->overlay;
if (!overlay) {
- DRM_DEBUG("userspace bug: no overlay\n");
+ drm_dbg(&dev_priv->drm, "userspace bug: no overlay\n");
return -ENODEV;
}
@@ -1239,12 +1242,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
attrs->saturation = overlay->saturation;
if (!IS_GEN(dev_priv, 2)) {
- attrs->gamma0 = I915_READ(OGAMC0);
- attrs->gamma1 = I915_READ(OGAMC1);
- attrs->gamma2 = I915_READ(OGAMC2);
- attrs->gamma3 = I915_READ(OGAMC3);
- attrs->gamma4 = I915_READ(OGAMC4);
- attrs->gamma5 = I915_READ(OGAMC5);
+ attrs->gamma0 = intel_de_read(dev_priv, OGAMC0);
+ attrs->gamma1 = intel_de_read(dev_priv, OGAMC1);
+ attrs->gamma2 = intel_de_read(dev_priv, OGAMC2);
+ attrs->gamma3 = intel_de_read(dev_priv, OGAMC3);
+ attrs->gamma4 = intel_de_read(dev_priv, OGAMC4);
+ attrs->gamma5 = intel_de_read(dev_priv, OGAMC5);
}
} else {
if (attrs->brightness < -128 || attrs->brightness > 127)
@@ -1274,12 +1277,12 @@ int intel_overlay_attrs_ioctl(struct drm_device *dev, void *data,
if (ret)
goto out_unlock;
- I915_WRITE(OGAMC0, attrs->gamma0);
- I915_WRITE(OGAMC1, attrs->gamma1);
- I915_WRITE(OGAMC2, attrs->gamma2);
- I915_WRITE(OGAMC3, attrs->gamma3);
- I915_WRITE(OGAMC4, attrs->gamma4);
- I915_WRITE(OGAMC5, attrs->gamma5);
+ intel_de_write(dev_priv, OGAMC0, attrs->gamma0);
+ intel_de_write(dev_priv, OGAMC1, attrs->gamma1);
+ intel_de_write(dev_priv, OGAMC2, attrs->gamma2);
+ intel_de_write(dev_priv, OGAMC3, attrs->gamma3);
+ intel_de_write(dev_priv, OGAMC4, attrs->gamma4);
+ intel_de_write(dev_priv, OGAMC5, attrs->gamma5);
}
}
overlay->color_key_enabled = (attrs->flags & I915_OVERLAY_DISABLE_DEST_COLORKEY) == 0;
@@ -1369,7 +1372,7 @@ void intel_overlay_setup(struct drm_i915_private *dev_priv)
update_reg_attrs(overlay, overlay->regs);
dev_priv->overlay = overlay;
- DRM_INFO("Initialized overlay support.\n");
+ drm_info(&dev_priv->drm, "Initialized overlay support.\n");
return;
out_free:
@@ -1389,7 +1392,7 @@ void intel_overlay_cleanup(struct drm_i915_private *dev_priv)
* Furthermore modesetting teardown happens beforehand so the
* hardware should be off already.
*/
- WARN_ON(overlay->active);
+ drm_WARN_ON(&dev_priv->drm, overlay->active);
i915_gem_object_put(overlay->reg_bo);
i915_active_fini(&overlay->last_flip);
@@ -1419,8 +1422,8 @@ intel_overlay_capture_error_state(struct drm_i915_private *dev_priv)
if (error == NULL)
return NULL;
- error->dovsta = I915_READ(DOVSTA);
- error->isr = I915_READ(GEN2_ISR);
+ error->dovsta = intel_de_read(dev_priv, DOVSTA);
+ error->isr = intel_de_read(dev_priv, GEN2_ISR);
error->base = overlay->flip_addr;
memcpy_fromio(&error->regs, overlay->regs, sizeof(error->regs));
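
Several of these conversions, in intel_enable_lvds() above and again in the backlight-enable paths of intel_panel.c below, pair a register write with intel_de_posting_read() before the enable bit is set. A posting read exists because MMIO writes can be posted (buffered) by the interconnect; reading any register of the same device back forces the earlier writes to land before execution continues. Here is a self-contained sketch of that idiom under those assumptions, with a volatile pointer standing in for a real MMIO mapping (on which the read would actually drain the write buffer):

	#include <stdint.h>
	#include <stdio.h>

	static inline void posting_read(const volatile uint32_t *reg)
	{
		(void)*reg;	/* read and discard; on real MMIO this flushes posted writes */
	}

	int main(void)
	{
		uint32_t backing = 0;
		volatile uint32_t *ctl = &backing;

		*ctl = 0x1;		/* program the control value ... */
		posting_read(ctl);	/* ... force it to reach the device ... */
		*ctl |= 0x80000000u;	/* ... only then flip the enable bit */
		printf("ctl = 0x%x\n", (unsigned int)*ctl);
		return 0;
	}
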
diff --git a/drivers/gpu/drm/i915/display/intel_panel.c b/drivers/gpu/drm/i915/display/intel_panel.c
index 7b3ec6eb3382..276f43870802 100644
--- a/drivers/gpu/drm/i915/display/intel_panel.c
+++ b/drivers/gpu/drm/i915/display/intel_panel.c
@@ -96,8 +96,9 @@ intel_panel_edid_downclock_mode(struct intel_connector *connector,
if (!downclock_mode)
return NULL;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using downclock mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using downclock mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(downclock_mode);
return downclock_mode;
@@ -122,8 +123,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
if (!fixed_mode)
return NULL;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using preferred mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using preferred mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
@@ -138,8 +140,9 @@ intel_panel_edid_fixed_mode(struct intel_connector *connector)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using first mode from EDID: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm,
+ "[CONNECTOR:%d:%s] using first mode from EDID: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
return fixed_mode;
@@ -162,8 +165,8 @@ intel_panel_vbt_fixed_mode(struct intel_connector *connector)
fixed_mode->type |= DRM_MODE_TYPE_PREFERRED;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s] using mode from VBT: ",
- connector->base.base.id, connector->base.name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s] using mode from VBT: ",
+ connector->base.base.id, connector->base.name);
drm_mode_debug_printmodeline(fixed_mode);
info->width_mm = fixed_mode->width_mm;
@@ -423,15 +426,15 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
}
break;
default:
- WARN(1, "bad panel fit mode: %d\n", fitting_mode);
+ drm_WARN(&dev_priv->drm, 1, "bad panel fit mode: %d\n",
+ fitting_mode);
return;
}
/* 965+ wants fuzzy fitting */
/* FIXME: handle multiple panels by failing gracefully */
if (INTEL_GEN(dev_priv) >= 4)
- pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
- PFIT_FILTER_FUZZY);
+ pfit_control |= PFIT_PIPE(intel_crtc->pipe) | PFIT_FILTER_FUZZY;
out:
if ((pfit_control & PFIT_ENABLE) == 0) {
@@ -520,7 +523,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (i915_modparams.invert_brightness < 0)
return val;
@@ -537,14 +540,14 @@ static u32 lpt_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return I915_READ(BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 pch_get_backlight(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return I915_READ(BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 i9xx_get_backlight(struct intel_connector *connector)
@@ -553,7 +556,7 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
u32 val;
- val = I915_READ(BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
+ val = intel_de_read(dev_priv, BLC_PWM_CTL) & BACKLIGHT_DUTY_CYCLE_MASK;
if (INTEL_GEN(dev_priv) < 4)
val >>= 1;
@@ -569,10 +572,10 @@ static u32 i9xx_get_backlight(struct intel_connector *connector)
static u32 _vlv_get_backlight(struct drm_i915_private *dev_priv, enum pipe pipe)
{
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return 0;
- return I915_READ(VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
+ return intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & BACKLIGHT_DUTY_CYCLE_MASK;
}
static u32 vlv_get_backlight(struct intel_connector *connector)
@@ -588,7 +591,8 @@ static u32 bxt_get_backlight(struct intel_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- return I915_READ(BXT_BLC_PWM_DUTY(panel->backlight.controller));
+ return intel_de_read(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller));
}
static u32 pwm_get_backlight(struct intel_connector *connector)
@@ -605,8 +609,8 @@ static void lpt_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_connector *connector = to_intel_connector(conn_state->connector);
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- u32 val = I915_READ(BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_PCH_CTL2, val | level);
+ u32 val = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, val | level);
}
static void pch_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -615,8 +619,8 @@ static void pch_set_backlight(const struct drm_connector_state *conn_state, u32
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
u32 tmp;
- tmp = I915_READ(BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(BLC_PWM_CPU_CTL, tmp | level);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL, tmp | level);
}
static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -626,7 +630,7 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
struct intel_panel *panel = &connector->panel;
u32 tmp, mask;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
if (panel->backlight.combination_mode) {
u8 lbpc;
@@ -643,8 +647,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32
mask = BACKLIGHT_DUTY_CYCLE_MASK_PNV;
}
- tmp = I915_READ(BLC_PWM_CTL) & ~mask;
- I915_WRITE(BLC_PWM_CTL, tmp | level);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL) & ~mask;
+ intel_de_write(dev_priv, BLC_PWM_CTL, tmp | level);
}
static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -654,8 +658,8 @@ static void vlv_set_backlight(const struct drm_connector_state *conn_state, u32
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 tmp;
- tmp = I915_READ(VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), tmp | level);
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe)) & ~BACKLIGHT_DUTY_CYCLE_MASK;
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), tmp | level);
}
static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -664,7 +668,8 @@ static void bxt_set_backlight(const struct drm_connector_state *conn_state, u32
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
struct intel_panel *panel = &connector->panel;
- I915_WRITE(BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_DUTY(panel->backlight.controller), level);
}
static void pwm_set_backlight(const struct drm_connector_state *conn_state, u32 level)
@@ -709,7 +714,7 @@ void intel_panel_set_backlight_acpi(const struct drm_connector_state *conn_state
mutex_lock(&dev_priv->backlight_lock);
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
hw_level = clamp_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -742,14 +747,16 @@ static void lpt_disable_backlight(const struct drm_connector_state *old_conn_sta
* This needs rework if we need to add support for CPU PWM on PCH split
* platforms.
*/
- tmp = I915_READ(BLC_PWM_CPU_CTL2);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (tmp & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("cpu backlight was enabled, disabling\n");
- I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ drm_dbg_kms(&dev_priv->drm,
+ "cpu backlight was enabled, disabling\n");
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ tmp & ~BLM_PWM_ENABLE);
}
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void pch_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -760,11 +767,11 @@ static void pch_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BLC_PWM_CPU_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, tmp & ~BLM_PWM_ENABLE);
- tmp = I915_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, tmp & ~BLM_PCH_PWM_ENABLE);
}
static void i9xx_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -779,8 +786,8 @@ static void i965_disable_backlight(const struct drm_connector_state *old_conn_st
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, tmp & ~BLM_PWM_ENABLE);
}
static void vlv_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -792,8 +799,9 @@ static void vlv_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(VLV_BLC_PWM_CTL2(pipe));
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), tmp & ~BLM_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ tmp & ~BLM_PWM_ENABLE);
}
static void bxt_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -805,14 +813,15 @@ static void bxt_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
val &= ~UTIL_PIN_ENABLE;
- I915_WRITE(UTIL_PIN_CTL, val);
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
}
}
@@ -825,9 +834,10 @@ static void cnp_disable_backlight(const struct drm_connector_state *old_conn_sta
intel_panel_actually_set_backlight(old_conn_state, 0);
- tmp = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- tmp & ~BXT_BLC_PWM_ENABLE);
+ tmp = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ tmp & ~BXT_BLC_PWM_ENABLE);
}
static void pwm_disable_backlight(const struct drm_connector_state *old_conn_state)
@@ -857,7 +867,8 @@ void intel_panel_disable_backlight(const struct drm_connector_state *old_conn_st
* another client is not activated.
*/
if (dev_priv->drm.switch_power_state == DRM_SWITCH_POWER_CHANGING) {
- DRM_DEBUG_DRIVER("Skipping backlight disable on vga switch\n");
+ drm_dbg(&dev_priv->drm,
+ "Skipping backlight disable on vga switch\n");
return;
}
@@ -879,31 +890,31 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 pch_ctl1, pch_ctl2, schicken;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- DRM_DEBUG_KMS("pch backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (HAS_PCH_LPT(dev_priv)) {
- schicken = I915_READ(SOUTH_CHICKEN2);
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN2);
if (panel->backlight.alternate_pwm_increment)
schicken |= LPT_PWM_GRANULARITY;
else
schicken &= ~LPT_PWM_GRANULARITY;
- I915_WRITE(SOUTH_CHICKEN2, schicken);
+ intel_de_write(dev_priv, SOUTH_CHICKEN2, schicken);
} else {
- schicken = I915_READ(SOUTH_CHICKEN1);
+ schicken = intel_de_read(dev_priv, SOUTH_CHICKEN1);
if (panel->backlight.alternate_pwm_increment)
schicken |= SPT_PWM_GRANULARITY;
else
schicken &= ~SPT_PWM_GRANULARITY;
- I915_WRITE(SOUTH_CHICKEN1, schicken);
+ intel_de_write(dev_priv, SOUTH_CHICKEN1, schicken);
}
pch_ctl2 = panel->backlight.max << 16;
- I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
@@ -913,9 +924,10 @@ static void lpt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (HAS_PCH_LPT(dev_priv))
pch_ctl1 |= BLM_PCH_OVERRIDE_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
- POSTING_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -930,41 +942,42 @@ static void pch_enable_backlight(const struct intel_crtc_state *crtc_state,
enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
u32 cpu_ctl2, pch_ctl1, pch_ctl2;
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (cpu_ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("cpu backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "cpu backlight already enabled\n");
cpu_ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
}
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
if (pch_ctl1 & BLM_PCH_PWM_ENABLE) {
- DRM_DEBUG_KMS("pch backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "pch backlight already enabled\n");
pch_ctl1 &= ~BLM_PCH_PWM_ENABLE;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
}
if (cpu_transcoder == TRANSCODER_EDP)
cpu_ctl2 = BLM_TRANSCODER_EDP;
else
cpu_ctl2 = BLM_PIPE(cpu_transcoder);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2);
- POSTING_READ(BLC_PWM_CPU_CTL2);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CPU_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2, cpu_ctl2 | BLM_PWM_ENABLE);
/* This won't stick until the above enable. */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
pch_ctl2 = panel->backlight.max << 16;
- I915_WRITE(BLC_PWM_PCH_CTL2, pch_ctl2);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL2, pch_ctl2);
pch_ctl1 = 0;
if (panel->backlight.active_low_pwm)
pch_ctl1 |= BLM_PCH_POLARITY;
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1);
- POSTING_READ(BLC_PWM_PCH_CTL1);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1, pch_ctl1);
+ intel_de_posting_read(dev_priv, BLC_PWM_PCH_CTL1);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_PWM_ENABLE);
}
static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -975,10 +988,10 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 ctl, freq;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
if (ctl & BACKLIGHT_DUTY_CYCLE_MASK_PNV) {
- DRM_DEBUG_KMS("backlight already enabled\n");
- I915_WRITE(BLC_PWM_CTL, 0);
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
+ intel_de_write(dev_priv, BLC_PWM_CTL, 0);
}
freq = panel->backlight.max;
@@ -991,8 +1004,8 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
if (IS_PINEVIEW(dev_priv) && panel->backlight.active_low_pwm)
ctl |= BLM_POLARITY_PNV;
- I915_WRITE(BLC_PWM_CTL, ctl);
- POSTING_READ(BLC_PWM_CTL);
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1003,7 +1016,7 @@ static void i9xx_enable_backlight(const struct intel_crtc_state *crtc_state,
* that has backlight.
*/
if (IS_GEN(dev_priv, 2))
- I915_WRITE(BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
+ intel_de_write(dev_priv, BLC_HIST_CTL, BLM_HISTOGRAM_ENABLE);
}
static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1015,11 +1028,11 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
enum pipe pipe = to_intel_crtc(conn_state->crtc)->pipe;
u32 ctl, ctl2, freq;
- ctl2 = I915_READ(BLC_PWM_CTL2);
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
if (ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(BLC_PWM_CTL2, ctl2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
}
freq = panel->backlight.max;
@@ -1027,16 +1040,16 @@ static void i965_enable_backlight(const struct intel_crtc_state *crtc_state,
freq /= 0xff;
ctl = freq << 16;
- I915_WRITE(BLC_PWM_CTL, ctl);
+ intel_de_write(dev_priv, BLC_PWM_CTL, ctl);
ctl2 = BLM_PIPE(pipe);
if (panel->backlight.combination_mode)
ctl2 |= BLM_COMBINATION_MODE;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- I915_WRITE(BLC_PWM_CTL2, ctl2);
- POSTING_READ(BLC_PWM_CTL2);
- I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2);
+ intel_de_posting_read(dev_priv, BLC_PWM_CTL2);
+ intel_de_write(dev_priv, BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
}
@@ -1050,15 +1063,15 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
enum pipe pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
u32 ctl, ctl2;
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
if (ctl2 & BLM_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
ctl2 &= ~BLM_PWM_ENABLE;
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
}
ctl = panel->backlight.max << 16;
- I915_WRITE(VLV_BLC_PWM_CTL(pipe), ctl);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL(pipe), ctl);
/* XXX: combine this into above write? */
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1066,9 +1079,10 @@ static void vlv_enable_backlight(const struct intel_crtc_state *crtc_state,
ctl2 = 0;
if (panel->backlight.active_low_pwm)
ctl2 |= BLM_POLARITY_I965;
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2);
- POSTING_READ(VLV_BLC_PWM_CTL2(pipe));
- I915_WRITE(VLV_BLC_PWM_CTL2(pipe), ctl2 | BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe), ctl2);
+ intel_de_posting_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
+ intel_de_write(dev_priv, VLV_BLC_PWM_CTL2(pipe),
+ ctl2 | BLM_PWM_ENABLE);
}
static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1082,30 +1096,34 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
if (val & UTIL_PIN_ENABLE) {
- DRM_DEBUG_KMS("util pin already enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "util pin already enabled\n");
val &= ~UTIL_PIN_ENABLE;
- I915_WRITE(UTIL_PIN_CTL, val);
+ intel_de_write(dev_priv, UTIL_PIN_CTL, val);
}
val = 0;
if (panel->backlight.util_pin_active_low)
val |= UTIL_PIN_POLARITY;
- I915_WRITE(UTIL_PIN_CTL, val | UTIL_PIN_PIPE(pipe) |
- UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
+ intel_de_write(dev_priv, UTIL_PIN_CTL,
+ val | UTIL_PIN_PIPE(pipe) | UTIL_PIN_MODE_PWM | UTIL_PIN_ENABLE);
}
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
}
- I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1113,10 +1131,12 @@ static void bxt_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1127,16 +1147,19 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
struct intel_panel *panel = &connector->panel;
u32 pwm_ctl;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
if (pwm_ctl & BXT_BLC_PWM_ENABLE) {
- DRM_DEBUG_KMS("backlight already enabled\n");
+ drm_dbg_kms(&dev_priv->drm, "backlight already enabled\n");
pwm_ctl &= ~BXT_BLC_PWM_ENABLE;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
}
- I915_WRITE(BXT_BLC_PWM_FREQ(panel->backlight.controller),
- panel->backlight.max);
+ intel_de_write(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller),
+ panel->backlight.max);
intel_panel_actually_set_backlight(conn_state, panel->backlight.level);
@@ -1144,10 +1167,12 @@ static void cnp_enable_backlight(const struct intel_crtc_state *crtc_state,
if (panel->backlight.active_low_pwm)
pwm_ctl |= BXT_BLC_PWM_POLARITY;
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller), pwm_ctl);
- POSTING_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
- I915_WRITE(BXT_BLC_PWM_CTL(panel->backlight.controller),
- pwm_ctl | BXT_BLC_PWM_ENABLE);
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl);
+ intel_de_posting_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
+ intel_de_write(dev_priv, BXT_BLC_PWM_CTL(panel->backlight.controller),
+ pwm_ctl | BXT_BLC_PWM_ENABLE);
}
static void pwm_enable_backlight(const struct intel_crtc_state *crtc_state,
@@ -1194,7 +1219,7 @@ void intel_panel_enable_backlight(const struct intel_crtc_state *crtc_state,
if (!panel->backlight.present)
return;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(pipe));
mutex_lock(&dev_priv->backlight_lock);
@@ -1219,7 +1244,7 @@ static u32 intel_panel_get_backlight(struct intel_connector *connector)
mutex_unlock(&dev_priv->backlight_lock);
- DRM_DEBUG_DRIVER("get backlight PWM = %d\n", val);
+ drm_dbg(&dev_priv->drm, "get backlight PWM = %d\n", val);
return val;
}
@@ -1237,7 +1262,7 @@ static void intel_panel_set_backlight(const struct drm_connector_state *conn_sta
mutex_lock(&dev_priv->backlight_lock);
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
hw_level = scale_user_to_hw(connector, user_level, user_max);
panel->backlight.level = hw_level;
@@ -1380,7 +1405,8 @@ static u32 cnp_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz);
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz);
}
/*
@@ -1441,7 +1467,8 @@ static u32 pch_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- return DIV_ROUND_CLOSEST(KHz(dev_priv->rawclk_freq), pwm_freq_hz * 128);
+ return DIV_ROUND_CLOSEST(KHz(RUNTIME_INFO(dev_priv)->rawclk_freq),
+ pwm_freq_hz * 128);
}
/*
@@ -1458,7 +1485,7 @@ static u32 i9xx_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_PINEVIEW(dev_priv))
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
clock = KHz(dev_priv->cdclk.hw.cdclk);
@@ -1476,7 +1503,7 @@ static u32 i965_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
int clock;
if (IS_G4X(dev_priv))
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
else
clock = KHz(dev_priv->cdclk.hw.cdclk);
@@ -1493,14 +1520,14 @@ static u32 vlv_hz_to_pwm(struct intel_connector *connector, u32 pwm_freq_hz)
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
int mul, clock;
- if ((I915_READ(CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
+ if ((intel_de_read(dev_priv, CBR1_VLV) & CBR_PWM_CLOCK_MUX_SELECT) == 0) {
if (IS_CHERRYVIEW(dev_priv))
clock = KHz(19200);
else
clock = MHz(25);
mul = 16;
} else {
- clock = KHz(dev_priv->rawclk_freq);
+ clock = KHz(RUNTIME_INFO(dev_priv)->rawclk_freq);
mul = 128;
}
@@ -1515,22 +1542,26 @@ static u32 get_backlight_max_vbt(struct intel_connector *connector)
u32 pwm;
if (!panel->backlight.hz_to_pwm) {
- DRM_DEBUG_KMS("backlight frequency conversion not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion not supported\n");
return 0;
}
if (pwm_freq_hz) {
- DRM_DEBUG_KMS("VBT defined backlight frequency %u Hz\n",
- pwm_freq_hz);
+ drm_dbg_kms(&dev_priv->drm,
+ "VBT defined backlight frequency %u Hz\n",
+ pwm_freq_hz);
} else {
pwm_freq_hz = 200;
- DRM_DEBUG_KMS("default backlight frequency %u Hz\n",
- pwm_freq_hz);
+ drm_dbg_kms(&dev_priv->drm,
+ "default backlight frequency %u Hz\n",
+ pwm_freq_hz);
}
pwm = panel->backlight.hz_to_pwm(connector, pwm_freq_hz);
if (!pwm) {
- DRM_DEBUG_KMS("backlight frequency conversion failed\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "backlight frequency conversion failed\n");
return 0;
}
@@ -1546,7 +1577,7 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
struct intel_panel *panel = &connector->panel;
int min;
- WARN_ON(panel->backlight.max == 0);
+ drm_WARN_ON(&dev_priv->drm, panel->backlight.max == 0);
/*
* XXX: If the vbt value is 255, it makes min equal to max, which leads
@@ -1557,8 +1588,9 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
*/
min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
if (min != dev_priv->vbt.backlight.min_brightness) {
- DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
- dev_priv->vbt.backlight.min_brightness, min);
+ drm_dbg_kms(&dev_priv->drm,
+ "clamping VBT min backlight %d/255 to %d/255\n",
+ dev_priv->vbt.backlight.min_brightness, min);
}
/* vbt value is a coefficient in range [0..255] */
@@ -1573,18 +1605,18 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
bool alt, cpu_mode;
if (HAS_PCH_LPT(dev_priv))
- alt = I915_READ(SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN2) & LPT_PWM_GRANULARITY;
else
- alt = I915_READ(SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
+ alt = intel_de_read(dev_priv, SOUTH_CHICKEN1) & SPT_PWM_GRANULARITY;
panel->backlight.alternate_pwm_increment = alt;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1608,13 +1640,16 @@ static int lpt_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.max);
if (cpu_mode) {
- DRM_DEBUG_KMS("CPU backlight register was enabled, switching to PCH override\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "CPU backlight register was enabled, switching to PCH override\n");
/* Write converted CPU PWM value to PCH override register */
lpt_set_backlight(connector->base.state, panel->backlight.level);
- I915_WRITE(BLC_PWM_PCH_CTL1, pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_PCH_CTL1,
+ pch_ctl1 | BLM_PCH_OVERRIDE_ENABLE);
- I915_WRITE(BLC_PWM_CPU_CTL2, cpu_ctl2 & ~BLM_PWM_ENABLE);
+ intel_de_write(dev_priv, BLC_PWM_CPU_CTL2,
+ cpu_ctl2 & ~BLM_PWM_ENABLE);
}
return 0;
@@ -1626,10 +1661,10 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
struct intel_panel *panel = &connector->panel;
u32 cpu_ctl2, pch_ctl1, pch_ctl2, val;
- pch_ctl1 = I915_READ(BLC_PWM_PCH_CTL1);
+ pch_ctl1 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL1);
panel->backlight.active_low_pwm = pch_ctl1 & BLM_PCH_POLARITY;
- pch_ctl2 = I915_READ(BLC_PWM_PCH_CTL2);
+ pch_ctl2 = intel_de_read(dev_priv, BLC_PWM_PCH_CTL2);
panel->backlight.max = pch_ctl2 >> 16;
if (!panel->backlight.max)
@@ -1645,7 +1680,7 @@ static int pch_setup_backlight(struct intel_connector *connector, enum pipe unus
panel->backlight.level = clamp(val, panel->backlight.min,
panel->backlight.max);
- cpu_ctl2 = I915_READ(BLC_PWM_CPU_CTL2);
+ cpu_ctl2 = intel_de_read(dev_priv, BLC_PWM_CPU_CTL2);
panel->backlight.enabled = (cpu_ctl2 & BLM_PWM_ENABLE) &&
(pch_ctl1 & BLM_PCH_PWM_ENABLE);
@@ -1658,7 +1693,7 @@ static int i9xx_setup_backlight(struct intel_connector *connector, enum pipe unu
struct intel_panel *panel = &connector->panel;
u32 ctl, val;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
if (IS_GEN(dev_priv, 2) || IS_I915GM(dev_priv) || IS_I945GM(dev_priv))
panel->backlight.combination_mode = ctl & BLM_LEGACY_MODE;
@@ -1697,11 +1732,11 @@ static int i965_setup_backlight(struct intel_connector *connector, enum pipe unu
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
- ctl2 = I915_READ(BLC_PWM_CTL2);
+ ctl2 = intel_de_read(dev_priv, BLC_PWM_CTL2);
panel->backlight.combination_mode = ctl2 & BLM_COMBINATION_MODE;
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(BLC_PWM_CTL);
+ ctl = intel_de_read(dev_priv, BLC_PWM_CTL);
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
@@ -1731,13 +1766,13 @@ static int vlv_setup_backlight(struct intel_connector *connector, enum pipe pipe
struct intel_panel *panel = &connector->panel;
u32 ctl, ctl2, val;
- if (WARN_ON(pipe != PIPE_A && pipe != PIPE_B))
+ if (drm_WARN_ON(&dev_priv->drm, pipe != PIPE_A && pipe != PIPE_B))
return -ENODEV;
- ctl2 = I915_READ(VLV_BLC_PWM_CTL2(pipe));
+ ctl2 = intel_de_read(dev_priv, VLV_BLC_PWM_CTL2(pipe));
panel->backlight.active_low_pwm = ctl2 & BLM_POLARITY_I965;
- ctl = I915_READ(VLV_BLC_PWM_CTL(pipe));
+ ctl = intel_de_read(dev_priv, VLV_BLC_PWM_CTL(pipe));
panel->backlight.max = ctl >> 16;
if (!panel->backlight.max)
@@ -1767,18 +1802,20 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
panel->backlight.controller = dev_priv->vbt.backlight.controller;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
/* Controller 1 uses the utility pin. */
if (panel->backlight.controller == 1) {
- val = I915_READ(UTIL_PIN_CTL);
+ val = intel_de_read(dev_priv, UTIL_PIN_CTL);
panel->backlight.util_pin_active_low =
val & UTIL_PIN_POLARITY;
}
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
- I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1812,11 +1849,13 @@ cnp_setup_backlight(struct intel_connector *connector, enum pipe unused)
*/
panel->backlight.controller = 0;
- pwm_ctl = I915_READ(BXT_BLC_PWM_CTL(panel->backlight.controller));
+ pwm_ctl = intel_de_read(dev_priv,
+ BXT_BLC_PWM_CTL(panel->backlight.controller));
panel->backlight.active_low_pwm = pwm_ctl & BXT_BLC_PWM_POLARITY;
panel->backlight.max =
- I915_READ(BXT_BLC_PWM_FREQ(panel->backlight.controller));
+ intel_de_read(dev_priv,
+ BXT_BLC_PWM_FREQ(panel->backlight.controller));
if (!panel->backlight.max)
panel->backlight.max = get_backlight_max_vbt(connector);
@@ -1843,6 +1882,7 @@ static int pwm_setup_backlight(struct intel_connector *connector,
struct drm_i915_private *dev_priv = to_i915(dev);
struct intel_panel *panel = &connector->panel;
const char *desc;
+ u32 level, ns;
int retval;
/* Get the right PWM chip for DSI backlight according to VBT */
@@ -1855,7 +1895,8 @@ static int pwm_setup_backlight(struct intel_connector *connector,
}
if (IS_ERR(panel->backlight.pwm)) {
- DRM_ERROR("Failed to get the %s PWM chip\n", desc);
+ drm_err(&dev_priv->drm, "Failed to get the %s PWM chip\n",
+ desc);
panel->backlight.pwm = NULL;
return -ENODEV;
}
@@ -1866,23 +1907,27 @@ static int pwm_setup_backlight(struct intel_connector *connector,
*/
pwm_apply_args(panel->backlight.pwm);
- retval = pwm_config(panel->backlight.pwm, CRC_PMIC_PWM_PERIOD_NS,
- CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.min = 0; /* 0% */
+ panel->backlight.max = 100; /* 100% */
+ level = intel_panel_compute_brightness(connector, 100);
+ ns = DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
+
+ retval = pwm_config(panel->backlight.pwm, ns, CRC_PMIC_PWM_PERIOD_NS);
if (retval < 0) {
- DRM_ERROR("Failed to configure the pwm chip\n");
+ drm_err(&dev_priv->drm, "Failed to configure the pwm chip\n");
pwm_put(panel->backlight.pwm);
panel->backlight.pwm = NULL;
return retval;
}
- panel->backlight.min = 0; /* 0% */
- panel->backlight.max = 100; /* 100% */
- panel->backlight.level = DIV_ROUND_UP(
- pwm_get_duty_cycle(panel->backlight.pwm) * 100,
- CRC_PMIC_PWM_PERIOD_NS);
+ level = DIV_ROUND_UP(pwm_get_duty_cycle(panel->backlight.pwm) * 100,
+ CRC_PMIC_PWM_PERIOD_NS);
+ panel->backlight.level =
+ intel_panel_compute_brightness(connector, level);
panel->backlight.enabled = panel->backlight.level != 0;
- DRM_INFO("Using %s PWM for LCD backlight control\n", desc);
+ drm_info(&dev_priv->drm, "Using %s PWM for LCD backlight control\n",
+ desc);
return 0;
}
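The reordering above matters because the initial pwm_config() now goes through intel_panel_compute_brightness(), so an inverted-brightness quirk is honoured before the first duty cycle is programmed. A minimal sketch of the two conversions; the helper names are hypothetical and the period value is assumed from this file's CRC_PMIC_PWM_PERIOD_NS definition:

/* Assumed from this driver: CRC_PMIC_PWM_PERIOD_NS == 21333 (~46.9 kHz). */

/* user level (0-100 %) -> PWM duty cycle in ns, rounding up */
static u32 percent_to_duty_ns(u32 level)
{
	return DIV_ROUND_UP(level * CRC_PMIC_PWM_PERIOD_NS, 100);
}

/* PWM duty cycle in ns -> user level (0-100 %) */
static u32 duty_ns_to_percent(u32 ns)
{
	return DIV_ROUND_UP(ns * 100, CRC_PMIC_PWM_PERIOD_NS);
}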
@@ -1913,15 +1958,17 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
if (!dev_priv->vbt.backlight.present) {
if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
- DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT, but present per quirk\n");
} else {
- DRM_DEBUG_KMS("no backlight present per VBT\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "no backlight present per VBT\n");
return 0;
}
}
/* ensure intel_panel has been initialized first */
- if (WARN_ON(!panel->backlight.setup))
+ if (drm_WARN_ON(&dev_priv->drm, !panel->backlight.setup))
return -ENODEV;
/* set level and max in panel struct */
@@ -1930,17 +1977,19 @@ int intel_panel_setup_backlight(struct drm_connector *connector, enum pipe pipe)
mutex_unlock(&dev_priv->backlight_lock);
if (ret) {
- DRM_DEBUG_KMS("failed to setup backlight for connector %s\n",
- connector->name);
+ drm_dbg_kms(&dev_priv->drm,
+ "failed to setup backlight for connector %s\n",
+ connector->name);
return ret;
}
panel->backlight.present = true;
- DRM_DEBUG_KMS("Connector %s backlight initialized, %s, brightness %u/%u\n",
- connector->name,
- enableddisabled(panel->backlight.enabled),
- panel->backlight.level, panel->backlight.max);
+ drm_dbg_kms(&dev_priv->drm,
+ "Connector %s backlight initialized, %s, brightness %u/%u\n",
+ connector->name,
+ enableddisabled(panel->backlight.enabled),
+ panel->backlight.level, panel->backlight.max);
return 0;
}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.c b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
index 520408e83681..a9a5df2fee4d 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.c
@@ -110,8 +110,8 @@ static int i9xx_pipe_crc_auto_source(struct drm_i915_private *dev_priv,
*source = INTEL_PIPE_CRC_SOURCE_DP_D;
break;
default:
- WARN(1, "nonexisting DP port %c\n",
- port_name(dig_port->base.port));
+ drm_WARN(dev, 1, "nonexisting DP port %c\n",
+ port_name(dig_port->base.port));
break;
}
break;
@@ -172,7 +172,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
* - DisplayPort scrambling: used for EMI reduction
*/
if (need_stable_symbols) {
- u32 tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
tmp |= DC_BALANCE_RESET_VLV;
switch (pipe) {
@@ -188,7 +188,7 @@ static int vlv_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
default:
return -EINVAL;
}
- I915_WRITE(PORT_DFT2_G4X, tmp);
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
}
return 0;
@@ -237,7 +237,7 @@ static int i9xx_pipe_crc_ctl_reg(struct drm_i915_private *dev_priv,
static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
enum pipe pipe)
{
- u32 tmp = I915_READ(PORT_DFT2_G4X);
+ u32 tmp = intel_de_read(dev_priv, PORT_DFT2_G4X);
switch (pipe) {
case PIPE_A:
@@ -254,7 +254,7 @@ static void vlv_undo_pipe_scramble_reset(struct drm_i915_private *dev_priv,
}
if (!(tmp & PIPE_SCRAMBLE_RESET_MASK))
tmp &= ~DC_BALANCE_RESET_VLV;
- I915_WRITE(PORT_DFT2_G4X, tmp);
+ intel_de_write(dev_priv, PORT_DFT2_G4X, tmp);
}
static int ilk_pipe_crc_ctl_reg(enum intel_pipe_crc_source *source,
@@ -328,7 +328,8 @@ put_state:
drm_atomic_state_put(state);
unlock:
- WARN(ret, "Toggling workaround to %i returns %i\n", enable, ret);
+ drm_WARN(&dev_priv->drm, ret,
+ "Toggling workaround to %i returns %i\n", enable, ret);
drm_modeset_drop_locks(&ctx);
drm_modeset_acquire_fini(&ctx);
}
@@ -440,15 +441,11 @@ display_crc_ctl_parse_source(const char *buf, enum intel_pipe_crc_source *s)
return 0;
}
-void intel_display_crc_init(struct drm_i915_private *dev_priv)
+void intel_crtc_crc_init(struct intel_crtc *crtc)
{
- enum pipe pipe;
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
- for_each_pipe(dev_priv, pipe) {
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
-
- spin_lock_init(&pipe_crc->lock);
- }
+ spin_lock_init(&pipe_crc->lock);
}
static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
@@ -570,7 +567,7 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
enum intel_pipe_crc_source source;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
@@ -586,7 +583,8 @@ int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
enum intel_display_power_domain power_domain;
enum intel_pipe_crc_source source;
intel_wakeref_t wakeref;
@@ -595,14 +593,15 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
bool enable;
if (display_crc_ctl_parse_source(source_name, &source) < 0) {
- DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ drm_dbg(&dev_priv->drm, "unknown source %s\n", source_name);
return -EINVAL;
}
power_domain = POWER_DOMAIN_PIPE(crtc->index);
wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
if (!wakeref) {
- DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -615,8 +614,8 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
goto out;
pipe_crc->source = source;
- I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
if (!source) {
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
@@ -638,7 +637,7 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
u32 val = 0;
if (!crtc->crc.opened)
@@ -650,22 +649,22 @@ void intel_crtc_enable_pipe_crc(struct intel_crtc *intel_crtc)
/* Don't need pipe_crc->lock here, IRQs are not generated. */
pipe_crc->skipped = 0;
- I915_WRITE(PIPE_CRC_CTL(crtc->index), val);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), val);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
}
void intel_crtc_disable_pipe_crc(struct intel_crtc *intel_crtc)
{
struct drm_crtc *crtc = &intel_crtc->base;
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
+ struct intel_pipe_crc *pipe_crc = &intel_crtc->pipe_crc;
/* Swallow crc's until we stop generating them. */
spin_lock_irq(&pipe_crc->lock);
pipe_crc->skipped = INT_MIN;
spin_unlock_irq(&pipe_crc->lock);
- I915_WRITE(PIPE_CRC_CTL(crtc->index), 0);
- POSTING_READ(PIPE_CRC_CTL(crtc->index));
+ intel_de_write(dev_priv, PIPE_CRC_CTL(crtc->index), 0);
+ intel_de_posting_read(dev_priv, PIPE_CRC_CTL(crtc->index));
intel_synchronize_irq(dev_priv);
}
diff --git a/drivers/gpu/drm/i915/display/intel_pipe_crc.h b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
index db258a756fc6..43012b189415 100644
--- a/drivers/gpu/drm/i915/display/intel_pipe_crc.h
+++ b/drivers/gpu/drm/i915/display/intel_pipe_crc.h
@@ -13,7 +13,7 @@ struct drm_i915_private;
struct intel_crtc;
#ifdef CONFIG_DEBUG_FS
-void intel_display_crc_init(struct drm_i915_private *dev_priv);
+void intel_crtc_crc_init(struct intel_crtc *crtc);
int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
const char *source_name, size_t *values_cnt);
@@ -22,7 +22,7 @@ const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
-static inline void intel_display_crc_init(struct drm_i915_private *dev_priv) {}
+static inline void intel_crtc_crc_init(struct intel_crtc *crtc) {}
#define intel_crtc_set_crc_source NULL
#define intel_crtc_verify_crc_source NULL
#define intel_crtc_get_crc_sources NULL
diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c
index 89c9cf5f38d2..fd9b146e3aba 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.c
+++ b/drivers/gpu/drm/i915/display/intel_psr.c
@@ -59,11 +59,28 @@
* get called by the frontbuffer tracking code. Note that because of locking
* issues the self-refresh re-enable code is done from a work queue, which
* must be correctly synchronized/cancelled when shutting down the pipe."
+ *
+ * DC3CO (DC3 clock off)
+ *
+ * On top of PSR2, GEN12 adds an intermediate power-saving state that turns
+ * the clock off automatically during the PSR2 idle state.
+ * The smaller overhead of DC3CO entry/exit vs. the overhead of PSR2 deep sleep
+ * entry/exit allows the HW to enter a low-power state even when page flipping
+ * periodically (for instance a 30fps video playback scenario).
+ *
+ * Every time a flip occurs, PSR2 will get out of deep sleep state (if it was
+ * in it), DC3CO is enabled and tgl_dc3co_disable_work is scheduled to run
+ * after 6 frames; if no other flip occurs and that work executes, DC3CO is
+ * disabled and PSR2 is configured to enter deep sleep again, the cycle
+ * restarting on the next flip.
+ * Frontbuffer modifications intentionally do not trigger DC3CO activation,
+ * as that would add a lot of complexity and most modern systems will only
+ * use page flips.
*/
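Condensed, the flip side of that life cycle looks as follows; a simplified sketch with psr.lock elided, using the names introduced by this patch (the wrapper function name is hypothetical):

/* Sketch: DC3CO round trip on page flip (simplified, no locking). */
static void dc3co_flip_sketch(struct drm_i915_private *dev_priv)
{
	tgl_psr2_enable_dc3co(dev_priv);
	/* re-arm: if no flip lands within ~6 frame times, the work
	 * item runs tgl_psr2_disable_dc3co() and deep sleep resumes */
	mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
			 dev_priv->psr.dc3co_exit_delay);
}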
-static bool psr_global_enabled(u32 debug)
+static bool psr_global_enabled(struct drm_i915_private *i915)
{
- switch (debug & I915_PSR_DEBUG_MODE_MASK) {
+ switch (i915->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DEFAULT:
return i915_modparams.enable_psr;
case I915_PSR_DEBUG_DISABLE:
@@ -77,8 +94,8 @@ static bool intel_psr2_enabled(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state)
{
/* Cannot enable DSC and PSR2 simultaneously */
- WARN_ON(crtc_state->dsc.compression_enable &&
- crtc_state->has_psr2);
+ drm_WARN_ON(&dev_priv->drm, crtc_state->dsc.compression_enable &&
+ crtc_state->has_psr2);
switch (dev_priv->psr.debug & I915_PSR_DEBUG_MODE_MASK) {
case I915_PSR_DEBUG_DISABLE:
@@ -114,10 +131,10 @@ static void psr_irq_control(struct drm_i915_private *dev_priv)
EDP_PSR_PRE_ENTRY(trans_shift);
/* Warning: it is masking/setting reserved bits too */
- val = I915_READ(imr_reg);
+ val = intel_de_read(dev_priv, imr_reg);
val &= ~EDP_PSR_TRANS_MASK(trans_shift);
val |= ~mask;
- I915_WRITE(imr_reg, val);
+ intel_de_write(dev_priv, imr_reg, val);
}
static void psr_event_print(u32 val, bool psr2_enabled)
@@ -174,20 +191,24 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (psr_iir & EDP_PSR_PRE_ENTRY(trans_shift)) {
dev_priv->psr.last_entry_attempt = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR entry attempt in 2 vblanks\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR entry attempt in 2 vblanks\n",
+ transcoder_name(cpu_transcoder));
}
if (psr_iir & EDP_PSR_POST_EXIT(trans_shift)) {
dev_priv->psr.last_exit = time_ns;
- DRM_DEBUG_KMS("[transcoder %s] PSR exit completed\n",
- transcoder_name(cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "[transcoder %s] PSR exit completed\n",
+ transcoder_name(cpu_transcoder));
if (INTEL_GEN(dev_priv) >= 9) {
- u32 val = I915_READ(PSR_EVENT(cpu_transcoder));
+ u32 val = intel_de_read(dev_priv,
+ PSR_EVENT(cpu_transcoder));
bool psr2_enabled = dev_priv->psr.psr2_enabled;
- I915_WRITE(PSR_EVENT(cpu_transcoder), val);
+ intel_de_write(dev_priv, PSR_EVENT(cpu_transcoder),
+ val);
psr_event_print(val, psr2_enabled);
}
}
@@ -195,7 +216,7 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
if (psr_iir & EDP_PSR_ERROR(trans_shift)) {
u32 val;
- DRM_WARN("[transcoder %s] PSR aux error\n",
+ drm_warn(&dev_priv->drm, "[transcoder %s] PSR aux error\n",
transcoder_name(cpu_transcoder));
dev_priv->psr.irq_aux_error = true;
@@ -208,9 +229,9 @@ void intel_psr_irq_handler(struct drm_i915_private *dev_priv, u32 psr_iir)
* again, so we don't care about unmasking the interrupt
* or unsetting irq_aux_error.
*/
- val = I915_READ(imr_reg);
+ val = intel_de_read(dev_priv, imr_reg);
val |= EDP_PSR_ERROR(trans_shift);
- I915_WRITE(imr_reg, val);
+ intel_de_write(dev_priv, imr_reg, val);
schedule_work(&dev_priv->psr.work);
}
@@ -270,7 +291,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
to_i915(dp_to_dig_port(intel_dp)->base.base.dev);
if (dev_priv->psr.dp) {
- DRM_WARN("More than one eDP panel found, PSR support should be extended\n");
+ drm_warn(&dev_priv->drm,
+ "More than one eDP panel found, PSR support should be extended\n");
return;
}
@@ -279,16 +301,18 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
if (!intel_dp->psr_dpcd[0])
return;
- DRM_DEBUG_KMS("eDP panel supports PSR version %x\n",
- intel_dp->psr_dpcd[0]);
+ drm_dbg_kms(&dev_priv->drm, "eDP panel supports PSR version %x\n",
+ intel_dp->psr_dpcd[0]);
- if (drm_dp_has_quirk(&intel_dp->desc, DP_DPCD_QUIRK_NO_PSR)) {
- DRM_DEBUG_KMS("PSR support not currently available for this panel\n");
+ if (drm_dp_has_quirk(&intel_dp->desc, 0, DP_DPCD_QUIRK_NO_PSR)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR support not currently available for this panel\n");
return;
}
if (!(intel_dp->edp_dpcd[1] & DP_EDP_SET_POWER_CAP)) {
- DRM_DEBUG_KMS("Panel lacks power state control, PSR cannot be enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Panel lacks power state control, PSR cannot be enabled\n");
return;
}
@@ -316,8 +340,8 @@ void intel_psr_init_dpcd(struct intel_dp *intel_dp)
* GTC first.
*/
dev_priv->psr.sink_psr2_support = y_req && alpm;
- DRM_DEBUG_KMS("PSR2 %ssupported\n",
- dev_priv->psr.sink_psr2_support ? "" : "not ");
+ drm_dbg_kms(&dev_priv->drm, "PSR2 %ssupported\n",
+ dev_priv->psr.sink_psr2_support ? "" : "not ");
if (dev_priv->psr.sink_psr2_support) {
dev_priv->psr.colorimetry_support =
@@ -380,8 +404,9 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
BUILD_BUG_ON(sizeof(aux_msg) > 20);
for (i = 0; i < sizeof(aux_msg); i += 4)
- I915_WRITE(EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
- intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
+ intel_de_write(dev_priv,
+ EDP_PSR_AUX_DATA(dev_priv->psr.transcoder, i >> 2),
+ intel_dp_pack_aux(&aux_msg[i], sizeof(aux_msg) - i));
aux_clock_divider = intel_dp->get_aux_clock_divider(intel_dp, 0);
@@ -391,7 +416,8 @@ static void hsw_psr_setup_aux(struct intel_dp *intel_dp)
/* Select only valid bits for SRD_AUX_CTL */
aux_ctl &= psr_aux_mask;
- I915_WRITE(EDP_PSR_AUX_CTL(dev_priv->psr.transcoder), aux_ctl);
+ intel_de_write(dev_priv, EDP_PSR_AUX_CTL(dev_priv->psr.transcoder),
+ aux_ctl);
}
static void intel_psr_enable_sink(struct intel_dp *intel_dp)
@@ -454,22 +480,30 @@ static u32 intel_psr1_get_tp_time(struct intel_dp *intel_dp)
return val;
}
-static void hsw_activate_psr1(struct intel_dp *intel_dp)
+static u8 psr_compute_idle_frames(struct intel_dp *intel_dp)
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- u32 max_sleep_time = 0x1f;
- u32 val = EDP_PSR_ENABLE;
+ int idle_frames;
/* Let's use 6 as the minimum to cover all known cases including the
* off-by-one issue that HW has in some cases.
*/
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- /* sink_sync_latency of 8 means source has to wait for more than 8
- * frames, we'll go with 9 frames for now
- */
+ idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val |= idle_frames << EDP_PSR_IDLE_FRAME_SHIFT;
+
+ if (drm_WARN_ON(&dev_priv->drm, idle_frames > 0xf))
+ idle_frames = 0xf;
+
+ return idle_frames;
+}
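A worked example of the helper above, with assumed inputs: a VBT idle-frame value of 2 and a sink_sync_latency of 8.

/* Example (hypothetical values):
 *   max(6, vbt.psr.idle_frames = 2)    -> 6  (6 covers the HW off-by-one)
 *   max(6, sink_sync_latency + 1 = 9)  -> 9  (wait one frame past the sink)
 *   9 <= 0xf                           -> no clamp; program 9 idle frames
 */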
+
+static void hsw_activate_psr1(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 max_sleep_time = 0x1f;
+ u32 val = EDP_PSR_ENABLE;
+
+ val |= psr_compute_idle_frames(intel_dp) << EDP_PSR_IDLE_FRAME_SHIFT;
val |= max_sleep_time << EDP_PSR_MAX_SLEEP_TIME_SHIFT;
if (IS_HASWELL(dev_priv))
@@ -483,9 +517,9 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp)
if (INTEL_GEN(dev_priv) >= 8)
val |= EDP_PSR_CRC_ENABLE;
- val |= (I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) &
+ val |= (intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) &
EDP_PSR_RESTORE_PSR_ACTIVE_CTX_MASK);
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
static void hsw_activate_psr2(struct intel_dp *intel_dp)
@@ -493,13 +527,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
u32 val;
- /* Let's use 6 as the minimum to cover all known cases including the
- * off-by-one issue that HW has in some cases.
- */
- int idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
-
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- val = idle_frames << EDP_PSR2_IDLE_FRAME_SHIFT;
+ val = psr_compute_idle_frames(intel_dp) << EDP_PSR2_IDLE_FRAME_SHIFT;
val |= EDP_PSR2_ENABLE | EDP_SU_TRACK_ENABLE;
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
@@ -521,9 +549,9 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp)
* PSR2 HW is incorrectly using EDP_PSR_TP1_TP3_SEL and BSpec
* recommends keeping this bit unset while PSR2 is enabled.
*/
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
+ intel_de_write(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder), 0);
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static bool
@@ -552,10 +580,10 @@ static void psr2_program_idle_frames(struct drm_i915_private *dev_priv,
u32 val;
idle_frames <<= EDP_PSR2_IDLE_FRAME_SHIFT;
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder));
val &= ~EDP_PSR2_IDLE_FRAME_MASK;
val |= idle_frames;
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
}
static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
@@ -566,29 +594,22 @@ static void tgl_psr2_enable_dc3co(struct drm_i915_private *dev_priv)
static void tgl_psr2_disable_dc3co(struct drm_i915_private *dev_priv)
{
- int idle_frames;
+ struct intel_dp *intel_dp = dev_priv->psr.dp;
intel_display_power_set_target_dc_state(dev_priv, DC_STATE_EN_UPTO_DC6);
- /*
- * Restore PSR2 idle frame let's use 6 as the minimum to cover all known
- * cases including the off-by-one issue that HW has in some cases.
- */
- idle_frames = max(6, dev_priv->vbt.psr.idle_frames);
- idle_frames = max(idle_frames, dev_priv->psr.sink_sync_latency + 1);
- psr2_program_idle_frames(dev_priv, idle_frames);
+ psr2_program_idle_frames(dev_priv, psr_compute_idle_frames(intel_dp));
}
-static void tgl_dc5_idle_thread(struct work_struct *work)
+static void tgl_dc3co_disable_work(struct work_struct *work)
{
struct drm_i915_private *dev_priv =
- container_of(work, typeof(*dev_priv), psr.idle_work.work);
+ container_of(work, typeof(*dev_priv), psr.dc3co_work.work);
mutex_lock(&dev_priv->psr.lock);
/* If delayed work is pending, it is not idle */
- if (delayed_work_pending(&dev_priv->psr.idle_work))
+ if (delayed_work_pending(&dev_priv->psr.dc3co_work))
goto unlock;
- DRM_DEBUG_KMS("DC5/6 idle thread\n");
tgl_psr2_disable_dc3co(dev_priv);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -599,11 +620,41 @@ static void tgl_disallow_dc3co_on_psr2_exit(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.dc3co_enabled)
return;
- cancel_delayed_work(&dev_priv->psr.idle_work);
+ cancel_delayed_work(&dev_priv->psr.dc3co_work);
/* Before PSR2 exit, disallow DC3CO */
tgl_psr2_disable_dc3co(dev_priv);
}
+static void
+tgl_dc3co_exitline_compute_config(struct intel_dp *intel_dp,
+ struct intel_crtc_state *crtc_state)
+{
+ const u32 crtc_vdisplay = crtc_state->uapi.adjusted_mode.crtc_vdisplay;
+ struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp);
+ struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
+ u32 exit_scanlines;
+
+ if (!(dev_priv->csr.allowed_dc_mask & DC_STATE_EN_DC3CO))
+ return;
+
+ /* BSpec 49196: DC3CO only works with pipe A and DDI A. */
+ if (to_intel_crtc(crtc_state->uapi.crtc)->pipe != PIPE_A ||
+ dig_port->base.port != PORT_A)
+ return;
+
+ /*
+ * DC3CO exit time is 200us per BSpec 49196.
+ * PSR2 transcoder Early Exit scanlines = ROUNDUP(200 / line time) + 1
+ */
+ exit_scanlines =
+ intel_usecs_to_scanlines(&crtc_state->uapi.adjusted_mode, 200) + 1;
+
+ if (drm_WARN_ON(&dev_priv->drm, exit_scanlines > crtc_vdisplay))
+ return;
+
+ crtc_state->dc3co_exitline = crtc_vdisplay - exit_scanlines;
+}
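A worked example of that exit-line budget, assuming the standard 1920x1080@60 CEA timing (crtc_clock = 148500 kHz, crtc_htotal = 2200, crtc_vdisplay = 1080):

/* intel_usecs_to_scanlines() is DIV_ROUND_UP(us * crtc_clock, 1000 * htotal):
 *   DIV_ROUND_UP(200 * 148500, 1000 * 2200) + 1 = 14 + 1 = 15 scanlines
 * so:
 *   dc3co_exitline = 1080 - 15 = 1065
 * i.e. the transcoder must begin the DC3CO exit no later than scanline
 * 1065 to meet the 200us wake budget before the active area ends.
 */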
+
static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
struct intel_crtc_state *crtc_state)
{
@@ -616,8 +667,9 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
return false;
if (!transcoder_has_psr2(dev_priv, crtc_state->cpu_transcoder)) {
- DRM_DEBUG_KMS("PSR2 not supported in transcoder %s\n",
- transcoder_name(crtc_state->cpu_transcoder));
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not supported in transcoder %s\n",
+ transcoder_name(crtc_state->cpu_transcoder));
return false;
}
@@ -627,7 +679,8 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* over PSR2.
*/
if (crtc_state->dsc.compression_enable) {
- DRM_DEBUG_KMS("PSR2 cannot be enabled since DSC is enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 cannot be enabled since DSC is enabled\n");
return false;
}
@@ -646,15 +699,17 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
}
if (crtc_hdisplay > psr_max_h || crtc_vdisplay > psr_max_v) {
- DRM_DEBUG_KMS("PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
- crtc_hdisplay, crtc_vdisplay,
- psr_max_h, psr_max_v);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, resolution %dx%d > max supported %dx%d\n",
+ crtc_hdisplay, crtc_vdisplay,
+ psr_max_h, psr_max_v);
return false;
}
if (crtc_state->pipe_bpp > max_bpp) {
- DRM_DEBUG_KMS("PSR2 not enabled, pipe bpp %d > max supported %d\n",
- crtc_state->pipe_bpp, max_bpp);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, pipe bpp %d > max supported %d\n",
+ crtc_state->pipe_bpp, max_bpp);
return false;
}
@@ -665,16 +720,19 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp,
* x granularity.
*/
if (crtc_hdisplay % dev_priv->psr.su_x_granularity) {
- DRM_DEBUG_KMS("PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
- crtc_hdisplay, dev_priv->psr.su_x_granularity);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled, hdisplay(%d) not multiple of %d\n",
+ crtc_hdisplay, dev_priv->psr.su_x_granularity);
return false;
}
if (crtc_state->crc_enabled) {
- DRM_DEBUG_KMS("PSR2 not enabled because it would inhibit pipe CRC calculation\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR2 not enabled because it would inhibit pipe CRC calculation\n");
return false;
}
+ tgl_dc3co_exitline_compute_config(intel_dp, crtc_state);
return true;
}
@@ -700,31 +758,36 @@ void intel_psr_compute_config(struct intel_dp *intel_dp,
* hardcoded to PORT_A
*/
if (dig_port->base.port != PORT_A) {
- DRM_DEBUG_KMS("PSR condition failed: Port not supported\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Port not supported\n");
return;
}
if (dev_priv->psr.sink_not_reliable) {
- DRM_DEBUG_KMS("PSR sink implementation is not reliable\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink implementation is not reliable\n");
return;
}
if (adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE) {
- DRM_DEBUG_KMS("PSR condition failed: Interlaced mode enabled\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Interlaced mode enabled\n");
return;
}
psr_setup_time = drm_dp_psr_setup_time(intel_dp->psr_dpcd);
if (psr_setup_time < 0) {
- DRM_DEBUG_KMS("PSR condition failed: Invalid PSR setup time (0x%02x)\n",
- intel_dp->psr_dpcd[1]);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: Invalid PSR setup time (0x%02x)\n",
+ intel_dp->psr_dpcd[1]);
return;
}
if (intel_usecs_to_scanlines(adjusted_mode, psr_setup_time) >
adjusted_mode->crtc_vtotal - adjusted_mode->crtc_vdisplay - 1) {
- DRM_DEBUG_KMS("PSR condition failed: PSR setup time (%d us) too long\n",
- psr_setup_time);
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR condition failed: PSR setup time (%d us) too long\n",
+ psr_setup_time);
return;
}
@@ -737,10 +800,12 @@ static void intel_psr_activate(struct intel_dp *intel_dp)
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder))
- WARN_ON(I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR2_CTL(dev_priv->psr.transcoder)) & EDP_PSR2_ENABLE);
- WARN_ON(I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
- WARN_ON(dev_priv->psr.active);
+ drm_WARN_ON(&dev_priv->drm,
+ intel_de_read(dev_priv, EDP_PSR_CTL(dev_priv->psr.transcoder)) & EDP_PSR_ENABLE);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->psr.active);
lockdep_assert_held(&dev_priv->psr.lock);
/* PSR1 and PSR2 are mutually exclusive. */
@@ -768,11 +833,11 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (dev_priv->psr.psr2_enabled && (IS_GEN(dev_priv, 9) &&
!IS_GEMINILAKE(dev_priv))) {
i915_reg_t reg = CHICKEN_TRANS(cpu_transcoder);
- u32 chicken = I915_READ(reg);
+ u32 chicken = intel_de_read(dev_priv, reg);
chicken |= PSR2_VSC_ENABLE_PROG_HEADER |
PSR2_ADD_VERTICAL_LINE_COUNT;
- I915_WRITE(reg, chicken);
+ intel_de_write(dev_priv, reg, chicken);
}
/*
@@ -789,9 +854,24 @@ static void intel_psr_enable_source(struct intel_dp *intel_dp,
if (INTEL_GEN(dev_priv) < 11)
mask |= EDP_PSR_DEBUG_MASK_DISP_REG_WRITE;
- I915_WRITE(EDP_PSR_DEBUG(dev_priv->psr.transcoder), mask);
+ intel_de_write(dev_priv, EDP_PSR_DEBUG(dev_priv->psr.transcoder),
+ mask);
psr_irq_control(dev_priv);
+
+ if (crtc_state->dc3co_exitline) {
+ u32 val;
+
+ /*
+ * TODO: if future platforms support DC3CO in more than one
+ * transcoder, EXITLINE will need to be unset when disabling PSR
+ */
+ val = intel_de_read(dev_priv, EXITLINE(cpu_transcoder));
+ val &= ~EXITLINE_MASK;
+ val |= crtc_state->dc3co_exitline << EXITLINE_SHIFT;
+ val |= EXITLINE_ENABLE;
+ intel_de_write(dev_priv, EXITLINE(cpu_transcoder), val);
+ }
}
static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
@@ -800,14 +880,16 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
struct intel_dp *intel_dp = dev_priv->psr.dp;
u32 val;
- WARN_ON(dev_priv->psr.enabled);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->psr.enabled);
dev_priv->psr.psr2_enabled = intel_psr2_enabled(dev_priv, crtc_state);
dev_priv->psr.busy_frontbuffer_bits = 0;
dev_priv->psr.pipe = to_intel_crtc(crtc_state->uapi.crtc)->pipe;
dev_priv->psr.dc3co_enabled = !!crtc_state->dc3co_exitline;
- dev_priv->psr.dc3co_exit_delay = intel_get_frame_time_us(crtc_state);
dev_priv->psr.transcoder = crtc_state->cpu_transcoder;
+ /* DC5/DC6 requires at least 6 idle frames */
+ val = usecs_to_jiffies(intel_get_frame_time_us(crtc_state) * 6);
+ dev_priv->psr.dc3co_exit_delay = val;
/*
* If a PSR error happened and the driver is reloaded, the EDP_PSR_IIR
@@ -818,20 +900,22 @@ static void intel_psr_enable_locked(struct drm_i915_private *dev_priv,
* to avoid any rendering problems.
*/
if (INTEL_GEN(dev_priv) >= 12) {
- val = I915_READ(TRANS_PSR_IIR(dev_priv->psr.transcoder));
+ val = intel_de_read(dev_priv,
+ TRANS_PSR_IIR(dev_priv->psr.transcoder));
val &= EDP_PSR_ERROR(0);
} else {
- val = I915_READ(EDP_PSR_IIR);
+ val = intel_de_read(dev_priv, EDP_PSR_IIR);
val &= EDP_PSR_ERROR(dev_priv->psr.transcoder);
}
if (val) {
dev_priv->psr.sink_not_reliable = true;
- DRM_DEBUG_KMS("PSR interruption error set, not enabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR interruption error set, not enabling PSR\n");
return;
}
- DRM_DEBUG_KMS("Enabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ drm_dbg_kms(&dev_priv->drm, "Enabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_setup_vsc(intel_dp, crtc_state);
intel_psr_enable_sink(intel_dp);
intel_psr_enable_source(intel_dp, crtc_state);
@@ -852,18 +936,20 @@ void intel_psr_enable(struct intel_dp *intel_dp,
{
struct drm_i915_private *dev_priv = dp_to_i915(intel_dp);
- if (!crtc_state->has_psr)
+ if (!CAN_PSR(dev_priv) || dev_priv->psr.dp != intel_dp)
return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+ dev_priv->psr.force_mode_changed = false;
+
+ if (!crtc_state->has_psr)
return;
- WARN_ON(dev_priv->drrs.dp);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->drrs.dp);
mutex_lock(&dev_priv->psr.lock);
- if (!psr_global_enabled(dev_priv->psr.debug)) {
- DRM_DEBUG_KMS("PSR disabled by flag\n");
+ if (!psr_global_enabled(dev_priv)) {
+ drm_dbg_kms(&dev_priv->drm, "PSR disabled by flag\n");
goto unlock;
}
@@ -879,27 +965,33 @@ static void intel_psr_exit(struct drm_i915_private *dev_priv)
if (!dev_priv->psr.active) {
if (transcoder_has_psr2(dev_priv, dev_priv->psr.transcoder)) {
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- WARN_ON(val & EDP_PSR2_ENABLE);
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR2_ENABLE);
}
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- WARN_ON(val & EDP_PSR_ENABLE);
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, val & EDP_PSR_ENABLE);
return;
}
if (dev_priv->psr.psr2_enabled) {
tgl_disallow_dc3co_on_psr2_exit(dev_priv);
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- WARN_ON(!(val & EDP_PSR2_ENABLE));
+ val = intel_de_read(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR2_ENABLE));
val &= ~EDP_PSR2_ENABLE;
- I915_WRITE(EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv,
+ EDP_PSR2_CTL(dev_priv->psr.transcoder), val);
} else {
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- WARN_ON(!(val & EDP_PSR_ENABLE));
+ val = intel_de_read(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder));
+ drm_WARN_ON(&dev_priv->drm, !(val & EDP_PSR_ENABLE));
val &= ~EDP_PSR_ENABLE;
- I915_WRITE(EDP_PSR_CTL(dev_priv->psr.transcoder), val);
+ intel_de_write(dev_priv,
+ EDP_PSR_CTL(dev_priv->psr.transcoder), val);
}
dev_priv->psr.active = false;
}
@@ -915,8 +1007,8 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
if (!dev_priv->psr.enabled)
return;
- DRM_DEBUG_KMS("Disabling PSR%s\n",
- dev_priv->psr.psr2_enabled ? "2" : "1");
+ drm_dbg_kms(&dev_priv->drm, "Disabling PSR%s\n",
+ dev_priv->psr.psr2_enabled ? "2" : "1");
intel_psr_exit(dev_priv);
@@ -931,7 +1023,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp)
/* Wait till PSR is idle */
if (intel_de_wait_for_clear(dev_priv, psr_status,
psr_status_mask, 2000))
- DRM_ERROR("Timed out waiting PSR idle state\n");
+ drm_err(&dev_priv->drm, "Timed out waiting PSR idle state\n");
/* Disable PSR on Sink */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_EN_CFG, 0);
@@ -957,7 +1049,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
if (!old_crtc_state->has_psr)
return;
- if (WARN_ON(!CAN_PSR(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !CAN_PSR(dev_priv)))
return;
mutex_lock(&dev_priv->psr.lock);
@@ -966,7 +1058,7 @@ void intel_psr_disable(struct intel_dp *intel_dp,
mutex_unlock(&dev_priv->psr.lock);
cancel_work_sync(&dev_priv->psr.work);
- cancel_delayed_work_sync(&dev_priv->psr.idle_work);
+ cancel_delayed_work_sync(&dev_priv->psr.dc3co_work);
}
static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
@@ -981,7 +1073,7 @@ static void psr_force_hw_tracking_exit(struct drm_i915_private *dev_priv)
* but it makes more sense to write to the currently active
* pipe.
*/
- I915_WRITE(CURSURFLIVE(dev_priv->psr.pipe), 0);
+ intel_de_write(dev_priv, CURSURFLIVE(dev_priv->psr.pipe), 0);
else
/*
* A write to CURSURFLIVE does not cause HW tracking to exit PSR
@@ -1009,9 +1101,11 @@ void intel_psr_update(struct intel_dp *intel_dp,
if (!CAN_PSR(dev_priv) || READ_ONCE(psr->dp) != intel_dp)
return;
+ dev_priv->psr.force_mode_changed = false;
+
mutex_lock(&dev_priv->psr.lock);
- enable = crtc_state->has_psr && psr_global_enabled(psr->debug);
+ enable = crtc_state->has_psr && psr_global_enabled(dev_priv);
psr2_enable = intel_psr2_enabled(dev_priv, crtc_state);
if (enable == psr->enabled && psr2_enable == psr->psr2_enabled) {
@@ -1099,7 +1193,8 @@ static bool __psr_wait_for_idle_locked(struct drm_i915_private *dev_priv)
err = intel_de_wait_for_clear(dev_priv, reg, mask, 50);
if (err)
- DRM_ERROR("Timed out waiting for PSR Idle for re-enable\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for PSR Idle for re-enable\n");
/* After the unlocked wait, verify that PSR is still wanted! */
mutex_lock(&dev_priv->psr.lock);
@@ -1163,7 +1258,7 @@ int intel_psr_debug_set(struct drm_i915_private *dev_priv, u64 val)
if (val & ~(I915_PSR_DEBUG_IRQ | I915_PSR_DEBUG_MODE_MASK) ||
mode > I915_PSR_DEBUG_FORCE_PSR1) {
- DRM_DEBUG_KMS("Invalid debug mask %llx\n", val);
+ drm_dbg_kms(&dev_priv->drm, "Invalid debug mask %llx\n", val);
return -EINVAL;
}
@@ -1275,14 +1370,12 @@ void intel_psr_invalidate(struct drm_i915_private *dev_priv,
* When we completely rely on PSR2 S/W tracking in the future,
* intel_psr_flush() will invalidate and flush the PSR for the ORIGIN_FLIP
* event as well, therefore tgl_dc3co_flush() will need to be changed
- * accrodingly in future.
+ * accordingly in future.
*/
static void
tgl_dc3co_flush(struct drm_i915_private *dev_priv,
unsigned int frontbuffer_bits, enum fb_op_origin origin)
{
- u32 delay;
-
mutex_lock(&dev_priv->psr.lock);
if (!dev_priv->psr.dc3co_enabled)
@@ -1300,10 +1393,8 @@ tgl_dc3co_flush(struct drm_i915_private *dev_priv,
goto unlock;
tgl_psr2_enable_dc3co(dev_priv);
- /* DC5/DC6 required idle frames = 6 */
- delay = 6 * dev_priv->psr.dc3co_exit_delay;
- mod_delayed_work(system_wq, &dev_priv->psr.idle_work,
- usecs_to_jiffies(delay));
+ mod_delayed_work(system_wq, &dev_priv->psr.dc3co_work,
+ dev_priv->psr.dc3co_exit_delay);
unlock:
mutex_unlock(&dev_priv->psr.lock);
@@ -1387,7 +1478,7 @@ void intel_psr_init(struct drm_i915_private *dev_priv)
dev_priv->psr.link_standby = dev_priv->vbt.psr.full_link;
INIT_WORK(&dev_priv->psr.work, intel_psr_work);
- INIT_DELAYED_WORK(&dev_priv->psr.idle_work, tgl_dc5_idle_thread);
+ INIT_DELAYED_WORK(&dev_priv->psr.dc3co_work, tgl_dc3co_disable_work);
mutex_init(&dev_priv->psr.lock);
}
@@ -1423,14 +1514,15 @@ static void psr_alpm_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(aux, DP_RECEIVER_ALPM_STATUS, &val);
if (r != 1) {
- DRM_ERROR("Error reading ALPM status\n");
+ drm_err(&dev_priv->drm, "Error reading ALPM status\n");
return;
}
if (val & DP_ALPM_LOCK_TIMEOUT_ERROR) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- DRM_DEBUG_KMS("ALPM lock timeout error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "ALPM lock timeout error, disabling PSR\n");
/* Clearing error */
drm_dp_dpcd_writeb(aux, DP_RECEIVER_ALPM_STATUS, val);
@@ -1446,14 +1538,15 @@ static void psr_capability_changed_check(struct intel_dp *intel_dp)
r = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_ESI, &val);
if (r != 1) {
- DRM_ERROR("Error reading DP_PSR_ESI\n");
+ drm_err(&dev_priv->drm, "Error reading DP_PSR_ESI\n");
return;
}
if (val & DP_PSR_CAPS_CHANGE) {
intel_psr_disable_locked(intel_dp);
psr->sink_not_reliable = true;
- DRM_DEBUG_KMS("Sink PSR capability changed, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Sink PSR capability changed, disabling PSR\n");
/* Clearing it */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ESI, val);
@@ -1478,7 +1571,8 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
goto exit;
if (psr_get_status_and_error_status(intel_dp, &status, &error_status)) {
- DRM_ERROR("Error reading PSR status or error status\n");
+ drm_err(&dev_priv->drm,
+ "Error reading PSR status or error status\n");
goto exit;
}
@@ -1488,17 +1582,22 @@ void intel_psr_short_pulse(struct intel_dp *intel_dp)
}
if (status == DP_PSR_SINK_INTERNAL_ERROR && !error_status)
- DRM_DEBUG_KMS("PSR sink internal error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR sink internal error, disabling PSR\n");
if (error_status & DP_PSR_RFB_STORAGE_ERROR)
- DRM_DEBUG_KMS("PSR RFB storage error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR RFB storage error, disabling PSR\n");
if (error_status & DP_PSR_VSC_SDP_UNCORRECTABLE_ERROR)
- DRM_DEBUG_KMS("PSR VSC SDP uncorrectable error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR VSC SDP uncorrectable error, disabling PSR\n");
if (error_status & DP_PSR_LINK_CRC_ERROR)
- DRM_DEBUG_KMS("PSR Link CRC error, disabling PSR\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "PSR Link CRC error, disabling PSR\n");
if (error_status & ~errors)
- DRM_ERROR("PSR_ERROR_STATUS unhandled errors %x\n",
- error_status & ~errors);
+ drm_err(&dev_priv->drm,
+ "PSR_ERROR_STATUS unhandled errors %x\n",
+ error_status & ~errors);
/* clear status register */
drm_dp_dpcd_writeb(&intel_dp->aux, DP_PSR_ERROR_STATUS, error_status);
@@ -1534,16 +1633,29 @@ void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_crtc_state *crtc_state;
if (!CAN_PSR(dev_priv) || !new_state->crtc ||
- dev_priv->psr.initially_probed)
+ !dev_priv->psr.force_mode_changed)
return;
intel_connector = to_intel_connector(connector);
- dig_port = enc_to_dig_port(intel_connector->encoder);
+ dig_port = enc_to_dig_port(intel_attached_encoder(intel_connector));
if (dev_priv->psr.dp != &dig_port->dp)
return;
crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
new_state->crtc);
crtc_state->mode_changed = true;
- dev_priv->psr.initially_probed = true;
+}
+
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp)
+{
+ struct drm_i915_private *dev_priv;
+
+ if (!intel_dp)
+ return;
+
+ dev_priv = dp_to_i915(intel_dp);
+ if (!CAN_PSR(dev_priv) || intel_dp != dev_priv->psr.dp)
+ return;
+
+ dev_priv->psr.force_mode_changed = true;
}
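Taken together with the intel_psr_enable()/intel_psr_update() hunks above, force_mode_changed acts as a one-shot flag: the new setter arms it (from a caller outside this hunk), the next atomic check on the PSR connector turns it into a full modeset, and the enable/update paths clear it. Roughly, as a sketch:

/* Sketch of the one-shot flow (the caller of the setter is not shown). */
intel_psr_set_force_mode_changed(intel_dp);	/* arm */
/* ...next atomic check on the PSR connector... */
if (dev_priv->psr.force_mode_changed)
	crtc_state->mode_changed = true;	/* force full modeset */
/* intel_psr_enable()/intel_psr_update() then reset the flag */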
diff --git a/drivers/gpu/drm/i915/display/intel_psr.h b/drivers/gpu/drm/i915/display/intel_psr.h
index c58a1d438808..274fc6bb6221 100644
--- a/drivers/gpu/drm/i915/display/intel_psr.h
+++ b/drivers/gpu/drm/i915/display/intel_psr.h
@@ -40,5 +40,6 @@ bool intel_psr_enabled(struct intel_dp *intel_dp);
void intel_psr_atomic_check(struct drm_connector *connector,
struct drm_connector_state *old_state,
struct drm_connector_state *new_state);
+void intel_psr_set_force_mode_changed(struct intel_dp *intel_dp);
#endif /* __INTEL_PSR_H__ */
diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c
index 399b1542509f..46beb155d835 100644
--- a/drivers/gpu/drm/i915/display/intel_quirks.c
+++ b/drivers/gpu/drm/i915/display/intel_quirks.c
@@ -14,7 +14,7 @@
static void quirk_ssc_force_disable(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_LVDS_SSC_DISABLE;
- DRM_INFO("applying lvds SSC disable quirk\n");
+ drm_info(&i915->drm, "applying lvds SSC disable quirk\n");
}
/*
@@ -24,14 +24,14 @@ static void quirk_ssc_force_disable(struct drm_i915_private *i915)
static void quirk_invert_brightness(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INVERT_BRIGHTNESS;
- DRM_INFO("applying inverted panel brightness quirk\n");
+ drm_info(&i915->drm, "applying inverted panel brightness quirk\n");
}
/* Some VBT's incorrectly indicate no backlight is present */
static void quirk_backlight_present(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_BACKLIGHT_PRESENT;
- DRM_INFO("applying backlight present quirk\n");
+ drm_info(&i915->drm, "applying backlight present quirk\n");
}
/* Toshiba Satellite P50-C-18C requires T12 delay to be min 800ms
@@ -40,7 +40,7 @@ static void quirk_backlight_present(struct drm_i915_private *i915)
static void quirk_increase_t12_delay(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INCREASE_T12_DELAY;
- DRM_INFO("Applying T12 delay quirk\n");
+ drm_info(&i915->drm, "Applying T12 delay quirk\n");
}
/*
@@ -50,7 +50,7 @@ static void quirk_increase_t12_delay(struct drm_i915_private *i915)
static void quirk_increase_ddi_disabled_time(struct drm_i915_private *i915)
{
i915->quirks |= QUIRK_INCREASE_DDI_DISABLED_TIME;
- DRM_INFO("Applying Increase DDI Disabled quirk\n");
+ drm_info(&i915->drm, "Applying Increase DDI Disabled quirk\n");
}
struct intel_quirk {
@@ -82,6 +82,16 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
DMI_MATCH(DMI_PRODUCT_NAME, ""),
},
},
+ {
+ .callback = intel_dmi_reverse_brightness,
+ .ident = "Thundersoft TST178 tablet",
+ /* DMI strings are too generic, also match on BIOS date */
+ .matches = {DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "AMI Corporation"),
+ DMI_EXACT_MATCH(DMI_BOARD_NAME, "Aptio CRB"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "To be filled by O.E.M."),
+ DMI_EXACT_MATCH(DMI_BIOS_DATE, "04/15/2014"),
+ },
+ },
{ } /* terminating entry */
},
.hook = quirk_invert_brightness,
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c
index e8819fd21e03..637d8fe2f8c2 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.c
@@ -34,7 +34,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_atomic.h"
@@ -217,23 +216,23 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
int i;
if (HAS_PCH_SPLIT(dev_priv)) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- POSTING_READ(intel_sdvo->sdvo_reg);
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
/*
* HW workaround: this needs to be written twice for an issue
* that may result in the first write getting masked.
*/
if (HAS_PCH_IBX(dev_priv)) {
- I915_WRITE(intel_sdvo->sdvo_reg, val);
- POSTING_READ(intel_sdvo->sdvo_reg);
+ intel_de_write(dev_priv, intel_sdvo->sdvo_reg, val);
+ intel_de_posting_read(dev_priv, intel_sdvo->sdvo_reg);
}
return;
}
if (intel_sdvo->port == PORT_B)
- cval = I915_READ(GEN3_SDVOC);
+ cval = intel_de_read(dev_priv, GEN3_SDVOC);
else
- bval = I915_READ(GEN3_SDVOB);
+ bval = intel_de_read(dev_priv, GEN3_SDVOB);
/*
* Write the registers twice for luck. Sometimes,
@@ -241,11 +240,11 @@ static void intel_sdvo_write_sdvox(struct intel_sdvo *intel_sdvo, u32 val)
* The BIOS does this too. Yay, magic
*/
for (i = 0; i < 2; i++) {
- I915_WRITE(GEN3_SDVOB, bval);
- POSTING_READ(GEN3_SDVOB);
+ intel_de_write(dev_priv, GEN3_SDVOB, bval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOB);
- I915_WRITE(GEN3_SDVOC, cval);
- POSTING_READ(GEN3_SDVOC);
+ intel_de_write(dev_priv, GEN3_SDVOC, cval);
+ intel_de_posting_read(dev_priv, GEN3_SDVOC);
}
}
@@ -414,12 +413,10 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
{
const char *cmd_name;
int i, pos = 0;
-#define BUF_LEN 256
- char buffer[BUF_LEN];
+ char buffer[64];
#define BUF_PRINT(args...) \
- pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
-
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
for (i = 0; i < args_len; i++) {
BUF_PRINT("%02X ", ((u8 *)args)[i]);
@@ -433,9 +430,9 @@ static void intel_sdvo_debug_write(struct intel_sdvo *intel_sdvo, u8 cmd,
BUF_PRINT("(%s)", cmd_name);
else
BUF_PRINT("(%02X)", cmd);
- BUG_ON(pos >= BUF_LEN - 1);
+
+ WARN_ON(pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
-#undef BUF_LEN
DRM_DEBUG_KMS("%s: W: %02X %s\n", SDVO_NAME(intel_sdvo), cmd, buffer);
}
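The BUF_PRINT pattern relies on snprintf() returning the length that would have been written: pos can legally run past the buffer, in which case max_t() clamps the remaining size to zero and further output is dropped; the WARN_ON replaces the old BUG_ON as a non-fatal truncation check. In isolation, a sketch of the pattern (args/args_len as in the surrounding function):

/* Sketch: accumulate formatted output into a fixed buffer, truncating
 * safely once it fills up instead of overflowing.
 */
char buffer[64];
int pos = 0, i;

for (i = 0; i < args_len; i++)
	pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0),
			"%02X ", ((u8 *)args)[i]);

WARN_ON(pos >= sizeof(buffer) - 1);	/* flags truncation, doesn't panic */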
@@ -540,8 +537,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
u8 retry = 15; /* 5 quick checks, followed by 10 long checks */
u8 status;
int i, pos = 0;
-#define BUF_LEN 256
- char buffer[BUF_LEN];
+ char buffer[64];
buffer[0] = '\0';
@@ -581,7 +577,7 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
}
#define BUF_PRINT(args...) \
- pos += snprintf(buffer + pos, max_t(int, BUF_LEN - pos, 0), args)
+ pos += snprintf(buffer + pos, max_t(int, sizeof(buffer) - pos, 0), args)
cmd_status = sdvo_cmd_status(status);
if (cmd_status)
@@ -600,9 +596,9 @@ static bool intel_sdvo_read_response(struct intel_sdvo *intel_sdvo,
goto log_fail;
BUF_PRINT(" %02X", ((u8 *)response)[i]);
}
- BUG_ON(pos >= BUF_LEN - 1);
+
+ WARN_ON(pos >= sizeof(buffer) - 1);
#undef BUF_PRINT
-#undef BUF_LEN
DRM_DEBUG_KMS("%s: R: %s\n", SDVO_NAME(intel_sdvo), buffer);
return true;
@@ -1267,6 +1263,13 @@ static void i9xx_adjust_sdvo_tv_clock(struct intel_crtc_state *pipe_config)
pipe_config->clock_set = true;
}
+static bool intel_has_hdmi_sink(struct intel_sdvo *sdvo,
+ const struct drm_connector_state *conn_state)
+{
+ return sdvo->has_hdmi_monitor &&
+ READ_ONCE(to_intel_digital_connector_state(conn_state)->force_audio) != HDMI_AUDIO_OFF_DVI;
+}
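The helper folds the tri-state force_audio property into the sink decision: HDMI_AUDIO_OFF_DVI forces plain DVI signalling even on an HDMI monitor, and READ_ONCE() keeps the compiler from re-reading the property mid-decision. Together with the hunk below, the resulting audio policy is roughly (a sketch, not the literal code):

/* Sketch of the resulting policy (tri-state force_audio property). */
if (has_hdmi_sink) {
	if (force_audio == HDMI_AUDIO_AUTO)
		has_audio = sdvo->has_hdmi_audio;	/* follow EDID */
	else
		has_audio = (force_audio == HDMI_AUDIO_ON);
}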
+
static int intel_sdvo_compute_config(struct intel_encoder *encoder,
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
@@ -1322,12 +1325,15 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder,
pipe_config->pixel_multiplier =
intel_sdvo_get_pixel_multiplier(adjusted_mode);
- if (intel_sdvo_state->base.force_audio != HDMI_AUDIO_OFF_DVI)
- pipe_config->has_hdmi_sink = intel_sdvo->has_hdmi_monitor;
+ pipe_config->has_hdmi_sink = intel_has_hdmi_sink(intel_sdvo, conn_state);
- if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON ||
- (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO && intel_sdvo->has_hdmi_audio))
- pipe_config->has_audio = true;
+ if (pipe_config->has_hdmi_sink) {
+ if (intel_sdvo_state->base.force_audio == HDMI_AUDIO_AUTO)
+ pipe_config->has_audio = intel_sdvo->has_hdmi_audio;
+ else
+ pipe_config->has_audio =
+ intel_sdvo_state->base.force_audio == HDMI_AUDIO_ON;
+ }
if (intel_sdvo_state->base.broadcast_rgb == INTEL_BROADCAST_RGB_AUTO) {
/*
@@ -1470,7 +1476,8 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
else
intel_sdvo_get_dtd_from_mode(&output_dtd, mode);
if (!intel_sdvo_set_output_timing(intel_sdvo, &output_dtd))
- DRM_INFO("Setting output timings on %s failed\n",
+ drm_info(&dev_priv->drm,
+ "Setting output timings on %s failed\n",
SDVO_NAME(intel_sdvo));
/* Set the input timing to the screen. Assume always input 0. */
@@ -1494,12 +1501,14 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (IS_TV(intel_sdvo_connector) || IS_LVDS(intel_sdvo_connector))
input_dtd.part2.sdvo_flags = intel_sdvo->dtd_sdvo_flags;
if (!intel_sdvo_set_input_timing(intel_sdvo, &input_dtd))
- DRM_INFO("Setting input timings on %s failed\n",
+ drm_info(&dev_priv->drm,
+ "Setting input timings on %s failed\n",
SDVO_NAME(intel_sdvo));
switch (crtc_state->pixel_multiplier) {
default:
- WARN(1, "unknown pixel multiplier specified\n");
+ drm_WARN(&dev_priv->drm, 1,
+ "unknown pixel multiplier specified\n");
/* fall through */
case 1: rate = SDVO_CLOCK_RATE_MULT_1X; break;
case 2: rate = SDVO_CLOCK_RATE_MULT_2X; break;
@@ -1518,7 +1527,7 @@ static void intel_sdvo_pre_enable(struct intel_encoder *intel_encoder,
if (INTEL_GEN(dev_priv) < 5)
sdvox |= SDVO_BORDER_ENABLE;
} else {
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
if (intel_sdvo->port == PORT_B)
sdvox &= SDVOB_PRESERVE_MASK;
else
@@ -1564,7 +1573,7 @@ bool intel_sdvo_port_enabled(struct drm_i915_private *dev_priv,
{
u32 val;
- val = I915_READ(sdvo_reg);
+ val = intel_de_read(dev_priv, sdvo_reg);
/* asserts want to know the pipe even if the port is disabled */
if (HAS_PCH_CPT(dev_priv))
@@ -1607,7 +1616,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_SDVO);
- sdvox = I915_READ(intel_sdvo->sdvo_reg);
+ sdvox = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
ret = intel_sdvo_get_input_timing(intel_sdvo, &dtd);
if (!ret) {
@@ -1615,7 +1624,7 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
* Some sdvo encoders are not spec compliant and don't
* implement the mandatory get_timings function.
*/
- DRM_DEBUG_DRIVER("failed to retrieve SDVO DTD\n");
+ drm_dbg(&dev_priv->drm, "failed to retrieve SDVO DTD\n");
pipe_config->quirks |= PIPE_CONFIG_QUIRK_MODE_SYNC_FLAGS;
} else {
if (dtd.part2.dtd_flags & DTD_FLAG_HSYNC_POSITIVE)
@@ -1667,9 +1676,10 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
}
}
- WARN(encoder_pixel_multiplier != pipe_config->pixel_multiplier,
- "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
- pipe_config->pixel_multiplier, encoder_pixel_multiplier);
+ drm_WARN(dev,
+ encoder_pixel_multiplier != pipe_config->pixel_multiplier,
+ "SDVO pixel multiplier mismatch, port: %i, encoder: %i\n",
+ pipe_config->pixel_multiplier, encoder_pixel_multiplier);
if (sdvox & HDMI_COLOR_RANGE_16_235)
pipe_config->limited_color_range = true;
@@ -1734,7 +1744,7 @@ static void intel_disable_sdvo(struct intel_encoder *encoder,
intel_sdvo_set_encoder_power_state(intel_sdvo,
DRM_MODE_DPMS_OFF);
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
temp &= ~SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1791,7 +1801,7 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
int i;
bool success;
- temp = I915_READ(intel_sdvo->sdvo_reg);
+ temp = intel_de_read(dev_priv, intel_sdvo->sdvo_reg);
temp |= SDVO_ENABLE;
intel_sdvo_write_sdvox(intel_sdvo, temp);
@@ -1806,8 +1816,9 @@ static void intel_enable_sdvo(struct intel_encoder *encoder,
* a given it the status is a success, we succeeded.
*/
if (success && !input1) {
- DRM_DEBUG_KMS("First %s output reported failure to "
- "sync\n", SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "First %s output reported failure to "
+ "sync\n", SDVO_NAME(intel_sdvo));
}
if (0)
@@ -2219,8 +2230,8 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector)
struct drm_i915_private *dev_priv = to_i915(connector->dev);
struct drm_display_mode *newmode;
- DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
+ drm_dbg_kms(&dev_priv->drm, "[CONNECTOR:%d:%s]\n",
+ connector->base.id, connector->name);
/*
* Fetch modes from VBT. For SDVO prefer the VBT mode since some
@@ -2709,6 +2720,7 @@ intel_sdvo_dvi_init(struct intel_sdvo *intel_sdvo, int device)
* Some SDVO devices have one-shot hotplug interrupts.
* Ensure that they get re-enabled when an interrupt happens.
*/
+ intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
intel_encoder->hotplug = intel_sdvo_hotplug;
intel_sdvo_enable_hotplug(intel_encoder);
} else {
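Besides the logging conversion, this hunk carries a functional change: connectors behind SDVO encoders with one-shot hotplug interrupts are now marked DRM_CONNECTOR_POLL_HPD, telling the DRM probe helper that hotplug events (rather than periodic polling) drive detection, while the driver's hotplug handler re-arms the one-shot interrupt. A comment-annotated sketch of the flag's role, assuming the polling flags from <drm/drm_connector.h>:

	/* DRM_CONNECTOR_POLL_HPD: the driver delivers hotplug IRQs, so the
	 * helper must not poll; DRM_CONNECTOR_POLL_CONNECT/_DISCONNECT would
	 * request periodic polling instead.
	 */
	intel_connector->polled = DRM_CONNECTOR_POLL_HPD;
	intel_encoder->hotplug = intel_sdvo_hotplug;	/* re-enables the one-shot IRQ */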
@@ -3229,9 +3241,9 @@ static void assert_sdvo_port_valid(const struct drm_i915_private *dev_priv,
enum port port)
{
if (HAS_PCH_SPLIT(dev_priv))
- WARN_ON(port != PORT_B);
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B);
else
- WARN_ON(port != PORT_B && port != PORT_C);
+ drm_WARN_ON(&dev_priv->drm, port != PORT_B && port != PORT_C);
}
bool intel_sdvo_init(struct drm_i915_private *dev_priv,
@@ -3269,8 +3281,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
u8 byte;
if (!intel_sdvo_read_byte(intel_sdvo, i, &byte)) {
- DRM_DEBUG_KMS("No SDVO device found on %s\n",
- SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "No SDVO device found on %s\n",
+ SDVO_NAME(intel_sdvo));
goto err;
}
}
@@ -3293,8 +3306,9 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
if (intel_sdvo_output_setup(intel_sdvo,
intel_sdvo->caps.output_flags) != true) {
- DRM_DEBUG_KMS("SDVO output failed to setup on %s\n",
- SDVO_NAME(intel_sdvo));
+ drm_dbg_kms(&dev_priv->drm,
+ "SDVO output failed to setup on %s\n",
+ SDVO_NAME(intel_sdvo));
/* Output_setup can leave behind connectors! */
goto err_output;
}
@@ -3331,7 +3345,7 @@ bool intel_sdvo_init(struct drm_i915_private *dev_priv,
&intel_sdvo->pixel_clock_max))
goto err_output;
- DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, "
+ drm_dbg_kms(&dev_priv->drm, "%s device VID/DID: %02X:%02X.%02X, "
"clock range %dMHz - %dMHz, "
"input 1: %c, input 2: %c, "
"output 1: %c, output 2: %c\n",
diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.h b/drivers/gpu/drm/i915/display/intel_sdvo.h
index a66f224aa17d..72065e4360d5 100644
--- a/drivers/gpu/drm/i915/display/intel_sdvo.h
+++ b/drivers/gpu/drm/i915/display/intel_sdvo.h
@@ -8,8 +8,6 @@
#include <linux/types.h>
-#include <drm/i915_drm.h>
-
#include "i915_reg.h"
struct drm_i915_private;
diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c
index fca77ec1e0dd..deda351719db 100644
--- a/drivers/gpu/drm/i915/display/intel_sprite.c
+++ b/drivers/gpu/drm/i915/display/intel_sprite.c
@@ -37,10 +37,10 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_rect.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_trace.h"
+#include "i915_vgpu.h"
#include "intel_atomic_plane.h"
#include "intel_display_types.h"
#include "intel_frontbuffer.h"
@@ -104,7 +104,7 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
if (min <= 0 || max <= 0)
goto irq_disable;
- if (WARN_ON(drm_crtc_vblank_get(&crtc->base)))
+ if (drm_WARN_ON(&dev_priv->drm, drm_crtc_vblank_get(&crtc->base)))
goto irq_disable;
/*
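WARN_ON() becomes drm_WARN_ON(), which adds the device name to the warning header while keeping WARN_ON()'s property of evaluating to the condition, so it can still gate control flow. Sketch, assuming a struct drm_device *drm and an intel_crtc *crtc in scope:

	/* fires a device-tagged WARN and still yields the condition value */
	if (drm_WARN_ON(drm, drm_crtc_vblank_get(&crtc->base)))
		goto irq_disable;	/* couldn't get a vblank reference */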
@@ -113,8 +113,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
* re-entry as well.
*/
if (intel_psr_wait_for_idle(new_crtc_state, &psr_status))
- DRM_ERROR("PSR idle timed out 0x%x, atomic update may fail\n",
- psr_status);
+ drm_err(&dev_priv->drm,
+ "PSR idle timed out 0x%x, atomic update may fail\n",
+ psr_status);
local_irq_disable();
@@ -135,8 +136,9 @@ void intel_pipe_update_start(const struct intel_crtc_state *new_crtc_state)
break;
if (!timeout) {
- DRM_ERROR("Potential atomic update failure on pipe %c\n",
- pipe_name(crtc->pipe));
+ drm_err(&dev_priv->drm,
+ "Potential atomic update failure on pipe %c\n",
+ pipe_name(crtc->pipe));
break;
}
@@ -204,7 +206,8 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
* event outside of the critical section - the spinlock might spin for a
* while ... */
if (new_crtc_state->uapi.event) {
- WARN_ON(drm_crtc_vblank_get(&crtc->base) != 0);
+ drm_WARN_ON(&dev_priv->drm,
+ drm_crtc_vblank_get(&crtc->base) != 0);
spin_lock(&crtc->base.dev->event_lock);
drm_crtc_arm_vblank_event(&crtc->base,
@@ -221,17 +224,20 @@ void intel_pipe_update_end(struct intel_crtc_state *new_crtc_state)
if (crtc->debug.start_vbl_count &&
crtc->debug.start_vbl_count != end_vbl_count) {
- DRM_ERROR("Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
- pipe_name(pipe), crtc->debug.start_vbl_count,
- end_vbl_count,
- ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
- crtc->debug.min_vbl, crtc->debug.max_vbl,
- crtc->debug.scanline_start, scanline_end);
+ drm_err(&dev_priv->drm,
+ "Atomic update failure on pipe %c (start=%u end=%u) time %lld us, min %d, max %d, scanline start %d, end %d\n",
+ pipe_name(pipe), crtc->debug.start_vbl_count,
+ end_vbl_count,
+ ktime_us_delta(end_vbl_time,
+ crtc->debug.start_vbl_time),
+ crtc->debug.min_vbl, crtc->debug.max_vbl,
+ crtc->debug.scanline_start, scanline_end);
}
#ifdef CONFIG_DRM_I915_DEBUG_VBLANK_EVADE
else if (ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time) >
VBLANK_EVASION_TIME_US)
- DRM_WARN("Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
+ drm_warn(&dev_priv->drm,
+ "Atomic update on pipe (%c) took %lld us, max time under evasion is %u us\n",
pipe_name(pipe),
ktime_us_delta(end_vbl_time, crtc->debug.start_vbl_time),
VBLANK_EVASION_TIME_US);
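These error paths report a failed vblank evasion: the update samples the vblank counter and a timestamp before programming the double-buffered registers and compares them afterwards; a changed counter means the programming raced a vblank and the frame may tear. A simplified sketch of that bookkeeping (not the driver's exact code), assuming crtc and dev_priv in scope:

	u64 start = drm_crtc_vblank_count(&crtc->base);
	ktime_t t0 = ktime_get();

	/* ... write the double-buffered plane registers ... */

	if (drm_crtc_vblank_count(&crtc->base) != start)
		drm_err(&dev_priv->drm,
			"atomic update raced a vblank (%lld us elapsed)\n",
			ktime_us_delta(ktime_get(), t0));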
@@ -278,6 +284,16 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
bool rotated = drm_rotation_90_or_270(plane_state->hw.rotation);
/*
+ * FIXME hsub/vsub vs. block size is a mess. Pre-tgl CCS
+ * abuses hsub/vsub so we can't use them here. But as they
+ * are limited to 32bpp RGB formats we don't actually need
+ * to check anything.
+ */
+ if (fb->modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
+ fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS)
+ return 0;
+
+ /*
* Hardware doesn't handle subpixel coordinates.
* Adjust to (macro)pixel boundary, but be careful not to
* increase the source viewport size, because that could
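The early return added at the top of this function sidesteps the alignment checks for the gen9 CCS modifiers: as the FIXME notes, pre-TGL CCS framebuffers reuse the format's hsub/vsub fields to describe the compression block layout, so the subsampling math below would misfire. Since those modifiers are limited to 32bpp RGB formats, which have no subsampling constraints, skipping the checks is safe. A hypothetical predicate naming the condition:

	static bool is_gen9_ccs_modifier(u64 modifier)
	{
		return modifier == I915_FORMAT_MOD_Y_TILED_CCS ||
		       modifier == I915_FORMAT_MOD_Yf_TILED_CCS;
	}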
@@ -291,26 +307,26 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state)
drm_rect_init(src, src_x << 16, src_y << 16,
src_w << 16, src_h << 16);
- if (!fb->format->is_yuv)
- return 0;
-
- /* YUV specific checks */
- if (!rotated) {
+ if (fb->format->format == DRM_FORMAT_RGB565 && rotated) {
+ hsub = 2;
+ vsub = 2;
+ } else {
hsub = fb->format->hsub;
vsub = fb->format->vsub;
- } else {
- hsub = vsub = max(fb->format->hsub, fb->format->vsub);
}
+ if (rotated)
+ hsub = vsub = max(hsub, vsub);
+
if (src_x % hsub || src_w % hsub) {
- DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_x, src_w, hsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_x, src_w, hsub, yesno(rotated));
return -EINVAL;
}
if (src_y % vsub || src_h % vsub) {
- DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u for %sYUV planes\n",
- src_y, src_h, vsub, rotated ? "rotated " : "");
+ DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n",
+ src_y, src_h, vsub, yesno(rotated));
return -EINVAL;
}
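The alignment checks are reworked so they no longer apply only to YUV formats: chroma subsampling (hsub/vsub) still forces the source rectangle onto macro-pixel boundaries, 90/270 degree rotation makes both axes use the stricter of the two factors, and rotated RGB565 picks up an explicit 2x2 requirement as a special case. The resulting rule as a standalone sketch, using the kernel's max() macro:

	static int check_src_alignment(u32 src_x, u32 src_y, u32 src_w, u32 src_h,
				       u32 hsub, u32 vsub, bool rotated)
	{
		if (rotated)
			hsub = vsub = max(hsub, vsub);	/* axes swap under rotation */

		if (src_x % hsub || src_w % hsub)
			return -EINVAL;
		if (src_y % vsub || src_h % vsub)
			return -EINVAL;

		return 0;
	}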
@@ -349,9 +365,8 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
struct drm_i915_private *dev_priv = to_i915(plane_state->uapi.plane->dev);
- unsigned int pixel_rate = crtc_state->pixel_rate;
- unsigned int src_w, src_h, dst_w, dst_h;
unsigned int num, den;
+ unsigned int pixel_rate = intel_plane_pixel_rate(crtc_state, plane_state);
skl_plane_ratio(crtc_state, plane_state, &num, &den);
@@ -359,17 +374,7 @@ static int skl_plane_min_cdclk(const struct intel_crtc_state *crtc_state,
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
den *= 2;
- src_w = drm_rect_width(&plane_state->uapi.src) >> 16;
- src_h = drm_rect_height(&plane_state->uapi.src) >> 16;
- dst_w = drm_rect_width(&plane_state->uapi.dst);
- dst_h = drm_rect_height(&plane_state->uapi.dst);
-
- /* Downscaling limits the maximum pixel rate */
- dst_w = min(src_w, dst_w);
- dst_h = min(src_h, dst_h);
-
- return DIV64_U64_ROUND_UP(mul_u32_u32(pixel_rate * num, src_w * src_h),
- mul_u32_u32(den, dst_w * dst_h));
+ return DIV_ROUND_UP(pixel_rate * num, den);
}
static unsigned int
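skl_plane_min_cdclk() no longer open-codes the downscaling math: intel_plane_pixel_rate() (introduced elsewhere in this series) already scales the pipe pixel rate by the source/destination ratio, so the minimum cdclk reduces to DIV_ROUND_UP(pixel_rate * num, den). A worked example with hypothetical numbers: a 148500 kHz pipe with a plane downscaled 2x in each axis gives pixel_rate = 148500 * 4 = 594000 kHz; with num/den = 1/2 (gen10+ doubles den above) the plane needs cdclk >= DIV_ROUND_UP(594000 * 1, 2) = 297000 kHz.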
@@ -434,14 +439,16 @@ skl_program_scaler(struct intel_plane *plane,
uv_rgb_vphase = skl_scaler_calc_phase(1, vscale, false);
}
- I915_WRITE_FW(SKL_PS_CTRL(pipe, scaler_id),
- PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
- I915_WRITE_FW(SKL_PS_VPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
- I915_WRITE_FW(SKL_PS_HPHASE(pipe, scaler_id),
- PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
- I915_WRITE_FW(SKL_PS_WIN_POS(pipe, scaler_id), (crtc_x << 16) | crtc_y);
- I915_WRITE_FW(SKL_PS_WIN_SZ(pipe, scaler_id), (crtc_w << 16) | crtc_h);
+ intel_de_write_fw(dev_priv, SKL_PS_CTRL(pipe, scaler_id),
+ PS_SCALER_EN | PS_PLANE_SEL(plane->id) | scaler->mode);
+ intel_de_write_fw(dev_priv, SKL_PS_VPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_vphase) | PS_UV_RGB_PHASE(uv_rgb_vphase));
+ intel_de_write_fw(dev_priv, SKL_PS_HPHASE(pipe, scaler_id),
+ PS_Y_PHASE(y_hphase) | PS_UV_RGB_PHASE(uv_rgb_hphase));
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_POS(pipe, scaler_id),
+ (crtc_x << 16) | crtc_y);
+ intel_de_write_fw(dev_priv, SKL_PS_WIN_SZ(pipe, scaler_id),
+ (crtc_w << 16) | crtc_h);
}
/* Preoffset values for YUV to RGB Conversion */
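The _fw accessors used above are the "raw" variants: they skip the per-access lock and forcewake management of the normal accessors because the caller brackets the whole plane update with dev_priv->uncore.lock taken once. intel_de_write_fw() is the device-explicit replacement for I915_WRITE_FW(), which relied on an implicit dev_priv in scope. The surrounding pattern, sketched with pipe, plane_id, plane_ctl and surf_addr assumed in scope:

	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);

	/* raw writes: the caller already handled locking for the update */
	intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
	intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), surf_addr);

	spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);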
@@ -547,28 +554,37 @@ icl_program_input_csc(struct intel_plane *plane,
else
csc = input_csc_matrix_lr[plane_state->hw.color_encoding];
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0), ROFF(csc[0]) |
- GOFF(csc[1]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1), BOFF(csc[2]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2), ROFF(csc[3]) |
- GOFF(csc[4]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3), BOFF(csc[5]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4), ROFF(csc[6]) |
- GOFF(csc[7]));
- I915_WRITE_FW(PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5), BOFF(csc[8]));
-
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
- PREOFF_YUV_TO_RGB_HI);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 0),
+ ROFF(csc[0]) | GOFF(csc[1]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 1),
+ BOFF(csc[2]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 2),
+ ROFF(csc[3]) | GOFF(csc[4]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 3),
+ BOFF(csc[5]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 4),
+ ROFF(csc[6]) | GOFF(csc[7]));
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_COEFF(pipe, plane_id, 5),
+ BOFF(csc[8]));
+
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 0),
+ PREOFF_YUV_TO_RGB_HI);
if (plane_state->hw.color_range == DRM_COLOR_YCBCR_FULL_RANGE)
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1), 0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ 0);
else
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
- PREOFF_YUV_TO_RGB_ME);
- I915_WRITE_FW(PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
- PREOFF_YUV_TO_RGB_LO);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
- I915_WRITE_FW(PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 1),
+ PREOFF_YUV_TO_RGB_ME);
+ intel_de_write_fw(dev_priv, PLANE_INPUT_CSC_PREOFF(pipe, plane_id, 2),
+ PREOFF_YUV_TO_RGB_LO);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 0), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 1), 0x0);
+ intel_de_write_fw(dev_priv,
+ PLANE_INPUT_CSC_POSTOFF(pipe, plane_id, 2), 0x0);
}
static void
@@ -623,44 +639,49 @@ skl_program_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(PLANE_STRIDE(pipe, plane_id), stride);
- I915_WRITE_FW(PLANE_POS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(PLANE_SIZE(pipe, plane_id), (src_h << 16) | src_w);
+ intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride);
+ intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id),
+ (src_h << 16) | src_w);
if (INTEL_GEN(dev_priv) < 12)
aux_dist |= aux_stride;
- I915_WRITE_FW(PLANE_AUX_DIST(pipe, plane_id), aux_dist);
+ intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist);
if (icl_is_hdr_plane(dev_priv, plane_id))
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), plane_state->cus_ctl);
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id),
+ plane_state->cus_ctl);
if (INTEL_GEN(dev_priv) >= 10 || IS_GEMINILAKE(dev_priv))
- I915_WRITE_FW(PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl);
+ intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id),
+ plane_color_ctl);
if (fb->format->is_yuv && icl_is_hdr_plane(dev_priv, plane_id))
icl_program_input_csc(plane, crtc_state, plane_state);
skl_write_plane_wm(plane, crtc_state);
- I915_WRITE_FW(PLANE_KEYVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(PLANE_KEYMSK(pipe, plane_id), keymsk);
- I915_WRITE_FW(PLANE_KEYMAX(pipe, plane_id), keymax);
+ intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk);
+ intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax);
- I915_WRITE_FW(PLANE_OFFSET(pipe, plane_id), (y << 16) | x);
+ intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id),
+ (y << 16) | x);
if (INTEL_GEN(dev_priv) < 11)
- I915_WRITE_FW(PLANE_AUX_OFFSET(pipe, plane_id),
- (plane_state->color_plane[1].y << 16) |
- plane_state->color_plane[1].x);
+ intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id),
+ (plane_state->color_plane[1].y << 16) | plane_state->color_plane[1].x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), plane_ctl);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + surf_addr);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), plane_ctl);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + surf_addr);
if (plane_state->scaler_id >= 0)
skl_program_scaler(plane, crtc_state, plane_state);
@@ -693,12 +714,12 @@ skl_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
if (icl_is_hdr_plane(dev_priv, plane_id))
- I915_WRITE_FW(PLANE_CUS_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_CUS_CTL(pipe, plane_id), 0);
skl_write_plane_wm(plane, crtc_state);
- I915_WRITE_FW(PLANE_CTL(pipe, plane_id), 0);
- I915_WRITE_FW(PLANE_SURF(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_CTL(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, PLANE_SURF(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -718,7 +739,7 @@ skl_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
+ ret = intel_de_read(dev_priv, PLANE_CTL(plane->pipe, plane_id)) & PLANE_CTL_ENABLE;
*pipe = plane->pipe;
@@ -774,23 +795,36 @@ chv_update_csc(const struct intel_plane_state *plane_state)
if (!fb->format->is_yuv)
return;
- I915_WRITE_FW(SPCSCYGOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCCBOFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
- I915_WRITE_FW(SPCSCCROFF(plane_id), SPCSC_OOFF(0) | SPCSC_IOFF(0));
-
- I915_WRITE_FW(SPCSCC01(plane_id), SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
- I915_WRITE_FW(SPCSCC23(plane_id), SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
- I915_WRITE_FW(SPCSCC45(plane_id), SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
- I915_WRITE_FW(SPCSCC67(plane_id), SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
- I915_WRITE_FW(SPCSCC8(plane_id), SPCSC_C0(csc[8]));
-
- I915_WRITE_FW(SPCSCYGICLAMP(plane_id), SPCSC_IMAX(1023) | SPCSC_IMIN(0));
- I915_WRITE_FW(SPCSCCBICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
- I915_WRITE_FW(SPCSCCRICLAMP(plane_id), SPCSC_IMAX(512) | SPCSC_IMIN(-512));
-
- I915_WRITE_FW(SPCSCYGOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE_FW(SPCSCCBOCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
- I915_WRITE_FW(SPCSCCROCLAMP(plane_id), SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCYGOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+ intel_de_write_fw(dev_priv, SPCSCCROFF(plane_id),
+ SPCSC_OOFF(0) | SPCSC_IOFF(0));
+
+ intel_de_write_fw(dev_priv, SPCSCC01(plane_id),
+ SPCSC_C1(csc[1]) | SPCSC_C0(csc[0]));
+ intel_de_write_fw(dev_priv, SPCSCC23(plane_id),
+ SPCSC_C1(csc[3]) | SPCSC_C0(csc[2]));
+ intel_de_write_fw(dev_priv, SPCSCC45(plane_id),
+ SPCSC_C1(csc[5]) | SPCSC_C0(csc[4]));
+ intel_de_write_fw(dev_priv, SPCSCC67(plane_id),
+ SPCSC_C1(csc[7]) | SPCSC_C0(csc[6]));
+ intel_de_write_fw(dev_priv, SPCSCC8(plane_id), SPCSC_C0(csc[8]));
+
+ intel_de_write_fw(dev_priv, SPCSCYGICLAMP(plane_id),
+ SPCSC_IMAX(1023) | SPCSC_IMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+ intel_de_write_fw(dev_priv, SPCSCCRICLAMP(plane_id),
+ SPCSC_IMAX(512) | SPCSC_IMIN(-512));
+
+ intel_de_write_fw(dev_priv, SPCSCYGOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCBOCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
+ intel_de_write_fw(dev_priv, SPCSCCROCLAMP(plane_id),
+ SPCSC_OMAX(1023) | SPCSC_OMIN(0));
}
#define SIN_0 0
@@ -829,10 +863,10 @@ vlv_update_clrc(const struct intel_plane_state *plane_state)
}
/* FIXME these register are single buffered :( */
- I915_WRITE_FW(SPCLRC0(pipe, plane_id),
- SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
- I915_WRITE_FW(SPCLRC1(pipe, plane_id),
- SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
+ intel_de_write_fw(dev_priv, SPCLRC0(pipe, plane_id),
+ SP_CONTRAST(contrast) | SP_BRIGHTNESS(brightness));
+ intel_de_write_fw(dev_priv, SPCLRC1(pipe, plane_id),
+ SP_SH_SIN(sh_sin) | SP_SH_COS(sh_cos));
}
static void
@@ -1019,10 +1053,8 @@ static void vlv_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- I915_WRITE_FW(SPGAMC(pipe, plane_id, i - 1),
- gamma[i] << 16 |
- gamma[i] << 8 |
- gamma[i]);
+ intel_de_write_fw(dev_priv, SPGAMC(pipe, plane_id, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
static void
@@ -1055,32 +1087,37 @@ vlv_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPSTRIDE(pipe, plane_id),
- plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPSIZE(pipe, plane_id), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(SPCONSTALPHA(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
+ (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id),
+ (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0);
if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B)
chv_update_csc(plane_state);
if (key->flags) {
- I915_WRITE_FW(SPKEYMINVAL(pipe, plane_id), key->min_value);
- I915_WRITE_FW(SPKEYMSK(pipe, plane_id), key->channel_mask);
- I915_WRITE_FW(SPKEYMAXVAL(pipe, plane_id), key->max_value);
+ intel_de_write_fw(dev_priv, SPKEYMINVAL(pipe, plane_id),
+ key->min_value);
+ intel_de_write_fw(dev_priv, SPKEYMSK(pipe, plane_id),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPKEYMAXVAL(pipe, plane_id),
+ key->max_value);
}
- I915_WRITE_FW(SPLINOFF(pipe, plane_id), linear_offset);
- I915_WRITE_FW(SPTILEOFF(pipe, plane_id), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset);
+ intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(SPCNTR(pipe, plane_id), sprctl);
- I915_WRITE_FW(SPSURF(pipe, plane_id),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), sprctl);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
vlv_update_clrc(plane_state);
vlv_update_gamma(plane_state);
@@ -1099,8 +1136,8 @@ vlv_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPCNTR(pipe, plane_id), 0);
- I915_WRITE_FW(SPSURF(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPCNTR(pipe, plane_id), 0);
+ intel_de_write_fw(dev_priv, SPSURF(pipe, plane_id), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1120,7 +1157,7 @@ vlv_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
+ ret = intel_de_read(dev_priv, SPCNTR(plane->pipe, plane_id)) & SP_ENABLE;
*pipe = plane->pipe;
@@ -1424,19 +1461,17 @@ static void ivb_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
for (i = 0; i < 16; i++)
- I915_WRITE_FW(SPRGAMC(pipe, i),
- gamma[i] << 20 |
- gamma[i] << 10 |
- gamma[i]);
-
- I915_WRITE_FW(SPRGAMC16(pipe, 0), gamma[i]);
- I915_WRITE_FW(SPRGAMC16(pipe, 1), gamma[i]);
- I915_WRITE_FW(SPRGAMC16(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC16(pipe, 2), gamma[i]);
i++;
- I915_WRITE_FW(SPRGAMC17(pipe, 0), gamma[i]);
- I915_WRITE_FW(SPRGAMC17(pipe, 1), gamma[i]);
- I915_WRITE_FW(SPRGAMC17(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, SPRGAMC17(pipe, 2), gamma[i]);
i++;
}
@@ -1476,25 +1511,27 @@ ivb_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPRSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(SPRPOS(pipe), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, SPRSTRIDE(pipe),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, SPRSIZE(pipe), (crtc_h << 16) | crtc_w);
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), sprscale);
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale);
if (key->flags) {
- I915_WRITE_FW(SPRKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(SPRKEYMSK(pipe), key->channel_mask);
- I915_WRITE_FW(SPRKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(dev_priv, SPRKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, SPRKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, SPRKEYMAX(pipe), key->max_value);
}
/* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET
* register */
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- I915_WRITE_FW(SPROFFSET(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x);
} else {
- I915_WRITE_FW(SPRLINOFF(pipe), linear_offset);
- I915_WRITE_FW(SPRTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | x);
}
/*
@@ -1502,9 +1539,9 @@ ivb_update_plane(struct intel_plane *plane,
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(SPRCTL(pipe), sprctl);
- I915_WRITE_FW(SPRSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), sprctl);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + sprsurf_offset);
ivb_update_gamma(plane_state);
@@ -1521,11 +1558,11 @@ ivb_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(SPRCTL(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRCTL(pipe), 0);
/* Disable the scaler */
if (IS_IVYBRIDGE(dev_priv))
- I915_WRITE_FW(SPRSCALE(pipe), 0);
- I915_WRITE_FW(SPRSURF(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, SPRSURF(pipe), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1544,7 +1581,7 @@ ivb_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(SPRCTL(plane->pipe)) & SPRITE_ENABLE;
+ ret = intel_de_read(dev_priv, SPRCTL(plane->pipe)) & SPRITE_ENABLE;
*pipe = plane->pipe;
@@ -1710,10 +1747,8 @@ static void g4x_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
/* The two end points are implicit (0.0 and 1.0) */
for (i = 1; i < 8 - 1; i++)
- I915_WRITE_FW(DVSGAMC_G4X(pipe, i - 1),
- gamma[i] << 16 |
- gamma[i] << 8 |
- gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMC_G4X(pipe, i - 1),
+ gamma[i] << 16 | gamma[i] << 8 | gamma[i]);
}
static void ilk_sprite_linear_gamma(u16 gamma[17])
@@ -1741,14 +1776,12 @@ static void ilk_update_gamma(const struct intel_plane_state *plane_state)
/* FIXME these register are single buffered :( */
for (i = 0; i < 16; i++)
- I915_WRITE_FW(DVSGAMC_ILK(pipe, i),
- gamma[i] << 20 |
- gamma[i] << 10 |
- gamma[i]);
-
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
- I915_WRITE_FW(DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMC_ILK(pipe, i),
+ gamma[i] << 20 | gamma[i] << 10 | gamma[i]);
+
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 0), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 1), gamma[i]);
+ intel_de_write_fw(dev_priv, DVSGAMCMAX_ILK(pipe, 2), gamma[i]);
i++;
}
@@ -1788,28 +1821,30 @@ g4x_update_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DVSSTRIDE(pipe), plane_state->color_plane[0].stride);
- I915_WRITE_FW(DVSPOS(pipe), (crtc_y << 16) | crtc_x);
- I915_WRITE_FW(DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
- I915_WRITE_FW(DVSSCALE(pipe), dvsscale);
+ intel_de_write_fw(dev_priv, DVSSTRIDE(pipe),
+ plane_state->color_plane[0].stride);
+ intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x);
+ intel_de_write_fw(dev_priv, DVSSIZE(pipe), (crtc_h << 16) | crtc_w);
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale);
if (key->flags) {
- I915_WRITE_FW(DVSKEYVAL(pipe), key->min_value);
- I915_WRITE_FW(DVSKEYMSK(pipe), key->channel_mask);
- I915_WRITE_FW(DVSKEYMAX(pipe), key->max_value);
+ intel_de_write_fw(dev_priv, DVSKEYVAL(pipe), key->min_value);
+ intel_de_write_fw(dev_priv, DVSKEYMSK(pipe),
+ key->channel_mask);
+ intel_de_write_fw(dev_priv, DVSKEYMAX(pipe), key->max_value);
}
- I915_WRITE_FW(DVSLINOFF(pipe), linear_offset);
- I915_WRITE_FW(DVSTILEOFF(pipe), (y << 16) | x);
+ intel_de_write_fw(dev_priv, DVSLINOFF(pipe), linear_offset);
+ intel_de_write_fw(dev_priv, DVSTILEOFF(pipe), (y << 16) | x);
/*
* The control register self-arms if the plane was previously
* disabled. Try to make the plane enable atomic by writing
* the control register just before the surface register.
*/
- I915_WRITE_FW(DVSCNTR(pipe), dvscntr);
- I915_WRITE_FW(DVSSURF(pipe),
- intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), dvscntr);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe),
+ intel_plane_ggtt_offset(plane_state) + dvssurf_offset);
if (IS_G4X(dev_priv))
g4x_update_gamma(plane_state);
@@ -1829,10 +1864,10 @@ g4x_disable_plane(struct intel_plane *plane,
spin_lock_irqsave(&dev_priv->uncore.lock, irqflags);
- I915_WRITE_FW(DVSCNTR(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSCNTR(pipe), 0);
/* Disable the scaler */
- I915_WRITE_FW(DVSSCALE(pipe), 0);
- I915_WRITE_FW(DVSSURF(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSCALE(pipe), 0);
+ intel_de_write_fw(dev_priv, DVSSURF(pipe), 0);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
}
@@ -1851,7 +1886,7 @@ g4x_plane_get_hw_state(struct intel_plane *plane,
if (!wakeref)
return false;
- ret = I915_READ(DVSCNTR(plane->pipe)) & DVS_ENABLE;
+ ret = intel_de_read(dev_priv, DVSCNTR(plane->pipe)) & DVS_ENABLE;
*pipe = plane->pipe;
@@ -1999,7 +2034,8 @@ int chv_plane_check_rotation(const struct intel_plane_state *plane_state)
if (IS_CHERRYVIEW(dev_priv) &&
rotation & DRM_MODE_ROTATE_180 &&
rotation & DRM_MODE_REFLECT_X) {
- DRM_DEBUG_KMS("Cannot rotate and reflect at the same time\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Cannot rotate and reflect at the same time\n");
return -EINVAL;
}
@@ -2040,6 +2076,18 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state,
return 0;
}
+static bool intel_format_is_p01x(u32 format)
+{
+ switch (format) {
+ case DRM_FORMAT_P010:
+ case DRM_FORMAT_P012:
+ case DRM_FORMAT_P016:
+ return true;
+ default:
+ return false;
+ }
+}
+
static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state)
{
@@ -2054,21 +2102,24 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
if (rotation & ~(DRM_MODE_ROTATE_0 | DRM_MODE_ROTATE_180) &&
is_ccs_modifier(fb->modifier)) {
- DRM_DEBUG_KMS("RC support only with 0/180 degree rotation (%x)\n",
- rotation);
+ drm_dbg_kms(&dev_priv->drm,
+ "RC support only with 0/180 degree rotation (%x)\n",
+ rotation);
return -EINVAL;
}
if (rotation & DRM_MODE_REFLECT_X &&
fb->modifier == DRM_FORMAT_MOD_LINEAR) {
- DRM_DEBUG_KMS("horizontal flip is not supported with linear surface formats\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "horizontal flip is not supported with linear surface formats\n");
return -EINVAL;
}
if (drm_rotation_90_or_270(rotation)) {
if (fb->modifier != I915_FORMAT_MOD_Y_TILED &&
fb->modifier != I915_FORMAT_MOD_Yf_TILED) {
- DRM_DEBUG_KMS("Y/Yf tiling required for 90/270!\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling required for 90/270!\n");
return -EINVAL;
}
@@ -2091,9 +2142,10 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
case DRM_FORMAT_Y216:
case DRM_FORMAT_XVYU12_16161616:
case DRM_FORMAT_XVYU16161616:
- DRM_DEBUG_KMS("Unsupported pixel format %s for 90/270!\n",
- drm_get_format_name(fb->format->format,
- &format_name));
+ drm_dbg_kms(&dev_priv->drm,
+ "Unsupported pixel format %s for 90/270!\n",
+ drm_get_format_name(fb->format->format,
+ &format_name));
return -EINVAL;
default:
break;
@@ -2109,7 +2161,17 @@ static int skl_plane_check_fb(const struct intel_crtc_state *crtc_state,
fb->modifier == I915_FORMAT_MOD_Yf_TILED_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS ||
fb->modifier == I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS)) {
- DRM_DEBUG_KMS("Y/Yf tiling not supported in IF-ID mode\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Y/Yf tiling not supported in IF-ID mode\n");
+ return -EINVAL;
+ }
+
+ /* Wa_1606054188:tgl */
+ if (IS_TIGERLAKE(dev_priv) &&
+ plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE &&
+ intel_format_is_p01x(fb->format->format)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Source color keying not supported with P01x formats\n");
return -EINVAL;
}
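Two additions here: the intel_format_is_p01x() helper classifies the P010/P012/P016 fourccs (planar 4:2:0 YUV at 10/12/16 bits per component), and the Wa_1606054188 check uses it to reject source color keying on those formats on Tigerlake, where that combination is broken in hardware. Usage sketch with a hypothetical format value:

	u32 format = DRM_FORMAT_P010;	/* hypothetical */

	if (IS_TIGERLAKE(dev_priv) &&
	    (plane_state->ckey.flags & I915_SET_COLORKEY_SOURCE) &&
	    intel_format_is_p01x(format))
		return -EINVAL;	/* Wa_1606054188: no source keying on P01x */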
@@ -2136,10 +2198,11 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s
*/
if ((IS_GEMINILAKE(dev_priv) || IS_CANNONLAKE(dev_priv)) &&
(crtc_x + crtc_w < 4 || crtc_x > pipe_src_w - 4)) {
- DRM_DEBUG_KMS("requested plane X %s position %d invalid (valid range %d-%d)\n",
- crtc_x + crtc_w < 4 ? "end" : "start",
- crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
- 4, pipe_src_w - 4);
+ drm_dbg_kms(&dev_priv->drm,
+ "requested plane X %s position %d invalid (valid range %d-%d)\n",
+ crtc_x + crtc_w < 4 ? "end" : "start",
+ crtc_x + crtc_w < 4 ? crtc_x + crtc_w : crtc_x,
+ 4, pipe_src_w - 4);
return -ERANGE;
}
@@ -2968,7 +3031,6 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
struct intel_plane *plane;
enum drm_plane_type plane_type;
unsigned int supported_rotations;
- unsigned int possible_crtcs;
const u64 *modifiers;
const u32 *formats;
int num_formats;
@@ -3023,10 +3085,8 @@ skl_universal_plane_create(struct drm_i915_private *dev_priv,
else
plane_type = DRM_PLANE_TYPE_OVERLAY;
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
plane_type,
"plane %d%c", plane_id + 1,
@@ -3077,7 +3137,6 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
{
struct intel_plane *plane;
const struct drm_plane_funcs *plane_funcs;
- unsigned long possible_crtcs;
unsigned int supported_rotations;
const u64 *modifiers;
const u32 *formats;
@@ -3162,10 +3221,8 @@ intel_sprite_plane_create(struct drm_i915_private *dev_priv,
plane->id = PLANE_SPRITE0 + sprite;
plane->frontbuffer_bit = INTEL_FRONTBUFFER(pipe, plane->id);
- possible_crtcs = BIT(pipe);
-
ret = drm_universal_plane_init(&dev_priv->drm, &plane->base,
- possible_crtcs, plane_funcs,
+ 0, plane_funcs,
formats, num_formats, modifiers,
DRM_PLANE_TYPE_OVERLAY,
"sprite %c", sprite_name(pipe, sprite));
diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c
index 7773169b7331..9b850c11aa78 100644
--- a/drivers/gpu/drm/i915/display/intel_tc.c
+++ b/drivers/gpu/drm/i915/display/intel_tc.c
@@ -61,7 +61,7 @@ u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port)
lane_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
- WARN_ON(lane_mask == 0xffffffff);
+ drm_WARN_ON(&i915->drm, lane_mask == 0xffffffff);
lane_mask &= DP_LANE_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx);
return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -76,7 +76,7 @@ u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port)
pin_mask = intel_uncore_read(uncore,
PORT_TX_DFLEXPA1(dig_port->tc_phy_fia));
- WARN_ON(pin_mask == 0xffffffff);
+ drm_WARN_ON(&i915->drm, pin_mask == 0xffffffff);
return (pin_mask & DP_PIN_ASSIGNMENT_MASK(dig_port->tc_phy_fia_idx)) >>
DP_PIN_ASSIGNMENT_SHIFT(dig_port->tc_phy_fia_idx);
@@ -120,7 +120,8 @@ void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port,
struct intel_uncore *uncore = &i915->uncore;
u32 val;
- WARN_ON(lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
+ drm_WARN_ON(&i915->drm,
+ lane_reversal && dig_port->tc_mode != TC_PORT_LEGACY);
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPMLE1(dig_port->tc_phy_fia));
@@ -181,8 +182,9 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
PORT_TX_DFLEXDPSP(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, nothing connected\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, nothing connected\n",
+ dig_port->tc_port_name);
return mask;
}
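All of the TypeC FIA register reads in this file share one failure mode: when the PHY is in TCCOLD (powered down), readback returns all ones, so 0xffffffff is treated as a sentinel meaning "no usable data" rather than as a register value. The conversion keeps that check while tagging the messages with the device. The pattern, sketched with intel_uncore_read() and hypothetical uncore/fia/i915 variables in scope:

	u32 val = intel_uncore_read(uncore, PORT_TX_DFLEXDPSP(fia));

	if (val == 0xffffffff) {
		/* PHY in TCCOLD: register contents are junk */
		drm_dbg_kms(&i915->drm, "PHY in TCCOLD, nothing connected\n");
		return 0;	/* treat as nothing connected */
	}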
@@ -195,7 +197,7 @@ static u32 tc_port_live_status_mask(struct intel_digital_port *dig_port)
mask |= BIT(TC_PORT_LEGACY);
/* The sink can be connected only in a single mode. */
- if (!WARN_ON(hweight32(mask) > 1))
+ if (!drm_WARN_ON(&i915->drm, hweight32(mask) > 1))
tc_port_fixup_legacy_flag(dig_port, mask);
return mask;
@@ -210,8 +212,9 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port)
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPPMS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assuming not complete\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assuming not complete\n",
+ dig_port->tc_port_name);
return false;
}
@@ -228,8 +231,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
- dig_port->tc_port_name,
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, can't set safe-mode to %s\n",
+ dig_port->tc_port_name,
enableddisabled(enable));
return false;
@@ -243,8 +247,9 @@ static bool icl_tc_phy_set_safe_mode(struct intel_digital_port *dig_port,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia), val);
if (enable && wait_for(!icl_tc_phy_status_complete(dig_port), 10))
- DRM_DEBUG_KMS("Port %s: PHY complete clear timed out\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY complete clear timed out\n",
+ dig_port->tc_port_name);
return true;
}
@@ -258,8 +263,9 @@ static bool icl_tc_phy_is_in_safe_mode(struct intel_digital_port *dig_port)
val = intel_uncore_read(uncore,
PORT_TX_DFLEXDPCSSS(dig_port->tc_phy_fia));
if (val == 0xffffffff) {
- DRM_DEBUG_KMS("Port %s: PHY in TCCOLD, assume safe mode\n",
- dig_port->tc_port_name);
+ drm_dbg_kms(&i915->drm,
+ "Port %s: PHY in TCCOLD, assume safe mode\n",
+ dig_port->tc_port_name);
return true;
}
@@ -409,16 +415,17 @@ static void intel_tc_port_reset_mode(struct intel_digital_port *dig_port,
enum tc_port_mode old_tc_mode = dig_port->tc_mode;
intel_display_power_flush_work(i915);
- WARN_ON(intel_display_power_is_enabled(i915,
- intel_aux_power_domain(dig_port)));
+ drm_WARN_ON(&i915->drm,
+ intel_display_power_is_enabled(i915,
+ intel_aux_power_domain(dig_port)));
icl_tc_phy_disconnect(dig_port);
icl_tc_phy_connect(dig_port, required_lanes);
- DRM_DEBUG_KMS("Port %s: TC port mode reset (%s -> %s)\n",
- dig_port->tc_port_name,
- tc_port_mode_name(old_tc_mode),
- tc_port_mode_name(dig_port->tc_mode));
+ drm_dbg_kms(&i915->drm, "Port %s: TC port mode reset (%s -> %s)\n",
+ dig_port->tc_port_name,
+ tc_port_mode_name(old_tc_mode),
+ tc_port_mode_name(dig_port->tc_mode));
}
static void
@@ -503,7 +510,7 @@ static void __intel_tc_port_lock(struct intel_digital_port *dig_port,
intel_tc_port_needs_reset(dig_port))
intel_tc_port_reset_mode(dig_port, required_lanes);
- WARN_ON(dig_port->tc_lock_wakeref);
+ drm_WARN_ON(&i915->drm, dig_port->tc_lock_wakeref);
dig_port->tc_lock_wakeref = wakeref;
}
@@ -550,7 +557,7 @@ void intel_tc_port_init(struct intel_digital_port *dig_port, bool is_legacy)
enum port port = dig_port->base.port;
enum tc_port tc_port = intel_port_to_tc(i915, port);
- if (WARN_ON(tc_port == PORT_TC_NONE))
+ if (drm_WARN_ON(&i915->drm, tc_port == PORT_TC_NONE))
return;
snprintf(dig_port->tc_port_name, sizeof(dig_port->tc_port_name),
diff --git a/drivers/gpu/drm/i915/display/intel_tv.c b/drivers/gpu/drm/i915/display/intel_tv.c
index c75e0ceecee6..d2e3a3a323e9 100644
--- a/drivers/gpu/drm/i915/display/intel_tv.c
+++ b/drivers/gpu/drm/i915/display/intel_tv.c
@@ -33,7 +33,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "intel_connector.h"
@@ -907,7 +906,7 @@ static bool
intel_tv_get_hw_state(struct intel_encoder *encoder, enum pipe *pipe)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- u32 tmp = I915_READ(TV_CTL);
+ u32 tmp = intel_de_read(dev_priv, TV_CTL);
*pipe = (tmp & TV_ENC_PIPE_SEL_MASK) >> TV_ENC_PIPE_SEL_SHIFT;
@@ -926,7 +925,8 @@ intel_enable_tv(struct intel_encoder *encoder,
intel_wait_for_vblank(dev_priv,
to_intel_crtc(pipe_config->uapi.crtc)->pipe);
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) | TV_ENC_ENABLE);
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) | TV_ENC_ENABLE);
}
static void
@@ -937,7 +937,8 @@ intel_disable_tv(struct intel_encoder *encoder,
struct drm_device *dev = encoder->base.dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- I915_WRITE(TV_CTL, I915_READ(TV_CTL) & ~TV_ENC_ENABLE);
+ intel_de_write(dev_priv, TV_CTL,
+ intel_de_read(dev_priv, TV_CTL) & ~TV_ENC_ENABLE);
}
static const struct tv_mode *intel_tv_mode_find(const struct drm_connector_state *conn_state)
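The TV encoder enable/disable paths above become explicit read-modify-write sequences on TV_CTL through the device-aware accessors; only the TV_ENC_ENABLE bit changes, everything else is preserved. The RMW pattern, sketched with dev_priv and an enable flag assumed in scope:

	u32 tmp = intel_de_read(dev_priv, TV_CTL);

	intel_de_write(dev_priv, TV_CTL,
		       enable ? tmp | TV_ENC_ENABLE : tmp & ~TV_ENC_ENABLE);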
@@ -1095,11 +1096,11 @@ intel_tv_get_config(struct intel_encoder *encoder,
pipe_config->output_types |= BIT(INTEL_OUTPUT_TVOUT);
- tv_ctl = I915_READ(TV_CTL);
- hctl1 = I915_READ(TV_H_CTL_1);
- hctl3 = I915_READ(TV_H_CTL_3);
- vctl1 = I915_READ(TV_V_CTL_1);
- vctl2 = I915_READ(TV_V_CTL_2);
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
+ hctl1 = intel_de_read(dev_priv, TV_H_CTL_1);
+ hctl3 = intel_de_read(dev_priv, TV_H_CTL_3);
+ vctl1 = intel_de_read(dev_priv, TV_V_CTL_1);
+ vctl2 = intel_de_read(dev_priv, TV_V_CTL_2);
tv_mode.htotal = (hctl1 & TV_HTOTAL_MASK) >> TV_HTOTAL_SHIFT;
tv_mode.hsync_end = (hctl1 & TV_HSYNC_END_MASK) >> TV_HSYNC_END_SHIFT;
@@ -1134,17 +1135,17 @@ intel_tv_get_config(struct intel_encoder *encoder,
break;
}
- tmp = I915_READ(TV_WIN_POS);
+ tmp = intel_de_read(dev_priv, TV_WIN_POS);
xpos = tmp >> 16;
ypos = tmp & 0xffff;
- tmp = I915_READ(TV_WIN_SIZE);
+ tmp = intel_de_read(dev_priv, TV_WIN_SIZE);
xsize = tmp >> 16;
ysize = tmp & 0xffff;
intel_tv_mode_to_mode(&mode, &tv_mode);
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(&mode);
intel_tv_scale_mode_horiz(&mode, hdisplay,
@@ -1200,7 +1201,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
- DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
+ drm_dbg_kms(&dev_priv->drm, "forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
pipe_config->port_clock = tv_mode->clock;
@@ -1215,7 +1216,8 @@ intel_tv_compute_config(struct intel_encoder *encoder,
extra = adjusted_mode->crtc_vdisplay - vdisplay;
if (extra < 0) {
- DRM_DEBUG_KMS("No vertical scaling for >1024 pixel wide modes\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "No vertical scaling for >1024 pixel wide modes\n");
return -EINVAL;
}
@@ -1248,7 +1250,7 @@ intel_tv_compute_config(struct intel_encoder *encoder,
tv_conn_state->bypass_vfilter = false;
}
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(adjusted_mode);
/*
@@ -1380,16 +1382,16 @@ set_tv_mode_timings(struct drm_i915_private *dev_priv,
vctl7 = (tv_mode->vburst_start_f4 << TV_VBURST_START_F4_SHIFT) |
(tv_mode->vburst_end_f4 << TV_VBURST_END_F4_SHIFT);
- I915_WRITE(TV_H_CTL_1, hctl1);
- I915_WRITE(TV_H_CTL_2, hctl2);
- I915_WRITE(TV_H_CTL_3, hctl3);
- I915_WRITE(TV_V_CTL_1, vctl1);
- I915_WRITE(TV_V_CTL_2, vctl2);
- I915_WRITE(TV_V_CTL_3, vctl3);
- I915_WRITE(TV_V_CTL_4, vctl4);
- I915_WRITE(TV_V_CTL_5, vctl5);
- I915_WRITE(TV_V_CTL_6, vctl6);
- I915_WRITE(TV_V_CTL_7, vctl7);
+ intel_de_write(dev_priv, TV_H_CTL_1, hctl1);
+ intel_de_write(dev_priv, TV_H_CTL_2, hctl2);
+ intel_de_write(dev_priv, TV_H_CTL_3, hctl3);
+ intel_de_write(dev_priv, TV_V_CTL_1, vctl1);
+ intel_de_write(dev_priv, TV_V_CTL_2, vctl2);
+ intel_de_write(dev_priv, TV_V_CTL_3, vctl3);
+ intel_de_write(dev_priv, TV_V_CTL_4, vctl4);
+ intel_de_write(dev_priv, TV_V_CTL_5, vctl5);
+ intel_de_write(dev_priv, TV_V_CTL_6, vctl6);
+ intel_de_write(dev_priv, TV_V_CTL_7, vctl7);
}
static void set_color_conversion(struct drm_i915_private *dev_priv,
@@ -1398,18 +1400,18 @@ static void set_color_conversion(struct drm_i915_private *dev_priv,
if (!color_conversion)
return;
- I915_WRITE(TV_CSC_Y, (color_conversion->ry << 16) |
- color_conversion->gy);
- I915_WRITE(TV_CSC_Y2, (color_conversion->by << 16) |
- color_conversion->ay);
- I915_WRITE(TV_CSC_U, (color_conversion->ru << 16) |
- color_conversion->gu);
- I915_WRITE(TV_CSC_U2, (color_conversion->bu << 16) |
- color_conversion->au);
- I915_WRITE(TV_CSC_V, (color_conversion->rv << 16) |
- color_conversion->gv);
- I915_WRITE(TV_CSC_V2, (color_conversion->bv << 16) |
- color_conversion->av);
+ intel_de_write(dev_priv, TV_CSC_Y,
+ (color_conversion->ry << 16) | color_conversion->gy);
+ intel_de_write(dev_priv, TV_CSC_Y2,
+ (color_conversion->by << 16) | color_conversion->ay);
+ intel_de_write(dev_priv, TV_CSC_U,
+ (color_conversion->ru << 16) | color_conversion->gu);
+ intel_de_write(dev_priv, TV_CSC_U2,
+ (color_conversion->bu << 16) | color_conversion->au);
+ intel_de_write(dev_priv, TV_CSC_V,
+ (color_conversion->rv << 16) | color_conversion->gv);
+ intel_de_write(dev_priv, TV_CSC_V2,
+ (color_conversion->bv << 16) | color_conversion->av);
}
static void intel_tv_pre_enable(struct intel_encoder *encoder,
@@ -1434,7 +1436,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
if (!tv_mode)
return; /* can't happen (mode_prepare prevents this) */
- tv_ctl = I915_READ(TV_CTL);
+ tv_ctl = intel_de_read(dev_priv, TV_CTL);
tv_ctl &= TV_CTL_SAVE;
switch (intel_tv->type) {
@@ -1511,21 +1513,20 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
set_tv_mode_timings(dev_priv, tv_mode, burst_ena);
- I915_WRITE(TV_SC_CTL_1, scctl1);
- I915_WRITE(TV_SC_CTL_2, scctl2);
- I915_WRITE(TV_SC_CTL_3, scctl3);
+ intel_de_write(dev_priv, TV_SC_CTL_1, scctl1);
+ intel_de_write(dev_priv, TV_SC_CTL_2, scctl2);
+ intel_de_write(dev_priv, TV_SC_CTL_3, scctl3);
set_color_conversion(dev_priv, color_conversion);
if (INTEL_GEN(dev_priv) >= 4)
- I915_WRITE(TV_CLR_KNOBS, 0x00404000);
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00404000);
else
- I915_WRITE(TV_CLR_KNOBS, 0x00606000);
+ intel_de_write(dev_priv, TV_CLR_KNOBS, 0x00606000);
if (video_levels)
- I915_WRITE(TV_CLR_LEVEL,
- ((video_levels->black << TV_BLACK_LEVEL_SHIFT) |
- (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
+ intel_de_write(dev_priv, TV_CLR_LEVEL,
+ ((video_levels->black << TV_BLACK_LEVEL_SHIFT) | (video_levels->blank << TV_BLANK_LEVEL_SHIFT)));
assert_pipe_disabled(dev_priv, pipe_config->cpu_transcoder);
@@ -1533,7 +1534,7 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
tv_filter_ctl = TV_AUTO_SCALE;
if (tv_conn_state->bypass_vfilter)
tv_filter_ctl |= TV_V_FILTER_BYPASS;
- I915_WRITE(TV_FILTER_CTL_1, tv_filter_ctl);
+ intel_de_write(dev_priv, TV_FILTER_CTL_1, tv_filter_ctl);
xsize = tv_mode->hblank_start - tv_mode->hblank_end;
ysize = intel_tv_mode_vdisplay(tv_mode);
@@ -1544,20 +1545,25 @@ static void intel_tv_pre_enable(struct intel_encoder *encoder,
conn_state->tv.margins.right);
ysize -= (tv_conn_state->margins.top +
tv_conn_state->margins.bottom);
- I915_WRITE(TV_WIN_POS, (xpos<<16)|ypos);
- I915_WRITE(TV_WIN_SIZE, (xsize<<16)|ysize);
+ intel_de_write(dev_priv, TV_WIN_POS, (xpos << 16) | ypos);
+ intel_de_write(dev_priv, TV_WIN_SIZE, (xsize << 16) | ysize);
j = 0;
for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_LUMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_H_LUMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 60; i++)
- I915_WRITE(TV_H_CHROMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_H_CHROMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_LUMA(i), tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_V_LUMA(i),
+ tv_mode->filter_table[j++]);
for (i = 0; i < 43; i++)
- I915_WRITE(TV_V_CHROMA(i), tv_mode->filter_table[j++]);
- I915_WRITE(TV_DAC, I915_READ(TV_DAC) & TV_DAC_SAVE);
- I915_WRITE(TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_V_CHROMA(i),
+ tv_mode->filter_table[j++]);
+ intel_de_write(dev_priv, TV_DAC,
+ intel_de_read(dev_priv, TV_DAC) & TV_DAC_SAVE);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
}
static int
@@ -1581,8 +1587,8 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
spin_unlock_irq(&dev_priv->irq_lock);
}
- save_tv_dac = tv_dac = I915_READ(TV_DAC);
- save_tv_ctl = tv_ctl = I915_READ(TV_CTL);
+ save_tv_dac = tv_dac = intel_de_read(dev_priv, TV_DAC);
+ save_tv_ctl = tv_ctl = intel_de_read(dev_priv, TV_CTL);
/* Poll for TV detection */
tv_ctl &= ~(TV_ENC_ENABLE | TV_ENC_PIPE_SEL_MASK | TV_TEST_MODE_MASK);
@@ -1608,15 +1614,15 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
tv_dac &= ~(TVDAC_STATE_CHG_EN | TVDAC_A_SENSE_CTL |
TVDAC_B_SENSE_CTL | TVDAC_C_SENSE_CTL);
- I915_WRITE(TV_CTL, tv_ctl);
- I915_WRITE(TV_DAC, tv_dac);
- POSTING_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_CTL, tv_ctl);
+ intel_de_write(dev_priv, TV_DAC, tv_dac);
+ intel_de_posting_read(dev_priv, TV_DAC);
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
type = -1;
- tv_dac = I915_READ(TV_DAC);
- DRM_DEBUG_KMS("TV detected: %x, %x\n", tv_ctl, tv_dac);
+ tv_dac = intel_de_read(dev_priv, TV_DAC);
+ drm_dbg_kms(&dev_priv->drm, "TV detected: %x, %x\n", tv_ctl, tv_dac);
/*
* A B C
* 0 1 1 Composite
@@ -1624,22 +1630,25 @@ intel_tv_detect_type(struct intel_tv *intel_tv,
* 0 0 0 Component
*/
if ((tv_dac & TVDAC_SENSE_MASK) == (TVDAC_B_SENSE | TVDAC_C_SENSE)) {
- DRM_DEBUG_KMS("Detected Composite TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Composite TV connection\n");
type = DRM_MODE_CONNECTOR_Composite;
} else if ((tv_dac & (TVDAC_A_SENSE|TVDAC_B_SENSE)) == TVDAC_A_SENSE) {
- DRM_DEBUG_KMS("Detected S-Video TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected S-Video TV connection\n");
type = DRM_MODE_CONNECTOR_SVIDEO;
} else if ((tv_dac & TVDAC_SENSE_MASK) == 0) {
- DRM_DEBUG_KMS("Detected Component TV connection\n");
+ drm_dbg_kms(&dev_priv->drm,
+ "Detected Component TV connection\n");
type = DRM_MODE_CONNECTOR_Component;
} else {
- DRM_DEBUG_KMS("Unrecognised TV connection\n");
+ drm_dbg_kms(&dev_priv->drm, "Unrecognised TV connection\n");
type = -1;
}
- I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- I915_WRITE(TV_CTL, save_tv_ctl);
- POSTING_READ(TV_CTL);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ intel_de_write(dev_priv, TV_CTL, save_tv_ctl);
+ intel_de_posting_read(dev_priv, TV_CTL);
/* For unknown reasons the hw barfs if we don't do this vblank wait. */
intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -1794,7 +1803,7 @@ intel_tv_get_modes(struct drm_connector *connector)
*/
intel_tv_mode_to_mode(mode, tv_mode);
if (count == 0) {
- DRM_DEBUG_KMS("TV mode:\n");
+ drm_dbg_kms(&dev_priv->drm, "TV mode:\n");
drm_mode_debug_printmodeline(mode);
}
intel_tv_scale_mode_horiz(mode, input->w, 0, 0);
@@ -1870,11 +1879,11 @@ intel_tv_init(struct drm_i915_private *dev_priv)
int i, initial_mode = 0;
struct drm_connector_state *state;
- if ((I915_READ(TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
+ if ((intel_de_read(dev_priv, TV_CTL) & TV_FUSE_STATE_MASK) == TV_FUSE_STATE_DISABLED)
return;
if (!intel_bios_is_tv_present(dev_priv)) {
- DRM_DEBUG_KMS("Integrated TV is not present.\n");
+ drm_dbg_kms(&dev_priv->drm, "Integrated TV is not present.\n");
return;
}
@@ -1882,15 +1891,15 @@ intel_tv_init(struct drm_i915_private *dev_priv)
* Sanity check the TV output by checking to see if the
* DAC register holds a value
*/
- save_tv_dac = I915_READ(TV_DAC);
+ save_tv_dac = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
- tv_dac_on = I915_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac | TVDAC_STATE_CHG_EN);
+ tv_dac_on = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
- tv_dac_off = I915_READ(TV_DAC);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac & ~TVDAC_STATE_CHG_EN);
+ tv_dac_off = intel_de_read(dev_priv, TV_DAC);
- I915_WRITE(TV_DAC, save_tv_dac);
+ intel_de_write(dev_priv, TV_DAC, save_tv_dac);
/*
* If the register does not hold the state change enable
diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
index 4d0c23b29248..05c7cbe32eb4 100644
--- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h
+++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h
@@ -111,7 +111,7 @@ enum bdb_block_id {
BDB_LVDS_LFP_DATA_PTRS = 41,
BDB_LVDS_LFP_DATA = 42,
BDB_LVDS_BACKLIGHT = 43,
- BDB_LVDS_POWER = 44,
+ BDB_LFP_POWER = 44,
BDB_MIPI_CONFIG = 52,
BDB_MIPI_SEQUENCE = 53,
BDB_COMPRESSION_PARAMETERS = 56,
diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c
index 9e6aaa302e40..95ad87d4ccb3 100644
--- a/drivers/gpu/drm/i915/display/intel_vdsc.c
+++ b/drivers/gpu/drm/i915/display/intel_vdsc.c
@@ -6,8 +6,6 @@
* Manasi Navare <[email protected]>
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "intel_display_types.h"
#include "intel_dsi.h"
@@ -374,7 +372,7 @@ static bool is_pipe_dsc(const struct intel_crtc_state *crtc_state)
return false;
/* There's no pipe A DSC engine on ICL */
- WARN_ON(crtc->pipe == PIPE_A);
+ drm_WARN_ON(&i915->drm, crtc->pipe == PIPE_A);
return true;
}
@@ -518,119 +516,149 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
pps_val |= DSC_422_ENABLE;
if (vdsc_cfg->vbr_enable)
pps_val |= DSC_VBR_ENABLE;
- DRM_INFO("PPS0 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS0 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_0, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_0,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_0, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_0,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_0(pipe),
+ pps_val);
}
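Each DSC picture-parameter-set word is written the same four-way: to DSCA (and DSCC when the stream is split across two VDSC engines) on the non-pipe path, or to the per-pipe ICL_DSC0/ICL_DSC1 registers otherwise — a pattern repeated verbatim for every PPS word below. A hypothetical helper that would factor the repetition (sketch only; the DSCA_PPS()/DSCC_PPS()/ICL_DSC*_PPS() indexing macros are assumptions, not part of this patch):

	static void dsc_write_pps_word(struct drm_i915_private *dev_priv,
				       const struct intel_crtc_state *crtc_state,
				       enum pipe pipe, int word, u32 pps_val)
	{
		if (!is_pipe_dsc(crtc_state)) {
			intel_de_write(dev_priv, DSCA_PPS(word), pps_val);
			if (crtc_state->dsc.dsc_split)
				intel_de_write(dev_priv, DSCC_PPS(word), pps_val);
		} else {
			intel_de_write(dev_priv, ICL_DSC0_PPS(pipe, word), pps_val);
			if (crtc_state->dsc.dsc_split)
				intel_de_write(dev_priv, ICL_DSC1_PPS(pipe, word), pps_val);
		}
	}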
/* Populate PICTURE_PARAMETER_SET_1 registers */
pps_val = 0;
pps_val |= DSC_BPP(vdsc_cfg->bits_per_pixel);
- DRM_INFO("PPS1 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS1 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_1, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_1,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_1, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_1,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_1(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_2 registers */
pps_val = 0;
pps_val |= DSC_PIC_HEIGHT(vdsc_cfg->pic_height) |
DSC_PIC_WIDTH(vdsc_cfg->pic_width / num_vdsc_instances);
- DRM_INFO("PPS2 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS2 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_2, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_2,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_2, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_2,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_2(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_3 registers */
pps_val = 0;
pps_val |= DSC_SLICE_HEIGHT(vdsc_cfg->slice_height) |
DSC_SLICE_WIDTH(vdsc_cfg->slice_width);
- DRM_INFO("PPS3 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS3 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_3, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_3,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_3, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_3,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_3(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_4 registers */
pps_val = 0;
pps_val |= DSC_INITIAL_XMIT_DELAY(vdsc_cfg->initial_xmit_delay) |
DSC_INITIAL_DEC_DELAY(vdsc_cfg->initial_dec_delay);
- DRM_INFO("PPS4 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS4 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_4, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_4,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_4, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_4,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_4(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_5 registers */
pps_val = 0;
pps_val |= DSC_SCALE_INC_INT(vdsc_cfg->scale_increment_interval) |
DSC_SCALE_DEC_INT(vdsc_cfg->scale_decrement_interval);
- DRM_INFO("PPS5 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS5 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_5, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_5,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_5, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_5,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_5(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_6 registers */
@@ -639,80 +667,100 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
DSC_FIRST_LINE_BPG_OFFSET(vdsc_cfg->first_line_bpg_offset) |
DSC_FLATNESS_MIN_QP(vdsc_cfg->flatness_min_qp) |
DSC_FLATNESS_MAX_QP(vdsc_cfg->flatness_max_qp);
- DRM_INFO("PPS6 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS6 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_6, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_6,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_6, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_6,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_6(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_7 registers */
pps_val = 0;
pps_val |= DSC_SLICE_BPG_OFFSET(vdsc_cfg->slice_bpg_offset) |
DSC_NFL_BPG_OFFSET(vdsc_cfg->nfl_bpg_offset);
- DRM_INFO("PPS7 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS7 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_7, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_7,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_7, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_7,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_7(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_8 registers */
pps_val = 0;
pps_val |= DSC_FINAL_OFFSET(vdsc_cfg->final_offset) |
DSC_INITIAL_OFFSET(vdsc_cfg->initial_offset);
- DRM_INFO("PPS8 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS8 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_8, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_8,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_8, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_8,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_8(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_9 registers */
pps_val = 0;
pps_val |= DSC_RC_MODEL_SIZE(DSC_RC_MODEL_SIZE_CONST) |
DSC_RC_EDGE_FACTOR(DSC_RC_EDGE_FACTOR_CONST);
- DRM_INFO("PPS9 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS9 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_9, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_9,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_9, pps_val);
+ intel_de_write(dev_priv, DSCC_PICTURE_PARAMETER_SET_9,
+ pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_9(pipe),
+ pps_val);
}
/* Populate PICTURE_PARAMETER_SET_10 registers */
@@ -721,20 +769,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
DSC_RC_QUANT_INC_LIMIT1(vdsc_cfg->rc_quant_incr_limit1) |
DSC_RC_TARGET_OFF_HIGH(DSC_RC_TGT_OFFSET_HI_CONST) |
DSC_RC_TARGET_OFF_LOW(DSC_RC_TGT_OFFSET_LO_CONST);
- DRM_INFO("PPS10 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS10 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_10, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_10,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_10, pps_val);
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_10, pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_10(pipe),
+ pps_val);
}
/* Populate Picture parameter set 16 */
@@ -744,20 +797,25 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
vdsc_cfg->slice_width) |
DSC_SLICE_ROW_PER_FRAME(vdsc_cfg->pic_height /
vdsc_cfg->slice_height);
- DRM_INFO("PPS16 = 0x%08x\n", pps_val);
+ drm_info(&dev_priv->drm, "PPS16 = 0x%08x\n", pps_val);
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_PICTURE_PARAMETER_SET_16, pps_val);
+ intel_de_write(dev_priv, DSCA_PICTURE_PARAMETER_SET_16,
+ pps_val);
/*
* If 2 VDSC instances are needed, configure PPS for second
* VDSC
*/
if (crtc_state->dsc.dsc_split)
- I915_WRITE(DSCC_PICTURE_PARAMETER_SET_16, pps_val);
+ intel_de_write(dev_priv,
+ DSCC_PICTURE_PARAMETER_SET_16, pps_val);
} else {
- I915_WRITE(ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe), pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
if (crtc_state->dsc.dsc_split)
- I915_WRITE(ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
- pps_val);
+ intel_de_write(dev_priv,
+ ICL_DSC1_PICTURE_PARAMETER_SET_16(pipe),
+ pps_val);
}
/* Populate the RC_BUF_THRESH registers */
@@ -766,42 +824,50 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
rc_buf_thresh_dword[i / 4] |=
(u32)(vdsc_cfg->rc_buf_thresh[i] <<
BITS_PER_BYTE * (i % 4));
- DRM_INFO(" RC_BUF_THRESH%d = 0x%08x\n", i,
+ drm_info(&dev_priv->drm, " RC_BUF_THRESH%d = 0x%08x\n", i,
rc_buf_thresh_dword[i / 4]);
}
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_RC_BUF_THRESH_0, rc_buf_thresh_dword[0]);
- I915_WRITE(DSCA_RC_BUF_THRESH_0_UDW, rc_buf_thresh_dword[1]);
- I915_WRITE(DSCA_RC_BUF_THRESH_1, rc_buf_thresh_dword[2]);
- I915_WRITE(DSCA_RC_BUF_THRESH_1_UDW, rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(DSCC_RC_BUF_THRESH_0,
- rc_buf_thresh_dword[0]);
- I915_WRITE(DSCC_RC_BUF_THRESH_0_UDW,
- rc_buf_thresh_dword[1]);
- I915_WRITE(DSCC_RC_BUF_THRESH_1,
- rc_buf_thresh_dword[2]);
- I915_WRITE(DSCC_RC_BUF_THRESH_1_UDW,
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0,
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_0_UDW,
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1,
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, DSCC_RC_BUF_THRESH_1_UDW,
+ rc_buf_thresh_dword[3]);
}
} else {
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0(pipe),
- rc_buf_thresh_dword[0]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
- rc_buf_thresh_dword[1]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1(pipe),
- rc_buf_thresh_dword[2]);
- I915_WRITE(ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0(pipe),
- rc_buf_thresh_dword[0]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
- rc_buf_thresh_dword[1]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1(pipe),
- rc_buf_thresh_dword[2]);
- I915_WRITE(ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
- rc_buf_thresh_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0(pipe),
+ rc_buf_thresh_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_0_UDW(pipe),
+ rc_buf_thresh_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1(pipe),
+ rc_buf_thresh_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_BUF_THRESH_1_UDW(pipe),
+ rc_buf_thresh_dword[3]);
}
}
@@ -815,78 +881,94 @@ static void intel_dsc_pps_configure(struct intel_encoder *encoder,
RC_MAX_QP_SHIFT) |
(vdsc_cfg->rc_range_params[i].range_min_qp <<
RC_MIN_QP_SHIFT)) << 16 * (i % 2));
- DRM_INFO(" RC_RANGE_PARAM_%d = 0x%08x\n", i,
+ drm_info(&dev_priv->drm, " RC_RANGE_PARAM_%d = 0x%08x\n", i,
rc_range_params_dword[i / 2]);
}
if (!is_pipe_dsc(crtc_state)) {
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0,
- rc_range_params_dword[0]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_0_UDW,
- rc_range_params_dword[1]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1,
- rc_range_params_dword[2]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_1_UDW,
- rc_range_params_dword[3]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2,
- rc_range_params_dword[4]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_2_UDW,
- rc_range_params_dword[5]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3,
- rc_range_params_dword[6]);
- I915_WRITE(DSCA_RC_RANGE_PARAMETERS_3_UDW,
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv, DSCA_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0,
- rc_range_params_dword[0]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_0_UDW,
- rc_range_params_dword[1]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1,
- rc_range_params_dword[2]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_1_UDW,
- rc_range_params_dword[3]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2,
- rc_range_params_dword[4]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_2_UDW,
- rc_range_params_dword[5]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3,
- rc_range_params_dword[6]);
- I915_WRITE(DSCC_RC_RANGE_PARAMETERS_3_UDW,
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_0,
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_0_UDW,
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_1,
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_1_UDW,
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_2,
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_2_UDW,
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, DSCC_RC_RANGE_PARAMETERS_3,
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ DSCC_RC_RANGE_PARAMETERS_3_UDW,
+ rc_range_params_dword[7]);
}
} else {
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
- rc_range_params_dword[0]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
- rc_range_params_dword[1]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
- rc_range_params_dword[2]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
- rc_range_params_dword[3]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
- rc_range_params_dword[4]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
- rc_range_params_dword[5]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
- rc_range_params_dword[6]);
- I915_WRITE(ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv, ICL_DSC0_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC0_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
if (crtc_state->dsc.dsc_split) {
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
- rc_range_params_dword[0]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
- rc_range_params_dword[1]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
- rc_range_params_dword[2]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
- rc_range_params_dword[3]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
- rc_range_params_dword[4]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
- rc_range_params_dword[5]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
- rc_range_params_dword[6]);
- I915_WRITE(ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
- rc_range_params_dword[7]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0(pipe),
+ rc_range_params_dword[0]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_0_UDW(pipe),
+ rc_range_params_dword[1]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1(pipe),
+ rc_range_params_dword[2]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_1_UDW(pipe),
+ rc_range_params_dword[3]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2(pipe),
+ rc_range_params_dword[4]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_2_UDW(pipe),
+ rc_range_params_dword[5]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3(pipe),
+ rc_range_params_dword[6]);
+ intel_de_write(dev_priv,
+ ICL_DSC1_RC_RANGE_PARAMETERS_3_UDW(pipe),
+ rc_range_params_dword[7]);
}
}
}
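Every PPS hunk above repeats the same dispatch: the first VDSC engine (DSCA, or ICL_DSC0 for a pipe DSC) is always programmed, and the second (DSCC/ICL_DSC1) only when the stream is split across two instances. A hypothetical helper of the following shape would collapse that repetition; intel_dsc_write_pps and its parameter names are illustrative only, not part of this patch:

	/* Hypothetical sketch: one PPS value, fanned out to the second
	 * VDSC engine only when the pipe runs in split mode.
	 */
	static void intel_dsc_write_pps(struct drm_i915_private *dev_priv,
					const struct intel_crtc_state *crtc_state,
					i915_reg_t first_reg, i915_reg_t second_reg,
					u32 pps_val)
	{
		intel_de_write(dev_priv, first_reg, pps_val);
		if (crtc_state->dsc.dsc_split)
			intel_de_write(dev_priv, second_reg, pps_val);
	}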
@@ -912,11 +994,11 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
return;
if (!is_pipe_dsc(crtc_state)) {
- dss_ctl1 = I915_READ(DSS_CTL1);
- dss_ctl2 = I915_READ(DSS_CTL2);
+ dss_ctl1 = intel_de_read(dev_priv, DSS_CTL1);
+ dss_ctl2 = intel_de_read(dev_priv, DSS_CTL2);
} else {
- dss_ctl1 = I915_READ(ICL_PIPE_DSS_CTL1(pipe));
- dss_ctl2 = I915_READ(ICL_PIPE_DSS_CTL2(pipe));
+ dss_ctl1 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL1(pipe));
+ dss_ctl2 = intel_de_read(dev_priv, ICL_PIPE_DSS_CTL2(pipe));
}
crtc_state->dsc.compression_enable = dss_ctl2 & LEFT_BRANCH_VDSC_ENABLE;
@@ -930,9 +1012,10 @@ void intel_dsc_get_config(struct intel_encoder *encoder,
/* PPS1 */
if (!is_pipe_dsc(crtc_state))
- val = I915_READ(DSCA_PICTURE_PARAMETER_SET_1);
+ val = intel_de_read(dev_priv, DSCA_PICTURE_PARAMETER_SET_1);
else
- val = I915_READ(ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
+ val = intel_de_read(dev_priv,
+ ICL_DSC0_PICTURE_PARAMETER_SET_1(pipe));
vdsc_cfg->bits_per_pixel = val;
crtc_state->dsc.compressed_bpp = vdsc_cfg->bits_per_pixel >> 4;
out:
@@ -1013,8 +1096,8 @@ void intel_dsc_enable(struct intel_encoder *encoder,
dss_ctl2_val |= RIGHT_BRANCH_VDSC_ENABLE;
dss_ctl1_val |= JOINER_ENABLE;
}
- I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
- I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
}
void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
@@ -1035,17 +1118,17 @@ void intel_dsc_disable(const struct intel_crtc_state *old_crtc_state)
dss_ctl1_reg = ICL_PIPE_DSS_CTL1(pipe);
dss_ctl2_reg = ICL_PIPE_DSS_CTL2(pipe);
}
- dss_ctl1_val = I915_READ(dss_ctl1_reg);
+ dss_ctl1_val = intel_de_read(dev_priv, dss_ctl1_reg);
if (dss_ctl1_val & JOINER_ENABLE)
dss_ctl1_val &= ~JOINER_ENABLE;
- I915_WRITE(dss_ctl1_reg, dss_ctl1_val);
+ intel_de_write(dev_priv, dss_ctl1_reg, dss_ctl1_val);
- dss_ctl2_val = I915_READ(dss_ctl2_reg);
+ dss_ctl2_val = intel_de_read(dev_priv, dss_ctl2_reg);
if (dss_ctl2_val & LEFT_BRANCH_VDSC_ENABLE ||
dss_ctl2_val & RIGHT_BRANCH_VDSC_ENABLE)
dss_ctl2_val &= ~(LEFT_BRANCH_VDSC_ENABLE |
RIGHT_BRANCH_VDSC_ENABLE);
- I915_WRITE(dss_ctl2_reg, dss_ctl2_val);
+ intel_de_write(dev_priv, dss_ctl2_reg, dss_ctl2_val);
/* Disable Power wells for VDSC/joining */
intel_display_power_put_unchecked(dev_priv,
diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c
index 2ff7293986d4..be333699c515 100644
--- a/drivers/gpu/drm/i915/display/intel_vga.c
+++ b/drivers/gpu/drm/i915/display/intel_vga.c
@@ -9,6 +9,7 @@
#include <drm/i915_drm.h>
#include "i915_drv.h"
+#include "intel_de.h"
#include "intel_vga.h"
static i915_reg_t intel_vga_cntrl_reg(struct drm_i915_private *i915)
@@ -36,16 +37,17 @@ void intel_vga_disable(struct drm_i915_private *dev_priv)
vga_put(pdev, VGA_RSRC_LEGACY_IO);
udelay(300);
- I915_WRITE(vga_reg, VGA_DISP_DISABLE);
- POSTING_READ(vga_reg);
+ intel_de_write(dev_priv, vga_reg, VGA_DISP_DISABLE);
+ intel_de_posting_read(dev_priv, vga_reg);
}
void intel_vga_redisable_power_on(struct drm_i915_private *dev_priv)
{
i915_reg_t vga_reg = intel_vga_cntrl_reg(dev_priv);
- if (!(I915_READ(vga_reg) & VGA_DISP_DISABLE)) {
- DRM_DEBUG_KMS("Something enabled VGA plane, disabling it\n");
+ if (!(intel_de_read(dev_priv, vga_reg) & VGA_DISP_DISABLE)) {
+ drm_dbg_kms(&dev_priv->drm,
+ "Something enabled VGA plane, disabling it\n");
intel_vga_disable(dev_priv);
}
}
@@ -98,7 +100,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
u16 gmch_ctrl;
if (pci_read_config_word(i915->bridge_dev, reg, &gmch_ctrl)) {
- DRM_ERROR("failed to read control word\n");
+ drm_err(&i915->drm, "failed to read control word\n");
return -EIO;
}
@@ -111,7 +113,7 @@ intel_vga_set_state(struct drm_i915_private *i915, bool enable_decode)
gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
if (pci_write_config_word(i915->bridge_dev, reg, gmch_ctrl)) {
- DRM_ERROR("failed to write control word\n");
+ drm_err(&i915->drm, "failed to write control word\n");
return -EIO;
}
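For reference, the intel_de_* helpers pulled in by the new "intel_de.h" include are thin wrappers over the uncore layer; the header of this era defines them roughly as below (shown as background on the API being adopted, not as part of this diff):

	static inline u32
	intel_de_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		return intel_uncore_read(&i915->uncore, reg);
	}

	static inline void
	intel_de_write(struct drm_i915_private *i915, i915_reg_t reg, u32 val)
	{
		intel_uncore_write(&i915->uncore, reg, val);
	}

	/* Read back after write; flushes posted writes such as the VGA disable. */
	static inline void
	intel_de_posting_read(struct drm_i915_private *i915, i915_reg_t reg)
	{
		intel_uncore_posting_read(&i915->uncore, reg);
	}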
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c
index daf4fc3dab6f..f4c362dc6e15 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi.c
@@ -85,7 +85,7 @@ void vlv_dsi_wait_for_fifo_empty(struct intel_dsi *intel_dsi, enum port port)
if (intel_de_wait_for_set(dev_priv, MIPI_GEN_FIFO_STAT(port),
mask, 100))
- DRM_ERROR("DPI FIFOs are not empty\n");
+ drm_err(&dev_priv->drm, "DPI FIFOs are not empty\n");
}
static void write_data(struct drm_i915_private *dev_priv,
@@ -100,7 +100,7 @@ static void write_data(struct drm_i915_private *dev_priv,
for (j = 0; j < min_t(u32, len - i, 4); j++)
val |= *data++ << 8 * j;
- I915_WRITE(reg, val);
+ intel_de_write(dev_priv, reg, val);
}
}
@@ -111,7 +111,7 @@ static void read_data(struct drm_i915_private *dev_priv,
u32 i, j;
for (i = 0; i < len; i += 4) {
- u32 val = I915_READ(reg);
+ u32 val = intel_de_read(dev_priv, reg);
for (j = 0; j < min_t(u32, len - i, 4); j++)
*data++ = val >> 8 * j;
@@ -154,29 +154,34 @@ static ssize_t intel_dsi_host_transfer(struct mipi_dsi_host *host,
if (packet.payload_length) {
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
data_mask, 50))
- DRM_ERROR("Timeout waiting for HS/LP DATA FIFO !full\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP DATA FIFO !full\n");
write_data(dev_priv, data_reg, packet.payload,
packet.payload_length);
}
if (msg->rx_len) {
- I915_WRITE(MIPI_INTR_STAT(port), GEN_READ_DATA_AVAIL);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port),
+ GEN_READ_DATA_AVAIL);
}
if (intel_de_wait_for_clear(dev_priv, MIPI_GEN_FIFO_STAT(port),
ctrl_mask, 50)) {
- DRM_ERROR("Timeout waiting for HS/LP CTRL FIFO !full\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for HS/LP CTRL FIFO !full\n");
}
- I915_WRITE(ctrl_reg, header[2] << 16 | header[1] << 8 | header[0]);
+ intel_de_write(dev_priv, ctrl_reg,
+ header[2] << 16 | header[1] << 8 | header[0]);
/* ->rx_len is set only for reads */
if (msg->rx_len) {
data_mask = GEN_READ_DATA_AVAIL;
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port),
data_mask, 50))
- DRM_ERROR("Timeout waiting for read data.\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for read data.\n");
read_data(dev_priv, data_reg, msg->rx_buf, msg->rx_len);
}
@@ -223,17 +228,19 @@ static int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs,
cmd |= DPI_LP_MODE;
/* clear bit */
- I915_WRITE(MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), SPL_PKT_SENT_INTERRUPT);
/* XXX: old code skips write if control unchanged */
- if (cmd == I915_READ(MIPI_DPI_CONTROL(port)))
- DRM_DEBUG_KMS("Same special packet %02x twice in a row.\n", cmd);
+ if (cmd == intel_de_read(dev_priv, MIPI_DPI_CONTROL(port)))
+ drm_dbg_kms(&dev_priv->drm,
+ "Same special packet %02x twice in a row.\n", cmd);
- I915_WRITE(MIPI_DPI_CONTROL(port), cmd);
+ intel_de_write(dev_priv, MIPI_DPI_CONTROL(port), cmd);
mask = SPL_PKT_SENT_INTERRUPT;
if (intel_de_wait_for_set(dev_priv, MIPI_INTR_STAT(port), mask, 100))
- DRM_ERROR("Video mode command 0x%08x send failed.\n", cmd);
+ drm_err(&dev_priv->drm,
+ "Video mode command 0x%08x send failed.\n", cmd);
return 0;
}
@@ -265,7 +272,7 @@ static int intel_dsi_compute_config(struct intel_encoder *encoder,
struct drm_display_mode *adjusted_mode = &pipe_config->hw.adjusted_mode;
int ret;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_format = INTEL_OUTPUT_FORMAT_RGB;
if (fixed_mode) {
@@ -328,36 +335,37 @@ static bool glk_dsi_enable_io(struct intel_encoder *encoder)
* Power ON MIPI IO first and then write into IO reset and LP wake bits
*/
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
- I915_WRITE(MIPI_CTRL(port), tmp | GLK_MIPIIO_ENABLE);
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | GLK_MIPIIO_ENABLE);
}
/* Put the IO into reset */
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
/* Program LP Wake */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
tmp &= ~GLK_LP_WAKE;
else
tmp |= GLK_LP_WAKE;
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
/* Wait for Pwr ACK */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
- DRM_ERROR("MIPIO port is powergated\n");
+ drm_err(&dev_priv->drm, "MIPIO port is powergated\n");
}
/* Check for cold boot scenario */
for_each_dsi_port(port, intel_dsi->ports) {
cold_boot |=
- !(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY);
+ !(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY);
}
return cold_boot;
@@ -374,48 +382,49 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not ON\n");
+ drm_err(&dev_priv->drm, "PHY is not ON\n");
}
/* Get IO out of reset */
- val = I915_READ(MIPI_CTRL(PORT_A));
- I915_WRITE(MIPI_CTRL(PORT_A), val | GLK_MIPIIO_RESET_RELEASED);
+ val = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ val | GLK_MIPIIO_RESET_RELEASED);
/* Get IO out of Low power state*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY)) {
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= DEVICE_READY;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
usleep_range(10, 15);
} else {
/* Enter ULPS */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
/* Wait for ULPS active */
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_ULPS_NOT_ACTIVE, 20))
- DRM_ERROR("ULPS not active\n");
+ drm_err(&dev_priv->drm, "ULPS not active\n");
/* Exit ULPS */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_EXIT | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
/* Enter Normal Mode */
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_NORMAL_OPERATION | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
- val = I915_READ(MIPI_CTRL(port));
+ val = intel_de_read(dev_priv, MIPI_CTRL(port));
val &= ~GLK_LP_WAKE;
- I915_WRITE(MIPI_CTRL(port), val);
+ intel_de_write(dev_priv, MIPI_CTRL(port), val);
}
}
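Most of the ULPS handling above is read-modify-write on MIPI_DEVICE_READY and MIPI_CTRL. A helper with the shape below would shorten those sequences; it is sketched here as an assumption (later i915 trees do carry an intel_de_rmw with this behavior, but it is not part of this diff):

	/* Sketch: clear the 'clear' bits, set the 'set' bits, and return the
	 * pre-modification register value.
	 */
	static u32 intel_de_rmw(struct drm_i915_private *i915, i915_reg_t reg,
				u32 clear, u32 set)
	{
		u32 old = intel_de_read(i915, reg);

		intel_de_write(i915, reg, (old & ~clear) | set);
		return old;
	}

With such a helper, the ULPS entry above would reduce to a single call: intel_de_rmw(dev_priv, MIPI_DEVICE_READY(port), ULPS_STATE_MASK, ULPS_STATE_ENTER | DEVICE_READY).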
@@ -423,14 +432,16 @@ static void glk_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, MIPI_CTRL(port),
GLK_DATA_LANE_STOP_STATE, 20))
- DRM_ERROR("Date lane not in STOP state\n");
+ drm_err(&dev_priv->drm,
+ "Date lane not in STOP state\n");
}
/* Wait for AFE LATCH */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_set(dev_priv, BXT_MIPI_PORT_CTRL(port),
AFE_LATCHOUT, 20))
- DRM_ERROR("D-PHY not entering LP-11 state\n");
+ drm_err(&dev_priv->drm,
+ "D-PHY not entering LP-11 state\n");
}
}
@@ -441,23 +452,24 @@ static void bxt_dsi_device_ready(struct intel_encoder *encoder)
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* Enable MIPI PHY transparent latch */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(BXT_MIPI_PORT_CTRL(port));
- I915_WRITE(BXT_MIPI_PORT_CTRL(port), val | LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port));
+ intel_de_write(dev_priv, BXT_MIPI_PORT_CTRL(port),
+ val | LP_OUTPUT_HOLD);
usleep_range(2000, 2500);
}
/* Clear ULPS and set device ready */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
usleep_range(2000, 2500);
val |= DEVICE_READY;
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
}
}
@@ -468,7 +480,7 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_flisdsi_get(dev_priv);
/* program rcomp for compliance, reduce from 50 ohms to 45 ohms
@@ -481,21 +493,25 @@ static void vlv_dsi_device_ready(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_ENTER);
usleep_range(2500, 3000);
/* Enable MIPI PHY transparent latch
* Common bit for both MIPI Port A & MIPI Port C
* No similar bit in MIPI Port C reg
*/
- val = I915_READ(MIPI_PORT_CTRL(PORT_A));
- I915_WRITE(MIPI_PORT_CTRL(PORT_A), val | LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, MIPI_PORT_CTRL(PORT_A));
+ intel_de_write(dev_priv, MIPI_PORT_CTRL(PORT_A),
+ val | LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(port), ULPS_STATE_EXIT);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ ULPS_STATE_EXIT);
usleep_range(2500, 3000);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY);
usleep_range(2500, 3000);
}
}
@@ -521,24 +537,25 @@ static void glk_dsi_enter_low_power_mode(struct intel_encoder *encoder)
/* Enter ULPS */
for_each_dsi_port(port, intel_dsi->ports) {
- val = I915_READ(MIPI_DEVICE_READY(port));
+ val = intel_de_read(dev_priv, MIPI_DEVICE_READY(port));
val &= ~ULPS_STATE_MASK;
val |= (ULPS_STATE_ENTER | DEVICE_READY);
- I915_WRITE(MIPI_DEVICE_READY(port), val);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), val);
}
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not turning OFF\n");
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
}
/* Wait for Pwr ACK bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_MIPIIO_PORT_POWERED, 20))
- DRM_ERROR("MIPI IO Port is not powergated\n");
+ drm_err(&dev_priv->drm,
+ "MIPI IO Port is not powergated\n");
}
}
@@ -550,22 +567,22 @@ static void glk_dsi_disable_mipi_io(struct intel_encoder *encoder)
u32 tmp;
/* Put the IO into reset */
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~GLK_MIPIIO_RESET_RELEASED;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A), tmp);
/* Wait for MIPI PHY status bit to unset */
for_each_dsi_port(port, intel_dsi->ports) {
if (intel_de_wait_for_clear(dev_priv, MIPI_CTRL(port),
GLK_PHY_STATUS_PORT_READY, 20))
- DRM_ERROR("PHY is not turning OFF\n");
+ drm_err(&dev_priv->drm, "PHY is not turning OFF\n");
}
/* Clear MIPI mode */
for_each_dsi_port(port, intel_dsi->ports) {
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~GLK_MIPIIO_ENABLE;
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
}
@@ -581,23 +598,23 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
enum port port;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
for_each_dsi_port(port, intel_dsi->ports) {
/* Common bit for both MIPI Port A & MIPI Port C on VLV/CHV */
i915_reg_t port_ctrl = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(PORT_A);
u32 val;
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_EXIT);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_EXIT);
usleep_range(2000, 2500);
- I915_WRITE(MIPI_DEVICE_READY(port), DEVICE_READY |
- ULPS_STATE_ENTER);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port),
+ DEVICE_READY | ULPS_STATE_ENTER);
usleep_range(2000, 2500);
/*
@@ -607,14 +624,14 @@ static void vlv_dsi_clear_device_ready(struct intel_encoder *encoder)
if ((IS_GEN9_LP(dev_priv) || port == PORT_A) &&
intel_de_wait_for_clear(dev_priv, port_ctrl,
AFE_LATCHOUT, 30))
- DRM_ERROR("DSI LP not going Low\n");
+ drm_err(&dev_priv->drm, "DSI LP not going Low\n");
/* Disable MIPI PHY transparent latch */
- val = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, val & ~LP_OUTPUT_HOLD);
+ val = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, val & ~LP_OUTPUT_HOLD);
usleep_range(1000, 1500);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x00);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x00);
usleep_range(2000, 2500);
}
}
@@ -631,18 +648,20 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
u32 temp;
if (IS_GEN9_LP(dev_priv)) {
for_each_dsi_port(port, intel_dsi->ports) {
- temp = I915_READ(MIPI_CTRL(port));
+ temp = intel_de_read(dev_priv,
+ MIPI_CTRL(port));
temp &= ~BXT_PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
BXT_PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(MIPI_CTRL(port), temp);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp);
}
} else {
- temp = I915_READ(VLV_CHICKEN_3);
+ temp = intel_de_read(dev_priv, VLV_CHICKEN_3);
temp &= ~PIXEL_OVERLAP_CNT_MASK |
intel_dsi->pixel_overlap <<
PIXEL_OVERLAP_CNT_SHIFT;
- I915_WRITE(VLV_CHICKEN_3, temp);
+ intel_de_write(dev_priv, VLV_CHICKEN_3, temp);
}
}
@@ -651,7 +670,7 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
u32 temp;
- temp = I915_READ(port_ctrl);
+ temp = intel_de_read(dev_priv, port_ctrl);
temp &= ~LANE_CONFIGURATION_MASK;
temp &= ~DUAL_LINK_MODE_MASK;
@@ -671,8 +690,8 @@ static void intel_dsi_port_enable(struct intel_encoder *encoder,
temp |= DITHERING_ENABLE;
/* assert ip_tg_enable signal */
- I915_WRITE(port_ctrl, temp | DPI_ENABLE);
- POSTING_READ(port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp | DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -689,9 +708,9 @@ static void intel_dsi_port_disable(struct intel_encoder *encoder)
u32 temp;
/* de-assert ip_tg_enable signal */
- temp = I915_READ(port_ctrl);
- I915_WRITE(port_ctrl, temp & ~DPI_ENABLE);
- POSTING_READ(port_ctrl);
+ temp = intel_de_read(dev_priv, port_ctrl);
+ intel_de_write(dev_priv, port_ctrl, temp & ~DPI_ENABLE);
+ intel_de_posting_read(dev_priv, port_ctrl);
}
}
@@ -753,7 +772,7 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
u32 val;
bool glk_cold_boot = false;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true);
@@ -771,22 +790,22 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv)) {
/* Add MIPI IO reset programming for modeset */
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
- val | MIPIO_RST_CTRL);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val | MIPIO_RST_CTRL);
/* Power up DSI regulator */
- I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, 0);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL, 0);
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
u32 val;
/* Disable DPOunit clock gating, can stall pipe */
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val |= DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
if (!IS_GEMINILAKE(dev_priv))
@@ -820,7 +839,8 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
* recommendation, port should be enabled before plane & pipe */
if (is_cmd_mode(intel_dsi)) {
for_each_dsi_port(port, intel_dsi->ports)
- I915_WRITE(MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
+ intel_de_write(dev_priv,
+ MIPI_MAX_RETURN_PKT_SIZE(port), 8 * 4);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_TEAR_ON);
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_DISPLAY_ON);
} else {
@@ -838,6 +858,15 @@ static void intel_dsi_pre_enable(struct intel_encoder *encoder,
intel_dsi_vbt_exec_sequence(intel_dsi, MIPI_SEQ_BACKLIGHT_ON);
}
+static void bxt_dsi_enable(struct intel_encoder *encoder,
+ const struct intel_crtc_state *crtc_state,
+ const struct drm_connector_state *conn_state)
+{
+ WARN_ON(crtc_state->has_pch_encoder);
+
+ intel_crtc_vblank_on(crtc_state);
+}
+
/*
* DSI port disable has to be done after pipe and plane disable, so we do it in
* the post_disable hook.
@@ -886,7 +915,7 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
if (IS_GEN9_LP(dev_priv)) {
intel_crtc_vblank_off(old_crtc_state);
@@ -917,13 +946,14 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
if (IS_BROXTON(dev_priv)) {
/* Power down DSI regulator to save power */
- I915_WRITE(BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
- I915_WRITE(BXT_P_DSI_REGULATOR_TX_CTRL, HS_IO_CTRL_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_CFG, STAP_SELECT);
+ intel_de_write(dev_priv, BXT_P_DSI_REGULATOR_TX_CTRL,
+ HS_IO_CTRL_SELECT);
/* Add MIPI IO reset programming for modeset */
- val = I915_READ(BXT_P_CR_GT_DISP_PWRON);
- I915_WRITE(BXT_P_CR_GT_DISP_PWRON,
- val & ~MIPIO_RST_CTRL);
+ val = intel_de_read(dev_priv, BXT_P_CR_GT_DISP_PWRON);
+ intel_de_write(dev_priv, BXT_P_CR_GT_DISP_PWRON,
+ val & ~MIPIO_RST_CTRL);
}
if (IS_GEN9_LP(dev_priv)) {
@@ -933,9 +963,9 @@ static void intel_dsi_post_disable(struct intel_encoder *encoder,
vlv_dsi_pll_disable(encoder);
- val = I915_READ(DSPCLK_GATE_D);
+ val = intel_de_read(dev_priv, DSPCLK_GATE_D);
val &= ~DPOUNIT_CLOCK_GATE_DISABLE;
- I915_WRITE(DSPCLK_GATE_D, val);
+ intel_de_write(dev_priv, DSPCLK_GATE_D, val);
}
/* Assert reset */
@@ -960,7 +990,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
enum port port;
bool active = false;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
wakeref = intel_display_power_get_if_enabled(dev_priv,
encoder->power_domain);
@@ -979,7 +1009,7 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_GEN9_LP(dev_priv) ?
BXT_MIPI_PORT_CTRL(port) : MIPI_PORT_CTRL(port);
- bool enabled = I915_READ(ctrl_reg) & DPI_ENABLE;
+ bool enabled = intel_de_read(dev_priv, ctrl_reg) & DPI_ENABLE;
/*
* Due to some hardware limitations on VLV/CHV, the DPI enable
@@ -988,26 +1018,27 @@ static bool intel_dsi_get_hw_state(struct intel_encoder *encoder,
*/
if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
port == PORT_C)
- enabled = I915_READ(PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
+ enabled = intel_de_read(dev_priv, PIPECONF(PIPE_B)) & PIPECONF_ENABLE;
/* Try command mode if video mode not enabled */
if (!enabled) {
- u32 tmp = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ u32 tmp = intel_de_read(dev_priv,
+ MIPI_DSI_FUNC_PRG(port));
enabled = tmp & CMD_MODE_DATA_WIDTH_MASK;
}
if (!enabled)
continue;
- if (!(I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY))
+ if (!(intel_de_read(dev_priv, MIPI_DEVICE_READY(port)) & DEVICE_READY))
continue;
if (IS_GEN9_LP(dev_priv)) {
- u32 tmp = I915_READ(MIPI_CTRL(port));
+ u32 tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= BXT_PIPE_SELECT_MASK;
tmp >>= BXT_PIPE_SELECT_SHIFT;
- if (WARN_ON(tmp > PIPE_C))
+ if (drm_WARN_ON(&dev_priv->drm, tmp > PIPE_C))
continue;
*pipe = tmp;
@@ -1051,11 +1082,11 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
* encoder->get_hw_state() returns true.
*/
for_each_dsi_port(port, intel_dsi->ports) {
- if (I915_READ(BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
+ if (intel_de_read(dev_priv, BXT_MIPI_PORT_CTRL(port)) & DPI_ENABLE)
break;
}
- fmt = I915_READ(MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
+ fmt = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port)) & VID_MODE_FORMAT_MASK;
bpp = mipi_dsi_pixel_format_to_bpp(
pixel_format_from_register_bits(fmt));
@@ -1067,21 +1098,24 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
/* In terms of pixels */
adjusted_mode->crtc_hdisplay =
- I915_READ(BXT_MIPI_TRANS_HACTIVE(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_HACTIVE(port));
adjusted_mode->crtc_vdisplay =
- I915_READ(BXT_MIPI_TRANS_VACTIVE(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VACTIVE(port));
adjusted_mode->crtc_vtotal =
- I915_READ(BXT_MIPI_TRANS_VTOTAL(port));
+ intel_de_read(dev_priv,
+ BXT_MIPI_TRANS_VTOTAL(port));
hactive = adjusted_mode->crtc_hdisplay;
- hfp = I915_READ(MIPI_HFP_COUNT(port));
+ hfp = intel_de_read(dev_priv, MIPI_HFP_COUNT(port));
/*
* Meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes
*/
- hsync = I915_READ(MIPI_HSYNC_PADDING_COUNT(port));
- hbp = I915_READ(MIPI_HBP_COUNT(port));
+ hsync = intel_de_read(dev_priv, MIPI_HSYNC_PADDING_COUNT(port));
+ hbp = intel_de_read(dev_priv, MIPI_HBP_COUNT(port));
/* horizontal values are in terms of high speed byte clock */
hfp = pixels_from_txbyteclkhs(hfp, bpp, lane_count,
@@ -1098,9 +1132,9 @@ static void bxt_dsi_get_pipe_config(struct intel_encoder *encoder,
}
/* vertical values are in terms of lines */
- vfp = I915_READ(MIPI_VFP_COUNT(port));
- vsync = I915_READ(MIPI_VSYNC_PADDING_COUNT(port));
- vbp = I915_READ(MIPI_VBP_COUNT(port));
+ vfp = intel_de_read(dev_priv, MIPI_VFP_COUNT(port));
+ vsync = intel_de_read(dev_priv, MIPI_VSYNC_PADDING_COUNT(port));
+ vbp = intel_de_read(dev_priv, MIPI_VBP_COUNT(port));
adjusted_mode->crtc_htotal = hactive + hfp + hsync + hbp;
adjusted_mode->crtc_hsync_start = hfp + adjusted_mode->crtc_hdisplay;
@@ -1191,7 +1225,7 @@ static void intel_dsi_get_config(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 pclk;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
pipe_config->output_types |= BIT(INTEL_OUTPUT_DSI);
@@ -1268,26 +1302,29 @@ static void set_dsi_timings(struct drm_encoder *encoder,
* vactive, as they are calculated per channel basis,
* whereas these values should be based on resolution.
*/
- I915_WRITE(BXT_MIPI_TRANS_HACTIVE(port),
- adjusted_mode->crtc_hdisplay);
- I915_WRITE(BXT_MIPI_TRANS_VACTIVE(port),
- adjusted_mode->crtc_vdisplay);
- I915_WRITE(BXT_MIPI_TRANS_VTOTAL(port),
- adjusted_mode->crtc_vtotal);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_HACTIVE(port),
+ adjusted_mode->crtc_hdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VACTIVE(port),
+ adjusted_mode->crtc_vdisplay);
+ intel_de_write(dev_priv, BXT_MIPI_TRANS_VTOTAL(port),
+ adjusted_mode->crtc_vtotal);
}
- I915_WRITE(MIPI_HACTIVE_AREA_COUNT(port), hactive);
- I915_WRITE(MIPI_HFP_COUNT(port), hfp);
+ intel_de_write(dev_priv, MIPI_HACTIVE_AREA_COUNT(port),
+ hactive);
+ intel_de_write(dev_priv, MIPI_HFP_COUNT(port), hfp);
/* meaningful for video mode non-burst sync pulse mode only,
* can be zero for non-burst sync events and burst modes */
- I915_WRITE(MIPI_HSYNC_PADDING_COUNT(port), hsync);
- I915_WRITE(MIPI_HBP_COUNT(port), hbp);
+ intel_de_write(dev_priv, MIPI_HSYNC_PADDING_COUNT(port),
+ hsync);
+ intel_de_write(dev_priv, MIPI_HBP_COUNT(port), hbp);
/* vertical values are in terms of lines */
- I915_WRITE(MIPI_VFP_COUNT(port), vfp);
- I915_WRITE(MIPI_VSYNC_PADDING_COUNT(port), vsync);
- I915_WRITE(MIPI_VBP_COUNT(port), vbp);
+ intel_de_write(dev_priv, MIPI_VFP_COUNT(port), vfp);
+ intel_de_write(dev_priv, MIPI_VSYNC_PADDING_COUNT(port),
+ vsync);
+ intel_de_write(dev_priv, MIPI_VBP_COUNT(port), vbp);
}
}
@@ -1322,7 +1359,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
u32 val, tmp;
u16 mode_hdisplay;
- DRM_DEBUG_KMS("pipe %c\n", pipe_name(intel_crtc->pipe));
+ drm_dbg_kms(&dev_priv->drm, "pipe %c\n", pipe_name(intel_crtc->pipe));
mode_hdisplay = adjusted_mode->crtc_hdisplay;
@@ -1338,35 +1375,35 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* escape clock divider, 20MHz, shared for A and C.
* device ready must be off when doing this! txclkesc?
*/
- tmp = I915_READ(MIPI_CTRL(PORT_A));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(PORT_A));
tmp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(PORT_A), tmp |
- ESCAPE_CLOCK_DIVIDER_1);
+ intel_de_write(dev_priv, MIPI_CTRL(PORT_A),
+ tmp | ESCAPE_CLOCK_DIVIDER_1);
/* read request priority is per pipe */
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~READ_REQUEST_PRIORITY_MASK;
- I915_WRITE(MIPI_CTRL(port), tmp |
- READ_REQUEST_PRIORITY_HIGH);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ tmp | READ_REQUEST_PRIORITY_HIGH);
} else if (IS_GEN9_LP(dev_priv)) {
enum pipe pipe = intel_crtc->pipe;
- tmp = I915_READ(MIPI_CTRL(port));
+ tmp = intel_de_read(dev_priv, MIPI_CTRL(port));
tmp &= ~BXT_PIPE_SELECT_MASK;
tmp |= BXT_PIPE_SELECT(pipe);
- I915_WRITE(MIPI_CTRL(port), tmp);
+ intel_de_write(dev_priv, MIPI_CTRL(port), tmp);
}
/* XXX: why here, why like this? handling in irq handler?! */
- I915_WRITE(MIPI_INTR_STAT(port), 0xffffffff);
- I915_WRITE(MIPI_INTR_EN(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_STAT(port), 0xffffffff);
+ intel_de_write(dev_priv, MIPI_INTR_EN(port), 0xffffffff);
- I915_WRITE(MIPI_DPHY_PARAM(port), intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, MIPI_DPHY_PARAM(port),
+ intel_dsi->dphy_reg);
- I915_WRITE(MIPI_DPI_RESOLUTION(port),
- adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT |
- mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
+ intel_de_write(dev_priv, MIPI_DPI_RESOLUTION(port),
+ adjusted_mode->crtc_vdisplay << VERTICAL_ADDRESS_SHIFT | mode_hdisplay << HORIZONTAL_ADDRESS_SHIFT);
}
set_dsi_timings(encoder, adjusted_mode);
@@ -1393,7 +1430,7 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
}
for_each_dsi_port(port, intel_dsi->ports) {
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
/* timeouts for recovery. one frame IIUC. if counter expires,
* EOT and stop state. */
@@ -1414,28 +1451,24 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
if (is_vid_mode(intel_dsi) &&
intel_dsi->video_mode_format == VIDEO_MODE_BURST) {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
- txbyteclkhs(adjusted_mode->crtc_htotal, bpp,
- intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
} else {
- I915_WRITE(MIPI_HS_TX_TIMEOUT(port),
- txbyteclkhs(adjusted_mode->crtc_vtotal *
- adjusted_mode->crtc_htotal,
- bpp, intel_dsi->lane_count,
- intel_dsi->burst_mode_ratio) + 1);
+ intel_de_write(dev_priv, MIPI_HS_TX_TIMEOUT(port),
+ txbyteclkhs(adjusted_mode->crtc_vtotal * adjusted_mode->crtc_htotal, bpp, intel_dsi->lane_count, intel_dsi->burst_mode_ratio) + 1);
}
- I915_WRITE(MIPI_LP_RX_TIMEOUT(port), intel_dsi->lp_rx_timeout);
- I915_WRITE(MIPI_TURN_AROUND_TIMEOUT(port),
- intel_dsi->turn_arnd_val);
- I915_WRITE(MIPI_DEVICE_RESET_TIMER(port),
- intel_dsi->rst_timer_val);
+ intel_de_write(dev_priv, MIPI_LP_RX_TIMEOUT(port),
+ intel_dsi->lp_rx_timeout);
+ intel_de_write(dev_priv, MIPI_TURN_AROUND_TIMEOUT(port),
+ intel_dsi->turn_arnd_val);
+ intel_de_write(dev_priv, MIPI_DEVICE_RESET_TIMER(port),
+ intel_dsi->rst_timer_val);
/* dphy stuff */
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(port),
- txclkesc(intel_dsi->escape_clk_div, 100));
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ txclkesc(intel_dsi->escape_clk_div, 100));
if (IS_GEN9_LP(dev_priv) && (!intel_dsi->dual_link)) {
/*
@@ -1444,24 +1477,25 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* getting used. So write the other port
* if not in dual link mode.
*/
- I915_WRITE(MIPI_INIT_COUNT(port ==
- PORT_A ? PORT_C : PORT_A),
- intel_dsi->init_count);
+ intel_de_write(dev_priv,
+ MIPI_INIT_COUNT(port == PORT_A ? PORT_C : PORT_A),
+ intel_dsi->init_count);
}
/* recovery disables */
- I915_WRITE(MIPI_EOT_DISABLE(port), tmp);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), tmp);
/* in terms of low power clock */
- I915_WRITE(MIPI_INIT_COUNT(port), intel_dsi->init_count);
+ intel_de_write(dev_priv, MIPI_INIT_COUNT(port),
+ intel_dsi->init_count);
/* in terms of txbyteclkhs. actual high to low switch +
* MIPI_STOP_STATE_STALL * MIPI_LP_BYTECLK.
*
* XXX: write MIPI_STOP_STATE_STALL?
*/
- I915_WRITE(MIPI_HIGH_LOW_SWITCH_COUNT(port),
- intel_dsi->hs_to_lp_count);
+ intel_de_write(dev_priv, MIPI_HIGH_LOW_SWITCH_COUNT(port),
+ intel_dsi->hs_to_lp_count);
/* XXX: low power clock equivalence in terms of byte clock.
* the number of byte clocks occupied in one low power clock.
@@ -1469,14 +1503,15 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* txclkesc time / txbyteclk time * (105 + MIPI_STOP_STATE_STALL
* ) / 105.???
*/
- I915_WRITE(MIPI_LP_BYTECLK(port), intel_dsi->lp_byte_clk);
+ intel_de_write(dev_priv, MIPI_LP_BYTECLK(port),
+ intel_dsi->lp_byte_clk);
if (IS_GEMINILAKE(dev_priv)) {
- I915_WRITE(MIPI_TLPX_TIME_COUNT(port),
- intel_dsi->lp_byte_clk);
+ intel_de_write(dev_priv, MIPI_TLPX_TIME_COUNT(port),
+ intel_dsi->lp_byte_clk);
/* Shadow of DPHY reg */
- I915_WRITE(MIPI_CLK_LANE_TIMING(port),
- intel_dsi->dphy_reg);
+ intel_de_write(dev_priv, MIPI_CLK_LANE_TIMING(port),
+ intel_dsi->dphy_reg);
}
/* the bw essential for transmitting 16 long packets containing
@@ -1484,21 +1519,18 @@ static void intel_dsi_prepare(struct intel_encoder *intel_encoder,
* this register in terms of byte clocks. based on dsi transfer
* rate and the number of lanes configured the time taken to
* transmit 16 long packets in a dsi stream varies. */
- I915_WRITE(MIPI_DBI_BW_CTRL(port), intel_dsi->bw_timer);
+ intel_de_write(dev_priv, MIPI_DBI_BW_CTRL(port),
+ intel_dsi->bw_timer);
- I915_WRITE(MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
- intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT |
- intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
+ intel_de_write(dev_priv, MIPI_CLK_LANE_SWITCH_TIME_CNT(port),
+ intel_dsi->clk_lp_to_hs_count << LP_HS_SSW_CNT_SHIFT | intel_dsi->clk_hs_to_lp_count << HS_LP_PWR_SW_CNT_SHIFT);
if (is_vid_mode(intel_dsi))
/* Some panels might have resolution which is not a
* multiple of 64 like 1366 x 768. Enable RANDOM
* resolution support for such panels by default */
- I915_WRITE(MIPI_VIDEO_MODE_FORMAT(port),
- intel_dsi->video_frmt_cfg_bits |
- intel_dsi->video_mode_format |
- IP_TG_CONFIG |
- RANDOM_DPI_DISPLAY_RESOLUTION);
+ intel_de_write(dev_priv, MIPI_VIDEO_MODE_FORMAT(port),
+ intel_dsi->video_frmt_cfg_bits | intel_dsi->video_mode_format | IP_TG_CONFIG | RANDOM_DPI_DISPLAY_RESOLUTION);
}
}
@@ -1514,19 +1546,19 @@ static void intel_dsi_unprepare(struct intel_encoder *encoder)
for_each_dsi_port(port, intel_dsi->ports) {
/* Panel commands can be sent when clock is in LP11 */
- I915_WRITE(MIPI_DEVICE_READY(port), 0x0);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x0);
if (IS_GEN9_LP(dev_priv))
bxt_dsi_reset_clocks(encoder, port);
else
vlv_dsi_reset_clocks(encoder, port);
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
- val = I915_READ(MIPI_DSI_FUNC_PRG(port));
+ val = intel_de_read(dev_priv, MIPI_DSI_FUNC_PRG(port));
val &= ~VID_MODE_FORMAT_MASK;
- I915_WRITE(MIPI_DSI_FUNC_PRG(port), val);
+ intel_de_write(dev_priv, MIPI_DSI_FUNC_PRG(port), val);
- I915_WRITE(MIPI_DEVICE_READY(port), 0x1);
+ intel_de_write(dev_priv, MIPI_DEVICE_READY(port), 0x1);
}
}
@@ -1559,59 +1591,6 @@ static const struct drm_connector_funcs intel_dsi_connector_funcs = {
.atomic_duplicate_state = intel_digital_connector_duplicate_state,
};
-static enum drm_panel_orientation
-vlv_dsi_get_hw_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- struct intel_encoder *encoder = connector->encoder;
- enum intel_display_power_domain power_domain;
- enum drm_panel_orientation orientation;
- struct intel_plane *plane;
- struct intel_crtc *crtc;
- intel_wakeref_t wakeref;
- enum pipe pipe;
- u32 val;
-
- if (!encoder->get_hw_state(encoder, &pipe))
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
- plane = to_intel_plane(crtc->base.primary);
-
- power_domain = POWER_DOMAIN_PIPE(pipe);
- wakeref = intel_display_power_get_if_enabled(dev_priv, power_domain);
- if (!wakeref)
- return DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
-
- val = I915_READ(DSPCNTR(plane->i9xx_plane));
-
- if (!(val & DISPLAY_PLANE_ENABLE))
- orientation = DRM_MODE_PANEL_ORIENTATION_UNKNOWN;
- else if (val & DISPPLANE_ROTATE_180)
- orientation = DRM_MODE_PANEL_ORIENTATION_BOTTOM_UP;
- else
- orientation = DRM_MODE_PANEL_ORIENTATION_NORMAL;
-
- intel_display_power_put(dev_priv, power_domain, wakeref);
-
- return orientation;
-}
-
-static enum drm_panel_orientation
-vlv_dsi_get_panel_orientation(struct intel_connector *connector)
-{
- struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
- enum drm_panel_orientation orientation;
-
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
- orientation = vlv_dsi_get_hw_panel_orientation(connector);
- if (orientation != DRM_MODE_PANEL_ORIENTATION_UNKNOWN)
- return orientation;
- }
-
- return intel_dsi_get_panel_orientation(connector);
-}
-
static void vlv_dsi_add_properties(struct intel_connector *connector)
{
struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
@@ -1628,10 +1607,9 @@ static void vlv_dsi_add_properties(struct intel_connector *connector)
connector->base.state->scaling_mode = DRM_MODE_SCALE_ASPECT;
- connector->base.display_info.panel_orientation =
- vlv_dsi_get_panel_orientation(connector);
- drm_connector_init_panel_orientation_property(
+ drm_connector_set_panel_orientation_with_quirk(
&connector->base,
+ intel_dsi_get_panel_orientation(connector),
connector->panel.fixed_mode->hdisplay,
connector->panel.fixed_mode->vdisplay);
}
@@ -1703,7 +1681,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
prepare_cnt = DIV_ROUND_UP(ths_prepare_ns * ui_den, ui_num * mul);
if (prepare_cnt > PREPARE_CNT_MAX) {
- DRM_DEBUG_KMS("prepare count too high %u\n", prepare_cnt);
+ drm_dbg_kms(&dev_priv->drm, "prepare count too high %u\n",
+ prepare_cnt);
prepare_cnt = PREPARE_CNT_MAX;
}
@@ -1723,7 +1702,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
exit_zero_cnt += 1;
if (exit_zero_cnt > EXIT_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("exit zero count too high %u\n", exit_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "exit zero count too high %u\n",
+ exit_zero_cnt);
exit_zero_cnt = EXIT_ZERO_CNT_MAX;
}
@@ -1733,7 +1713,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
* ui_den, ui_num * mul);
if (clk_zero_cnt > CLK_ZERO_CNT_MAX) {
- DRM_DEBUG_KMS("clock zero count too high %u\n", clk_zero_cnt);
+ drm_dbg_kms(&dev_priv->drm, "clock zero count too high %u\n",
+ clk_zero_cnt);
clk_zero_cnt = CLK_ZERO_CNT_MAX;
}
@@ -1742,7 +1723,8 @@ static void vlv_dphy_param_init(struct intel_dsi *intel_dsi)
trail_cnt = DIV_ROUND_UP(tclk_trail_ns * ui_den, ui_num * mul);
if (trail_cnt > TRAIL_CNT_MAX) {
- DRM_DEBUG_KMS("trail count too high %u\n", trail_cnt);
+ drm_dbg_kms(&dev_priv->drm, "trail count too high %u\n",
+ trail_cnt);
trail_cnt = TRAIL_CNT_MAX;
}
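
All four hunks above follow one pattern: derive a D-PHY count from a nanosecond timing with DIV_ROUND_UP, then warn and clamp when it exceeds what the hardware field can hold. A compilable model of that pattern (the names and the 0x3f limit are illustrative, not the driver's):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

static unsigned int dphy_count(unsigned int t_ns, unsigned int ui_den,
                               unsigned int ui_num, unsigned int mul,
                               unsigned int max, const char *name)
{
        unsigned int cnt = DIV_ROUND_UP(t_ns * ui_den, ui_num * mul);

        if (cnt > max) {
                fprintf(stderr, "%s count too high %u\n", name, cnt);
                cnt = max;      /* clamp and carry on with the hw maximum */
        }
        return cnt;
}

int main(void)
{
        printf("prepare_cnt = %u\n", dphy_count(65, 8, 3, 2, 0x3f, "prepare"));
        return 0;
}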
@@ -1817,7 +1799,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
enum port port;
enum pipe pipe;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* There is no detection method for MIPI so rely on VBT */
if (!intel_bios_is_dsi_present(dev_priv, &port))
@@ -1849,6 +1831,8 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
intel_encoder->compute_config = intel_dsi_compute_config;
intel_encoder->pre_enable = intel_dsi_pre_enable;
+ if (IS_GEN9_LP(dev_priv))
+ intel_encoder->enable = bxt_dsi_enable;
intel_encoder->disable = intel_dsi_disable;
intel_encoder->post_disable = intel_dsi_post_disable;
intel_encoder->get_hw_state = intel_dsi_get_hw_state;
@@ -1894,18 +1878,18 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
}
if (!intel_dsi_vbt_init(intel_dsi, MIPI_DSI_GENERIC_PANEL_ID)) {
- DRM_DEBUG_KMS("no device found\n");
+ drm_dbg_kms(&dev_priv->drm, "no device found\n");
goto err;
}
/* Use clock read-back from current hw-state for fastboot */
current_mode = intel_encoder_current_mode(intel_encoder);
if (current_mode) {
- DRM_DEBUG_KMS("Calculated pclk %d GOP %d\n",
- intel_dsi->pclk, current_mode->clock);
+ drm_dbg_kms(&dev_priv->drm, "Calculated pclk %d GOP %d\n",
+ intel_dsi->pclk, current_mode->clock);
if (intel_fuzzy_clock_check(intel_dsi->pclk,
current_mode->clock)) {
- DRM_DEBUG_KMS("Using GOP pclk\n");
+ drm_dbg_kms(&dev_priv->drm, "Using GOP pclk\n");
intel_dsi->pclk = current_mode->clock;
}
@@ -1933,7 +1917,7 @@ void vlv_dsi_init(struct drm_i915_private *dev_priv)
mutex_unlock(&dev->mode_config.mutex);
if (!fixed_mode) {
- DRM_DEBUG_KMS("no fixed mode\n");
+ drm_dbg_kms(&dev_priv->drm, "no fixed mode\n");
goto err_cleanup_connector;
}
diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
index 6b89e67b120f..d0a514301575 100644
--- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
+++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c
@@ -64,7 +64,7 @@ static int dsi_calc_mnp(struct drm_i915_private *dev_priv,
/* target_dsi_clk is expected in kHz */
if (target_dsi_clk < 300000 || target_dsi_clk > 1150000) {
- DRM_ERROR("DSI CLK Out of Range\n");
+ drm_err(&dev_priv->drm, "DSI CLK Out of Range\n");
return -ECHRNG;
}
@@ -126,7 +126,7 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
ret = dsi_calc_mnp(dev_priv, config, dsi_clk);
if (ret) {
- DRM_DEBUG_KMS("dsi_calc_mnp failed\n");
+ drm_dbg_kms(&dev_priv->drm, "dsi_calc_mnp failed\n");
return ret;
}
@@ -138,8 +138,8 @@ int vlv_dsi_pll_compute(struct intel_encoder *encoder,
config->dsi_pll.ctrl |= DSI_PLL_VCO_EN;
- DRM_DEBUG_KMS("dsi pll div %08x, ctrl %08x\n",
- config->dsi_pll.div, config->dsi_pll.ctrl);
+ drm_dbg_kms(&dev_priv->drm, "dsi pll div %08x, ctrl %08x\n",
+ config->dsi_pll.div, config->dsi_pll.ctrl);
return 0;
}
@@ -149,7 +149,7 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
@@ -169,12 +169,12 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder,
DSI_PLL_LOCK, 20)) {
vlv_cck_put(dev_priv);
- DRM_ERROR("DSI PLL lock failed\n");
+ drm_err(&dev_priv->drm, "DSI PLL lock failed\n");
return;
}
vlv_cck_put(dev_priv);
- DRM_DEBUG_KMS("DSI PLL locked\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
}
void vlv_dsi_pll_disable(struct intel_encoder *encoder)
@@ -182,7 +182,7 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 tmp;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
@@ -201,7 +201,7 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
u32 mask;
mask = BXT_DSI_PLL_DO_ENABLE | BXT_DSI_PLL_LOCKED;
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
enabled = (val & mask) == mask;
if (!enabled)
@@ -215,15 +215,17 @@ bool bxt_dsi_pll_is_enabled(struct drm_i915_private *dev_priv)
* times, and since accessing DSI registers with invalid dividers
* causes a system hang.
*/
- val = I915_READ(BXT_DSI_PLL_CTL);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
if (IS_GEMINILAKE(dev_priv)) {
if (!(val & BXT_DSIA_16X_MASK)) {
- DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
} else {
if (!(val & BXT_DSIA_16X_MASK) || !(val & BXT_DSIC_16X_MASK)) {
- DRM_DEBUG_DRIVER("Invalid PLL divider (%08x)\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Invalid PLL divider (%08x)\n", val);
enabled = false;
}
}
@@ -236,11 +238,11 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
val &= ~BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
/*
* PLL lock should deassert within 200us.
@@ -248,7 +250,8 @@ void bxt_dsi_pll_disable(struct intel_encoder *encoder)
*/
if (intel_de_wait_for_clear(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1))
- DRM_ERROR("Timeout waiting for PLL lock deassertion\n");
+ drm_err(&dev_priv->drm,
+ "Timeout waiting for PLL lock deassertion\n");
}
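
intel_de_wait_for_clear() above is a bounded poll: succeed as soon as the masked bits read back zero, fail with an error once the timeout elapses. A userspace model of the same contract (the register read is stubbed out; in the driver, the 1 ms bound comfortably covers the 200 us the spec allows):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static uint32_t read_reg(void)
{
        return 0;       /* stub: pretend the PLL lock bit already dropped */
}

static int wait_for_clear(uint32_t mask, unsigned int timeout_us)
{
        while (timeout_us--) {
                if (!(read_reg() & mask))
                        return 0;       /* cleared in time */
                usleep(1);
        }
        return -ETIMEDOUT;
}

int main(void)
{
        printf("wait_for_clear: %d\n", wait_for_clear(1u << 16, 1000));
        return 0;
}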
u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
@@ -263,7 +266,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
int refclk = IS_CHERRYVIEW(dev_priv) ? 100000 : 25000;
int i;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
vlv_cck_get(dev_priv);
pll_ctl = vlv_cck_read(dev_priv, CCK_REG_DSI_PLL_CONTROL);
@@ -292,7 +295,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
p--;
if (!p) {
- DRM_ERROR("wrong P1 divisor\n");
+ drm_err(&dev_priv->drm, "wrong P1 divisor\n");
return 0;
}
@@ -302,7 +305,7 @@ u32 vlv_dsi_get_pclk(struct intel_encoder *encoder,
}
if (i == ARRAY_SIZE(lfsr_converts)) {
- DRM_ERROR("wrong m_seed programmed\n");
+ drm_err(&dev_priv->drm, "wrong m_seed programmed\n");
return 0;
}
@@ -325,7 +328,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
int bpp = mipi_dsi_pixel_format_to_bpp(intel_dsi->pixel_format);
- config->dsi_pll.ctrl = I915_READ(BXT_DSI_PLL_CTL);
+ config->dsi_pll.ctrl = intel_de_read(dev_priv, BXT_DSI_PLL_CTL);
dsi_ratio = config->dsi_pll.ctrl & BXT_DSI_PLL_RATIO_MASK;
@@ -333,7 +336,7 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder,
pclk = DIV_ROUND_CLOSEST(dsi_clk * intel_dsi->lane_count, bpp);
- DRM_DEBUG_DRIVER("Calculated pclk=%u\n", pclk);
+ drm_dbg(&dev_priv->drm, "Calculated pclk=%u\n", pclk);
return pclk;
}
@@ -343,11 +346,10 @@ void vlv_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder);
- temp = I915_READ(MIPI_CTRL(port));
+ temp = intel_de_read(dev_priv, MIPI_CTRL(port));
temp &= ~ESCAPE_CLOCK_DIVIDER_MASK;
- I915_WRITE(MIPI_CTRL(port), temp |
- intel_dsi->escape_clk_div <<
- ESCAPE_CLOCK_DIVIDER_SHIFT);
+ intel_de_write(dev_priv, MIPI_CTRL(port),
+ temp | intel_dsi->escape_clk_div << ESCAPE_CLOCK_DIVIDER_SHIFT);
}
static void glk_dsi_program_esc_clock(struct drm_device *dev,
@@ -393,8 +395,10 @@ static void glk_dsi_program_esc_clock(struct drm_device *dev,
else
txesc2_div = 10;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1,
+ (1 << (txesc1_div - 1)) & GLK_TX_ESC_CLK_DIV1_MASK);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2,
+ (1 << (txesc2_div - 1)) & GLK_TX_ESC_CLK_DIV2_MASK);
}
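
The TXESC divider writes above use a one-hot encoding: divide-by-N is programmed as bit (N - 1), masked to the field width. A small sketch (the 10-bit mask is an assumption for illustration, not the real GLK_TX_ESC_CLK_DIV*_MASK value):

#include <stdio.h>

#define TX_ESC_CLK_DIV_MASK     0x3ff   /* assumed field width */

static unsigned int encode_txesc_div(unsigned int div)
{
        return (1u << (div - 1)) & TX_ESC_CLK_DIV_MASK;
}

int main(void)
{
        printf("div 8  -> 0x%03x\n", encode_txesc_div(8));      /* 0x080 */
        printf("div 10 -> 0x%03x\n", encode_txesc_div(10));     /* 0x200 */
        return 0;
}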
/* Program BXT Mipi clocks and dividers */
@@ -412,7 +416,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
u32 mipi_8by3_divider;
/* Clear old configurations */
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
@@ -448,7 +452,7 @@ static void bxt_dsi_program_clocks(struct drm_device *dev, enum port port,
tmp |= BXT_MIPI_RX_ESCLK_LOWER_DIVIDER(port, rx_div_lower);
tmp |= BXT_MIPI_RX_ESCLK_UPPER_DIVIDER(port, rx_div_upper);
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
}
int bxt_dsi_pll_compute(struct intel_encoder *encoder,
@@ -478,10 +482,11 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder,
}
if (dsi_ratio < dsi_ratio_min || dsi_ratio > dsi_ratio_max) {
- DRM_ERROR("Cant get a suitable ratio from DSI PLL ratios\n");
+ drm_err(&dev_priv->drm,
+ "Cant get a suitable ratio from DSI PLL ratios\n");
return -ECHRNG;
} else
- DRM_DEBUG_KMS("DSI PLL calculation is Done!!\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL calculation is Done!!\n");
/*
* Program DSI ratio and Select MIPIC and MIPIA PLL output as 8x
@@ -507,11 +512,11 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
enum port port;
u32 val;
- DRM_DEBUG_KMS("\n");
+ drm_dbg_kms(&dev_priv->drm, "\n");
/* Configure PLL values */
- I915_WRITE(BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
- POSTING_READ(BXT_DSI_PLL_CTL);
+ intel_de_write(dev_priv, BXT_DSI_PLL_CTL, config->dsi_pll.ctrl);
+ intel_de_posting_read(dev_priv, BXT_DSI_PLL_CTL);
/* Program TX, RX, Dphy clocks */
if (IS_BROXTON(dev_priv)) {
@@ -522,18 +527,19 @@ void bxt_dsi_pll_enable(struct intel_encoder *encoder,
}
/* Enable DSI PLL */
- val = I915_READ(BXT_DSI_PLL_ENABLE);
+ val = intel_de_read(dev_priv, BXT_DSI_PLL_ENABLE);
val |= BXT_DSI_PLL_DO_ENABLE;
- I915_WRITE(BXT_DSI_PLL_ENABLE, val);
+ intel_de_write(dev_priv, BXT_DSI_PLL_ENABLE, val);
/* Timeout and fail if PLL not locked */
if (intel_de_wait_for_set(dev_priv, BXT_DSI_PLL_ENABLE,
BXT_DSI_PLL_LOCKED, 1)) {
- DRM_ERROR("Timed out waiting for DSI PLL to lock\n");
+ drm_err(&dev_priv->drm,
+ "Timed out waiting for DSI PLL to lock\n");
return;
}
- DRM_DEBUG_KMS("DSI PLL locked\n");
+ drm_dbg_kms(&dev_priv->drm, "DSI PLL locked\n");
}
void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
@@ -544,20 +550,20 @@ void bxt_dsi_reset_clocks(struct intel_encoder *encoder, enum port port)
/* Clear old configurations */
if (IS_BROXTON(dev_priv)) {
- tmp = I915_READ(BXT_MIPI_CLOCK_CTL);
+ tmp = intel_de_read(dev_priv, BXT_MIPI_CLOCK_CTL);
tmp &= ~(BXT_MIPI_TX_ESCLK_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_UPPER_FIXDIV_MASK(port));
tmp &= ~(BXT_MIPI_8X_BY3_DIVIDER_MASK(port));
tmp &= ~(BXT_MIPI_RX_ESCLK_LOWER_FIXDIV_MASK(port));
- I915_WRITE(BXT_MIPI_CLOCK_CTL, tmp);
+ intel_de_write(dev_priv, BXT_MIPI_CLOCK_CTL, tmp);
} else {
- tmp = I915_READ(MIPIO_TXESC_CLK_DIV1);
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV1);
tmp &= ~GLK_TX_ESC_CLK_DIV1_MASK;
- I915_WRITE(MIPIO_TXESC_CLK_DIV1, tmp);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV1, tmp);
- tmp = I915_READ(MIPIO_TXESC_CLK_DIV2);
+ tmp = intel_de_read(dev_priv, MIPIO_TXESC_CLK_DIV2);
tmp &= ~GLK_TX_ESC_CLK_DIV2_MASK;
- I915_WRITE(MIPIO_TXESC_CLK_DIV2, tmp);
+ intel_de_write(dev_priv, MIPIO_TXESC_CLK_DIV2, tmp);
}
- I915_WRITE(MIPI_EOT_DISABLE(port), CLOCKSTOP);
+ intel_de_write(dev_priv, MIPI_EOT_DISABLE(port), CLOCKSTOP);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
index 81366aa4812b..0598e5382a1d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_client_blt.c
@@ -217,7 +217,7 @@ static void clear_pages_worker(struct work_struct *work)
0);
out_request:
if (unlikely(err)) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err = 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index a2e57e62af30..026999b34abd 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.c
@@ -67,14 +67,11 @@
#include <linux/log2.h>
#include <linux/nospec.h>
-#include <drm/i915_drm.h>
-
#include "gt/gen6_ppgtt.h"
#include "gt/intel_context.h"
+#include "gt/intel_context_param.h"
#include "gt/intel_engine_heartbeat.h"
-#include "gt/intel_engine_pm.h"
#include "gt/intel_engine_user.h"
-#include "gt/intel_lrc_reg.h"
#include "gt/intel_ring.h"
#include "i915_gem_context.h"
@@ -245,7 +242,6 @@ static void __free_engines(struct i915_gem_engines *e, unsigned int count)
if (!e->engines[count])
continue;
- RCU_INIT_POINTER(e->engines[count]->gem_context, NULL);
intel_context_put(e->engines[count]);
}
kfree(e);
@@ -258,7 +254,51 @@ static void free_engines(struct i915_gem_engines *e)
static void free_engines_rcu(struct rcu_head *rcu)
{
- free_engines(container_of(rcu, struct i915_gem_engines, rcu));
+ struct i915_gem_engines *engines =
+ container_of(rcu, struct i915_gem_engines, rcu);
+
+ i915_sw_fence_fini(&engines->fence);
+ free_engines(engines);
+}
+
+static int __i915_sw_fence_call
+engines_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
+{
+ struct i915_gem_engines *engines =
+ container_of(fence, typeof(*engines), fence);
+
+ switch (state) {
+ case FENCE_COMPLETE:
+ if (!list_empty(&engines->link)) {
+ struct i915_gem_context *ctx = engines->ctx;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ctx->stale.lock, flags);
+ list_del(&engines->link);
+ spin_unlock_irqrestore(&ctx->stale.lock, flags);
+ }
+ i915_gem_context_put(engines->ctx);
+ break;
+
+ case FENCE_FREE:
+ init_rcu_head(&engines->rcu);
+ call_rcu(&engines->rcu, free_engines_rcu);
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+static struct i915_gem_engines *alloc_engines(unsigned int count)
+{
+ struct i915_gem_engines *e;
+
+ e = kzalloc(struct_size(e, engines, count), GFP_KERNEL);
+ if (!e)
+ return NULL;
+
+ i915_sw_fence_init(&e->fence, engines_notify);
+ return e;
}
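
The new fence plumbing behaves like a one-shot counted reference: every successful i915_sw_fence_await() delays the FENCE_COMPLETE/FENCE_FREE notifications until a matching i915_sw_fence_complete(). A minimal userspace model of that pairing (illustrative only, not the i915_sw_fence implementation):

#include <stdio.h>

struct fence {
        int pending;                    /* outstanding awaits + initial 1 */
        void (*fire)(struct fence *);   /* FENCE_COMPLETE then FENCE_FREE */
};

static int fence_await(struct fence *f)
{
        if (f->pending <= 0)
                return 0;               /* already fired: too late to wait */
        f->pending++;
        return 1;
}

static void fence_complete(struct fence *f)
{
        if (--f->pending == 0)
                f->fire(f);             /* the last complete triggers teardown */
}

static void fired(struct fence *f) { (void)f; puts("fired"); }

int main(void)
{
        struct fence f = { .pending = 1, .fire = fired };

        fence_await(&f);        /* e.g. a reader pinning the engines */
        fence_complete(&f);     /* the reader finishes ... */
        fence_complete(&f);     /* ... and the commit fires the fence */
        return 0;
}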
static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
@@ -268,11 +308,10 @@ static struct i915_gem_engines *default_engines(struct i915_gem_context *ctx)
struct i915_gem_engines *e;
enum intel_engine_id id;
- e = kzalloc(struct_size(e, engines, I915_NUM_ENGINES), GFP_KERNEL);
+ e = alloc_engines(I915_NUM_ENGINES);
if (!e)
return ERR_PTR(-ENOMEM);
- init_rcu_head(&e->rcu);
for_each_engine(engine, gt, id) {
struct intel_context *ce;
@@ -306,7 +345,6 @@ static void i915_gem_context_free(struct i915_gem_context *ctx)
list_del(&ctx->link);
spin_unlock(&ctx->i915->gem.contexts.lock);
- free_engines(rcu_access_pointer(ctx->engines));
mutex_destroy(&ctx->engines_mutex);
if (ctx->timeline)
@@ -421,7 +459,7 @@ static struct intel_engine_cs *__active_engine(struct i915_request *rq)
}
engine = NULL;
- if (i915_request_is_active(rq) && !rq->fence.error)
+ if (i915_request_is_active(rq) && rq->fence.error != -EIO)
engine = rq->engine;
spin_unlock_irq(&locked->active.lock);
@@ -452,7 +490,7 @@ static struct intel_engine_cs *active_engine(struct intel_context *ce)
return engine;
}
-static void kill_context(struct i915_gem_context *ctx)
+static void kill_engines(struct i915_gem_engines *engines)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
@@ -464,7 +502,7 @@ static void kill_context(struct i915_gem_context *ctx)
* However, we only care about pending requests, so only include
* engines on which there are incomplete requests.
*/
- for_each_gem_engine(ce, __context_engines_static(ctx), it) {
+ for_each_gem_engine(ce, engines, it) {
struct intel_engine_cs *engine;
if (intel_context_set_banned(ce))
@@ -486,8 +524,82 @@ static void kill_context(struct i915_gem_context *ctx)
* the context from the GPU, we have to resort to a full
* reset. We hope the collateral damage is worth it.
*/
- __reset_context(ctx, engine);
+ __reset_context(engines->ctx, engine);
+ }
+}
+
+static void kill_stale_engines(struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *pos, *next;
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(!i915_gem_context_is_closed(ctx));
+ list_for_each_entry_safe(pos, next, &ctx->stale.engines, link) {
+ if (!i915_sw_fence_await(&pos->fence)) {
+ list_del_init(&pos->link);
+ continue;
+ }
+
+ spin_unlock_irq(&ctx->stale.lock);
+
+ kill_engines(pos);
+
+ spin_lock_irq(&ctx->stale.lock);
+ GEM_BUG_ON(i915_sw_fence_signaled(&pos->fence));
+ list_safe_reset_next(pos, next, link);
+ list_del_init(&pos->link); /* decouple from FENCE_COMPLETE */
+
+ i915_sw_fence_complete(&pos->fence);
+ }
+ spin_unlock_irq(&ctx->stale.lock);
+}
+
+static void kill_context(struct i915_gem_context *ctx)
+{
+ kill_stale_engines(ctx);
+}
+
+static void engines_idle_release(struct i915_gem_context *ctx,
+ struct i915_gem_engines *engines)
+{
+ struct i915_gem_engines_iter it;
+ struct intel_context *ce;
+
+ INIT_LIST_HEAD(&engines->link);
+
+ engines->ctx = i915_gem_context_get(ctx);
+
+ for_each_gem_engine(ce, engines, it) {
+ struct dma_fence *fence;
+ int err = 0;
+
+ /* serialises with execbuf */
+ RCU_INIT_POINTER(ce->gem_context, NULL);
+ if (!intel_context_pin_if_active(ce))
+ continue;
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ err = i915_sw_fence_await_dma_fence(&engines->fence,
+ fence, 0,
+ GFP_KERNEL);
+ dma_fence_put(fence);
+ }
+ intel_context_unpin(ce);
+ if (err < 0)
+ goto kill;
}
+
+ spin_lock_irq(&ctx->stale.lock);
+ if (!i915_gem_context_is_closed(ctx))
+ list_add_tail(&engines->link, &ctx->stale.engines);
+ spin_unlock_irq(&ctx->stale.lock);
+
+kill:
+ if (list_empty(&engines->link)) /* raced, already closed */
+ kill_engines(engines);
+
+ i915_sw_fence_commit(&engines->fence);
}
static void set_closed_name(struct i915_gem_context *ctx)
@@ -511,11 +623,16 @@ static void context_close(struct i915_gem_context *ctx)
{
struct i915_address_space *vm;
+ /* Flush any concurrent set_engines() */
+ mutex_lock(&ctx->engines_mutex);
+ engines_idle_release(ctx, rcu_replace_pointer(ctx->engines, NULL, 1));
i915_gem_context_set_closed(ctx);
- set_closed_name(ctx);
+ mutex_unlock(&ctx->engines_mutex);
mutex_lock(&ctx->mutex);
+ set_closed_name(ctx);
+
vm = i915_gem_context_vm(ctx);
if (vm)
i915_vm_close(vm);
@@ -565,6 +682,22 @@ static int __context_set_persistence(struct i915_gem_context *ctx, bool state)
if (!(ctx->i915->caps.scheduler & I915_SCHEDULER_CAP_PREEMPTION))
return -ENODEV;
+ /*
+ * If the cancel fails, we then need to reset, cleanly!
+ *
+ * If the per-engine reset fails, all hope is lost! We resort
+ * to a full GPU reset in that unlikely case, but realistically
+ * if the engine could not reset, the full reset does not fare
+ * much better. The damage has been done.
+ *
+ * However, if we cannot reset an engine by itself, we cannot
+ * cleanup a hanging persistent context without causing
+ * collateral damage, and we should not pretend we can by
+ * exposing the interface.
+ */
+ if (!intel_has_reset_engine(&ctx->i915->gt))
+ return -ENODEV;
+
i915_gem_context_clear_persistence(ctx);
}
@@ -588,6 +721,9 @@ __create_context(struct drm_i915_private *i915)
ctx->sched.priority = I915_USER_PRIORITY(I915_PRIORITY_NORMAL);
mutex_init(&ctx->mutex);
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
mutex_init(&ctx->engines_mutex);
e = default_engines(ctx);
if (IS_ERR(e)) {
@@ -621,23 +757,30 @@ err_free:
return ERR_PTR(err);
}
-static void
+static int
context_apply_all(struct i915_gem_context *ctx,
- void (*fn)(struct intel_context *ce, void *data),
+ int (*fn)(struct intel_context *ce, void *data),
void *data)
{
struct i915_gem_engines_iter it;
struct intel_context *ce;
+ int err = 0;
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it)
- fn(ce, data);
+ for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ err = fn(ce, data);
+ if (err)
+ break;
+ }
i915_gem_context_unlock_engines(ctx);
+
+ return err;
}
-static void __apply_ppgtt(struct intel_context *ce, void *vm)
+static int __apply_ppgtt(struct intel_context *ce, void *vm)
{
i915_vm_put(ce->vm);
ce->vm = i915_vm_get(vm);
+ return 0;
}
static struct i915_address_space *
@@ -675,9 +818,10 @@ static void __set_timeline(struct intel_timeline **dst,
intel_timeline_put(old);
}
-static void __apply_timeline(struct intel_context *ce, void *timeline)
+static int __apply_timeline(struct intel_context *ce, void *timeline)
{
__set_timeline(&ce->timeline, timeline);
+ return 0;
}
static void __assign_timeline(struct i915_gem_context *ctx,
@@ -708,8 +852,8 @@ i915_gem_create_context(struct drm_i915_private *i915, unsigned int flags)
ppgtt = i915_ppgtt_create(&i915->gt);
if (IS_ERR(ppgtt)) {
- DRM_DEBUG_DRIVER("PPGTT setup failed (%ld)\n",
- PTR_ERR(ppgtt));
+ drm_dbg(&i915->drm, "PPGTT setup failed (%ld)\n",
+ PTR_ERR(ppgtt));
context_close(ctx);
return ERR_CAST(ppgtt);
}
@@ -751,20 +895,15 @@ static void init_contexts(struct i915_gem_contexts *gc)
void i915_gem_init__contexts(struct drm_i915_private *i915)
{
init_contexts(&i915->gem.contexts);
- DRM_DEBUG_DRIVER("%s context support initialized\n",
- DRIVER_CAPS(i915)->has_logical_contexts ?
- "logical" : "fake");
+ drm_dbg(&i915->drm, "%s context support initialized\n",
+ DRIVER_CAPS(i915)->has_logical_contexts ?
+ "logical" : "fake");
}
void i915_gem_driver_release__contexts(struct drm_i915_private *i915)
{
flush_work(&i915->gem.contexts.free_work);
-}
-
-static int vm_idr_cleanup(int id, void *p, void *data)
-{
- i915_vm_put(p);
- return 0;
+ rcu_barrier(); /* and flush the left over RCU frees */
}
static int gem_context_register(struct i915_gem_context *ctx,
@@ -804,8 +943,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
xa_init_flags(&file_priv->context_xa, XA_FLAGS_ALLOC);
- mutex_init(&file_priv->vm_idr_lock);
- idr_init_base(&file_priv->vm_idr, 1);
+ /* 0 reserved for invalid/unassigned ppgtt */
+ xa_init_flags(&file_priv->vm_xa, XA_FLAGS_ALLOC1);
ctx = i915_gem_create_context(i915, 0);
if (IS_ERR(ctx)) {
@@ -823,9 +962,8 @@ int i915_gem_context_open(struct drm_i915_private *i915,
err_ctx:
context_close(ctx);
err:
- idr_destroy(&file_priv->vm_idr);
+ xa_destroy(&file_priv->vm_xa);
xa_destroy(&file_priv->context_xa);
- mutex_destroy(&file_priv->vm_idr_lock);
return err;
}
@@ -833,6 +971,7 @@ void i915_gem_context_close(struct drm_file *file)
{
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_private *i915 = file_priv->dev_priv;
+ struct i915_address_space *vm;
struct i915_gem_context *ctx;
unsigned long idx;
@@ -840,9 +979,9 @@ void i915_gem_context_close(struct drm_file *file)
context_close(ctx);
xa_destroy(&file_priv->context_xa);
- idr_for_each(&file_priv->vm_idr, vm_idr_cleanup, NULL);
- idr_destroy(&file_priv->vm_idr);
- mutex_destroy(&file_priv->vm_idr_lock);
+ xa_for_each(&file_priv->vm_xa, idx, vm)
+ i915_vm_put(vm);
+ xa_destroy(&file_priv->vm_xa);
contexts_flush_free(&i915->gem.contexts);
}
@@ -854,6 +993,7 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
struct drm_i915_gem_vm_control *args = data;
struct drm_i915_file_private *file_priv = file->driver_priv;
struct i915_ppgtt *ppgtt;
+ u32 id;
int err;
if (!HAS_FULL_PPGTT(i915))
@@ -876,23 +1016,15 @@ int i915_gem_vm_create_ioctl(struct drm_device *dev, void *data,
goto err_put;
}
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
+ err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
+ xa_limit_32b, GFP_KERNEL);
if (err)
goto err_put;
- err = idr_alloc(&file_priv->vm_idr, &ppgtt->vm, 0, 0, GFP_KERNEL);
- if (err < 0)
- goto err_unlock;
-
- GEM_BUG_ON(err == 0); /* reserved for invalid/unassigned ppgtt */
-
- mutex_unlock(&file_priv->vm_idr_lock);
-
- args->vm_id = err;
+ GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ args->vm_id = id;
return 0;
-err_unlock:
- mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_vm_put(&ppgtt->vm);
return err;
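
The IDR-plus-mutex pair is replaced by an xarray, which handles its own locking; XA_FLAGS_ALLOC1 makes xa_alloc() hand out ids starting at 1, so the old "id 0 is reserved" invariant now holds by construction rather than by a GEM_BUG_ON. A kernel-style sketch of the new lifecycle (assumes <linux/xarray.h>; the surrounding variables are illustrative):

/* create: publish the vm and hand the id to userspace */
u32 id;
int err = xa_alloc(&file_priv->vm_xa, &id, &ppgtt->vm,
                   xa_limit_32b, GFP_KERNEL);   /* id >= 1 on success */
if (err)
        return err;
args->vm_id = id;

/* destroy: one call atomically looks up and unpublishes */
struct i915_address_space *vm = xa_erase(&file_priv->vm_xa, args->vm_id);
if (!vm)
        return -ENOENT;         /* covers vm_id == 0 and stale ids alike */
i915_vm_put(vm);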
@@ -904,8 +1036,6 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_i915_file_private *file_priv = file->driver_priv;
struct drm_i915_gem_vm_control *args = data;
struct i915_address_space *vm;
- int err;
- u32 id;
if (args->flags)
return -EINVAL;
@@ -913,17 +1043,7 @@ int i915_gem_vm_destroy_ioctl(struct drm_device *dev, void *data,
if (args->extensions)
return -EINVAL;
- id = args->vm_id;
- if (!id)
- return -ENOENT;
-
- err = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (err)
- return err;
-
- vm = idr_remove(&file_priv->vm_idr, id);
-
- mutex_unlock(&file_priv->vm_idr_lock);
+ vm = xa_erase(&file_priv->vm_xa, args->vm_id);
if (!vm)
return -ENOENT;
@@ -949,6 +1069,30 @@ static void cb_retire(struct i915_active *base)
kfree(cb);
}
+static inline struct i915_gem_engines *
+__context_engines_await(const struct i915_gem_context *ctx)
+{
+ struct i915_gem_engines *engines;
+
+ rcu_read_lock();
+ do {
+ engines = rcu_dereference(ctx->engines);
+ if (unlikely(!engines))
+ break;
+
+ if (unlikely(!i915_sw_fence_await(&engines->fence)))
+ continue;
+
+ if (likely(engines == rcu_access_pointer(ctx->engines)))
+ break;
+
+ i915_sw_fence_complete(&engines->fence);
+ } while (1);
+ rcu_read_unlock();
+
+ return engines;
+}
+
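__context_engines_await() above is the standard RCU acquire-and-recheck loop: load the pointer under rcu_read_lock(), try to take a reference, then verify the pointer has not been replaced; on a lost race, drop the reference and reload. The same shape, with hypothetical try_get()/put() helpers standing in for the sw-fence await/complete calls:

rcu_read_lock();
do {
        e = rcu_dereference(ctx->engines);
        if (!e)
                break;                  /* context already torn down */
        if (!try_get(e))                /* here: i915_sw_fence_await() */
                continue;               /* fence fired, reload the pointer */
        if (e == rcu_access_pointer(ctx->engines))
                break;                  /* still current: reference is good */
        put(e);                         /* replaced underneath us, retry */
} while (1);
rcu_read_unlock();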
I915_SELFTEST_DECLARE(static intel_engine_mask_t context_barrier_inject_fault);
static int context_barrier_task(struct i915_gem_context *ctx,
intel_engine_mask_t engines,
@@ -959,6 +1103,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
{
struct context_barrier_task *cb;
struct i915_gem_engines_iter it;
+ struct i915_gem_engines *e;
struct intel_context *ce;
int err = 0;
@@ -975,7 +1120,13 @@ static int context_barrier_task(struct i915_gem_context *ctx,
return err;
}
- for_each_gem_engine(ce, i915_gem_context_lock_engines(ctx), it) {
+ e = __context_engines_await(ctx);
+ if (!e) {
+ i915_active_release(&cb->base);
+ return -ENOENT;
+ }
+
+ for_each_gem_engine(ce, e, it) {
struct i915_request *rq;
if (I915_SELFTEST_ONLY(context_barrier_inject_fault &
@@ -1006,7 +1157,7 @@ static int context_barrier_task(struct i915_gem_context *ctx,
if (err)
break;
}
- i915_gem_context_unlock_engines(ctx);
+ i915_sw_fence_complete(&e->fence);
cb->task = err ? NULL : task; /* caller needs to unwind instead */
cb->data = data;
@@ -1021,7 +1172,8 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
struct drm_i915_gem_context_param *args)
{
struct i915_address_space *vm;
- int ret;
+ int err;
+ u32 id;
if (!rcu_access_pointer(ctx->vm))
return -ENODEV;
@@ -1029,27 +1181,22 @@ static int get_ppgtt(struct drm_i915_file_private *file_priv,
rcu_read_lock();
vm = context_get_vm_rcu(ctx);
rcu_read_unlock();
+ if (!vm)
+ return -ENODEV;
- ret = mutex_lock_interruptible(&file_priv->vm_idr_lock);
- if (ret)
+ err = xa_alloc(&file_priv->vm_xa, &id, vm, xa_limit_32b, GFP_KERNEL);
+ if (err)
goto err_put;
- ret = idr_alloc(&file_priv->vm_idr, vm, 0, 0, GFP_KERNEL);
- GEM_BUG_ON(!ret);
- if (ret < 0)
- goto err_unlock;
-
i915_vm_open(vm);
+ GEM_BUG_ON(id == 0); /* reserved for invalid/unassigned ppgtt */
+ args->value = id;
args->size = 0;
- args->value = ret;
- ret = 0;
-err_unlock:
- mutex_unlock(&file_priv->vm_idr_lock);
err_put:
i915_vm_put(vm);
- return ret;
+ return err;
}
static void set_ppgtt_barrier(void *data)
@@ -1151,7 +1298,7 @@ static int set_ppgtt(struct drm_i915_file_private *file_priv,
return -ENOENT;
rcu_read_lock();
- vm = idr_find(&file_priv->vm_idr, args->value);
+ vm = xa_load(&file_priv->vm_xa, args->value);
if (vm && !kref_get_unless_zero(&vm->ref))
vm = NULL;
rcu_read_unlock();
@@ -1197,87 +1344,61 @@ out:
return err;
}
-static int gen8_emit_rpcs_config(struct i915_request *rq,
- struct intel_context *ce,
- struct intel_sseu sseu)
+static int __apply_ringsize(struct intel_context *ce, void *sz)
{
- u64 offset;
- u32 *cs;
+ return intel_context_set_ring_size(ce, (unsigned long)sz);
+}
+
+static int set_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
+{
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
- cs = intel_ring_begin(rq, 4);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
+ if (args->size)
+ return -EINVAL;
- offset = i915_ggtt_offset(ce->state) +
- LRC_STATE_PN * PAGE_SIZE +
- CTX_R_PWR_CLK_STATE * 4;
+ if (!IS_ALIGNED(args->value, I915_GTT_PAGE_SIZE))
+ return -EINVAL;
- *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
- *cs++ = lower_32_bits(offset);
- *cs++ = upper_32_bits(offset);
- *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+ if (args->value < I915_GTT_PAGE_SIZE)
+ return -EINVAL;
- intel_ring_advance(rq, cs);
+ if (args->value > 128 * I915_GTT_PAGE_SIZE)
+ return -EINVAL;
- return 0;
+ return context_apply_all(ctx,
+ __apply_ringsize,
+ __intel_context_ring_size(args->value));
}
-static int
-gen8_modify_rpcs(struct intel_context *ce, struct intel_sseu sseu)
+static int __get_ringsize(struct intel_context *ce, void *arg)
{
- struct i915_request *rq;
- int ret;
-
- lockdep_assert_held(&ce->pin_mutex);
+ long sz;
- /*
- * If the context is not idle, we have to submit an ordered request to
- * modify its context image via the kernel context (writing to our own
- * image, or into the registers directory, does not stick). Pristine
- * and idle contexts will be configured on pinning.
- */
- if (!intel_context_pin_if_active(ce))
- return 0;
+ sz = intel_context_get_ring_size(ce);
+ GEM_BUG_ON(sz > INT_MAX);
- rq = intel_engine_create_kernel_request(ce->engine);
- if (IS_ERR(rq)) {
- ret = PTR_ERR(rq);
- goto out_unpin;
- }
-
- /* Serialise with the remote context */
- ret = intel_context_prepare_remote_request(ce, rq);
- if (ret == 0)
- ret = gen8_emit_rpcs_config(rq, ce, sseu);
-
- i915_request_add(rq);
-out_unpin:
- intel_context_unpin(ce);
- return ret;
+ return sz; /* stop on first engine */
}
-static int
-intel_context_reconfigure_sseu(struct intel_context *ce, struct intel_sseu sseu)
+static int get_ringsize(struct i915_gem_context *ctx,
+ struct drm_i915_gem_context_param *args)
{
- int ret;
-
- GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+ int sz;
- ret = intel_context_lock_pinned(ce);
- if (ret)
- return ret;
+ if (!HAS_LOGICAL_RING_CONTEXTS(ctx->i915))
+ return -ENODEV;
- /* Nothing to do if unmodified. */
- if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
- goto unlock;
+ if (args->size)
+ return -EINVAL;
- ret = gen8_modify_rpcs(ce, sseu);
- if (!ret)
- ce->sseu = sseu;
+ sz = context_apply_all(ctx, __get_ringsize, NULL);
+ if (sz < 0)
+ return sz;
-unlock:
- intel_context_unlock_pinned(ce);
- return ret;
+ args->value = sz;
+ return 0;
}
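
Taken together, set_ringsize() accepts a byte count that is page-aligned and between one page (4 KiB) and 128 pages (512 KiB), with args->size left zero. A userspace sketch using the uapi names this series introduces (assumed to be present in the updated i915_drm.h):

#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int set_ring_size(int fd, __u32 ctx_id, __u64 bytes)
{
        struct drm_i915_gem_context_param p = {
                .ctx_id = ctx_id,
                .param  = I915_CONTEXT_PARAM_RINGSIZE,
                .value  = bytes,  /* 4 KiB <= bytes <= 512 KiB, page aligned */
        };

        return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
}

/* e.g. set_ring_size(fd, ctx, 512 << 10) for the largest ring */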
static int
@@ -1444,6 +1565,7 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
struct i915_context_engines_load_balance __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
+ struct drm_i915_private *i915 = set->ctx->i915;
struct intel_engine_cs *stack[16];
struct intel_engine_cs **siblings;
struct intel_context *ce;
@@ -1451,24 +1573,25 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
unsigned int n;
int err;
- if (!HAS_EXECLISTS(set->ctx->i915))
+ if (!HAS_EXECLISTS(i915))
return -ENODEV;
- if (USES_GUC_SUBMISSION(set->ctx->i915))
+ if (intel_uc_uses_guc_submission(&i915->gt.uc))
return -ENODEV; /* not implemented yet */
if (get_user(idx, &ext->engine_index))
return -EFAULT;
if (idx >= set->engines->num_engines) {
- DRM_DEBUG("Invalid placement value, %d >= %d\n",
- idx, set->engines->num_engines);
+ drm_dbg(&i915->drm, "Invalid placement value, %d >= %d\n",
+ idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (set->engines->engines[idx]) {
- DRM_DEBUG("Invalid placement[%d], already occupied\n", idx);
+ drm_dbg(&i915->drm,
+ "Invalid placement[%d], already occupied\n", idx);
return -EEXIST;
}
@@ -1500,12 +1623,13 @@ set_engines__load_balance(struct i915_user_extension __user *base, void *data)
goto out_siblings;
}
- siblings[n] = intel_engine_lookup_user(set->ctx->i915,
+ siblings[n] = intel_engine_lookup_user(i915,
ci.engine_class,
ci.engine_instance);
if (!siblings[n]) {
- DRM_DEBUG("Invalid sibling[%d]: { class:%d, inst:%d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Invalid sibling[%d]: { class:%d, inst:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
err = -EINVAL;
goto out_siblings;
}
@@ -1538,6 +1662,7 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
struct i915_context_engines_bond __user *ext =
container_of_user(base, typeof(*ext), base);
const struct set_engines *set = data;
+ struct drm_i915_private *i915 = set->ctx->i915;
struct i915_engine_class_instance ci;
struct intel_engine_cs *virtual;
struct intel_engine_cs *master;
@@ -1548,14 +1673,15 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
return -EFAULT;
if (idx >= set->engines->num_engines) {
- DRM_DEBUG("Invalid index for virtual engine: %d >= %d\n",
- idx, set->engines->num_engines);
+ drm_dbg(&i915->drm,
+ "Invalid index for virtual engine: %d >= %d\n",
+ idx, set->engines->num_engines);
return -EINVAL;
}
idx = array_index_nospec(idx, set->engines->num_engines);
if (!set->engines->engines[idx]) {
- DRM_DEBUG("Invalid engine at %d\n", idx);
+ drm_dbg(&i915->drm, "Invalid engine at %d\n", idx);
return -EINVAL;
}
virtual = set->engines->engines[idx]->engine;
@@ -1573,11 +1699,12 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
if (copy_from_user(&ci, &ext->master, sizeof(ci)))
return -EFAULT;
- master = intel_engine_lookup_user(set->ctx->i915,
+ master = intel_engine_lookup_user(i915,
ci.engine_class, ci.engine_instance);
if (!master) {
- DRM_DEBUG("Unrecognised master engine: { class:%u, instance:%u }\n",
- ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Unrecognised master engine: { class:%u, instance:%u }\n",
+ ci.engine_class, ci.engine_instance);
return -EINVAL;
}
@@ -1590,12 +1717,13 @@ set_engines__bond(struct i915_user_extension __user *base, void *data)
if (copy_from_user(&ci, &ext->engines[n], sizeof(ci)))
return -EFAULT;
- bond = intel_engine_lookup_user(set->ctx->i915,
+ bond = intel_engine_lookup_user(i915,
ci.engine_class,
ci.engine_instance);
if (!bond) {
- DRM_DEBUG("Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Unrecognised engine[%d] for bonding: { class:%d, instance: %d }\n",
+ n, ci.engine_class, ci.engine_instance);
return -EINVAL;
}
@@ -1624,6 +1752,7 @@ static int
set_engines(struct i915_gem_context *ctx,
const struct drm_i915_gem_context_param *args)
{
+ struct drm_i915_private *i915 = ctx->i915;
struct i915_context_param_engines __user *user =
u64_to_user_ptr(args->value);
struct set_engines set = { .ctx = ctx };
@@ -1645,8 +1774,8 @@ set_engines(struct i915_gem_context *ctx,
BUILD_BUG_ON(!IS_ALIGNED(sizeof(*user), sizeof(*user->engines)));
if (args->size < sizeof(*user) ||
!IS_ALIGNED(args->size, sizeof(*user->engines))) {
- DRM_DEBUG("Invalid size for engine array: %d\n",
- args->size);
+ drm_dbg(&i915->drm, "Invalid size for engine array: %d\n",
+ args->size);
return -EINVAL;
}
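
The size check above accepts exactly a header plus a whole number of engine slots. A userspace sketch of a conforming two-engine payload, using the I915_DEFINE_CONTEXT_PARAM_ENGINES helper from i915_drm.h (the engine choices are illustrative):

#include <drm/i915_drm.h>

I915_DEFINE_CONTEXT_PARAM_ENGINES(engines, 2) = {
        .engines = {
                { .engine_class = I915_ENGINE_CLASS_RENDER, .engine_instance = 0 },
                { .engine_class = I915_ENGINE_CLASS_COPY, .engine_instance = 0 },
        },
};

/* args.size = sizeof(engines): sizeof(header) + 2 * sizeof(engine slot);
 * anything smaller than the header or not slot-aligned is -EINVAL. */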
@@ -1655,13 +1784,10 @@ set_engines(struct i915_gem_context *ctx,
* first 64 engines defined here.
*/
num_engines = (args->size - sizeof(*user)) / sizeof(*user->engines);
-
- set.engines = kmalloc(struct_size(set.engines, engines, num_engines),
- GFP_KERNEL);
+ set.engines = alloc_engines(num_engines);
if (!set.engines)
return -ENOMEM;
- init_rcu_head(&set.engines->rcu);
for (n = 0; n < num_engines; n++) {
struct i915_engine_class_instance ci;
struct intel_engine_cs *engine;
@@ -1682,8 +1808,9 @@ set_engines(struct i915_gem_context *ctx,
ci.engine_class,
ci.engine_instance);
if (!engine) {
- DRM_DEBUG("Invalid engine[%d]: { class:%d, instance:%d }\n",
- n, ci.engine_class, ci.engine_instance);
+ drm_dbg(&i915->drm,
+ "Invalid engine[%d]: { class:%d, instance:%d }\n",
+ n, ci.engine_class, ci.engine_instance);
__free_engines(set.engines, n);
return -ENOENT;
}
@@ -1713,6 +1840,11 @@ set_engines(struct i915_gem_context *ctx,
replace:
mutex_lock(&ctx->engines_mutex);
+ if (i915_gem_context_is_closed(ctx)) {
+ mutex_unlock(&ctx->engines_mutex);
+ free_engines(set.engines);
+ return -ENOENT;
+ }
if (args->size)
i915_gem_context_set_user_engines(ctx);
else
@@ -1720,7 +1852,8 @@ replace:
set.engines = rcu_replace_pointer(ctx->engines, set.engines, 1);
mutex_unlock(&ctx->engines_mutex);
- call_rcu(&set.engines->rcu, free_engines_rcu);
+ /* Keep track of old engine sets for kill_context() */
+ engines_idle_release(ctx, set.engines);
return 0;
}
@@ -1731,11 +1864,10 @@ __copy_engines(struct i915_gem_engines *e)
struct i915_gem_engines *copy;
unsigned int n;
- copy = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ copy = alloc_engines(e->num_engines);
if (!copy)
return ERR_PTR(-ENOMEM);
- init_rcu_head(&copy->rcu);
for (n = 0; n < e->num_engines; n++) {
if (e->engines[n])
copy->engines[n] = intel_context_get(e->engines[n]);
@@ -1836,17 +1968,19 @@ set_persistence(struct i915_gem_context *ctx,
return __context_set_persistence(ctx, args->value);
}
-static void __apply_priority(struct intel_context *ce, void *arg)
+static int __apply_priority(struct intel_context *ce, void *arg)
{
struct i915_gem_context *ctx = arg;
if (!intel_engine_has_semaphores(ce->engine))
- return;
+ return 0;
if (ctx->sched.priority >= I915_PRIORITY_NORMAL)
intel_context_set_use_semaphores(ce);
else
intel_context_clear_use_semaphores(ce);
+
+ return 0;
}
static int set_priority(struct i915_gem_context *ctx,
@@ -1939,6 +2073,10 @@ static int ctx_setparam(struct drm_i915_file_private *fpriv,
ret = set_persistence(ctx, args);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = set_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -1967,6 +2105,18 @@ static int create_setparam(struct i915_user_extension __user *ext, void *data)
return ctx_setparam(arg->fpriv, arg->ctx, &local.param);
}
+static int copy_ring_size(struct intel_context *dst,
+ struct intel_context *src)
+{
+ long sz;
+
+ sz = intel_context_get_ring_size(src);
+ if (sz < 0)
+ return sz;
+
+ return intel_context_set_ring_size(dst, sz);
+}
+
static int clone_engines(struct i915_gem_context *dst,
struct i915_gem_context *src)
{
@@ -1975,11 +2125,10 @@ static int clone_engines(struct i915_gem_context *dst,
bool user_engines;
unsigned long n;
- clone = kmalloc(struct_size(e, engines, e->num_engines), GFP_KERNEL);
+ clone = alloc_engines(e->num_engines);
if (!clone)
goto err_unlock;
- init_rcu_head(&clone->rcu);
for (n = 0; n < e->num_engines; n++) {
struct intel_engine_cs *engine;
@@ -2009,6 +2158,12 @@ static int clone_engines(struct i915_gem_context *dst,
}
intel_context_set_gem(clone->engines[n], dst);
+
+ /* Copy across the preferred ringsize */
+ if (copy_ring_size(clone->engines[n], e->engines[n])) {
+ __free_engines(clone, n + 1);
+ goto err_unlock;
+ }
}
clone->num_engines = n;
@@ -2016,8 +2171,7 @@ static int clone_engines(struct i915_gem_context *dst,
i915_gem_context_unlock_engines(src);
/* Serialised by constructor */
- free_engines(__context_engines_static(dst));
- RCU_INIT_POINTER(dst->engines, clone);
+ engines_idle_release(dst, rcu_replace_pointer(dst->engines, clone, 1));
if (user_engines)
i915_gem_context_set_user_engines(dst);
else
@@ -2197,8 +2351,9 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
ext_data.fpriv = file->driver_priv;
if (client_is_banned(ext_data.fpriv)) {
- DRM_DEBUG("client %s[%d] banned from creating ctx\n",
- current->comm, task_pid_nr(current));
+ drm_dbg(&i915->drm,
+ "client %s[%d] banned from creating ctx\n",
+ current->comm, task_pid_nr(current));
return -EIO;
}
@@ -2220,7 +2375,7 @@ int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
goto err_ctx;
args->ctx_id = id;
- DRM_DEBUG("HW context %d created\n", args->ctx_id);
+ drm_dbg(&i915->drm, "HW context %d created\n", args->ctx_id);
return 0;
@@ -2370,6 +2525,10 @@ int i915_gem_context_getparam_ioctl(struct drm_device *dev, void *data,
args->value = i915_gem_context_is_persistent(ctx);
break;
+ case I915_CONTEXT_PARAM_RINGSIZE:
+ ret = get_ringsize(ctx, args);
+ break;
+
case I915_CONTEXT_PARAM_BAN_PERIOD:
default:
ret = -EINVAL;
@@ -2443,6 +2602,9 @@ i915_gem_engines_iter_next(struct i915_gem_engines_iter *it)
const struct i915_gem_engines *e = it->engines;
struct intel_context *ctx;
+ if (unlikely(!e))
+ return NULL;
+
do {
if (it->idx >= e->num_engines)
return NULL;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.h b/drivers/gpu/drm/i915/gem/i915_gem_context.h
index 3ae61a355d87..57b7ae2893e1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context.h
@@ -207,7 +207,6 @@ static inline void
i915_gem_engines_iter_init(struct i915_gem_engines_iter *it,
struct i915_gem_engines *engines)
{
- GEM_BUG_ON(!engines);
it->engines = engines;
it->idx = 0;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
index 017ca803ab47..28760bd03265 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_context_types.h
@@ -20,6 +20,7 @@
#include "gt/intel_context_types.h"
#include "i915_scheduler.h"
+#include "i915_sw_fence.h"
struct pid;
@@ -30,7 +31,12 @@ struct intel_timeline;
struct intel_ring;
struct i915_gem_engines {
- struct rcu_head rcu;
+ union {
+ struct list_head link;
+ struct rcu_head rcu;
+ };
+ struct i915_sw_fence fence;
+ struct i915_gem_context *ctx;
unsigned int num_engines;
struct intel_context *engines[];
};
@@ -173,6 +179,11 @@ struct i915_gem_context {
* context in messages.
*/
char name[TASK_COMM_LEN + 8];
+
+ struct {
+ spinlock_t lock;
+ struct list_head engines;
+ } stale;
};
#endif /* __I915_GEM_CONTEXT_TYPES_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
index 372b57ca0efc..7db5a793739d 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_dmabuf.c
@@ -48,7 +48,9 @@ static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachme
src = sg_next(src);
}
- if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
+ if (!dma_map_sg_attrs(attachment->dev,
+ st->sgl, st->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC)) {
ret = -ENOMEM;
goto err_free_sg;
}
@@ -71,7 +73,9 @@ static void i915_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
{
struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
- dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
+ dma_unmap_sg_attrs(attachment->dev,
+ sg->sgl, sg->nents, dir,
+ DMA_ATTR_SKIP_CPU_SYNC);
sg_free_table(sg);
kfree(sg);
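
DMA_ATTR_SKIP_CPU_SYNC here avoids a cache flush over the whole scatterlist on every map and unmap; a driver that opts out this way takes on the responsibility of syncing at the points where the CPU really did touch the pages. A kernel-style sketch of that division of labour (dev and sgt are assumed variables):

if (!dma_map_sg_attrs(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL,
                      DMA_ATTR_SKIP_CPU_SYNC))
        return -ENOMEM;

/* only when the CPU actually dirtied the buffer since mapping: */
dma_sync_sg_for_device(dev, sgt->sgl, sgt->nents, DMA_BIDIRECTIONAL);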
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index d5a0f5ae4a8b..d3f4f28e9468 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
@@ -10,7 +10,6 @@
#include <linux/uaccess.h>
#include <drm/drm_syncobj.h>
-#include <drm/i915_drm.h>
#include "display/intel_frontbuffer.h"
@@ -28,6 +27,19 @@
#include "i915_sw_fence_work.h"
#include "i915_trace.h"
+struct eb_vma {
+ struct i915_vma *vma;
+ unsigned int flags;
+
+ /** This vma's place in the execbuf reservation list */
+ struct drm_i915_gem_exec_object2 *exec;
+ struct list_head bind_link;
+ struct list_head reloc_link;
+
+ struct hlist_node node;
+ u32 handle;
+};
+
enum {
FORCE_CPU_RELOC = 1,
FORCE_GTT_RELOC,
@@ -35,17 +47,15 @@ enum {
#define DBG_FORCE_RELOC 0 /* choose one of the above! */
};
-#define __EXEC_OBJECT_HAS_REF BIT(31)
-#define __EXEC_OBJECT_HAS_PIN BIT(30)
-#define __EXEC_OBJECT_HAS_FENCE BIT(29)
-#define __EXEC_OBJECT_NEEDS_MAP BIT(28)
-#define __EXEC_OBJECT_NEEDS_BIAS BIT(27)
-#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 27) /* all of the above */
+#define __EXEC_OBJECT_HAS_PIN BIT(31)
+#define __EXEC_OBJECT_HAS_FENCE BIT(30)
+#define __EXEC_OBJECT_NEEDS_MAP BIT(29)
+#define __EXEC_OBJECT_NEEDS_BIAS BIT(28)
+#define __EXEC_OBJECT_INTERNAL_FLAGS (~0u << 28) /* all of the above */
#define __EXEC_OBJECT_RESERVED (__EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_FENCE)
#define __EXEC_HAS_RELOC BIT(31)
-#define __EXEC_VALIDATED BIT(30)
-#define __EXEC_INTERNAL_FLAGS (~0u << 30)
+#define __EXEC_INTERNAL_FLAGS (~0u << 31)
#define UPDATE PIN_OFFSET_FIXED
#define BATCH_OFFSET_BIAS (256*1024)
@@ -220,15 +230,14 @@ struct i915_execbuffer {
struct drm_file *file; /** per-file lookup tables and limits */
struct drm_i915_gem_execbuffer2 *args; /** ioctl parameters */
struct drm_i915_gem_exec_object2 *exec; /** ioctl execobj[] */
- struct i915_vma **vma;
- unsigned int *flags;
+ struct eb_vma *vma;
struct intel_engine_cs *engine; /** engine to queue the request to */
struct intel_context *context; /* logical state for the request */
struct i915_gem_context *gem_context; /** caller's context */
struct i915_request *request; /** our request to build */
- struct i915_vma *batch; /** identity of the batch obj/vma */
+ struct eb_vma *batch; /** identity of the batch obj/vma */
struct i915_vma *trampoline; /** trampoline used for chaining */
/** actual size of execobj[] as we may extend it for the cmdparser */
@@ -276,8 +285,6 @@ struct i915_execbuffer {
struct hlist_head *buckets; /** ht for relocation handles */
};
-#define exec_entry(EB, VMA) (&(EB)->exec[(VMA)->exec_flags - (EB)->flags])
-
static inline bool eb_use_cmdparser(const struct i915_execbuffer *eb)
{
return intel_engine_requires_cmd_parser(eb->engine) ||
@@ -364,9 +371,9 @@ eb_vma_misplaced(const struct drm_i915_gem_exec_object2 *entry,
static inline bool
eb_pin_vma(struct i915_execbuffer *eb,
const struct drm_i915_gem_exec_object2 *entry,
- struct i915_vma *vma)
+ struct eb_vma *ev)
{
- unsigned int exec_flags = *vma->exec_flags;
+ struct i915_vma *vma = ev->vma;
u64 pin_flags;
if (vma->node.size)
@@ -375,24 +382,24 @@ eb_pin_vma(struct i915_execbuffer *eb,
pin_flags = entry->offset & PIN_OFFSET_MASK;
pin_flags |= PIN_USER | PIN_NOEVICT | PIN_OFFSET_FIXED;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_GTT))
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_GTT))
pin_flags |= PIN_GLOBAL;
if (unlikely(i915_vma_pin(vma, 0, 0, pin_flags)))
return false;
- if (unlikely(exec_flags & EXEC_OBJECT_NEEDS_FENCE)) {
+ if (unlikely(ev->flags & EXEC_OBJECT_NEEDS_FENCE)) {
if (unlikely(i915_vma_pin_fence(vma))) {
i915_vma_unpin(vma);
return false;
}
if (vma->fence)
- exec_flags |= __EXEC_OBJECT_HAS_FENCE;
+ ev->flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- return !eb_vma_misplaced(entry, vma, exec_flags);
+ ev->flags |= __EXEC_OBJECT_HAS_PIN;
+ return !eb_vma_misplaced(entry, vma, ev->flags);
}
static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
@@ -406,13 +413,13 @@ static inline void __eb_unreserve_vma(struct i915_vma *vma, unsigned int flags)
}
static inline void
-eb_unreserve_vma(struct i915_vma *vma, unsigned int *flags)
+eb_unreserve_vma(struct eb_vma *ev)
{
- if (!(*flags & __EXEC_OBJECT_HAS_PIN))
+ if (!(ev->flags & __EXEC_OBJECT_HAS_PIN))
return;
- __eb_unreserve_vma(vma, *flags);
- *flags &= ~__EXEC_OBJECT_RESERVED;
+ __eb_unreserve_vma(ev->vma, ev->flags);
+ ev->flags &= ~__EXEC_OBJECT_RESERVED;
}
static int
@@ -423,7 +430,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
if (unlikely(entry->flags & eb->invalid_flags))
return -EINVAL;
- if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+ if (unlikely(entry->alignment &&
+ !is_power_of_2_u64(entry->alignment)))
return -EINVAL;
/*
@@ -441,13 +449,6 @@ eb_validate_vma(struct i915_execbuffer *eb,
} else {
entry->pad_to_size = 0;
}
-
- if (unlikely(vma->exec_flags)) {
- DRM_DEBUG("Object [handle %d, index %d] appears more than once in object list\n",
- entry->handle, (int)(entry - eb->exec));
- return -EINVAL;
- }
-
/*
* From drm_mm perspective address space is continuous,
* so from this point we're always using non-canonical
@@ -470,41 +471,29 @@ eb_validate_vma(struct i915_execbuffer *eb,
return 0;
}
-static int
+static void
eb_add_vma(struct i915_execbuffer *eb,
unsigned int i, unsigned batch_idx,
struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
- int err;
+ struct eb_vma *ev = &eb->vma[i];
GEM_BUG_ON(i915_vma_is_closed(vma));
- if (!(eb->args->flags & __EXEC_VALIDATED)) {
- err = eb_validate_vma(eb, entry, vma);
- if (unlikely(err))
- return err;
- }
+ ev->vma = i915_vma_get(vma);
+ ev->exec = entry;
+ ev->flags = entry->flags;
if (eb->lut_size > 0) {
- vma->exec_handle = entry->handle;
- hlist_add_head(&vma->exec_node,
+ ev->handle = entry->handle;
+ hlist_add_head(&ev->node,
&eb->buckets[hash_32(entry->handle,
eb->lut_size)]);
}
if (entry->relocation_count)
- list_add_tail(&vma->reloc_link, &eb->relocs);
-
- /*
- * Stash a pointer from the vma to execobj, so we can query its flags,
- * size, alignment etc as provided by the user. Also we stash a pointer
- * to the vma inside the execobj so that we can use a direct lookup
- * to find the right target VMA when doing relocations.
- */
- eb->vma[i] = vma;
- eb->flags[i] = entry->flags;
- vma->exec_flags = &eb->flags[i];
+ list_add_tail(&ev->reloc_link, &eb->relocs);
/*
* SNA is doing fancy tricks with compressing batch buffers, which leads
@@ -517,30 +506,23 @@ eb_add_vma(struct i915_execbuffer *eb,
*/
if (i == batch_idx) {
if (entry->relocation_count &&
- !(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ !(ev->flags & EXEC_OBJECT_PINNED))
+ ev->flags |= __EXEC_OBJECT_NEEDS_BIAS;
if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+ ev->flags |= EXEC_OBJECT_NEEDS_FENCE;
- eb->batch = vma;
+ eb->batch = ev;
}
- err = 0;
- if (eb_pin_vma(eb, entry, vma)) {
+ if (eb_pin_vma(eb, entry, ev)) {
if (entry->offset != vma->node.start) {
entry->offset = vma->node.start | UPDATE;
eb->args->flags |= __EXEC_HAS_RELOC;
}
} else {
- eb_unreserve_vma(vma, vma->exec_flags);
-
- list_add_tail(&vma->exec_link, &eb->unbound);
- if (drm_mm_node_allocated(&vma->node))
- err = i915_vma_unbind(vma);
- if (unlikely(err))
- vma->exec_flags = NULL;
+ eb_unreserve_vma(ev);
+ list_add_tail(&ev->bind_link, &eb->unbound);
}
- return err;
}
static inline int use_cpu_reloc(const struct reloc_cache *cache,
@@ -561,14 +543,14 @@ static inline int use_cpu_reloc(const struct reloc_cache *cache,
}
static int eb_reserve_vma(const struct i915_execbuffer *eb,
- struct i915_vma *vma)
+ struct eb_vma *ev,
+ u64 pin_flags)
{
- struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- unsigned int exec_flags = *vma->exec_flags;
- u64 pin_flags;
+ struct drm_i915_gem_exec_object2 *entry = ev->exec;
+ unsigned int exec_flags = ev->flags;
+ struct i915_vma *vma = ev->vma;
int err;
- pin_flags = PIN_USER | PIN_NONBLOCK;
if (exec_flags & EXEC_OBJECT_NEEDS_GTT)
pin_flags |= PIN_GLOBAL;
@@ -582,11 +564,16 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
if (exec_flags & __EXEC_OBJECT_NEEDS_MAP)
pin_flags |= PIN_MAPPABLE;
- if (exec_flags & EXEC_OBJECT_PINNED) {
+ if (exec_flags & EXEC_OBJECT_PINNED)
pin_flags |= entry->offset | PIN_OFFSET_FIXED;
- pin_flags &= ~PIN_NONBLOCK; /* force overlapping checks */
- } else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS) {
+ else if (exec_flags & __EXEC_OBJECT_NEEDS_BIAS)
pin_flags |= BATCH_OFFSET_BIAS | PIN_OFFSET_BIAS;
+
+ if (drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(entry, vma, ev->flags)) {
+ err = i915_vma_unbind(vma);
+ if (err)
+ return err;
}
err = i915_vma_pin(vma,
@@ -611,8 +598,8 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
exec_flags |= __EXEC_OBJECT_HAS_FENCE;
}
- *vma->exec_flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
- GEM_BUG_ON(eb_vma_misplaced(entry, vma, exec_flags));
+ ev->flags = exec_flags | __EXEC_OBJECT_HAS_PIN;
+ GEM_BUG_ON(eb_vma_misplaced(entry, vma, ev->flags));
return 0;
}
@@ -620,10 +607,11 @@ static int eb_reserve_vma(const struct i915_execbuffer *eb,
static int eb_reserve(struct i915_execbuffer *eb)
{
const unsigned int count = eb->buffer_count;
+ unsigned int pin_flags = PIN_USER | PIN_NONBLOCK;
struct list_head last;
- struct i915_vma *vma;
+ struct eb_vma *ev;
unsigned int i, pass;
- int err;
+ int err = 0;
/*
* Attempt to pin all of the buffers into the GTT.
@@ -639,44 +627,54 @@ static int eb_reserve(struct i915_execbuffer *eb)
* room for the earlier objects *unless* we need to defragment.
*/
+ if (mutex_lock_interruptible(&eb->i915->drm.struct_mutex))
+ return -EINTR;
+
pass = 0;
- err = 0;
do {
- list_for_each_entry(vma, &eb->unbound, exec_link) {
- err = eb_reserve_vma(eb, vma);
+ list_for_each_entry(ev, &eb->unbound, bind_link) {
+ err = eb_reserve_vma(eb, ev, pin_flags);
if (err)
break;
}
- if (err != -ENOSPC)
- return err;
+ if (!(err == -ENOSPC || err == -EAGAIN))
+ break;
/* Resort *all* the objects into priority order */
INIT_LIST_HEAD(&eb->unbound);
INIT_LIST_HEAD(&last);
for (i = 0; i < count; i++) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ unsigned int flags;
+ ev = &eb->vma[i];
+ flags = ev->flags;
if (flags & EXEC_OBJECT_PINNED &&
flags & __EXEC_OBJECT_HAS_PIN)
continue;
- eb_unreserve_vma(vma, &eb->flags[i]);
+ eb_unreserve_vma(ev);
if (flags & EXEC_OBJECT_PINNED)
/* Pinned must have their slot */
- list_add(&vma->exec_link, &eb->unbound);
+ list_add(&ev->bind_link, &eb->unbound);
else if (flags & __EXEC_OBJECT_NEEDS_MAP)
/* Map require the lowest 256MiB (aperture) */
- list_add_tail(&vma->exec_link, &eb->unbound);
+ list_add_tail(&ev->bind_link, &eb->unbound);
else if (!(flags & EXEC_OBJECT_SUPPORTS_48B_ADDRESS))
/* Prioritise 4GiB region for restricted bo */
- list_add(&vma->exec_link, &last);
+ list_add(&ev->bind_link, &last);
else
- list_add_tail(&vma->exec_link, &last);
+ list_add_tail(&ev->bind_link, &last);
}
list_splice_tail(&last, &eb->unbound);
+ if (err == -EAGAIN) {
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ flush_workqueue(eb->i915->mm.userptr_wq);
+ mutex_lock(&eb->i915->drm.struct_mutex);
+ continue;
+ }
+
switch (pass++) {
case 0:
break;
@@ -687,13 +685,20 @@ static int eb_reserve(struct i915_execbuffer *eb)
err = i915_gem_evict_vm(eb->context->vm);
mutex_unlock(&eb->context->vm->mutex);
if (err)
- return err;
+ goto unlock;
break;
default:
- return -ENOSPC;
+ err = -ENOSPC;
+ goto unlock;
}
+
+ pin_flags = PIN_USER;
} while (1);
+
+unlock:
+ mutex_unlock(&eb->i915->drm.struct_mutex);
+ return err;
}
static unsigned int eb_batch_index(const struct i915_execbuffer *eb)
@@ -730,17 +735,14 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
unsigned int i, batch;
int err;
+ if (unlikely(i915_gem_context_is_closed(eb->gem_context)))
+ return -ENOENT;
+
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
batch = eb_batch_index(eb);
- mutex_lock(&eb->gem_context->mutex);
- if (unlikely(i915_gem_context_is_closed(eb->gem_context))) {
- err = -ENOENT;
- goto err_ctx;
- }
-
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -785,45 +787,37 @@ static int eb_lookup_vmas(struct i915_execbuffer *eb)
i915_gem_object_unlock(obj);
add_vma:
- err = eb_add_vma(eb, i, batch, vma);
+ err = eb_validate_vma(eb, &eb->exec[i], vma);
if (unlikely(err))
goto err_vma;
- GEM_BUG_ON(vma != eb->vma[i]);
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
- eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
+ eb_add_vma(eb, i, batch, vma);
}
- mutex_unlock(&eb->gem_context->mutex);
-
- eb->args->flags |= __EXEC_VALIDATED;
- return eb_reserve(eb);
+ return 0;
err_obj:
i915_gem_object_put(obj);
err_vma:
- eb->vma[i] = NULL;
-err_ctx:
- mutex_unlock(&eb->gem_context->mutex);
+ eb->vma[i].vma = NULL;
return err;
}
-static struct i915_vma *
+static struct eb_vma *
eb_get_vma(const struct i915_execbuffer *eb, unsigned long handle)
{
if (eb->lut_size < 0) {
if (handle >= -eb->lut_size)
return NULL;
- return eb->vma[handle];
+ return &eb->vma[handle];
} else {
struct hlist_head *head;
- struct i915_vma *vma;
+ struct eb_vma *ev;
head = &eb->buckets[hash_32(handle, eb->lut_size)];
- hlist_for_each_entry(vma, head, exec_node) {
- if (vma->exec_handle == handle)
- return vma;
+ hlist_for_each_entry(ev, head, node) {
+ if (ev->handle == handle)
+ return ev;
}
return NULL;
}
@@ -835,32 +829,21 @@ static void eb_release_vmas(const struct i915_execbuffer *eb)
unsigned int i;
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
- unsigned int flags = eb->flags[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
if (!vma)
break;
- GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
- vma->exec_flags = NULL;
- eb->vma[i] = NULL;
+ eb->vma[i].vma = NULL;
- if (flags & __EXEC_OBJECT_HAS_PIN)
- __eb_unreserve_vma(vma, flags);
+ if (ev->flags & __EXEC_OBJECT_HAS_PIN)
+ __eb_unreserve_vma(vma, ev->flags);
- if (flags & __EXEC_OBJECT_HAS_REF)
- i915_vma_put(vma);
+ i915_vma_put(vma);
}
}
-static void eb_reset_vmas(const struct i915_execbuffer *eb)
-{
- eb_release_vmas(eb);
- if (eb->lut_size > 0)
- memset(eb->buckets, 0,
- sizeof(struct hlist_head) << eb->lut_size);
-}
-
static void eb_destroy(const struct i915_execbuffer *eb)
{
GEM_BUG_ON(eb->reloc_cache.rq);
@@ -1196,7 +1179,7 @@ static int __reloc_gpu_alloc(struct i915_execbuffer *eb,
goto out_pool;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1327,10 +1310,11 @@ out:
static u64
eb_relocate_entry(struct i915_execbuffer *eb,
- struct i915_vma *vma,
+ struct eb_vma *ev,
const struct drm_i915_gem_relocation_entry *reloc)
{
- struct i915_vma *target;
+ struct drm_i915_private *i915 = eb->i915;
+ struct eb_vma *target;
int err;
/* we've already held a reference to all valid objects */
@@ -1340,7 +1324,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
/* Validate that the target is in a valid r/w GPU domain */
if (unlikely(reloc->write_domain & (reloc->write_domain - 1))) {
- DRM_DEBUG("reloc with multiple write domains: "
+ drm_dbg(&i915->drm, "reloc with multiple write domains: "
"target %d offset %d "
"read %08x write %08x",
reloc->target_handle,
@@ -1351,7 +1335,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (unlikely((reloc->write_domain | reloc->read_domains)
& ~I915_GEM_GPU_DOMAINS)) {
- DRM_DEBUG("reloc with read/write non-GPU domains: "
+ drm_dbg(&i915->drm, "reloc with read/write non-GPU domains: "
"target %d offset %d "
"read %08x write %08x",
reloc->target_handle,
@@ -1362,7 +1346,7 @@ eb_relocate_entry(struct i915_execbuffer *eb,
}
if (reloc->write_domain) {
- *target->exec_flags |= EXEC_OBJECT_WRITE;
+ target->flags |= EXEC_OBJECT_WRITE;
/*
* Sandybridge PPGTT errata: We need a global gtt mapping
@@ -1372,7 +1356,8 @@ eb_relocate_entry(struct i915_execbuffer *eb,
*/
if (reloc->write_domain == I915_GEM_DOMAIN_INSTRUCTION &&
IS_GEN(eb->i915, 6)) {
- err = i915_vma_bind(target, target->obj->cache_level,
+ err = i915_vma_bind(target->vma,
+ target->vma->obj->cache_level,
PIN_GLOBAL, NULL);
if (WARN_ONCE(err,
"Unexpected failure to bind target VMA!"))
@@ -1385,21 +1370,21 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* more work needs to be done.
*/
if (!DBG_FORCE_RELOC &&
- gen8_canonical_addr(target->node.start) == reloc->presumed_offset)
+ gen8_canonical_addr(target->vma->node.start) == reloc->presumed_offset)
return 0;
/* Check that the relocation address is valid... */
if (unlikely(reloc->offset >
- vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
- DRM_DEBUG("Relocation beyond object bounds: "
+ ev->vma->size - (eb->reloc_cache.use_64bit_reloc ? 8 : 4))) {
+ drm_dbg(&i915->drm, "Relocation beyond object bounds: "
"target %d offset %d size %d.\n",
reloc->target_handle,
(int)reloc->offset,
- (int)vma->size);
+ (int)ev->vma->size);
return -EINVAL;
}
if (unlikely(reloc->offset & 3)) {
- DRM_DEBUG("Relocation not 4-byte aligned: "
+ drm_dbg(&i915->drm, "Relocation not 4-byte aligned: "
"target %d offset %d.\n",
reloc->target_handle,
(int)reloc->offset);
@@ -1414,18 +1399,18 @@ eb_relocate_entry(struct i915_execbuffer *eb,
* do relocations we are already stalling, disable the user's opt
* out of our synchronisation.
*/
- *vma->exec_flags &= ~EXEC_OBJECT_ASYNC;
+ ev->flags &= ~EXEC_OBJECT_ASYNC;
/* and update the user's relocation entry */
- return relocate_entry(vma, reloc, eb, target);
+ return relocate_entry(ev->vma, reloc, eb, target->vma);
}
-static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
+static int eb_relocate_vma(struct i915_execbuffer *eb, struct eb_vma *ev)
{
#define N_RELOC(x) ((x) / sizeof(struct drm_i915_gem_relocation_entry))
struct drm_i915_gem_relocation_entry stack[N_RELOC(512)];
struct drm_i915_gem_relocation_entry __user *urelocs;
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
+ const struct drm_i915_gem_exec_object2 *entry = ev->exec;
unsigned int remain;
urelocs = u64_to_user_ptr(entry->relocs_ptr);
@@ -1455,9 +1440,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
* we would try to acquire the struct mutex again. Obviously
* this is bad and so lockdep complains vehemently.
*/
- pagefault_disable();
- copied = __copy_from_user_inatomic(r, urelocs, count * sizeof(r[0]));
- pagefault_enable();
+ copied = __copy_from_user(r, urelocs, count * sizeof(r[0]));
if (unlikely(copied)) {
remain = -EFAULT;
goto out;
@@ -1465,7 +1448,7 @@ static int eb_relocate_vma(struct i915_execbuffer *eb, struct i915_vma *vma)
remain -= count;
do {
- u64 offset = eb_relocate_entry(eb, vma, r);
+ u64 offset = eb_relocate_entry(eb, ev, r);
if (likely(offset == 0)) {
} else if ((s64)offset < 0) {
@@ -1507,281 +1490,34 @@ out:
return remain;
}
-static int
-eb_relocate_vma_slow(struct i915_execbuffer *eb, struct i915_vma *vma)
-{
- const struct drm_i915_gem_exec_object2 *entry = exec_entry(eb, vma);
- struct drm_i915_gem_relocation_entry *relocs =
- u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- unsigned int i;
- int err;
-
- for (i = 0; i < entry->relocation_count; i++) {
- u64 offset = eb_relocate_entry(eb, vma, &relocs[i]);
-
- if ((s64)offset < 0) {
- err = (int)offset;
- goto err;
- }
- }
- err = 0;
-err:
- reloc_cache_reset(&eb->reloc_cache);
- return err;
-}
-
-static int check_relocations(const struct drm_i915_gem_exec_object2 *entry)
-{
- const char __user *addr, *end;
- unsigned long size;
- char __maybe_unused c;
-
- size = entry->relocation_count;
- if (size == 0)
- return 0;
-
- if (size > N_RELOC(ULONG_MAX))
- return -EINVAL;
-
- addr = u64_to_user_ptr(entry->relocs_ptr);
- size *= sizeof(struct drm_i915_gem_relocation_entry);
- if (!access_ok(addr, size))
- return -EFAULT;
-
- end = addr + size;
- for (; addr < end; addr += PAGE_SIZE) {
- int err = __get_user(c, addr);
- if (err)
- return err;
- }
- return __get_user(c, end - 1);
-}
-
-static int eb_copy_relocations(const struct i915_execbuffer *eb)
+static int eb_relocate(struct i915_execbuffer *eb)
{
- struct drm_i915_gem_relocation_entry *relocs;
- const unsigned int count = eb->buffer_count;
- unsigned int i;
int err;
- for (i = 0; i < count; i++) {
- const unsigned int nreloc = eb->exec[i].relocation_count;
- struct drm_i915_gem_relocation_entry __user *urelocs;
- unsigned long size;
- unsigned long copied;
-
- if (nreloc == 0)
- continue;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- goto err;
-
- urelocs = u64_to_user_ptr(eb->exec[i].relocs_ptr);
- size = nreloc * sizeof(*relocs);
-
- relocs = kvmalloc_array(size, 1, GFP_KERNEL);
- if (!relocs) {
- err = -ENOMEM;
- goto err;
- }
-
- /* copy_from_user is limited to < 4GiB */
- copied = 0;
- do {
- unsigned int len =
- min_t(u64, BIT_ULL(31), size - copied);
-
- if (__copy_from_user((char *)relocs + copied,
- (char __user *)urelocs + copied,
- len))
- goto end;
-
- copied += len;
- } while (copied < size);
-
- /*
- * As we do not update the known relocation offsets after
- * relocating (due to the complexities in lock handling),
- * we need to mark them as invalid now so that we force the
- * relocation processing next time. Just in case the target
- * object is evicted and then rebound into its old
- * presumed_offset before the next execbuffer - if that
- * happened we would make the mistake of assuming that the
- * relocations were valid.
- */
- if (!user_access_begin(urelocs, size))
- goto end;
-
- for (copied = 0; copied < nreloc; copied++)
- unsafe_put_user(-1,
- &urelocs[copied].presumed_offset,
- end_user);
- user_access_end();
-
- eb->exec[i].relocs_ptr = (uintptr_t)relocs;
- }
-
- return 0;
-
-end_user:
- user_access_end();
-end:
- kvfree(relocs);
- err = -EFAULT;
-err:
- while (i--) {
- relocs = u64_to_ptr(typeof(*relocs), eb->exec[i].relocs_ptr);
- if (eb->exec[i].relocation_count)
- kvfree(relocs);
- }
- return err;
-}
-
-static int eb_prefault_relocations(const struct i915_execbuffer *eb)
-{
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- if (unlikely(i915_modparams.prefault_disable))
- return 0;
-
- for (i = 0; i < count; i++) {
- int err;
-
- err = check_relocations(&eb->exec[i]);
- if (err)
- return err;
- }
-
- return 0;
-}
-
-static noinline int eb_relocate_slow(struct i915_execbuffer *eb)
-{
- struct drm_device *dev = &eb->i915->drm;
- bool have_copy = false;
- struct i915_vma *vma;
- int err = 0;
-
-repeat:
- if (signal_pending(current)) {
- err = -ERESTARTSYS;
- goto out;
- }
-
- /* We may process another execbuffer during the unlock... */
- eb_reset_vmas(eb);
- mutex_unlock(&dev->struct_mutex);
-
- /*
- * We take 3 passes through the slowpath.
- *
- * 1 - we try to just prefault all the user relocation entries and
- * then attempt to reuse the atomic pagefault disabled fast path again.
- *
- * 2 - we copy the user entries to a local buffer here outside of the
- * local and allow ourselves to wait upon any rendering before
- * relocations
- *
- * 3 - we already have a local copy of the relocation entries, but
- * were interrupted (EAGAIN) whilst waiting for the objects, try again.
- */
- if (!err) {
- err = eb_prefault_relocations(eb);
- } else if (!have_copy) {
- err = eb_copy_relocations(eb);
- have_copy = err == 0;
- } else {
- cond_resched();
- err = 0;
- }
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* A frequent cause for EAGAIN is currently unavailable client pages */
- flush_workqueue(eb->i915->mm.userptr_wq);
-
- err = i915_mutex_lock_interruptible(dev);
- if (err) {
- mutex_lock(&dev->struct_mutex);
- goto out;
- }
-
- /* reacquire the objects */
+ mutex_lock(&eb->gem_context->mutex);
err = eb_lookup_vmas(eb);
+ mutex_unlock(&eb->gem_context->mutex);
if (err)
- goto err;
-
- GEM_BUG_ON(!eb->batch);
-
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (!have_copy) {
- pagefault_disable();
- err = eb_relocate_vma(eb, vma);
- pagefault_enable();
- if (err)
- goto repeat;
- } else {
- err = eb_relocate_vma_slow(eb, vma);
- if (err)
- goto err;
- }
- }
-
- /*
- * Leave the user relocations as they are; this is the painfully slow path,
- * and we want to avoid the complication of dropping the lock whilst
- * having buffers reserved in the aperture and so causing spurious
- * ENOSPC for random operations.
- */
-
-err:
- if (err == -EAGAIN)
- goto repeat;
-
-out:
- if (have_copy) {
- const unsigned int count = eb->buffer_count;
- unsigned int i;
-
- for (i = 0; i < count; i++) {
- const struct drm_i915_gem_exec_object2 *entry =
- &eb->exec[i];
- struct drm_i915_gem_relocation_entry *relocs;
-
- if (!entry->relocation_count)
- continue;
+ return err;
- relocs = u64_to_ptr(typeof(*relocs), entry->relocs_ptr);
- kvfree(relocs);
- }
+ if (!list_empty(&eb->unbound)) {
+ err = eb_reserve(eb);
+ if (err)
+ return err;
}
- return err;
-}
-
-static int eb_relocate(struct i915_execbuffer *eb)
-{
- if (eb_lookup_vmas(eb))
- goto slow;
-
/* The objects are in their final locations, apply the relocations. */
if (eb->args->flags & __EXEC_HAS_RELOC) {
- struct i915_vma *vma;
+ struct eb_vma *ev;
- list_for_each_entry(vma, &eb->relocs, reloc_link) {
- if (eb_relocate_vma(eb, vma))
- goto slow;
+ list_for_each_entry(ev, &eb->relocs, reloc_link) {
+ err = eb_relocate_vma(eb, ev);
+ if (err)
+ return err;
}
}
return 0;
-
-slow:
- return eb_relocate_slow(eb);
}
static int eb_move_to_gpu(struct i915_execbuffer *eb)
@@ -1794,27 +1530,19 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_init(&acquire, &reservation_ww_class);
for (i = 0; i < count; i++) {
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
err = ww_mutex_lock_interruptible(&vma->resv->lock, &acquire);
- if (!err)
- continue;
-
- GEM_BUG_ON(err == -EALREADY); /* No duplicate vma */
-
if (err == -EDEADLK) {
GEM_BUG_ON(i == 0);
do {
int j = i - 1;
- ww_mutex_unlock(&eb->vma[j]->resv->lock);
+ ww_mutex_unlock(&eb->vma[j].vma->resv->lock);
- swap(eb->flags[i], eb->flags[j]);
swap(eb->vma[i], eb->vma[j]);
- eb->vma[i]->exec_flags = &eb->flags[i];
} while (--i);
- GEM_BUG_ON(vma != eb->vma[0]);
- vma->exec_flags = &eb->flags[0];
err = ww_mutex_lock_slow_interruptible(&vma->resv->lock,
&acquire);
@@ -1825,8 +1553,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
ww_acquire_done(&acquire);
while (i--) {
- unsigned int flags = eb->flags[i];
- struct i915_vma *vma = eb->vma[i];
+ struct eb_vma *ev = &eb->vma[i];
+ struct i915_vma *vma = ev->vma;
+ unsigned int flags = ev->flags;
struct drm_i915_gem_object *obj = vma->obj;
assert_vma_held(vma);
@@ -1870,10 +1599,9 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
i915_vma_unlock(vma);
__eb_unreserve_vma(vma, flags);
- vma->exec_flags = NULL;
+ i915_vma_put(vma);
- if (unlikely(flags & __EXEC_OBJECT_HAS_REF))
- i915_vma_put(vma);
+ ev->vma = NULL;
}
ww_acquire_fini(&acquire);
@@ -1887,7 +1615,7 @@ static int eb_move_to_gpu(struct i915_execbuffer *eb)
return 0;
err_skip:
- i915_request_skip(eb->request, err);
+ i915_request_set_error_once(eb->request, err);
return err;
}
@@ -1921,7 +1649,7 @@ static int i915_reset_gen7_sol_offsets(struct i915_request *rq)
int i;
if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
- DRM_DEBUG("sol reset is gen7/rcs only\n");
+ drm_dbg(&rq->i915->drm, "sol reset is gen7/rcs only\n");
return -EINVAL;
}
@@ -1981,9 +1709,20 @@ static int __eb_parse(struct dma_fence_work *work)
pw->trampoline);
}
+static void __eb_parse_release(struct dma_fence_work *work)
+{
+ struct eb_parse_work *pw = container_of(work, typeof(*pw), base);
+
+ if (pw->trampoline)
+ i915_active_release(&pw->trampoline->active);
+ i915_active_release(&pw->shadow->active);
+ i915_active_release(&pw->batch->active);
+}
+
static const struct dma_fence_work_ops eb_parse_ops = {
.name = "eb_parse",
.work = __eb_parse,
+ .release = __eb_parse_release,
};
static int eb_parse_pipeline(struct i915_execbuffer *eb,
@@ -1997,16 +1736,32 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
if (!pw)
return -ENOMEM;
+ err = i915_active_acquire(&eb->batch->vma->active);
+ if (err)
+ goto err_free;
+
+ err = i915_active_acquire(&shadow->active);
+ if (err)
+ goto err_batch;
+
+ if (trampoline) {
+ err = i915_active_acquire(&trampoline->active);
+ if (err)
+ goto err_shadow;
+ }
+
dma_fence_work_init(&pw->base, &eb_parse_ops);
pw->engine = eb->engine;
- pw->batch = eb->batch;
+ pw->batch = eb->batch->vma;
pw->batch_offset = eb->batch_start_offset;
pw->batch_length = eb->batch_len;
pw->shadow = shadow;
pw->trampoline = trampoline;
- dma_resv_lock(pw->batch->resv, NULL);
+ err = dma_resv_lock_interruptible(pw->batch->resv, NULL);
+ if (err)
+ goto err_trampoline;
err = dma_resv_reserve_shared(pw->batch->resv, 1);
if (err)
@@ -2034,12 +1789,21 @@ static int eb_parse_pipeline(struct i915_execbuffer *eb,
err_batch_unlock:
dma_resv_unlock(pw->batch->resv);
+err_trampoline:
+ if (trampoline)
+ i915_active_release(&trampoline->active);
+err_shadow:
+ i915_active_release(&shadow->active);
+err_batch:
+ i915_active_release(&eb->batch->vma->active);
+err_free:
kfree(pw);
return err;
}
static int eb_parse(struct i915_execbuffer *eb)
{
+ struct drm_i915_private *i915 = eb->i915;
struct intel_engine_pool_node *pool;
struct i915_vma *shadow, *trampoline;
unsigned int len;
@@ -2055,7 +1819,8 @@ static int eb_parse(struct i915_execbuffer *eb)
* post-scan tampering
*/
if (!eb->context->vm->has_read_only) {
- DRM_DEBUG("Cannot prevent post-scan tampering without RO capable vm\n");
+ drm_dbg(&i915->drm,
+ "Cannot prevent post-scan tampering without RO capable vm\n");
return -EINVAL;
}
} else {
@@ -2093,15 +1858,12 @@ static int eb_parse(struct i915_execbuffer *eb)
if (err)
goto err_trampoline;
- eb->vma[eb->buffer_count] = i915_vma_get(shadow);
- eb->flags[eb->buffer_count] =
- __EXEC_OBJECT_HAS_PIN | __EXEC_OBJECT_HAS_REF;
- shadow->exec_flags = &eb->flags[eb->buffer_count];
- eb->buffer_count++;
+ eb->vma[eb->buffer_count].vma = i915_vma_get(shadow);
+ eb->vma[eb->buffer_count].flags = __EXEC_OBJECT_HAS_PIN;
+ eb->batch = &eb->vma[eb->buffer_count++];
eb->trampoline = trampoline;
eb->batch_start_offset = 0;
- eb->batch = shadow;
shadow->private = pool;
return 0;
@@ -2128,7 +1890,7 @@ add_to_client(struct i915_request *rq, struct drm_file *file)
spin_unlock(&file_priv->mm.lock);
}
-static int eb_submit(struct i915_execbuffer *eb)
+static int eb_submit(struct i915_execbuffer *eb, struct i915_vma *batch)
{
int err;
@@ -2155,7 +1917,7 @@ static int eb_submit(struct i915_execbuffer *eb)
}
err = eb->engine->emit_bb_start(eb->request,
- eb->batch->node.start +
+ batch->node.start +
eb->batch_start_offset,
eb->batch_len,
eb->batch_flags);
@@ -2290,15 +2052,22 @@ static int __eb_pin_engine(struct i915_execbuffer *eb, struct intel_context *ce)
intel_context_timeline_unlock(tl);
if (rq) {
- if (i915_request_wait(rq,
- I915_WAIT_INTERRUPTIBLE,
- MAX_SCHEDULE_TIMEOUT) < 0) {
- i915_request_put(rq);
- err = -EINTR;
- goto err_exit;
- }
+ bool nonblock = eb->file->filp->f_flags & O_NONBLOCK;
+ long timeout;
+
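+ /*
+ * Wait for the previous request unless the client opened the
+ * device with O_NONBLOCK, in which case poll once (a zero
+ * timeout) and return EWOULDBLOCK instead of sleeping.
+ */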
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (nonblock)
+ timeout = 0;
+ timeout = i915_request_wait(rq,
+ I915_WAIT_INTERRUPTIBLE,
+ timeout);
i915_request_put(rq);
+
+ if (timeout < 0) {
+ err = nonblock ? -EWOULDBLOCK : timeout;
+ goto err_exit;
+ }
}
eb->engine = ce->engine;
@@ -2336,8 +2105,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
if (user_ring_id != I915_EXEC_BSD &&
(args->flags & I915_EXEC_BSD_MASK)) {
- DRM_DEBUG("execbuf with non bsd ring but with invalid "
- "bsd dispatch flags: %d\n", (int)(args->flags));
+ drm_dbg(&i915->drm,
+ "execbuf with non bsd ring but with invalid "
+ "bsd dispatch flags: %d\n", (int)(args->flags));
return -1;
}
@@ -2351,8 +2121,9 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
bsd_idx >>= I915_EXEC_BSD_SHIFT;
bsd_idx--;
} else {
- DRM_DEBUG("execbuf with unknown bsd ring: %u\n",
- bsd_idx);
+ drm_dbg(&i915->drm,
+ "execbuf with unknown bsd ring: %u\n",
+ bsd_idx);
return -1;
}
@@ -2360,7 +2131,8 @@ eb_select_legacy_ring(struct i915_execbuffer *eb,
}
if (user_ring_id >= ARRAY_SIZE(user_ring_map)) {
- DRM_DEBUG("execbuf with unknown ring: %u\n", user_ring_id);
+ drm_dbg(&i915->drm, "execbuf with unknown ring: %u\n",
+ user_ring_id);
return -1;
}
@@ -2520,6 +2292,73 @@ signal_fence_array(struct i915_execbuffer *eb,
}
}
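+/*
+ * Retire completed requests on the timeline in submission order,
+ * stopping once we reach @end or the first request that cannot yet
+ * be retired.
+ */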
+static void retire_requests(struct intel_timeline *tl, struct i915_request *end)
+{
+ struct i915_request *rq, *rn;
+
+ list_for_each_entry_safe(rq, rn, &tl->requests, link)
+ if (rq == end || !i915_request_retire(rq))
+ break;
+}
+
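+/*
+ * Commit the request and queue it for execution. Expects the
+ * timeline mutex taken when the request was created, and releases
+ * it once the request has been queued and older requests retired.
+ */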
+static void eb_request_add(struct i915_execbuffer *eb)
+{
+ struct i915_request *rq = eb->request;
+ struct intel_timeline * const tl = i915_request_timeline(rq);
+ struct i915_sched_attr attr = {};
+ struct i915_request *prev;
+
+ lockdep_assert_held(&tl->mutex);
+ lockdep_unpin_lock(&tl->mutex, rq->cookie);
+
+ trace_i915_request_add(rq);
+
+ prev = __i915_request_commit(rq);
+
+ /* Check that the context wasn't destroyed before submission */
+ if (likely(rcu_access_pointer(eb->context->gem_context))) {
+ attr = eb->gem_context->sched;
+
+ /*
+ * Boost actual workloads past semaphores!
+ *
+ * With semaphores we spin on one engine waiting for another,
+ * simply to reduce the latency of starting our work when
+ * the signaler completes. However, if there is any other
+ * work that we could be doing on this engine instead, that
+ * is better utilisation and will reduce the overall duration
+ * of the current work. To avoid PI-boosting a semaphore
+ * far in the future over useful work, we keep a history
+ * of any semaphore use along our dependency chain.
+ */
+ if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
+ attr.priority |= I915_PRIORITY_NOSEMAPHORE;
+
+ /*
+ * Boost priorities to new clients (new request flows).
+ *
+ * Allow interactive/synchronous clients to jump ahead of
+ * the bulk clients. (FQ_CODEL)
+ */
+ if (list_empty(&rq->sched.signalers_list))
+ attr.priority |= I915_PRIORITY_WAIT;
+ } else {
+ /* Serialise with context_close via the add_to_timeline */
+ i915_request_set_error_once(rq, -ENOENT);
+ __i915_request_skip(rq);
+ }
+
+ local_bh_disable();
+ __i915_request_queue(rq, &attr);
+ local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
+
+ /* Try to clean up the client's timeline after submitting the request */
+ if (prev)
+ retire_requests(tl, prev);
+
+ mutex_unlock(&tl->mutex);
+}
+
static int
i915_gem_do_execbuffer(struct drm_device *dev,
struct drm_file *file,
@@ -2532,6 +2371,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
struct dma_fence *in_fence = NULL;
struct dma_fence *exec_fence = NULL;
struct sync_file *out_fence = NULL;
+ struct i915_vma *batch;
int out_fence_fd = -1;
int err;
@@ -2546,9 +2386,8 @@ i915_gem_do_execbuffer(struct drm_device *dev,
args->flags |= __EXEC_HAS_RELOC;
eb.exec = exec;
- eb.vma = (struct i915_vma **)(exec + args->buffer_count + 1);
- eb.vma[0] = NULL;
- eb.flags = (unsigned int *)(eb.vma + args->buffer_count + 1);
+ eb.vma = (struct eb_vma *)(exec + args->buffer_count + 1);
+ eb.vma[0].vma = NULL;
eb.invalid_flags = __EXEC_OBJECT_UNKNOWN_FLAGS;
reloc_cache_init(&eb.reloc_cache, eb.i915);
@@ -2616,10 +2455,6 @@ i915_gem_do_execbuffer(struct drm_device *dev,
if (unlikely(err))
goto err_context;
- err = i915_mutex_lock_interruptible(dev);
- if (err)
- goto err_engine;
-
err = eb_relocate(&eb);
if (err) {
/*
@@ -2633,20 +2468,23 @@ i915_gem_do_execbuffer(struct drm_device *dev,
goto err_vma;
}
- if (unlikely(*eb.batch->exec_flags & EXEC_OBJECT_WRITE)) {
- DRM_DEBUG("Attempting to use self-modifying batch buffer\n");
+ if (unlikely(eb.batch->flags & EXEC_OBJECT_WRITE)) {
+ drm_dbg(&i915->drm,
+ "Attempting to use self-modifying batch buffer\n");
err = -EINVAL;
goto err_vma;
}
- if (eb.batch_start_offset > eb.batch->size ||
- eb.batch_len > eb.batch->size - eb.batch_start_offset) {
- DRM_DEBUG("Attempting to use out-of-bounds batch\n");
+
+ if (range_overflows_t(u64,
+ eb.batch_start_offset, eb.batch_len,
+ eb.batch->vma->size)) {
+ drm_dbg(&i915->drm, "Attempting to use out-of-bounds batch\n");
err = -EINVAL;
goto err_vma;
}
if (eb.batch_len == 0)
- eb.batch_len = eb.batch->size - eb.batch_start_offset;
+ eb.batch_len = eb.batch->vma->size - eb.batch_start_offset;
err = eb_parse(&eb);
if (err)
@@ -2656,6 +2494,7 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* snb/ivb/vlv conflate the "batch in ppgtt" bit with the "non-secure
* batch" bit. Hence we need to pin secure batches into the global gtt.
* hsw should have this fixed, but bdw mucks it up again. */
+ batch = eb.batch->vma;
if (eb.batch_flags & I915_DISPATCH_SECURE) {
struct i915_vma *vma;
@@ -2669,13 +2508,13 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* fitting due to fragmentation.
* So this is actually safe.
*/
- vma = i915_gem_object_ggtt_pin(eb.batch->obj, NULL, 0, 0, 0);
+ vma = i915_gem_object_ggtt_pin(batch->obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
err = PTR_ERR(vma);
- goto err_vma;
+ goto err_parse;
}
- eb.batch = vma;
+ batch = vma;
}
/* All GPU relocation batches must be submitted prior to the user rq */
@@ -2722,16 +2561,16 @@ i915_gem_do_execbuffer(struct drm_device *dev,
* inactive_list and lose its active reference. Hence we do not need
* to explicitly hold another reference here.
*/
- eb.request->batch = eb.batch;
- if (eb.batch->private)
- intel_engine_pool_mark_active(eb.batch->private, eb.request);
+ eb.request->batch = batch;
+ if (batch->private)
+ intel_engine_pool_mark_active(batch->private, eb.request);
trace_i915_request_queue(eb.request, eb.batch_flags);
- err = eb_submit(&eb);
+ err = eb_submit(&eb, batch);
err_request:
add_to_client(eb.request, file);
i915_request_get(eb.request);
- i915_request_add(eb.request);
+ eb_request_add(&eb);
if (fences)
signal_fence_array(&eb, fences);
@@ -2750,16 +2589,15 @@ err_request:
err_batch_unpin:
if (eb.batch_flags & I915_DISPATCH_SECURE)
- i915_vma_unpin(eb.batch);
- if (eb.batch->private)
- intel_engine_pool_put(eb.batch->private);
+ i915_vma_unpin(batch);
+err_parse:
+ if (batch->private)
+ intel_engine_pool_put(batch->private);
err_vma:
if (eb.exec)
eb_release_vmas(&eb);
if (eb.trampoline)
i915_vma_unpin(eb.trampoline);
- mutex_unlock(&dev->struct_mutex);
-err_engine:
eb_unpin_engine(&eb);
err_context:
i915_gem_context_put(eb.gem_context);
@@ -2777,9 +2615,7 @@ err_in_fence:
static size_t eb_element_size(void)
{
- return (sizeof(struct drm_i915_gem_exec_object2) +
- sizeof(struct i915_vma *) +
- sizeof(unsigned int));
+ return sizeof(struct drm_i915_gem_exec_object2) + sizeof(struct eb_vma);
}
static bool check_buffer_count(size_t count)
@@ -2803,6 +2639,7 @@ int
i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer *args = data;
struct drm_i915_gem_execbuffer2 exec2;
struct drm_i915_gem_exec_object *exec_list = NULL;
@@ -2812,7 +2649,7 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
int err;
if (!check_buffer_count(count)) {
- DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+ drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2837,8 +2674,9 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec_list == NULL || exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %d buffers\n",
- args->buffer_count);
+ drm_dbg(&i915->drm,
+ "Failed to allocate exec list for %d buffers\n",
+ args->buffer_count);
kvfree(exec_list);
kvfree(exec2_list);
return -ENOMEM;
@@ -2847,8 +2685,8 @@ i915_gem_execbuffer_ioctl(struct drm_device *dev, void *data,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * count);
if (err) {
- DRM_DEBUG("copy %d exec entries failed %d\n",
- args->buffer_count, err);
+ drm_dbg(&i915->drm, "copy %d exec entries failed %d\n",
+ args->buffer_count, err);
kvfree(exec_list);
kvfree(exec2_list);
return -EFAULT;
@@ -2895,6 +2733,7 @@ int
i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
struct drm_file *file)
{
+ struct drm_i915_private *i915 = to_i915(dev);
struct drm_i915_gem_execbuffer2 *args = data;
struct drm_i915_gem_exec_object2 *exec2_list;
struct drm_syncobj **fences = NULL;
@@ -2902,7 +2741,7 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
int err;
if (!check_buffer_count(count)) {
- DRM_DEBUG("execbuf2 with %zd buffers\n", count);
+ drm_dbg(&i915->drm, "execbuf2 with %zd buffers\n", count);
return -EINVAL;
}
@@ -2914,14 +2753,14 @@ i915_gem_execbuffer2_ioctl(struct drm_device *dev, void *data,
exec2_list = kvmalloc_array(count + 1, eb_element_size(),
__GFP_NOWARN | GFP_KERNEL);
if (exec2_list == NULL) {
- DRM_DEBUG("Failed to allocate exec list for %zd buffers\n",
- count);
+ drm_dbg(&i915->drm, "Failed to allocate exec list for %zd buffers\n",
+ count);
return -ENOMEM;
}
if (copy_from_user(exec2_list,
u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * count)) {
- DRM_DEBUG("copy %zd exec entries failed\n", count);
+ drm_dbg(&i915->drm, "copy %zd exec entries failed\n", count);
kvfree(exec2_list);
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_internal.c b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
index 9cfb0e41ff06..cbbff81aa0af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_internal.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_internal.c
@@ -8,8 +8,6 @@
#include <linux/slab.h>
#include <linux/swiotlb.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem.h"
#include "i915_gem_object.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
index b9fdac2f9003..b39c24dae64e 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_mman.c
@@ -455,10 +455,11 @@ out:
void i915_gem_object_release_mmap_offset(struct drm_i915_gem_object *obj)
{
- struct i915_mmap_offset *mmo;
+ struct i915_mmap_offset *mmo, *mn;
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets, offset) {
/*
* vma_node_unmap for GTT mmaps handled already in
* __i915_gem_object_release_mmap_gtt
@@ -488,6 +489,67 @@ void i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
}
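+/*
+ * Find the object's mmap offset of the requested type; the rbtree is
+ * keyed and sorted by mmap_type, and walked under the mmo spinlock.
+ */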
static struct i915_mmap_offset *
+lookup_mmo(struct drm_i915_gem_object *obj,
+ enum i915_mmap_type mmap_type)
+{
+ struct rb_node *rb;
+
+ spin_lock(&obj->mmo.lock);
+ rb = obj->mmo.offsets.rb_node;
+ while (rb) {
+ struct i915_mmap_offset *mmo =
+ rb_entry(rb, typeof(*mmo), offset);
+
+ if (mmo->mmap_type == mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ return mmo;
+ }
+
+ if (mmo->mmap_type < mmap_type)
+ rb = rb->rb_right;
+ else
+ rb = rb->rb_left;
+ }
+ spin_unlock(&obj->mmo.lock);
+
+ return NULL;
+}
+
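+/*
+ * Publish @mmo in the object's offset tree. If another thread already
+ * inserted a node of the same mmap_type, free ours (and its vma
+ * offset) and return the existing node instead.
+ */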
+static struct i915_mmap_offset *
+insert_mmo(struct drm_i915_gem_object *obj, struct i915_mmap_offset *mmo)
+{
+ struct rb_node *rb, **p;
+
+ spin_lock(&obj->mmo.lock);
+ rb = NULL;
+ p = &obj->mmo.offsets.rb_node;
+ while (*p) {
+ struct i915_mmap_offset *pos;
+
+ rb = *p;
+ pos = rb_entry(rb, typeof(*pos), offset);
+
+ if (pos->mmap_type == mmo->mmap_type) {
+ spin_unlock(&obj->mmo.lock);
+ drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node);
+ kfree(mmo);
+ return pos;
+ }
+
+ if (pos->mmap_type < mmo->mmap_type)
+ p = &rb->rb_right;
+ else
+ p = &rb->rb_left;
+ }
+ rb_link_node(&mmo->offset, rb, p);
+ rb_insert_color(&mmo->offset, &obj->mmo.offsets);
+ spin_unlock(&obj->mmo.lock);
+
+ return mmo;
+}
+
+static struct i915_mmap_offset *
mmap_offset_attach(struct drm_i915_gem_object *obj,
enum i915_mmap_type mmap_type,
struct drm_file *file)
@@ -496,20 +558,22 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
struct i915_mmap_offset *mmo;
int err;
+ mmo = lookup_mmo(obj, mmap_type);
+ if (mmo)
+ goto out;
+
mmo = kmalloc(sizeof(*mmo), GFP_KERNEL);
if (!mmo)
return ERR_PTR(-ENOMEM);
mmo->obj = obj;
- mmo->dev = obj->base.dev;
- mmo->file = file;
mmo->mmap_type = mmap_type;
drm_vma_node_reset(&mmo->vma_node);
- err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
- obj->base.size / PAGE_SIZE);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (likely(!err))
- goto out;
+ goto insert;
/* Attempt to reap some mmap space from dead objects */
err = intel_gt_retire_requests_timeout(&i915->gt, MAX_SCHEDULE_TIMEOUT);
@@ -517,19 +581,17 @@ mmap_offset_attach(struct drm_i915_gem_object *obj,
goto err;
i915_gem_drain_freed_objects(i915);
- err = drm_vma_offset_add(mmo->dev->vma_offset_manager, &mmo->vma_node,
- obj->base.size / PAGE_SIZE);
+ err = drm_vma_offset_add(obj->base.dev->vma_offset_manager,
+ &mmo->vma_node, obj->base.size / PAGE_SIZE);
if (err)
goto err;
+insert:
+ mmo = insert_mmo(obj, mmo);
+ GEM_BUG_ON(lookup_mmo(obj, mmap_type) != mmo);
out:
if (file)
drm_vma_node_allow(&mmo->vma_node, file);
-
- spin_lock(&obj->mmo.lock);
- list_add(&mmo->offset, &obj->mmo.offsets);
- spin_unlock(&obj->mmo.lock);
-
return mmo;
err:
@@ -551,8 +613,7 @@ __assign_mmap_offset(struct drm_file *file,
if (!obj)
return -ENOENT;
- if (mmap_type == I915_MMAP_TYPE_GTT &&
- i915_gem_object_never_bind_ggtt(obj)) {
+ if (i915_gem_object_never_mmap(obj)) {
err = -ENODEV;
goto out;
}
@@ -714,7 +775,7 @@ static struct file *mmap_singleton(struct drm_i915_private *i915)
struct file *file;
rcu_read_lock();
- file = i915->gem.mmap_singleton;
+ file = READ_ONCE(i915->gem.mmap_singleton);
if (file && !get_file_rcu(file))
file = NULL;
rcu_read_unlock();
@@ -745,60 +806,43 @@ int i915_gem_mmap(struct file *filp, struct vm_area_struct *vma)
struct drm_vma_offset_node *node;
struct drm_file *priv = filp->private_data;
struct drm_device *dev = priv->minor->dev;
+ struct drm_i915_gem_object *obj = NULL;
struct i915_mmap_offset *mmo = NULL;
- struct drm_gem_object *obj = NULL;
struct file *anon;
if (drm_dev_is_unplugged(dev))
return -ENODEV;
+ rcu_read_lock();
drm_vma_offset_lock_lookup(dev->vma_offset_manager);
node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
vma->vm_pgoff,
vma_pages(vma));
- if (likely(node)) {
- mmo = container_of(node, struct i915_mmap_offset,
- vma_node);
- /*
- * In our dependency chain, the drm_vma_offset_node
- * depends on the validity of the mmo, which depends on
- * the gem object. However the only reference we have
- * at this point is the mmo (as the parent of the node).
- * Try to check if the gem object was at least cleared.
- */
- if (!mmo || !mmo->obj) {
- drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
- return -EINVAL;
- }
+ if (node && drm_vma_node_is_allowed(node, priv)) {
/*
* Skip 0-refcnted objects as they are in the process of being
* destroyed and will be invalid when the vma manager lock
* is released.
*/
- obj = &mmo->obj->base;
- if (!kref_get_unless_zero(&obj->refcount))
- obj = NULL;
+ mmo = container_of(node, struct i915_mmap_offset, vma_node);
+ obj = i915_gem_object_get_rcu(mmo->obj);
}
drm_vma_offset_unlock_lookup(dev->vma_offset_manager);
+ rcu_read_unlock();
if (!obj)
- return -EINVAL;
-
- if (!drm_vma_node_is_allowed(node, priv)) {
- drm_gem_object_put_unlocked(obj);
- return -EACCES;
- }
+ return node ? -EACCES : -EINVAL;
- if (i915_gem_object_is_readonly(to_intel_bo(obj))) {
+ if (i915_gem_object_is_readonly(obj)) {
if (vma->vm_flags & VM_WRITE) {
- drm_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return -EINVAL;
}
vma->vm_flags &= ~VM_MAYWRITE;
}
- anon = mmap_singleton(to_i915(obj->dev));
+ anon = mmap_singleton(to_i915(dev));
if (IS_ERR(anon)) {
- drm_gem_object_put_unlocked(obj);
+ i915_gem_object_put(obj);
return PTR_ERR(anon);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.c b/drivers/gpu/drm/i915/gem/i915_gem_object.c
index 46bacc82ddc4..5da9f9e534b9 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.c
@@ -63,7 +63,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
INIT_LIST_HEAD(&obj->lut_list);
spin_lock_init(&obj->mmo.lock);
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
init_rcu_head(&obj->rcu);
@@ -100,8 +100,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
{
struct drm_i915_gem_object *obj = to_intel_bo(gem);
struct drm_i915_file_private *fpriv = file->driver_priv;
+ struct i915_mmap_offset *mmo, *mn;
struct i915_lut_handle *lut, *ln;
- struct i915_mmap_offset *mmo;
LIST_HEAD(close);
i915_gem_object_lock(obj);
@@ -117,14 +117,8 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
i915_gem_object_unlock(obj);
spin_lock(&obj->mmo.lock);
- list_for_each_entry(mmo, &obj->mmo.offsets, offset) {
- if (mmo->file != file)
- continue;
-
- spin_unlock(&obj->mmo.lock);
+ rbtree_postorder_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset)
drm_vma_node_revoke(&mmo->vma_node, file);
- spin_lock(&obj->mmo.lock);
- }
spin_unlock(&obj->mmo.lock);
list_for_each_entry_safe(lut, ln, &close, obj_link) {
@@ -203,12 +197,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
i915_gem_object_release_mmap(obj);
- list_for_each_entry_safe(mmo, mn, &obj->mmo.offsets, offset) {
+ rbtree_postorder_for_each_entry_safe(mmo, mn,
+ &obj->mmo.offsets,
+ offset) {
drm_vma_offset_remove(obj->base.dev->vma_offset_manager,
&mmo->vma_node);
kfree(mmo);
}
- INIT_LIST_HEAD(&obj->mmo.offsets);
+ obj->mmo.offsets = RB_ROOT;
GEM_BUG_ON(atomic_read(&obj->bind_count));
GEM_BUG_ON(obj->userfault_count);
@@ -229,6 +225,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
/* But keep the pointer alive for RCU-protected lookups */
call_rcu(&obj->rcu, __i915_gem_free_object_rcu);
+ cond_resched();
}
intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object.h b/drivers/gpu/drm/i915/gem/i915_gem_object.h
index db70a3306e59..2faa481cc18f 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object.h
@@ -11,8 +11,6 @@
#include <drm/drm_file.h>
#include <drm/drm_device.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "i915_gem_object_types.h"
#include "i915_gem_gtt.h"
@@ -70,14 +68,22 @@ i915_gem_object_lookup_rcu(struct drm_file *file, u32 handle)
}
static inline struct drm_i915_gem_object *
+i915_gem_object_get_rcu(struct drm_i915_gem_object *obj)
+{
+ if (obj && !kref_get_unless_zero(&obj->base.refcount))
+ obj = NULL;
+
+ return obj;
+}
+
+static inline struct drm_i915_gem_object *
i915_gem_object_lookup(struct drm_file *file, u32 handle)
{
struct drm_i915_gem_object *obj;
rcu_read_lock();
obj = i915_gem_object_lookup_rcu(file, handle);
- if (obj && !kref_get_unless_zero(&obj->base.refcount))
- obj = NULL;
+ obj = i915_gem_object_get_rcu(obj);
rcu_read_unlock();
return obj;
@@ -186,9 +192,9 @@ i915_gem_object_is_proxy(const struct drm_i915_gem_object *obj)
}
static inline bool
-i915_gem_object_never_bind_ggtt(const struct drm_i915_gem_object *obj)
+i915_gem_object_never_mmap(const struct drm_i915_gem_object *obj)
{
- return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_GGTT);
+ return i915_gem_object_type_has(obj, I915_GEM_OBJECT_NO_MMAP);
}
static inline bool
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
index 70809d8897cd..e00792158f13 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_blt.c
@@ -186,7 +186,7 @@ int i915_gem_object_fill_blt(struct drm_i915_gem_object *obj,
0);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
@@ -196,6 +196,17 @@ out_unpin:
return err;
}
+/* Wa_1209644611:icl,ehl */
+static bool wa_1209644611_applies(struct drm_i915_private *i915, u32 size)
+{
+ u32 height = size >> PAGE_SHIFT;
+
+ if (!IS_GEN(i915, 11))
+ return false;
+
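+ /*
+ * Wa_1209644611 applies only on gen11; with a maximum block of
+ * 8 pages, the height check matches 3- and 7-page blocks, for
+ * which the caller falls back from GEN9_XY_FAST_COPY_BLT_CMD.
+ */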
+ return height % 4 == 3 && height <= 8;
+}
+
struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
struct i915_vma *src,
struct i915_vma *dst)
@@ -237,7 +248,8 @@ struct i915_vma *intel_emit_vma_copy_blt(struct intel_context *ce,
size = min_t(u64, rem, block_size);
GEM_BUG_ON(size >> PAGE_SHIFT > S16_MAX);
- if (INTEL_GEN(i915) >= 9) {
+ if (INTEL_GEN(i915) >= 9 &&
+ !wa_1209644611_applies(i915, size)) {
*cmd++ = GEN9_XY_FAST_COPY_BLT_CMD | (10 - 2);
*cmd++ = BLT_DEPTH_32 | PAGE_SIZE;
*cmd++ = 0;
@@ -385,7 +397,7 @@ out_unlock:
drm_gem_unlock_reservations(objs, ARRAY_SIZE(objs), &acquire);
out_request:
if (unlikely(err))
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
out_batch:
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
index 88e268633fdc..a0b10bcd8d8a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_object_types.h
@@ -34,7 +34,7 @@ struct drm_i915_gem_object_ops {
#define I915_GEM_OBJECT_HAS_IOMEM BIT(1)
#define I915_GEM_OBJECT_IS_SHRINKABLE BIT(2)
#define I915_GEM_OBJECT_IS_PROXY BIT(3)
-#define I915_GEM_OBJECT_NO_GGTT BIT(4)
+#define I915_GEM_OBJECT_NO_MMAP BIT(4)
#define I915_GEM_OBJECT_ASYNC_CANCEL BIT(5)
/* Interface between the GEM object and its backing storage.
@@ -71,13 +71,11 @@ enum i915_mmap_type {
};
struct i915_mmap_offset {
- struct drm_device *dev;
struct drm_vma_offset_node vma_node;
struct drm_i915_gem_object *obj;
- struct drm_file *file;
enum i915_mmap_type mmap_type;
- struct list_head offset;
+ struct rb_node offset;
};
struct drm_i915_gem_object {
@@ -137,7 +135,7 @@ struct drm_i915_gem_object {
struct {
spinlock_t lock; /* Protects access to mmo offsets */
- struct list_head offsets;
+ struct rb_root offsets;
} mmo;
I915_SELFTEST_DECLARE(struct list_head st_link);
@@ -287,9 +285,6 @@ struct drm_i915_gem_object {
void *gvt_info;
};
-
- /** for phys allocated objects */
- struct drm_dma_handle *phys_handle;
};
static inline struct drm_i915_gem_object *
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pages.c b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
index 54aca5c9101e..24f4cadea114 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pages.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pages.c
@@ -83,10 +83,12 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
{
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
int err;
if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
- DRM_DEBUG("Attempting to obtain a purgeable object\n");
+ drm_dbg(&i915->drm,
+ "Attempting to obtain a purgeable object\n");
return -EFAULT;
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_phys.c b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
index b1b7c1b3038a..698e22420dc5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_phys.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_phys.c
@@ -22,88 +22,87 @@
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
struct address_space *mapping = obj->base.filp->f_mapping;
- struct drm_dma_handle *phys;
- struct sg_table *st;
struct scatterlist *sg;
- char *vaddr;
+ struct sg_table *st;
+ dma_addr_t dma;
+ void *vaddr;
+ void *dst;
int i;
- int err;
if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
return -EINVAL;
- /* Always aligning to the object size, allows a single allocation
+ /*
+ * Always aligning to the object size allows a single allocation
* to handle all possible callers, and given typical object sizes,
* the alignment of the buddy allocation will naturally match.
*/
- phys = drm_pci_alloc(obj->base.dev,
- roundup_pow_of_two(obj->base.size),
- roundup_pow_of_two(obj->base.size));
- if (!phys)
+ vaddr = dma_alloc_coherent(&obj->base.dev->pdev->dev,
+ roundup_pow_of_two(obj->base.size),
+ &dma, GFP_KERNEL);
+ if (!vaddr)
return -ENOMEM;
- vaddr = phys->vaddr;
+ st = kmalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto err_pci;
+
+ if (sg_alloc_table(st, 1, GFP_KERNEL))
+ goto err_st;
+
+ sg = st->sgl;
+ sg->offset = 0;
+ sg->length = obj->base.size;
+
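+ /*
+ * Stash the kernel address of the coherent buffer in the page
+ * pointer of this single-entry table; put_pages_phys recovers
+ * both via sg_page() and sg_dma_address().
+ */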
+ sg_assign_page(sg, (struct page *)vaddr);
+ sg_dma_address(sg) = dma;
+ sg_dma_len(sg) = obj->base.size;
+
+ dst = vaddr;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
struct page *page;
- char *src;
+ void *src;
page = shmem_read_mapping_page(mapping, i);
- if (IS_ERR(page)) {
- err = PTR_ERR(page);
- goto err_phys;
- }
+ if (IS_ERR(page))
+ goto err_st;
src = kmap_atomic(page);
- memcpy(vaddr, src, PAGE_SIZE);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
+ drm_clflush_virt_range(dst, PAGE_SIZE);
kunmap_atomic(src);
put_page(page);
- vaddr += PAGE_SIZE;
+ dst += PAGE_SIZE;
}
intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
- st = kmalloc(sizeof(*st), GFP_KERNEL);
- if (!st) {
- err = -ENOMEM;
- goto err_phys;
- }
-
- if (sg_alloc_table(st, 1, GFP_KERNEL)) {
- kfree(st);
- err = -ENOMEM;
- goto err_phys;
- }
-
- sg = st->sgl;
- sg->offset = 0;
- sg->length = obj->base.size;
-
- sg_dma_address(sg) = phys->busaddr;
- sg_dma_len(sg) = obj->base.size;
-
- obj->phys_handle = phys;
-
__i915_gem_object_set_pages(obj, st, sg->length);
return 0;
-err_phys:
- drm_pci_free(obj->base.dev, phys);
-
- return err;
+err_st:
+ kfree(st);
+err_pci:
+ dma_free_coherent(&obj->base.dev->pdev->dev,
+ roundup_pow_of_two(obj->base.size),
+ vaddr, dma);
+ return -ENOMEM;
}
static void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
struct sg_table *pages)
{
+ dma_addr_t dma = sg_dma_address(pages->sgl);
+ void *vaddr = sg_page(pages->sgl);
+
__i915_gem_object_release_shmem(obj, pages, false);
if (obj->mm.dirty) {
struct address_space *mapping = obj->base.filp->f_mapping;
- char *vaddr = obj->phys_handle->vaddr;
+ void *src = vaddr;
int i;
for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -115,15 +114,16 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
continue;
dst = kmap_atomic(page);
- drm_clflush_virt_range(vaddr, PAGE_SIZE);
- memcpy(dst, vaddr, PAGE_SIZE);
+ drm_clflush_virt_range(src, PAGE_SIZE);
+ memcpy(dst, src, PAGE_SIZE);
kunmap_atomic(dst);
set_page_dirty(page);
if (obj->mm.madv == I915_MADV_WILLNEED)
mark_page_accessed(page);
put_page(page);
- vaddr += PAGE_SIZE;
+
+ src += PAGE_SIZE;
}
obj->mm.dirty = false;
}
@@ -131,7 +131,9 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
sg_free_table(pages);
kfree(pages);
- drm_pci_free(obj->base.dev, obj->phys_handle);
+ dma_free_coherent(&obj->base.dev->pdev->dev,
+ roundup_pow_of_two(obj->base.size),
+ vaddr, dma);
}
static void phys_release(struct drm_i915_gem_object *obj)
@@ -192,10 +194,11 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
/* Perma-pin (until release) the physical set of pages */
__i915_gem_object_pin_pages(obj);
- if (!IS_ERR_OR_NULL(pages)) {
+ if (!IS_ERR_OR_NULL(pages))
i915_gem_shmem_ops.put_pages(obj, pages);
- i915_gem_object_release_memory_region(obj);
- }
+
+ i915_gem_object_release_memory_region(obj);
+
mutex_unlock(&obj->mm.lock);
return 0;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_pm.c b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
index c8264eb036bf..3d215164dd5a 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_pm.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_pm.c
@@ -85,7 +85,8 @@ void i915_gem_suspend_late(struct drm_i915_private *i915)
spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_gtt_domain(obj, false));
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_gtt_domain(obj, false));
i915_gem_object_unlock(obj);
i915_gem_object_put(obj);
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
index a2a980d9d241..5d5d7eef3f43 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shmem.c
@@ -148,7 +148,8 @@ rebuild_st:
last_pfn = page_to_pfn(page);
/* Check that the i965g/gm workaround works. */
- WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
+ drm_WARN_ON(&i915->drm,
+ (gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
}
if (sg) { /* loop terminated early; short sg table */
sg_page_sizes |= sg->length;
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
index f7e4b39c734f..03e5eb4c99d1 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_shrinker.c
@@ -12,7 +12,6 @@
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>
-#include <drm/i915_drm.h>
#include "i915_trace.h"
@@ -256,8 +255,7 @@ unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
freed = i915_gem_shrink(i915, -1UL, NULL,
I915_SHRINK_BOUND |
- I915_SHRINK_UNBOUND |
- I915_SHRINK_ACTIVE);
+ I915_SHRINK_UNBOUND);
}
return freed;
@@ -336,7 +334,6 @@ i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
freed_pages = 0;
with_intel_runtime_pm(&i915->runtime_pm, wakeref)
freed_pages += i915_gem_shrink(i915, -1UL, NULL,
- I915_SHRINK_ACTIVE |
I915_SHRINK_BOUND |
I915_SHRINK_UNBOUND |
I915_SHRINK_WRITEBACK);
@@ -403,19 +400,22 @@ void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
i915->mm.shrinker.seeks = DEFAULT_SEEKS;
i915->mm.shrinker.batch = 4096;
- WARN_ON(register_shrinker(&i915->mm.shrinker));
+ drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));
i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
- WARN_ON(register_oom_notifier(&i915->mm.oom_notifier));
+ drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));
i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
- WARN_ON(register_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ drm_WARN_ON(&i915->drm,
+ register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}
void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
- WARN_ON(unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
- WARN_ON(unregister_oom_notifier(&i915->mm.oom_notifier));
+ drm_WARN_ON(&i915->drm,
+ unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
+ drm_WARN_ON(&i915->drm,
+ unregister_oom_notifier(&i915->mm.oom_notifier));
unregister_shrinker(&i915->mm.shrinker);
}
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
index 451f3078d60d..5557dfa83a7b 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.c
@@ -13,6 +13,7 @@
#include "gem/i915_gem_region.h"
#include "i915_drv.h"
#include "i915_gem_stolen.h"
+#include "i915_vgpu.h"
/*
* The BIOS typically reserves some of the system's memory for the exclusive
@@ -110,8 +111,11 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
if (stolen[0].start != stolen[1].start ||
stolen[0].end != stolen[1].end) {
- DRM_DEBUG_DRIVER("GTT within stolen memory at %pR\n", &ggtt_res);
- DRM_DEBUG_DRIVER("Stolen memory adjusted to %pR\n", dsm);
+ drm_dbg(&i915->drm,
+ "GTT within stolen memory at %pR\n",
+ &ggtt_res);
+ drm_dbg(&i915->drm, "Stolen memory adjusted to %pR\n",
+ dsm);
}
}
@@ -142,8 +146,9 @@ static int i915_adjust_stolen(struct drm_i915_private *i915,
* range. Apparently this works.
*/
if (!r && !IS_GEN(i915, 3)) {
- DRM_ERROR("conflict detected with stolen region: %pR\n",
- dsm);
+ drm_err(&i915->drm,
+ "conflict detected with stolen region: %pR\n",
+ dsm);
return -EBUSY;
}
@@ -171,8 +176,8 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
ELK_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("%s_STOLEN_RESERVED = %08x\n",
- IS_GM45(i915) ? "CTG" : "ELK", reg_val);
+ drm_dbg(&i915->drm, "%s_STOLEN_RESERVED = %08x\n",
+ IS_GM45(i915) ? "CTG" : "ELK", reg_val);
if ((reg_val & G4X_STOLEN_RESERVED_ENABLE) == 0)
return;
@@ -181,14 +186,16 @@ static void g4x_get_stolen_reserved(struct drm_i915_private *i915,
* Whether ILK really reuses the ELK register for this is unclear.
* Let's see if we catch anyone with this supposedly enabled on ILK.
*/
- WARN(IS_GEN(i915, 5), "ILK stolen reserved found? 0x%08x\n",
- reg_val);
+ drm_WARN(&i915->drm, IS_GEN(i915, 5),
+ "ILK stolen reserved found? 0x%08x\n",
+ reg_val);
if (!(reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK))
return;
*base = (reg_val & G4X_STOLEN_RESERVED_ADDR2_MASK) << 16;
- WARN_ON((reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
+ drm_WARN_ON(&i915->drm,
+ (reg_val & G4X_STOLEN_RESERVED_ADDR1_MASK) < *base);
*size = stolen_top - *base;
}
@@ -200,7 +207,7 @@ static void gen6_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -234,7 +241,7 @@ static void vlv_get_stolen_reserved(struct drm_i915_private *i915,
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -262,7 +269,7 @@ static void gen7_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -289,7 +296,7 @@ static void chv_get_stolen_reserved(struct drm_i915_private *i915,
{
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -323,7 +330,7 @@ static void bdw_get_stolen_reserved(struct drm_i915_private *i915,
u32 reg_val = intel_uncore_read(uncore, GEN6_STOLEN_RESERVED);
resource_size_t stolen_top = i915->dsm.end + 1;
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = %08x\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = %08x\n", reg_val);
if (!(reg_val & GEN6_STOLEN_RESERVED_ENABLE))
return;
@@ -342,7 +349,7 @@ static void icl_get_stolen_reserved(struct drm_i915_private *i915,
{
u64 reg_val = intel_uncore_read64(uncore, GEN6_STOLEN_RESERVED);
- DRM_DEBUG_DRIVER("GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
+ drm_dbg(&i915->drm, "GEN6_STOLEN_RESERVED = 0x%016llx\n", reg_val);
*base = reg_val & GEN11_STOLEN_RESERVED_ADDR_MASK;
@@ -453,8 +460,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
* it likely means we failed to read the registers correctly.
*/
if (!reserved_base) {
- DRM_ERROR("inconsistent reservation %pa + %pa; ignoring\n",
- &reserved_base, &reserved_size);
+ drm_err(&i915->drm,
+ "inconsistent reservation %pa + %pa; ignoring\n",
+ &reserved_base, &reserved_size);
reserved_base = stolen_top;
reserved_size = 0;
}
@@ -463,8 +471,9 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
(struct resource)DEFINE_RES_MEM(reserved_base, reserved_size);
if (!resource_contains(&i915->dsm, &i915->dsm_reserved)) {
- DRM_ERROR("Stolen reserved area %pR outside stolen memory %pR\n",
- &i915->dsm_reserved, &i915->dsm);
+ drm_err(&i915->drm,
+ "Stolen reserved area %pR outside stolen memory %pR\n",
+ &i915->dsm_reserved, &i915->dsm);
return 0;
}
@@ -472,9 +481,10 @@ static int i915_gem_init_stolen(struct drm_i915_private *i915)
* memory, so just consider the start. */
reserved_total = stolen_top - reserved_base;
- DRM_DEBUG_DRIVER("Memory reserved for graphics device: %lluK, usable: %lluK\n",
- (u64)resource_size(&i915->dsm) >> 10,
- ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
+ drm_dbg(&i915->drm,
+ "Memory reserved for graphics device: %lluK, usable: %lluK\n",
+ (u64)resource_size(&i915->dsm) >> 10,
+ ((u64)resource_size(&i915->dsm) - reserved_total) >> 10);
i915->stolen_usable_size =
resource_size(&i915->dsm) - reserved_total;
@@ -677,26 +687,24 @@ struct intel_memory_region *i915_gem_stolen_setup(struct drm_i915_private *i915)
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
resource_size_t stolen_offset,
- resource_size_t gtt_offset,
resource_size_t size)
{
struct intel_memory_region *mem = i915->mm.regions[INTEL_REGION_STOLEN];
- struct i915_ggtt *ggtt = &i915->ggtt;
struct drm_i915_gem_object *obj;
struct drm_mm_node *stolen;
- struct i915_vma *vma;
int ret;
if (!drm_mm_initialized(&i915->mm.stolen))
return ERR_PTR(-ENODEV);
- DRM_DEBUG_DRIVER("creating preallocated stolen object: stolen_offset=%pa, gtt_offset=%pa, size=%pa\n",
- &stolen_offset, &gtt_offset, &size);
+ drm_dbg(&i915->drm,
+ "creating preallocated stolen object: stolen_offset=%pa, size=%pa\n",
+ &stolen_offset, &size);
/* KISS and expect everything to be page-aligned */
- if (WARN_ON(size == 0) ||
- WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
- WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
+ if (GEM_WARN_ON(size == 0) ||
+ GEM_WARN_ON(!IS_ALIGNED(size, I915_GTT_PAGE_SIZE)) ||
+ GEM_WARN_ON(!IS_ALIGNED(stolen_offset, I915_GTT_MIN_ALIGNMENT)))
return ERR_PTR(-EINVAL);
stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
@@ -709,68 +717,20 @@ i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *i915,
ret = drm_mm_reserve_node(&i915->mm.stolen, stolen);
mutex_unlock(&i915->mm.stolen_lock);
if (ret) {
- DRM_DEBUG_DRIVER("failed to allocate stolen space\n");
- kfree(stolen);
- return ERR_PTR(ret);
+ obj = ERR_PTR(ret);
+ goto err_free;
}
obj = __i915_gem_object_create_stolen(mem, stolen);
- if (IS_ERR(obj)) {
- DRM_DEBUG_DRIVER("failed to allocate stolen object\n");
- i915_gem_stolen_remove_node(i915, stolen);
- kfree(stolen);
- return obj;
- }
-
- /* Some objects just need physical mem from stolen space */
- if (gtt_offset == I915_GTT_OFFSET_NONE)
- return obj;
-
- ret = i915_gem_object_pin_pages(obj);
- if (ret)
- goto err;
-
- vma = i915_vma_instance(obj, &ggtt->vm, NULL);
- if (IS_ERR(vma)) {
- ret = PTR_ERR(vma);
- goto err_pages;
- }
-
- /* To simplify the initialisation sequence between KMS and GTT,
- * we allow construction of the stolen object prior to
- * setting up the GTT space. The actual reservation will occur
- * later.
- */
- mutex_lock(&ggtt->vm.mutex);
- ret = i915_gem_gtt_reserve(&ggtt->vm, &vma->node,
- size, gtt_offset, obj->cache_level,
- 0);
- if (ret) {
- DRM_DEBUG_DRIVER("failed to allocate stolen GTT space\n");
- mutex_unlock(&ggtt->vm.mutex);
- goto err_pages;
- }
-
- GEM_BUG_ON(!drm_mm_node_allocated(&vma->node));
-
- GEM_BUG_ON(vma->pages);
- vma->pages = obj->mm.pages;
- atomic_set(&vma->pages_count, I915_VMA_PAGES_ACTIVE);
-
- set_bit(I915_VMA_GLOBAL_BIND_BIT, __i915_vma_flags(vma));
- __i915_vma_set_map_and_fenceable(vma);
-
- list_add_tail(&vma->vm_link, &ggtt->vm.bound_list);
- mutex_unlock(&ggtt->vm.mutex);
-
- GEM_BUG_ON(i915_gem_object_is_shrinkable(obj));
- atomic_inc(&obj->bind_count);
+ if (IS_ERR(obj))
+ goto err_stolen;
+ i915_gem_object_set_cache_coherency(obj, I915_CACHE_NONE);
return obj;
-err_pages:
- i915_gem_object_unpin_pages(obj);
-err:
- i915_gem_object_put(obj);
- return ERR_PTR(ret);
+err_stolen:
+ i915_gem_stolen_remove_node(i915, stolen);
+err_free:
+ kfree(stolen);
+ return obj;
}
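
The restructured tail of i915_gem_object_create_stolen_for_preallocated() above replaces three copies of the cleanup code with a single goto ladder. A minimal sketch of the same acquire-in-order, unwind-in-reverse pattern, using hypothetical alloc helpers rather than the driver's real functions:

#include <stdlib.h>

struct widget {
	int *a;
	int *b;
};

/* Acquire resources in order; unwind in reverse on failure. */
static struct widget *widget_create(void)
{
	struct widget *w = malloc(sizeof(*w));

	if (!w)
		return NULL;

	w->a = malloc(64);
	if (!w->a)
		goto err_free;	/* nothing but 'w' to undo yet */

	w->b = malloc(64);
	if (!w->b)
		goto err_a;	/* undo the 'a' allocation first */

	return w;

err_a:
	free(w->a);
err_free:
	free(w);
	return NULL;
}

int main(void)
{
	struct widget *w = widget_create();

	if (w) {
		free(w->b);
		free(w->a);
		free(w);
	}
	return 0;
}

Each label undoes exactly the resources acquired before the failing step, which is what lets the patch collapse the duplicated kfree()/remove_node() calls.
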
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
index c1040627fbf3..e15c0adad8af 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
+++ b/drivers/gpu/drm/i915/gem/i915_gem_stolen.h
@@ -28,7 +28,6 @@ i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_i915_private *dev_priv,
resource_size_t stolen_offset,
- resource_size_t gtt_offset,
resource_size_t size);
#endif /* __I915_GEM_STOLEN_H__ */
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
index 6c7825a2dc2a..37f77aee1212 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c
@@ -6,7 +6,6 @@
#include <linux/string.h>
#include <linux/bitops.h>
-#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem.h"
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
index 580319b7bf1a..7ffd7afeb7a5 100644
--- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c
@@ -10,8 +10,6 @@
#include <linux/swap.h>
#include <linux/sched/mm.h>
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_gem_ioctls.h"
#include "i915_gem_object.h"
@@ -704,7 +702,7 @@ i915_gem_userptr_dmabuf_export(struct drm_i915_gem_object *obj)
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
I915_GEM_OBJECT_IS_SHRINKABLE |
- I915_GEM_OBJECT_NO_GGTT |
+ I915_GEM_OBJECT_NO_MMAP |
I915_GEM_OBJECT_ASYNC_CANCEL,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
@@ -770,6 +768,23 @@ i915_gem_userptr_ioctl(struct drm_device *dev,
I915_USERPTR_UNSYNCHRONIZED))
return -EINVAL;
+ /*
+ * XXX: There is a prevalence of the assumption that we fit the
+ * object's page count inside a 32bit _signed_ variable. Let's document
+ * this and catch if we ever need to fix it. In the meantime, if you do
+ * spot such a local variable, please consider fixing!
+ *
+ * Aside from our own locals (for which we have no excuse!):
+ * - sg_table embeds unsigned int for num_pages
+ * - get_user_pages*() mixed ints with longs
+ */
+
+ if (args->user_size >> PAGE_SHIFT > INT_MAX)
+ return -E2BIG;
+
+ if (overflows_type(args->user_size, obj->base.size))
+ return -E2BIG;
+
if (!args->user_size)
return -EINVAL;
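
The two new -E2BIG checks above reject user sizes whose page count would overflow the signed 32-bit counters the XXX comment warns about. A standalone sketch of the first check, assuming the common 4 KiB PAGE_SHIFT of 12 (E2BIG is 7 on Linux):

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12	/* assumed 4 KiB pages */

/* Reject sizes whose page count would overflow a signed 32-bit int. */
static int check_user_size(uint64_t user_size)
{
	if (user_size >> PAGE_SHIFT > INT_MAX)
		return -7;	/* -E2BIG */
	return 0;
}

int main(void)
{
	printf("%d\n", check_user_size(1ull << 30));			/* 0 */
	printf("%d\n", check_user_size((1ull << 31) << PAGE_SHIFT));	/* -7 */
	return 0;
}
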
diff --git a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
index 9311250d7d6f..2d0fd50c5312 100644
--- a/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
+++ b/drivers/gpu/drm/i915/gem/selftests/huge_pages.c
@@ -1208,107 +1208,6 @@ static int igt_write_huge(struct i915_gem_context *ctx,
return err;
}
-static int igt_ppgtt_exhaust_huge(void *arg)
-{
- struct i915_gem_context *ctx = arg;
- struct drm_i915_private *i915 = ctx->i915;
- unsigned long supported = INTEL_INFO(i915)->page_sizes;
- static unsigned int pages[ARRAY_SIZE(page_sizes)];
- struct drm_i915_gem_object *obj;
- unsigned int size_mask;
- unsigned int page_mask;
- int n, i;
- int err = -ENODEV;
-
- if (supported == I915_GTT_PAGE_SIZE_4K)
- return 0;
-
- /*
- * Sanity check creating objects with a varying mix of page sizes --
- * ensuring that our writes land in the right place.
- */
-
- n = 0;
- for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1)
- pages[n++] = BIT(i);
-
- for (size_mask = 2; size_mask < BIT(n); size_mask++) {
- unsigned int size = 0;
-
- for (i = 0; i < n; i++) {
- if (size_mask & BIT(i))
- size |= pages[i];
- }
-
- /*
- * For our page mask we want to enumerate all the page-size
- * combinations which will fit into our chosen object size.
- */
- for (page_mask = 2; page_mask <= size_mask; page_mask++) {
- unsigned int page_sizes = 0;
-
- for (i = 0; i < n; i++) {
- if (page_mask & BIT(i))
- page_sizes |= pages[i];
- }
-
- /*
- * Ensure that we can actually fill the given object
- * with our chosen page mask.
- */
- if (!IS_ALIGNED(size, BIT(__ffs(page_sizes))))
- continue;
-
- obj = huge_pages_object(i915, size, page_sizes);
- if (IS_ERR(obj)) {
- err = PTR_ERR(obj);
- goto out_device;
- }
-
- err = i915_gem_object_pin_pages(obj);
- if (err) {
- i915_gem_object_put(obj);
-
- if (err == -ENOMEM) {
- pr_info("unable to get pages, size=%u, pages=%u\n",
- size, page_sizes);
- err = 0;
- break;
- }
-
- pr_err("pin_pages failed, size=%u, pages=%u\n",
- size_mask, page_mask);
-
- goto out_device;
- }
-
- /* Force the page-size for the gtt insertion */
- obj->mm.page_sizes.sg = page_sizes;
-
- err = igt_write_huge(ctx, obj);
- if (err) {
- pr_err("exhaust write-huge failed with size=%u\n",
- size);
- goto out_unpin;
- }
-
- i915_gem_object_unpin_pages(obj);
- __i915_gem_object_put_pages(obj);
- i915_gem_object_put(obj);
- }
- }
-
- goto out_device;
-
-out_unpin:
- i915_gem_object_unpin_pages(obj);
- i915_gem_object_put(obj);
-out_device:
- mkwrite_device_info(i915)->page_sizes = supported;
-
- return err;
-}
-
typedef struct drm_i915_gem_object *
(*igt_create_fn)(struct drm_i915_private *i915, u32 size, u32 flags);
@@ -1900,7 +1799,6 @@ int i915_gem_huge_page_live_selftests(struct drm_i915_private *i915)
SUBTEST(igt_shrink_thp),
SUBTEST(igt_ppgtt_pin_update),
SUBTEST(igt_tmpfs_fallback),
- SUBTEST(igt_ppgtt_exhaust_huge),
SUBTEST(igt_ppgtt_smoke_huge),
SUBTEST(igt_ppgtt_sanity_check),
};
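
The deleted igt_ppgtt_exhaust_huge() walked every combination of supported page sizes using nested bitmasks over a small table. A compact sketch of that subset-enumeration idiom, with hypothetical sizes standing in for the driver's page-size list:

#include <stdio.h>

int main(void)
{
	const unsigned long pages[] = { 4096, 65536, 2097152 }; /* 4K/64K/2M */
	const int n = 3;
	unsigned int mask;

	/* Walk every non-empty subset of the supported page sizes. */
	for (mask = 1; mask < (1u << n); mask++) {
		unsigned long size = 0;
		int i;

		for (i = 0; i < n; i++)
			if (mask & (1u << i))
				size += pages[i];

		printf("mask=%u object size=%lu\n", mask, size);
	}
	return 0;
}
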
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
index 7fc46861a54d..54b86cf7f5d2 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_context.c
@@ -1004,7 +1004,7 @@ emit_rpcs_query(struct drm_i915_gem_object *obj,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
@@ -1465,9 +1465,12 @@ out_file:
static int check_scratch(struct i915_address_space *vm, u64 offset)
{
- struct drm_mm_node *node =
- __drm_mm_interval_first(&vm->mm,
- offset, offset + sizeof(u32) - 1);
+ struct drm_mm_node *node;
+
+ mutex_lock(&vm->mutex);
+ node = __drm_mm_interval_first(&vm->mm,
+ offset, offset + sizeof(u32) - 1);
+ mutex_unlock(&vm->mutex);
if (!node || node->start > offset)
return 0;
@@ -1492,6 +1495,10 @@ static int write_to_scratch(struct i915_gem_context *ctx,
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+ err = check_scratch(ctx_vm(ctx), offset);
+ if (err)
+ return err;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -1528,10 +1535,6 @@ static int write_to_scratch(struct i915_gem_context *ctx,
if (err)
goto out_vm;
- err = check_scratch(vm, offset);
- if (err)
- goto err_unpin;
-
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
@@ -1556,7 +1559,7 @@ static int write_to_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1575,64 +1578,95 @@ static int read_from_scratch(struct i915_gem_context *ctx,
struct drm_i915_private *i915 = ctx->i915;
struct drm_i915_gem_object *obj;
struct i915_address_space *vm;
- const u32 RCS_GPR0 = 0x2600; /* not all engines have their own GPR! */
const u32 result = 0x100;
struct i915_request *rq;
struct i915_vma *vma;
+ unsigned int flags;
u32 *cmd;
int err;
GEM_BUG_ON(offset < I915_GTT_PAGE_SIZE);
+ err = check_scratch(ctx_vm(ctx), offset);
+ if (err)
+ return err;
+
obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
if (IS_ERR(obj))
return PTR_ERR(obj);
- cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
- if (IS_ERR(cmd)) {
- err = PTR_ERR(cmd);
- goto out;
- }
-
- memset(cmd, POISON_INUSE, PAGE_SIZE);
if (INTEL_GEN(i915) >= 8) {
+ const u32 GPR0 = engine->mmio_base + 0x600;
+
+ vm = i915_gem_context_get_vm_rcu(ctx);
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_vm;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
+ if (err)
+ goto out_vm;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
+ *cmd++ = GPR0;
*cmd++ = lower_32_bits(offset);
*cmd++ = upper_32_bits(offset);
*cmd++ = MI_STORE_REGISTER_MEM_GEN8;
- *cmd++ = RCS_GPR0;
+ *cmd++ = GPR0;
*cmd++ = result;
*cmd++ = 0;
+ *cmd = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
+
+ flags = 0;
} else {
+ const u32 reg = engine->mmio_base + 0x420;
+
+ /* hsw: register access (even to 3DPRIM!) is protected */
+ vm = i915_vm_get(&engine->gt->ggtt->vm);
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto out_vm;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL);
+ if (err)
+ goto out_vm;
+
+ cmd = i915_gem_object_pin_map(obj, I915_MAP_WB);
+ if (IS_ERR(cmd)) {
+ err = PTR_ERR(cmd);
+ goto out;
+ }
+
+ memset(cmd, POISON_INUSE, PAGE_SIZE);
*cmd++ = MI_LOAD_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
+ *cmd++ = reg;
*cmd++ = offset;
- *cmd++ = MI_STORE_REGISTER_MEM;
- *cmd++ = RCS_GPR0;
- *cmd++ = result;
- }
- *cmd = MI_BATCH_BUFFER_END;
+ *cmd++ = MI_STORE_REGISTER_MEM | MI_USE_GGTT;
+ *cmd++ = reg;
+ *cmd++ = vma->node.start + result;
+ *cmd = MI_BATCH_BUFFER_END;
- i915_gem_object_flush_map(obj);
- i915_gem_object_unpin_map(obj);
+ i915_gem_object_flush_map(obj);
+ i915_gem_object_unpin_map(obj);
- intel_gt_chipset_flush(engine->gt);
-
- vm = i915_gem_context_get_vm_rcu(ctx);
- vma = i915_vma_instance(obj, vm, NULL);
- if (IS_ERR(vma)) {
- err = PTR_ERR(vma);
- goto out_vm;
+ flags = I915_DISPATCH_SECURE;
}
- err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_OFFSET_FIXED);
- if (err)
- goto out_vm;
-
- err = check_scratch(vm, offset);
- if (err)
- goto err_unpin;
+ intel_gt_chipset_flush(engine->gt);
rq = igt_request_alloc(ctx, engine);
if (IS_ERR(rq)) {
@@ -1640,7 +1674,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto err_unpin;
}
- err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, 0);
+ err = engine->emit_bb_start(rq, vma->node.start, vma->node.size, flags);
if (err)
goto err_request;
@@ -1674,7 +1708,7 @@ static int read_from_scratch(struct i915_gem_context *ctx,
goto out_vm;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_unpin:
@@ -1686,6 +1720,39 @@ out:
return err;
}
+static int check_scratch_page(struct i915_gem_context *ctx, u32 *out)
+{
+ struct i915_address_space *vm;
+ struct page *page;
+ u32 *vaddr;
+ int err = 0;
+
+ vm = ctx_vm(ctx);
+ if (!vm)
+ return -ENODEV;
+
+ page = vm->scratch[0].base.page;
+ if (!page) {
+ pr_err("No scratch page!\n");
+ return -EINVAL;
+ }
+
+ vaddr = kmap(page);
+ if (!vaddr) {
+ pr_err("No (mappable) scratch page!\n");
+ return -EINVAL;
+ }
+
+ memcpy(out, vaddr, sizeof(*out));
+ if (memchr_inv(vaddr, *out, PAGE_SIZE)) {
+ pr_err("Inconsistent initial state of scratch page!\n");
+ err = -EINVAL;
+ }
+ kunmap(page);
+
+ return err;
+}
+
static int igt_vm_isolation(void *arg)
{
struct drm_i915_private *i915 = arg;
@@ -1696,6 +1763,7 @@ static int igt_vm_isolation(void *arg)
I915_RND_STATE(prng);
struct file *file;
u64 vm_total;
+ u32 expected;
int err;
if (INTEL_GEN(i915) < 7)
@@ -1730,9 +1798,17 @@ static int igt_vm_isolation(void *arg)
if (ctx_vm(ctx_a) == ctx_vm(ctx_b))
goto out_file;
+ /* Read the initial state of the scratch page */
+ err = check_scratch_page(ctx_a, &expected);
+ if (err)
+ goto out_file;
+
+ err = check_scratch_page(ctx_b, &expected);
+ if (err)
+ goto out_file;
+
vm_total = ctx_vm(ctx_a)->total;
GEM_BUG_ON(ctx_vm(ctx_b)->total != vm_total);
- vm_total -= I915_GTT_PAGE_SIZE;
count = 0;
num_engines = 0;
@@ -1743,14 +1819,18 @@ static int igt_vm_isolation(void *arg)
if (!intel_engine_can_store_dword(engine))
continue;
+ /* Not all engines have their own GPR! */
+ if (INTEL_GEN(i915) < 8 && engine->class != RENDER_CLASS)
+ continue;
+
while (!__igt_timeout(end_time, NULL)) {
u32 value = 0xc5c5c5c5;
u64 offset;
- div64_u64_rem(i915_prandom_u64_state(&prng),
- vm_total, &offset);
- offset = round_down(offset, alignof_dword);
- offset += I915_GTT_PAGE_SIZE;
+ /* Leave enough space at offset 0 for the batch */
+ offset = igt_random_offset(&prng,
+ I915_GTT_PAGE_SIZE, vm_total,
+ sizeof(u32), alignof_dword);
err = write_to_scratch(ctx_a, engine,
offset, 0xdeadbeef);
@@ -1760,7 +1840,7 @@ static int igt_vm_isolation(void *arg)
if (err)
goto out_file;
- if (value) {
+ if (value != expected) {
pr_err("%s: Read %08x from scratch (offset 0x%08x_%08x), after %lu reads!\n",
engine->name, value,
upper_32_bits(offset),
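
igt_random_offset() now picks the scratch offset: a random position in [start, end) that is aligned and leaves room for the dword access. A userspace sketch of the arithmetic such a helper implies; the real helper lives in the selftest library, so treat the exact semantics here as an assumption:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/*
 * Pick a random offset in [start, end - len], rounded down to align.
 * Assumes start is already aligned and end - start > len.
 */
static uint64_t random_offset(uint64_t start, uint64_t end,
			      uint64_t len, uint64_t align)
{
	uint64_t span = end - start - len;
	uint64_t off = (((uint64_t)rand() << 32) | rand()) % (span + 1);

	return start + (off & ~(align - 1));
}

int main(void)
{
	srand(42);
	printf("%llu\n", (unsigned long long)
	       random_offset(4096, 1ull << 32, sizeof(uint32_t), 4));
	return 0;
}
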
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
index ef7c74cff28a..43912e9b683d 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_mman.c
@@ -570,7 +570,7 @@ static bool assert_mmap_offset(struct drm_i915_private *i915,
obj = i915_gem_object_create_internal(i915, size);
if (IS_ERR(obj))
- return PTR_ERR(obj);
+ return false;
mmo = mmap_offset_attach(obj, I915_MMAP_OFFSET_GTT, NULL);
i915_gem_object_put(obj);
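
The assert_mmap_offset() fix above matters because the function returns bool: PTR_ERR() yields a negative errno, and any non-zero value converts to true, so the old code reported success exactly when object creation failed. A standalone illustration of the trap:

#include <stdbool.h>
#include <stdio.h>

/* Returns true on success -- but a negative errno is also 'true'. */
static bool probe(void)
{
	int err = -12;		/* imagine -ENOMEM from an allocation */

	if (err)
		return err;	/* BUG: should be 'return false;' */
	return true;
}

int main(void)
{
	/* Prints 1: the failure is silently promoted to success. */
	printf("probe() = %d\n", probe());
	return 0;
}
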
diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
index 62077fe46715..31549ad83fa6 100644
--- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
+++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_object_blt.c
@@ -210,6 +210,7 @@ static int igt_fill_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -225,27 +226,32 @@ static int igt_fill_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- /*
- * If we have a tiny shared address space, like for the GGTT
- * then we can't be too greedy.
- */
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size + 1;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
+ phys_sz = min(phys_sz, sz);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
@@ -276,13 +282,14 @@ static int igt_fill_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(obj);
- err = i915_gem_object_set_to_cpu_domain(obj, false);
- i915_gem_object_unlock(obj);
+ err = i915_gem_object_wait(obj, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(obj) / sizeof(u32); i += 17) {
+ if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -293,6 +300,8 @@ static int igt_fill_blt_thread(void *arg)
i915_gem_object_unpin_map(obj);
i915_gem_object_put(obj);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
@@ -319,6 +328,7 @@ static int igt_copy_blt_thread(void *arg)
struct intel_context *ce;
unsigned int prio;
IGT_TIMEOUT(end);
+ u64 total, max;
int err;
ctx = thread->ctx;
@@ -334,23 +344,32 @@ static int igt_copy_blt_thread(void *arg)
ce = i915_gem_context_get_engine(ctx, BCS0);
GEM_BUG_ON(IS_ERR(ce));
+ /*
+ * If we have a tiny shared address space, like for the GGTT,
+ * then we can't be too greedy.
+ */
+ max = ce->vm->total;
+ if (i915_is_ggtt(ce->vm) || thread->ctx)
+ max = div_u64(max, thread->n_cpus);
+ max >>= 4;
+
+ total = PAGE_SIZE;
do {
- const u32 max_block_size = S16_MAX * PAGE_SIZE;
+ /* Aim to keep the runtime under reasonable bounds! */
+ const u32 max_phys_size = SZ_64K;
u32 val = prandom_u32_state(prng);
- u64 total = ce->vm->total;
u32 phys_sz;
u32 sz;
u32 *vaddr;
u32 i;
- if (i915_is_ggtt(ce->vm))
- total = div64_u64(total, thread->n_cpus);
-
- sz = min_t(u64, total >> 4, prandom_u32_state(prng));
- phys_sz = sz % (max_block_size + 1);
+ total = min(total, max);
+ sz = i915_prandom_u32_max_state(total, prng) + 1;
+ phys_sz = sz % max_phys_size + 1;
sz = round_up(sz, PAGE_SIZE);
phys_sz = round_up(phys_sz, PAGE_SIZE);
+ phys_sz = min(phys_sz, sz);
pr_debug("%s with phys_sz= %x, sz=%x, val=%x\n", __func__,
phys_sz, sz, val);
@@ -397,13 +416,14 @@ static int igt_copy_blt_thread(void *arg)
if (err)
goto err_unpin;
- i915_gem_object_lock(dst);
- err = i915_gem_object_set_to_cpu_domain(dst, false);
- i915_gem_object_unlock(dst);
+ err = i915_gem_object_wait(dst, 0, MAX_SCHEDULE_TIMEOUT);
if (err)
goto err_unpin;
- for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); ++i) {
+ for (i = 0; i < huge_gem_object_phys_size(dst) / sizeof(u32); i += 17) {
+ if (!(dst->cache_coherent & I915_BO_CACHE_COHERENT_FOR_READ))
+ drm_clflush_virt_range(&vaddr[i], sizeof(vaddr[i]));
+
if (vaddr[i] != val) {
pr_err("vaddr[%u]=%x, expected=%x\n", i,
vaddr[i], val);
@@ -416,6 +436,8 @@ static int igt_copy_blt_thread(void *arg)
i915_gem_object_put(src);
i915_gem_object_put(dst);
+
+ total <<= 1;
} while (!time_after(jiffies, end));
goto err_flush;
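
Both blt selftests now start the working set at a single page and double it each iteration (total <<= 1), clamped to the per-thread budget computed before the loop. A sketch of that growth schedule with a hypothetical 1 MiB budget:

#include <stdio.h>

int main(void)
{
	const unsigned long max = 1ul << 20;	/* per-thread budget: 1 MiB */
	unsigned long total = 4096;		/* start with a single page */
	int pass;

	for (pass = 0; pass < 12; pass++) {
		unsigned long sz = total < max ? total : max; /* min() */

		printf("pass %2d: working set up to %lu bytes\n", pass, sz);
		total <<= 1;			/* double for the next pass */
	}
	return 0;
}
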
diff --git a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
index 6718da20f35d..772d8cba7da9 100644
--- a/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
+++ b/drivers/gpu/drm/i915/gem/selftests/igt_gem_utils.c
@@ -159,7 +159,7 @@ int igt_gpu_fill_dw(struct intel_context *ce,
return 0;
skip_request:
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
err_request:
i915_request_add(rq);
err_batch:
diff --git a/drivers/gpu/drm/i915/gem/selftests/mock_context.c b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
index 384143aa7776..e7e3c620f542 100644
--- a/drivers/gpu/drm/i915/gem/selftests/mock_context.c
+++ b/drivers/gpu/drm/i915/gem/selftests/mock_context.c
@@ -23,6 +23,9 @@ mock_context(struct drm_i915_private *i915,
INIT_LIST_HEAD(&ctx->link);
ctx->i915 = i915;
+ spin_lock_init(&ctx->stale.lock);
+ INIT_LIST_HEAD(&ctx->stale.engines);
+
i915_gem_context_set_persistence(ctx);
mutex_init(&ctx->engines_mutex);
@@ -37,7 +40,7 @@ mock_context(struct drm_i915_private *i915,
if (name) {
struct i915_ppgtt *ppgtt;
- strncpy(ctx->name, name, sizeof(ctx->name));
+ strncpy(ctx->name, name, sizeof(ctx->name) - 1);
ppgtt = mock_ppgtt(i915, name);
if (!ppgtt)
@@ -83,6 +86,8 @@ live_context(struct drm_i915_private *i915, struct file *file)
if (IS_ERR(ctx))
return ctx;
+ i915_gem_context_set_no_error_capture(ctx);
+
err = gem_context_register(ctx, to_drm_file(file)->driver_priv, &id);
if (err < 0)
goto err_ctx;
@@ -105,6 +110,7 @@ kernel_context(struct drm_i915_private *i915)
i915_gem_context_clear_bannable(ctx);
i915_gem_context_set_persistence(ctx);
+ i915_gem_context_set_no_error_capture(ctx);
return ctx;
}
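
The strncpy() change copies at most sizeof(ctx->name) - 1 bytes, leaving the final byte untouched; since the mock context is zero-allocated, that byte is already '\0' and the name is always terminated. The same pattern in isolation:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[8] = { 0 };	/* zeroed, like kzalloc'ed memory */
	const char *src = "a-rather-long-name";

	/* Copy at most sizeof(name) - 1 bytes; name[7] stays '\0'. */
	strncpy(name, src, sizeof(name) - 1);
	printf("%s\n", name);	/* prints "a-rathe", always terminated */
	return 0;
}
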
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.c b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
new file mode 100644
index 000000000000..de595b66a746
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "gen7_renderclear.h"
+#include "i915_drv.h"
+#include "intel_gpu_commands.h"
+
+#define MAX_URB_ENTRIES 64
+#define STATE_SIZE (4 * 1024)
+#define GT3_INLINE_DATA_DELAYS 0x1E00
+#define batch_advance(Y, CS) GEM_BUG_ON((Y)->end != (CS))
+
+struct cb_kernel {
+ const void *data;
+ u32 size;
+};
+
+#define CB_KERNEL(name) { .data = (name), .size = sizeof(name) }
+
+#include "ivb_clear_kernel.c"
+static const struct cb_kernel cb_kernel_ivb = CB_KERNEL(ivb_clear_kernel);
+
+#include "hsw_clear_kernel.c"
+static const struct cb_kernel cb_kernel_hsw = CB_KERNEL(hsw_clear_kernel);
+
+struct batch_chunk {
+ struct i915_vma *vma;
+ u32 offset;
+ u32 *start;
+ u32 *end;
+ u32 max_items;
+};
+
+struct batch_vals {
+ u32 max_primitives;
+ u32 max_urb_entries;
+ u32 cmd_size;
+ u32 state_size;
+ u32 state_start;
+ u32 batch_size;
+ u32 surface_height;
+ u32 surface_width;
+ u32 scratch_size;
+ u32 max_size;
+};
+
+static void
+batch_get_defaults(struct drm_i915_private *i915, struct batch_vals *bv)
+{
+ if (IS_HASWELL(i915)) {
+ bv->max_primitives = 280;
+ bv->max_urb_entries = MAX_URB_ENTRIES;
+ bv->surface_height = 16 * 16;
+ bv->surface_width = 32 * 2 * 16;
+ } else {
+ bv->max_primitives = 128;
+ bv->max_urb_entries = MAX_URB_ENTRIES / 2;
+ bv->surface_height = 16 * 8;
+ bv->surface_width = 32 * 16;
+ }
+ bv->cmd_size = bv->max_primitives * 4096;
+ bv->state_size = STATE_SIZE;
+ bv->state_start = bv->cmd_size;
+ bv->batch_size = bv->cmd_size + bv->state_size;
+ bv->scratch_size = bv->surface_height * bv->surface_width;
+ bv->max_size = bv->batch_size + bv->scratch_size;
+}
+
+static void batch_init(struct batch_chunk *bc,
+ struct i915_vma *vma,
+ u32 *start, u32 offset, u32 max_bytes)
+{
+ bc->vma = vma;
+ bc->offset = offset;
+ bc->start = start + bc->offset / sizeof(*bc->start);
+ bc->end = bc->start;
+ bc->max_items = max_bytes / sizeof(*bc->start);
+}
+
+static u32 batch_offset(const struct batch_chunk *bc, u32 *cs)
+{
+ return (cs - bc->start) * sizeof(*bc->start) + bc->offset;
+}
+
+static u32 batch_addr(const struct batch_chunk *bc)
+{
+ return bc->vma->node.start;
+}
+
+static void batch_add(struct batch_chunk *bc, const u32 d)
+{
+ GEM_BUG_ON((bc->end - bc->start) >= bc->max_items);
+ *bc->end++ = d;
+}
+
+static u32 *batch_alloc_items(struct batch_chunk *bc, u32 align, u32 items)
+{
+ u32 *map;
+
+ if (align) {
+ u32 *end = PTR_ALIGN(bc->end, align);
+
+ memset32(bc->end, 0, end - bc->end);
+ bc->end = end;
+ }
+
+ map = bc->end;
+ bc->end += items;
+
+ return map;
+}
+
+static u32 *batch_alloc_bytes(struct batch_chunk *bc, u32 align, u32 bytes)
+{
+ GEM_BUG_ON(!IS_ALIGNED(bytes, sizeof(*bc->start)));
+ return batch_alloc_items(bc, align, bytes / sizeof(*bc->start));
+}
+
+static u32
+gen7_fill_surface_state(struct batch_chunk *state,
+ const u32 dst_offset,
+ const struct batch_vals *bv)
+{
+ u32 surface_h = bv->surface_height;
+ u32 surface_w = bv->surface_width;
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+#define SURFACE_2D 1
+#define SURFACEFORMAT_B8G8R8A8_UNORM 0x0C0
+#define RENDER_CACHE_READ_WRITE 1
+
+ *cs++ = SURFACE_2D << 29 |
+ (SURFACEFORMAT_B8G8R8A8_UNORM << 18) |
+ (RENDER_CACHE_READ_WRITE << 8);
+
+ *cs++ = batch_addr(state) + dst_offset;
+
+ *cs++ = ((surface_h / 4 - 1) << 16) | (surface_w / 4 - 1);
+ *cs++ = surface_w;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+#define SHADER_CHANNELS(r, g, b, a) \
+ (((r) << 25) | ((g) << 22) | ((b) << 19) | ((a) << 16))
+ *cs++ = SHADER_CHANNELS(4, 5, 6, 7);
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_binding_table(struct batch_chunk *state,
+ const struct batch_vals *bv)
+{
+ u32 surface_start = gen7_fill_surface_state(state, bv->batch_size, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = surface_start - state->offset;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(state, cs);
+
+ return offset;
+}
+
+static u32
+gen7_fill_kernel_data(struct batch_chunk *state,
+ const u32 *data,
+ const u32 size)
+{
+ return batch_offset(state,
+ memcpy(batch_alloc_bytes(state, 64, size),
+ data, size));
+}
+
+static u32
+gen7_fill_interface_descriptor(struct batch_chunk *state,
+ const struct batch_vals *bv,
+ const struct cb_kernel *kernel,
+ unsigned int count)
+{
+ u32 kernel_offset =
+ gen7_fill_kernel_data(state, kernel->data, kernel->size);
+ u32 binding_table = gen7_fill_binding_table(state, bv);
+ u32 *cs = batch_alloc_items(state, 32, 8 * count);
+ u32 offset = batch_offset(state, cs);
+
+ *cs++ = kernel_offset;
+ *cs++ = (1 << 7) | (1 << 13);
+ *cs++ = 0;
+ *cs++ = (binding_table - state->offset) | 1;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* 1 - 63 dummy idds */
+ memset32(cs, 0x00, (count - 1) * 8);
+ batch_advance(state, cs + (count - 1) * 8);
+
+ return offset;
+}
+
+static void
+gen7_emit_state_base_address(struct batch_chunk *batch,
+ u32 surface_state_base)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 12);
+
+ *cs++ = STATE_BASE_ADDRESS | (12 - 2);
+ /* general */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* surface */
+ *cs++ = batch_addr(batch) | surface_state_base | BASE_ADDRESS_MODIFY;
+ /* dynamic */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* indirect */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+ /* instruction */
+ *cs++ = batch_addr(batch) | BASE_ADDRESS_MODIFY;
+
+ /* general/dynamic/indirect/instruction access Bound */
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = BASE_ADDRESS_MODIFY;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_vfe_state(struct batch_chunk *batch,
+ const struct batch_vals *bv,
+ u32 urb_size, u32 curbe_size,
+ u32 mode)
+{
+ u32 urb_entries = bv->max_urb_entries;
+ u32 threads = bv->max_primitives - 1;
+ u32 *cs = batch_alloc_items(batch, 32, 8);
+
+ *cs++ = MEDIA_VFE_STATE | (8 - 2);
+
+ /* scratch buffer */
+ *cs++ = 0;
+
+ /* number of threads & urb entries for GPGPU vs Media Mode */
+ *cs++ = threads << 16 | urb_entries << 8 | mode << 2;
+
+ *cs++ = 0;
+
+ /* urb entry size & curbe size in 256-bit units */
+ *cs++ = urb_size << 16 | curbe_size;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_interface_descriptor_load(struct batch_chunk *batch,
+ const u32 interface_descriptor,
+ unsigned int count)
+{
+ u32 *cs = batch_alloc_items(batch, 8, 4);
+
+ *cs++ = MEDIA_INTERFACE_DESCRIPTOR_LOAD | (4 - 2);
+ *cs++ = 0;
+ *cs++ = count * 8 * sizeof(*cs);
+
+ /*
+ * interface descriptor address - it is relative to the dynamic
+ * state base address
+ */
+ *cs++ = interface_descriptor;
+ batch_advance(batch, cs);
+}
+
+static void
+gen7_emit_media_object(struct batch_chunk *batch,
+ unsigned int media_object_index)
+{
+ unsigned int x_offset = (media_object_index % 16) * 64;
+ unsigned int y_offset = (media_object_index / 16) * 16;
+ unsigned int inline_data_size;
+ unsigned int media_batch_size;
+ unsigned int i;
+ u32 *cs;
+
+ inline_data_size = 112 * 8;
+ media_batch_size = inline_data_size + 6;
+
+ cs = batch_alloc_items(batch, 8, media_batch_size);
+
+ *cs++ = MEDIA_OBJECT | (media_batch_size - 2);
+
+ /* interface descriptor offset */
+ *cs++ = 0;
+
+ /* without indirect data */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* scoreboard */
+ *cs++ = 0;
+ *cs++ = 0;
+
+ /* inline */
+ *cs++ = (y_offset << 16) | (x_offset);
+ *cs++ = 0;
+ *cs++ = GT3_INLINE_DATA_DELAYS;
+ for (i = 3; i < inline_data_size; i++)
+ *cs++ = 0;
+
+ batch_advance(batch, cs);
+}
+
+static void gen7_emit_pipeline_flush(struct batch_chunk *batch)
+{
+ u32 *cs = batch_alloc_items(batch, 0, 5);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_STATE_CACHE_INVALIDATE |
+ PIPE_CONTROL_GLOBAL_GTT_IVB;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = 0;
+ batch_advance(batch, cs);
+}
+
+static void emit_batch(struct i915_vma * const vma,
+ u32 *start,
+ const struct batch_vals *bv)
+{
+ struct drm_i915_private *i915 = vma->vm->i915;
+ unsigned int desc_count = 64;
+ const u32 urb_size = 112;
+ struct batch_chunk cmds, state;
+ u32 interface_descriptor;
+ unsigned int i;
+
+ batch_init(&cmds, vma, start, 0, bv->cmd_size);
+ batch_init(&state, vma, start, bv->state_start, bv->state_size);
+
+ interface_descriptor =
+ gen7_fill_interface_descriptor(&state, bv,
+ IS_HASWELL(i915) ?
+ &cb_kernel_hsw :
+ &cb_kernel_ivb,
+ desc_count);
+ gen7_emit_pipeline_flush(&cmds);
+ batch_add(&cmds, PIPELINE_SELECT | PIPELINE_SELECT_MEDIA);
+ batch_add(&cmds, MI_NOOP);
+ gen7_emit_state_base_address(&cmds, interface_descriptor);
+ gen7_emit_pipeline_flush(&cmds);
+
+ gen7_emit_vfe_state(&cmds, bv, urb_size - 1, 0, 0);
+
+ gen7_emit_interface_descriptor_load(&cmds,
+ interface_descriptor,
+ desc_count);
+
+ for (i = 0; i < bv->max_primitives; i++)
+ gen7_emit_media_object(&cmds, i);
+
+ batch_add(&cmds, MI_BATCH_BUFFER_END);
+}
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ struct batch_vals bv;
+ u32 *batch;
+
+ batch_get_defaults(engine->i915, &bv);
+ if (!vma)
+ return bv.max_size;
+
+ GEM_BUG_ON(vma->obj->base.size < bv.max_size);
+
+ batch = i915_gem_object_pin_map(vma->obj, I915_MAP_WC);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ emit_batch(vma, memset(batch, 0, bv.max_size), &bv);
+
+ i915_gem_object_flush_map(vma->obj);
+ i915_gem_object_unpin_map(vma->obj);
+
+ return 0;
+}
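
gen7_setup_clear_gpr_bb() doubles as a size query: called with a NULL vma it returns bv.max_size so the caller knows how much to allocate before calling again with a real buffer. A sketch of this two-call pattern with a hypothetical emit function:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Two-call pattern: a NULL buffer asks for the required size. */
static int emit_blob(void *buf, size_t len)
{
	const size_t need = 256;

	if (!buf)
		return (int)need;
	if (len < need)
		return -1;

	memset(buf, 0, need);	/* stand-in for emitting the batch */
	return 0;
}

int main(void)
{
	int need = emit_blob(NULL, 0);	/* first call: size query */
	void *buf = malloc(need);

	if (buf && emit_blob(buf, need) == 0)	/* second call: emission */
		printf("emitted %d bytes\n", need);
	free(buf);
	return 0;
}
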
diff --git a/drivers/gpu/drm/i915/gt/gen7_renderclear.h b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
new file mode 100644
index 000000000000..bb100748e2c6
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/gen7_renderclear.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __GEN7_RENDERCLEAR_H__
+#define __GEN7_RENDERCLEAR_H__
+
+struct intel_engine_cs;
+struct i915_vma;
+
+int gen7_setup_clear_gpr_bb(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma);
+
+#endif /* __GEN7_RENDERCLEAR_H__ */
diff --git a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
index 4d1de2d97d5c..94e746af8926 100644
--- a/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
+++ b/drivers/gpu/drm/i915/gt/gen8_ppgtt.c
@@ -8,6 +8,7 @@
#include "gen8_ppgtt.h"
#include "i915_scatterlist.h"
#include "i915_trace.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gtt.h"
@@ -25,6 +26,30 @@ static u64 gen8_pde_encode(const dma_addr_t addr,
return pde;
}
+static u64 gen8_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
+
+ if (unlikely(flags & PTE_READ_ONLY))
+ pte &= ~_PAGE_RW;
+
+ switch (level) {
+ case I915_CACHE_NONE:
+ pte |= PPAT_UNCACHED;
+ break;
+ case I915_CACHE_WT:
+ pte |= PPAT_DISPLAY_ELLC;
+ break;
+ default:
+ pte |= PPAT_CACHED;
+ break;
+ }
+
+ return pte;
+}
+
static void gen8_ppgtt_notify_vgt(struct i915_ppgtt *ppgtt, bool create)
{
struct drm_i915_private *i915 = ppgtt->vm.i915;
@@ -706,6 +731,8 @@ struct i915_ppgtt *gen8_ppgtt_create(struct intel_gt *gt)
ppgtt->vm.allocate_va_range = gen8_ppgtt_alloc;
ppgtt->vm.clear_range = gen8_ppgtt_clear;
+ ppgtt->vm.pte_encode = gen8_pte_encode;
+
if (intel_vgpu_active(gt->i915))
gen8_ppgtt_notify_vgt(ppgtt, true);
diff --git a/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
new file mode 100644
index 000000000000..b47f9d4a0848
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/hsw_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:30:13 AM UTC
+ */
+
+static const u32 hsw_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x00000160,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffe0,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xffffffc0,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
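
The CB_KERNEL() macro that wraps generated tables like hsw_clear_kernel[] records the blob size with sizeof on the array itself. A sketch of why the macro must be applied to the array, not a pointer to it:

#include <stdio.h>

struct blob {
	const void *data;
	unsigned int size;
};

#define BLOB(name) { .data = (name), .size = sizeof(name) }

static const unsigned int table[] = { 1, 2, 3, 4 };

int main(void)
{
	struct blob b = BLOB(table);

	/* sizeof(table) is 16 here; through a pointer it would be 8. */
	printf("%u bytes\n", b.size);
	return 0;
}
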
diff --git a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
index 0ba524a414c6..cbad7fe722ce 100644
--- a/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
+++ b/drivers/gpu/drm/i915/gt/intel_breadcrumbs.c
@@ -136,6 +136,9 @@ static void add_retire(struct intel_breadcrumbs *b, struct intel_timeline *tl)
struct intel_engine_cs *engine =
container_of(b, struct intel_engine_cs, breadcrumbs);
+ if (unlikely(intel_engine_is_virtual(engine)))
+ engine = intel_virtual_engine_get_sibling(engine, 0);
+
intel_engine_add_retire(engine, tl);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 23137b2a8689..01474d3a558b 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.c
+++ b/drivers/gpu/drm/i915/gt/intel_context.c
@@ -51,6 +51,11 @@ int intel_context_alloc_state(struct intel_context *ce)
return -EINTR;
if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_is_banned(ce)) {
+ err = -EIO;
+ goto unlock;
+ }
+
err = ce->ops->alloc(ce);
if (unlikely(err))
goto unlock;
@@ -67,21 +72,18 @@ static int intel_context_active_acquire(struct intel_context *ce)
{
int err;
- err = i915_active_acquire(&ce->active);
- if (err)
- return err;
+ __i915_active_acquire(&ce->active);
+
+ if (intel_context_is_barrier(ce))
+ return 0;
/* Preallocate tracking nodes */
- if (!intel_context_is_barrier(ce)) {
- err = i915_active_acquire_preallocate_barrier(&ce->active,
- ce->engine);
- if (err) {
- i915_active_release(&ce->active);
- return err;
- }
- }
+ err = i915_active_acquire_preallocate_barrier(&ce->active,
+ ce->engine);
+ if (err)
+ i915_active_release(&ce->active);
- return 0;
+ return err;
}
static void intel_context_active_release(struct intel_context *ce)
@@ -101,34 +103,42 @@ int __intel_context_do_pin(struct intel_context *ce)
return err;
}
- if (mutex_lock_interruptible(&ce->pin_mutex))
- return -EINTR;
+ err = i915_active_acquire(&ce->active);
+ if (err)
+ return err;
+
+ if (mutex_lock_interruptible(&ce->pin_mutex)) {
+ err = -EINTR;
+ goto out_release;
+ }
- if (likely(!atomic_read(&ce->pin_count))) {
+ if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
err = intel_context_active_acquire(ce);
if (unlikely(err))
- goto err;
+ goto out_unlock;
err = ce->ops->pin(ce);
if (unlikely(err))
goto err_active;
- CE_TRACE(ce, "pin ring:{head:%04x, tail:%04x}\n",
+ CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
+ i915_ggtt_offset(ce->ring->vma),
ce->ring->head, ce->ring->tail);
smp_mb__before_atomic(); /* flush pin before it is visible */
+ atomic_inc(&ce->pin_count);
}
- atomic_inc(&ce->pin_count);
GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */
-
- mutex_unlock(&ce->pin_mutex);
- return 0;
+ GEM_BUG_ON(i915_active_is_idle(&ce->active));
+ goto out_unlock;
err_active:
intel_context_active_release(ce);
-err:
+out_unlock:
mutex_unlock(&ce->pin_mutex);
+out_release:
+ i915_active_release(&ce->active);
return err;
}
@@ -215,7 +225,9 @@ static void __intel_context_retire(struct i915_active *active)
{
struct intel_context *ce = container_of(active, typeof(*ce), active);
- CE_TRACE(ce, "retire\n");
+ CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
set_bit(CONTEXT_VALID_BIT, &ce->flags);
if (ce->state)
@@ -276,6 +288,8 @@ intel_context_init(struct intel_context *ce,
ce->sseu = engine->sseu;
ce->ring = __intel_context_ring_size(SZ_4K);
+ ewma_runtime_init(&ce->runtime.avg);
+
ce->vm = i915_vm_get(engine->gt->vm);
INIT_LIST_HEAD(&ce->signal_link);
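
The new pin fastpath above uses atomic_add_unless(&ce->pin_count, 1, 0): the count is incremented only if it is already non-zero, so repeat pins skip the setup entirely while the first pin falls through to the slow path under pin_mutex. A userspace sketch of the increment-unless-zero primitive, with C11 atomics standing in for the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Add 'add' to *v unless it holds 'unless'; true if the add happened. */
static bool add_unless(atomic_int *v, int add, int unless)
{
	int cur = atomic_load(v);

	while (cur != unless) {
		/* On failure, 'cur' is reloaded and the loop retries. */
		if (atomic_compare_exchange_weak(v, &cur, cur + add))
			return true;
	}
	return false;
}

int main(void)
{
	atomic_int pin = 0;

	printf("%d\n", add_unless(&pin, 1, 0)); /* 0: take the slow path */
	atomic_store(&pin, 1);
	printf("%d\n", add_unless(&pin, 1, 0)); /* 1: fast path, pin == 2 */
	return 0;
}
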
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 30bd248827d8..18efad255124 100644
--- a/drivers/gpu/drm/i915/gt/intel_context.h
+++ b/drivers/gpu/drm/i915/gt/intel_context.h
@@ -12,6 +12,7 @@
#include <linux/types.h>
#include "i915_active.h"
+#include "i915_drv.h"
#include "intel_context_types.h"
#include "intel_engine_types.h"
#include "intel_ring_types.h"
@@ -35,6 +36,9 @@ int intel_context_alloc_state(struct intel_context *ce);
void intel_context_free(struct intel_context *ce);
+int intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu);
+
/**
* intel_context_lock_pinned - Stabilises the 'pinned' status of the HW context
* @ce - the context
@@ -224,4 +228,20 @@ intel_context_clear_nopreempt(struct intel_context *ce)
clear_bit(CONTEXT_NOPREEMPT, &ce->flags);
}
+static inline u64 intel_context_get_total_runtime_ns(struct intel_context *ce)
+{
+ const u32 period =
+ RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+ return READ_ONCE(ce->runtime.total) * period;
+}
+
+static inline u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
+{
+ const u32 period =
+ RUNTIME_INFO(ce->engine->i915)->cs_timestamp_period_ns;
+
+ return mul_u32_u32(ewma_runtime_read(&ce->runtime.avg), period);
+}
+
#endif /* __INTEL_CONTEXT_H__ */
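
The two runtime getters above scale raw CS timestamp ticks by cs_timestamp_period_ns. A worked sketch of the conversion, assuming a hypothetical 83 ns period (roughly a 12 MHz timestamp clock; the real period is read from hardware):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint32_t period_ns = 83;	/* hypothetical ~12 MHz CS clock */
	const uint64_t ticks = 1200000;	/* hw-reported runtime in ticks */
	uint64_t total_ns = ticks * period_ns;

	printf("total: %llu ns (~%llu ms)\n",
	       (unsigned long long)total_ns,
	       (unsigned long long)(total_ns / 1000000));
	return 0;
}
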
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.c b/drivers/gpu/drm/i915/gt/intel_context_param.c
new file mode 100644
index 000000000000..65dcd090245d
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_active.h"
+#include "intel_context.h"
+#include "intel_context_param.h"
+#include "intel_ring.h"
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz)
+{
+ int err;
+
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ err = i915_active_wait(&ce->active);
+ if (err < 0)
+ goto unlock;
+
+ if (intel_context_is_pinned(ce)) {
+ err = -EBUSY; /* In active use, come back later! */
+ goto unlock;
+ }
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ struct intel_ring *ring;
+
+ /* Replace the existing ringbuffer */
+ ring = intel_engine_create_ring(ce->engine, sz);
+ if (IS_ERR(ring)) {
+ err = PTR_ERR(ring);
+ goto unlock;
+ }
+
+ intel_ring_put(ce->ring);
+ ce->ring = ring;
+
+ /* Context image will be updated on next pin */
+ } else {
+ ce->ring = __intel_context_ring_size(sz);
+ }
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return err;
+}
+
+long intel_context_get_ring_size(struct intel_context *ce)
+{
+ long sz = (unsigned long)READ_ONCE(ce->ring);
+
+ if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+ if (intel_context_lock_pinned(ce))
+ return -EINTR;
+
+ sz = ce->ring->size;
+ intel_context_unlock_pinned(ce);
+ }
+
+ return sz;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_param.h b/drivers/gpu/drm/i915/gt/intel_context_param.h
new file mode 100644
index 000000000000..f053d8633fe2
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_param.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_CONTEXT_PARAM_H
+#define INTEL_CONTEXT_PARAM_H
+
+struct intel_context;
+
+int intel_context_set_ring_size(struct intel_context *ce, long sz);
+long intel_context_get_ring_size(struct intel_context *ce);
+
+#endif /* INTEL_CONTEXT_PARAM_H */
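
Until CONTEXT_ALLOC_BIT is set, ce->ring holds no real ring: __intel_context_ring_size() stashes the requested size in the pointer value, which is why intel_context_get_ring_size() above casts the pointer back to a long in the unallocated case. A sketch of that encode/decode trick with hypothetical types:

#include <stdio.h>

struct ring {
	unsigned long size;
};

/* Before allocation, smuggle the requested size in the pointer value. */
static struct ring *ring_size_to_ptr(unsigned long sz)
{
	return (struct ring *)sz;
}

static unsigned long ring_ptr_to_size(struct ring *r, int allocated)
{
	return allocated ? r->size : (unsigned long)r;
}

int main(void)
{
	struct ring *r = ring_size_to_ptr(16384);	/* not yet allocated */

	printf("%lu\n", ring_ptr_to_size(r, 0));	/* prints 16384 */
	return 0;
}
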
diff --git a/drivers/gpu/drm/i915/gt/intel_context_sseu.c b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
new file mode 100644
index 000000000000..57a30956c922
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/intel_context_sseu.c
@@ -0,0 +1,98 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "i915_vma.h"
+#include "intel_context.h"
+#include "intel_engine_pm.h"
+#include "intel_gpu_commands.h"
+#include "intel_lrc.h"
+#include "intel_lrc_reg.h"
+#include "intel_ring.h"
+#include "intel_sseu.h"
+
+static int gen8_emit_rpcs_config(struct i915_request *rq,
+ const struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ u64 offset;
+ u32 *cs;
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ offset = i915_ggtt_offset(ce->state) +
+ LRC_STATE_PN * PAGE_SIZE +
+ CTX_R_PWR_CLK_STATE * 4;
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = intel_sseu_make_rpcs(rq->i915, &sseu);
+
+ intel_ring_advance(rq, cs);
+
+ return 0;
+}
+
+static int
+gen8_modify_rpcs(struct intel_context *ce, const struct intel_sseu sseu)
+{
+ struct i915_request *rq;
+ int ret;
+
+ lockdep_assert_held(&ce->pin_mutex);
+
+ /*
+ * If the context is not idle, we have to submit an ordered request to
+ * modify its context image via the kernel context (writing to our own
+ * image, or into the registers directory, does not stick). Pristine
+ * and idle contexts will be configured on pinning.
+ */
+ if (!intel_context_pin_if_active(ce))
+ return 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq)) {
+ ret = PTR_ERR(rq);
+ goto out_unpin;
+ }
+
+ /* Serialise with the remote context */
+ ret = intel_context_prepare_remote_request(ce, rq);
+ if (ret == 0)
+ ret = gen8_emit_rpcs_config(rq, ce, sseu);
+
+ i915_request_add(rq);
+out_unpin:
+ intel_context_unpin(ce);
+ return ret;
+}
+
+int
+intel_context_reconfigure_sseu(struct intel_context *ce,
+ const struct intel_sseu sseu)
+{
+ int ret;
+
+ GEM_BUG_ON(INTEL_GEN(ce->engine->i915) < 8);
+
+ ret = intel_context_lock_pinned(ce);
+ if (ret)
+ return ret;
+
+ /* Nothing to do if unmodified. */
+ if (!memcmp(&ce->sseu, &sseu, sizeof(sseu)))
+ goto unlock;
+
+ ret = gen8_modify_rpcs(ce, sseu);
+ if (!ret)
+ ce->sseu = sseu;
+
+unlock:
+ intel_context_unlock_pinned(ce);
+ return ret;
+}
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index ca1420fb8b53..0f3b68b95c56 100644
--- a/drivers/gpu/drm/i915/gt/intel_context_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_context_types.h
@@ -7,6 +7,7 @@
#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__
+#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
@@ -19,6 +20,8 @@
#define CONTEXT_REDZONE POISON_INUSE
+DECLARE_EWMA(runtime, 3, 8);
+
struct i915_gem_context;
struct i915_vma;
struct intel_context;
@@ -42,8 +45,8 @@ struct intel_context {
struct intel_engine_cs *engine;
struct intel_engine_cs *inflight;
-#define intel_context_inflight(ce) ptr_mask_bits((ce)->inflight, 2)
-#define intel_context_inflight_count(ce) ptr_unmask_bits((ce)->inflight, 2)
+#define intel_context_inflight(ce) ptr_mask_bits(READ_ONCE((ce)->inflight), 2)
+#define intel_context_inflight_count(ce) ptr_unmask_bits(READ_ONCE((ce)->inflight), 2)
struct i915_address_space *vm;
struct i915_gem_context __rcu *gem_context;
@@ -68,6 +71,15 @@ struct intel_context {
u64 lrc_desc;
u32 tag; /* cookie passed to HW to track this context on submission */
+ /* Time on GPU as tracked by the hw. */
+ struct {
+ struct ewma_runtime avg;
+ u64 total;
+ u32 last;
+ I915_SELFTEST_DECLARE(u32 num_underflow);
+ I915_SELFTEST_DECLARE(u32 max_underflow);
+ } runtime;
+
unsigned int active_count; /* protected by timeline->mutex */
atomic_t pin_count;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 5df003061e44..b469de0dd9b6 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine.h
@@ -107,7 +107,20 @@ execlists_num_ports(const struct intel_engine_execlists * const execlists)
static inline struct i915_request *
execlists_active(const struct intel_engine_execlists *execlists)
{
- return *READ_ONCE(execlists->active);
+ struct i915_request * const *cur, * const *old, *active;
+
+ cur = READ_ONCE(execlists->active);
+ smp_rmb(); /* pairs with overwrite protection in process_csb() */
+ do {
+ old = cur;
+
+ active = READ_ONCE(*cur);
+ cur = READ_ONCE(execlists->active);
+
+ smp_rmb(); /* and complete the seqlock retry */
+ } while (unlikely(cur != old));
+
+ return active;
}
static inline void
@@ -192,6 +205,8 @@ void intel_engines_free(struct intel_gt *gt);
int intel_engine_init_common(struct intel_engine_cs *engine);
void intel_engine_cleanup_common(struct intel_engine_cs *engine);
+int intel_engine_resume(struct intel_engine_cs *engine);
+
int intel_ring_submission_setup(struct intel_engine_cs *engine);
int intel_engine_stop_cs(struct intel_engine_cs *engine);
@@ -303,26 +318,6 @@ intel_engine_find_active_request(struct intel_engine_cs *engine);
u32 intel_engine_context_size(struct intel_gt *gt, u8 class);
-#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
- if (!execlists->preempt_hang.inject_hang)
- return false;
-
- complete(&execlists->preempt_hang.completion);
- return true;
-}
-
-#else
-
-static inline bool inject_preempt_hang(struct intel_engine_execlists *execlists)
-{
- return false;
-}
-
-#endif
-
void intel_engine_init_active(struct intel_engine_cs *engine,
unsigned int subclass);
#define ENGINE_PHYSICAL 0
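
execlists_active() above became a seqlock-style read: sample the active slot pointer, dereference it, then re-sample and retry if the writer moved it, with the smp_rmb() pairs matching process_csb(). A userspace sketch of the retry loop, using C11 atomics in place of the kernel barriers:

#include <stdatomic.h>
#include <stdio.h>

static int slot0 = 1, slot1 = 2;
static _Atomic(int *) active = &slot0;

/* Retry until two samples of 'active' agree around the dereference. */
static int read_active(void)
{
	int *cur, *old, val;

	cur = atomic_load(&active);
	do {
		old = cur;
		val = *old;			/* the read being guarded */
		cur = atomic_load(&active);	/* re-sample; retry on move */
	} while (cur != old);

	return val;
}

int main(void)
{
	printf("%d\n", read_active());	/* 1 */
	atomic_store(&active, &slot1);	/* a 'writer' retires the slot */
	printf("%d\n", read_active());	/* 2 */
	return 0;
}
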
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index f451ef376548..3aa8a652c16d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
@@ -35,6 +35,7 @@
#include "intel_engine_user.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
+#include "intel_gt_pm.h"
#include "intel_lrc.h"
#include "intel_reset.h"
#include "intel_ring.h"
@@ -199,10 +200,10 @@ u32 intel_engine_context_size(struct intel_gt *gt, u8 class)
* out in the wash.
*/
cxt_size = intel_uncore_read(uncore, CXT_SIZE) + 1;
- DRM_DEBUG_DRIVER("gen%d CXT_SIZE = %d bytes [0x%08x]\n",
- INTEL_GEN(gt->i915),
- cxt_size * 64,
- cxt_size - 1);
+ drm_dbg(&gt->i915->drm,
+ "gen%d CXT_SIZE = %d bytes [0x%08x]\n",
+ INTEL_GEN(gt->i915), cxt_size * 64,
+ cxt_size - 1);
return round_up(cxt_size * 64, PAGE_SIZE);
case 3:
case 2:
@@ -274,6 +275,7 @@ static void intel_engine_sanitize_mmio(struct intel_engine_cs *engine)
static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
{
const struct engine_info *info = &intel_engines[id];
+ struct drm_i915_private *i915 = gt->i915;
struct intel_engine_cs *engine;
BUILD_BUG_ON(MAX_ENGINE_CLASS >= BIT(GEN11_ENGINE_CLASS_WIDTH));
@@ -300,11 +302,11 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->id = id;
engine->legacy_idx = INVALID_ENGINE;
engine->mask = BIT(id);
- engine->i915 = gt->i915;
+ engine->i915 = i915;
engine->gt = gt;
engine->uncore = gt->uncore;
engine->hw_id = engine->guc_id = info->hw_id;
- engine->mmio_base = __engine_mmio_base(gt->i915, info->mmio_bases);
+ engine->mmio_base = __engine_mmio_base(i915, info->mmio_bases);
engine->class = info->class;
engine->instance = info->instance;
@@ -312,6 +314,8 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.heartbeat_interval_ms =
CONFIG_DRM_I915_HEARTBEAT_INTERVAL;
+ engine->props.max_busywait_duration_ns =
+ CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT;
engine->props.preempt_timeout_ms =
CONFIG_DRM_I915_PREEMPT_TIMEOUT;
engine->props.stop_timeout_ms =
@@ -319,11 +323,15 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
engine->props.timeslice_duration_ms =
CONFIG_DRM_I915_TIMESLICE_DURATION;
+ /* Override to uninterruptible for OpenCL workloads. */
+ if (INTEL_GEN(i915) == 12 && engine->class == RENDER_CLASS)
+ engine->props.preempt_timeout_ms = 0;
+
engine->context_size = intel_engine_context_size(gt, engine->class);
if (WARN_ON(engine->context_size > BIT(20)))
engine->context_size = 0;
if (engine->context_size)
- DRIVER_CAPS(gt->i915)->has_logical_contexts = true;
+ DRIVER_CAPS(i915)->has_logical_contexts = true;
/* Nothing to do here, execute in order of dependencies */
engine->schedule = NULL;
@@ -339,7 +347,7 @@ static int intel_engine_setup(struct intel_gt *gt, enum intel_engine_id id)
gt->engine_class[info->class][info->instance] = engine;
gt->engine[id] = engine;
- gt->i915->engine[id] = engine;
+ i915->engine[id] = engine;
return 0;
}
@@ -392,8 +400,24 @@ void intel_engines_release(struct intel_gt *gt)
struct intel_engine_cs *engine;
enum intel_engine_id id;
+ /*
+ * Before we release the resources held by engine, we must be certain
+ * that the HW is no longer accessing them -- having the GPU scribble
+ * to or read from a page being used for something else causes no end
+ * of fun.
+ *
+ * The GPU should be reset by this point, but assume the worst just
+ * in case we aborted before completely initialising the engines.
+ */
+ GEM_BUG_ON(intel_gt_pm_is_awake(gt));
+ if (!INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
+ __intel_gt_reset(gt, ALL_ENGINES);
+
/* Decouple the backend; but keep the layout for late GPU resets */
for_each_engine(engine, gt, id) {
+ intel_wakeref_wait_for_idle(&engine->wakeref);
+ GEM_BUG_ON(intel_engine_pm_is_awake(engine));
+
if (!engine->release)
continue;
@@ -432,9 +456,9 @@ int intel_engines_init_mmio(struct intel_gt *gt)
unsigned int i;
int err;
- WARN_ON(engine_mask == 0);
- WARN_ON(engine_mask &
- GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
+ drm_WARN_ON(&i915->drm, engine_mask == 0);
+ drm_WARN_ON(&i915->drm, engine_mask &
+ GENMASK(BITS_PER_TYPE(mask) - 1, I915_NUM_ENGINES));
if (i915_inject_probe_failure(i915))
return -ENODEV;
@@ -455,7 +479,7 @@ int intel_engines_init_mmio(struct intel_gt *gt)
* are added to the driver by a warning and disabling the forgotten
* engines.
*/
- if (WARN_ON(mask != engine_mask))
+ if (drm_WARN_ON(&i915->drm, mask != engine_mask))
device_info->engine_mask = mask;
RUNTIME_INFO(i915)->num_engines = hweight32(mask);
@@ -510,7 +534,6 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
{
unsigned int flags;
- flags = PIN_GLOBAL;
if (!HAS_LLC(engine->i915) && i915_ggtt_has_aperture(engine->gt->ggtt))
/*
* On g33, we cannot place HWS above 256MiB, so
@@ -523,11 +546,11 @@ static int pin_ggtt_status_page(struct intel_engine_cs *engine,
* above the mappable region (even though we never
* actually map it).
*/
- flags |= PIN_MAPPABLE;
+ flags = PIN_MAPPABLE;
else
- flags |= PIN_HIGH;
+ flags = PIN_HIGH;
- return i915_vma_pin(vma, 0, 0, flags);
+ return i915_ggtt_pin(vma, 0, flags);
}
static int init_status_page(struct intel_engine_cs *engine)
@@ -546,7 +569,8 @@ static int init_status_page(struct intel_engine_cs *engine)
*/
obj = i915_gem_object_create_internal(engine->i915, PAGE_SIZE);
if (IS_ERR(obj)) {
- DRM_ERROR("Failed to allocate status page\n");
+ drm_err(&engine->i915->drm,
+ "Failed to allocate status page\n");
return PTR_ERR(obj);
}
@@ -614,15 +638,15 @@ static int engine_setup_common(struct intel_engine_cs *engine)
struct measure_breadcrumb {
struct i915_request rq;
- struct intel_timeline timeline;
struct intel_ring ring;
u32 cs[1024];
};
-static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
+static int measure_breadcrumb_dw(struct intel_context *ce)
{
+ struct intel_engine_cs *engine = ce->engine;
struct measure_breadcrumb *frame;
- int dw = -ENOMEM;
+ int dw;
GEM_BUG_ON(!engine->gt->scratch);
@@ -630,39 +654,27 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
if (!frame)
return -ENOMEM;
- if (intel_timeline_init(&frame->timeline,
- engine->gt,
- engine->status_page.vma))
- goto out_frame;
-
- mutex_lock(&frame->timeline.mutex);
+ frame->rq.i915 = engine->i915;
+ frame->rq.engine = engine;
+ frame->rq.context = ce;
+ rcu_assign_pointer(frame->rq.timeline, ce->timeline);
frame->ring.vaddr = frame->cs;
frame->ring.size = sizeof(frame->cs);
frame->ring.effective_size = frame->ring.size;
intel_ring_update_space(&frame->ring);
-
- frame->rq.i915 = engine->i915;
- frame->rq.engine = engine;
frame->rq.ring = &frame->ring;
- rcu_assign_pointer(frame->rq.timeline, &frame->timeline);
-
- dw = intel_timeline_pin(&frame->timeline);
- if (dw < 0)
- goto out_timeline;
+ mutex_lock(&ce->timeline->mutex);
spin_lock_irq(&engine->active.lock);
+
dw = engine->emit_fini_breadcrumb(&frame->rq, frame->cs) - frame->cs;
+
spin_unlock_irq(&engine->active.lock);
+ mutex_unlock(&ce->timeline->mutex);
GEM_BUG_ON(dw & 1); /* RING_TAIL must be qword aligned */
- intel_timeline_unpin(&frame->timeline);
-
-out_timeline:
- mutex_unlock(&frame->timeline.mutex);
- intel_timeline_fini(&frame->timeline);
-out_frame:
kfree(frame);
return dw;
}
@@ -671,6 +683,7 @@ void
intel_engine_init_active(struct intel_engine_cs *engine, unsigned int subclass)
{
INIT_LIST_HEAD(&engine->active.requests);
+ INIT_LIST_HEAD(&engine->active.hold);
spin_lock_init(&engine->active.lock);
lockdep_set_subclass(&engine->active.lock, subclass);
@@ -736,12 +749,6 @@ static int engine_init_common(struct intel_engine_cs *engine)
engine->set_default_submission(engine);
- ret = measure_breadcrumb_dw(engine);
- if (ret < 0)
- return ret;
-
- engine->emit_fini_breadcrumb_dw = ret;
-
/*
* We may need to do things with the shrinker which
* require us to immediately switch back to the default
@@ -754,9 +761,18 @@ static int engine_init_common(struct intel_engine_cs *engine)
if (IS_ERR(ce))
return PTR_ERR(ce);
+ ret = measure_breadcrumb_dw(ce);
+ if (ret < 0)
+ goto err_context;
+
+ engine->emit_fini_breadcrumb_dw = ret;
engine->kernel_context = ce;
return 0;
+
+err_context:
+ intel_context_put(ce);
+ return ret;
}
int intel_engines_init(struct intel_gt *gt)
@@ -823,6 +839,20 @@ void intel_engine_cleanup_common(struct intel_engine_cs *engine)
intel_wa_list_free(&engine->whitelist);
}
+/**
+ * intel_engine_resume - re-initializes the HW state of the engine
+ * @engine: Engine to resume.
+ *
+ * Returns zero on success or an error code on failure.
+ */
+int intel_engine_resume(struct intel_engine_cs *engine)
+{
+ intel_engine_apply_workarounds(engine);
+ intel_engine_apply_whitelist(engine);
+
+ return engine->resume(engine);
+}
+
u64 intel_engine_get_active_head(const struct intel_engine_cs *engine)
{
struct drm_i915_private *i915 = engine->i915;
@@ -981,6 +1011,12 @@ void intel_engine_get_instdone(const struct intel_engine_cs *engine,
instdone->slice_common =
intel_uncore_read(uncore, GEN7_SC_INSTDONE);
+ if (INTEL_GEN(i915) >= 12) {
+ instdone->slice_common_extra[0] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA);
+ instdone->slice_common_extra[1] =
+ intel_uncore_read(uncore, GEN12_SC_INSTDONE_EXTRA2);
+ }
for_each_instdone_slice_subslice(i915, sseu, slice, subslice) {
instdone->sampler[slice][subslice] =
read_subslice_reg(engine, slice, subslice,
@@ -1275,8 +1311,14 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
}
if (INTEL_GEN(dev_priv) >= 6) {
- drm_printf(m, "\tRING_IMR: %08x\n",
+ drm_printf(m, "\tRING_IMR: 0x%08x\n",
ENGINE_READ(engine, RING_IMR));
+ drm_printf(m, "\tRING_ESR: 0x%08x\n",
+ ENGINE_READ(engine, RING_ESR));
+ drm_printf(m, "\tRING_EMR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EMR));
+ drm_printf(m, "\tRING_EIR: 0x%08x\n",
+ ENGINE_READ(engine, RING_EIR));
}
addr = intel_engine_get_active_head(engine);
@@ -1341,25 +1383,27 @@ static void intel_engine_print_registers(struct intel_engine_cs *engine,
execlists_active_lock_bh(execlists);
rcu_read_lock();
for (port = execlists->active; (rq = *port); port++) {
- char hdr[80];
+ char hdr[160];
int len;
- len = snprintf(hdr, sizeof(hdr),
- "\t\tActive[%d]: ",
- (int)(port - execlists->active));
+ len = scnprintf(hdr, sizeof(hdr),
+ "\t\tActive[%d]: ",
+ (int)(port - execlists->active));
if (!i915_request_signaled(rq)) {
struct intel_timeline *tl = get_timeline(rq);
- len += snprintf(hdr + len, sizeof(hdr) - len,
- "ring:{start:%08x, hwsp:%08x, seqno:%08x}, ",
- i915_ggtt_offset(rq->ring->vma),
- tl ? tl->hwsp_offset : 0,
- hwsp_seqno(rq));
+ len += scnprintf(hdr + len, sizeof(hdr) - len,
+ "ring:{start:%08x, hwsp:%08x, seqno:%08x, runtime:%llums}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ tl ? tl->hwsp_offset : 0,
+ hwsp_seqno(rq),
+ DIV_ROUND_CLOSEST_ULL(intel_context_get_total_runtime_ns(rq->context),
+ 1000 * 1000));
if (tl)
intel_timeline_put(tl);
}
- snprintf(hdr + len, sizeof(hdr) - len, "rq: ");
+ scnprintf(hdr + len, sizeof(hdr) - len, "rq: ");
print_request(m, rq, hdr);
}
for (port = execlists->pending; (rq = *port); port++) {
@@ -1422,6 +1466,17 @@ static void print_request_ring(struct drm_printer *m, struct i915_request *rq)
}
}
+static unsigned long list_count(struct list_head *list)
+{
+ struct list_head *pos;
+ unsigned long count = 0;
+
+ list_for_each(pos, list)
+ count++;
+
+ return count;
+}
+
void intel_engine_dump(struct intel_engine_cs *engine,
struct drm_printer *m,
const char *header, ...)
@@ -1491,6 +1546,7 @@ void intel_engine_dump(struct intel_engine_cs *engine,
hexdump(m, rq->context->lrc_reg_state, PAGE_SIZE);
}
}
+ drm_printf(m, "\tOn hold?: %lu\n", list_count(&engine->active.hold));
spin_unlock_irqrestore(&engine->active.lock, flags);
drm_printf(m, "\tMMIO base: 0x%08x\n", engine->mmio_base);
@@ -1644,6 +1700,23 @@ intel_engine_find_active_request(struct intel_engine_cs *engine)
* we only care about the snapshot of this moment.
*/
lockdep_assert_held(&engine->active.lock);
+
+ rcu_read_lock();
+ request = execlists_active(&engine->execlists);
+ if (request) {
+ struct intel_timeline *tl = request->context->timeline;
+
+ list_for_each_entry_from_reverse(request, &tl->requests, link) {
+ if (i915_request_completed(request))
+ break;
+
+ active = request;
+ }
+ }
+ rcu_read_unlock();
+ if (active)
+ return active;
+
list_for_each_entry(request, &engine->active.requests, sched.link) {
if (i915_request_completed(request))
continue;
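
The snprintf() to scnprintf() conversion in intel_engine_print_registers()
is more than cosmetic: snprintf() returns the length the output *would*
have taken, so once a line is truncated the accumulated len can exceed
sizeof(hdr) and the following sizeof(hdr) - len wraps to a huge size_t.
scnprintf() returns the number of characters actually written, keeping the
offset in bounds. A minimal sketch of the accumulation pattern (an
illustration, not part of the patch):

    static int describe(char *buf, size_t len)
    {
        int n = 0;

        /*
         * scnprintf() never reports more than it wrote, so the
         * "buf + n, len - n" window stays valid even on truncation.
         */
        n += scnprintf(buf + n, len - n, "Active[%d]: ", 0);
        n += scnprintf(buf + n, len - n, "seqno:%08x", 0x1234u);

        return n; /* bytes written, excluding the trailing NUL */
    }
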
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
index 6c6fd185457c..dd825718e4e5 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_heartbeat.c
@@ -180,7 +180,7 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
struct i915_sched_attr attr = { .priority = I915_PRIORITY_BARRIER };
struct intel_context *ce = engine->kernel_context;
struct i915_request *rq;
- int err = 0;
+ int err;
if (!intel_engine_has_preemption(engine))
return -ENODEV;
@@ -188,8 +188,10 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
if (!intel_engine_pm_get_if_awake(engine))
return 0;
- if (mutex_lock_interruptible(&ce->timeline->mutex))
+ if (mutex_lock_interruptible(&ce->timeline->mutex)) {
+ err = -EINTR;
goto out_rpm;
+ }
intel_context_enter(ce);
rq = __i915_request_create(ce, GFP_NOWAIT | __GFP_NOWARN);
@@ -204,6 +206,8 @@ int intel_engine_pulse(struct intel_engine_cs *engine)
__i915_request_commit(rq);
__i915_request_queue(rq, &attr);
+ GEM_BUG_ON(rq->sched.attr.priority < I915_PRIORITY_BARRIER);
+ err = 0;
out_unlock:
mutex_unlock(&ce->timeline->mutex);
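
The heartbeat fix closes a path that previously reported success for an
interrupted wait: with the "int err = 0" initialiser removed, a failed
mutex_lock_interruptible() must set an explicit error before jumping to
out_rpm, and the compiler can now flag any path that forgets to assign
err. mutex_lock_interruptible() returns 0 or -EINTR, so the shape is
(a sketch; do_work() is a made-up stand-in):

    extern int do_work(void);

    static int locked_work(struct mutex *lock)
    {
        int err;

        if (mutex_lock_interruptible(lock))
            return -EINTR;  /* a signal interrupted the wait */

        err = do_work();

        mutex_unlock(lock);
        return err;
    }
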
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_pm.c b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
index ea90ab3e396e..b6cf284e3a2d 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_pm.c
@@ -112,7 +112,7 @@ __queue_and_release_pm(struct i915_request *rq,
{
struct intel_gt_timelines *timelines = &engine->gt->timelines;
- ENGINE_TRACE(engine, "\n");
+ ENGINE_TRACE(engine, "parking\n");
/*
* We have to serialise all potential retirement paths with our
@@ -249,7 +249,7 @@ static int __engine_park(struct intel_wakeref *wf)
if (!switch_to_kernel_context(engine))
return -EBUSY;
- ENGINE_TRACE(engine, "\n");
+ ENGINE_TRACE(engine, "parked\n");
call_idle_barriers(engine); /* cleanup after wedging */
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index 350da59e605b..80cdde712842 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
@@ -75,6 +75,7 @@ struct intel_instdone {
u32 instdone;
/* The following exist only in the RCS engine */
u32 slice_common;
+ u32 slice_common_extra[2];
u32 sampler[I915_MAX_SLICES][I915_MAX_SUBSLICES];
u32 row[I915_MAX_SLICES][I915_MAX_SUBSLICES];
};
@@ -126,7 +127,6 @@ DECLARE_EWMA(_engine_latency, 6, 4)
struct st_preempt_hang {
struct completion completion;
unsigned int count;
- bool inject_hang;
};
/**
@@ -157,6 +157,16 @@ struct intel_engine_execlists {
struct i915_priolist default_priolist;
/**
+ * @error_interrupt: CS Master EIR
+ *
+ * The CS generates an interrupt when it detects an error. We capture
+ * the first error interrupt, record the EIR and schedule the tasklet.
+ * In the tasklet, we process the pending CS events to ensure we have
+ * the guilty request, and then reset the engine.
+ */
+ u32 error_interrupt;
+
+ /**
* @no_priolist: priority lists disabled
*/
bool no_priolist;
@@ -295,6 +305,7 @@ struct intel_engine_cs {
struct {
spinlock_t lock;
struct list_head requests;
+ struct list_head hold; /* ready requests, but on hold */
} active;
struct llist_head barrier_tasks;
@@ -536,6 +547,7 @@ struct intel_engine_cs {
struct {
unsigned long heartbeat_interval_ms;
+ unsigned long max_busywait_duration_ns;
unsigned long preempt_timeout_ms;
unsigned long stop_timeout_ms;
unsigned long timeslice_duration_ms;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_user.c b/drivers/gpu/drm/i915/gt/intel_engine_user.c
index 9e7f12bef828..848decee9066 100644
--- a/drivers/gpu/drm/i915/gt/intel_engine_user.c
+++ b/drivers/gpu/drm/i915/gt/intel_engine_user.c
@@ -278,7 +278,8 @@ void intel_engines_driver_register(struct drm_i915_private *i915)
}
}
- if (WARN(errors, "Invalid UABI engine mapping found"))
+ if (drm_WARN(&i915->drm, errors,
+ "Invalid UABI engine mapping found"))
i915->uabi_engines = RB_ROOT;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_ggtt.c b/drivers/gpu/drm/i915/gt/intel_ggtt.c
index 531d501be01f..aed498a0d032 100644
--- a/drivers/gpu/drm/i915/gt/intel_ggtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_ggtt.c
@@ -8,6 +8,8 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
+#include <drm/i915_drm.h>
+
#include "intel_gt.h"
#include "i915_drv.h"
#include "i915_scatterlist.h"
@@ -104,27 +106,17 @@ static bool needs_idle_maps(struct drm_i915_private *i915)
return IS_GEN(i915, 5) && IS_MOBILE(i915) && intel_vtd_active();
}
-static void ggtt_suspend_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_suspend(struct i915_ggtt *ggtt)
{
- struct drm_i915_private *i915 = ggtt->vm.i915;
-
- /*
- * Don't bother messing with faults pre GEN6 as we have little
- * documentation supporting that it's a good idea.
- */
- if (INTEL_GEN(i915) < 6)
- return;
+ struct i915_vma *vma;
- intel_gt_check_and_clear_faults(ggtt->vm.gt);
+ list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
+ i915_vma_wait_for_bind(vma);
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
-
ggtt->invalidate(ggtt);
-}
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915)
-{
- ggtt_suspend_mappings(&i915->ggtt);
+ intel_gt_check_and_clear_faults(ggtt->vm.gt);
}
void gen6_ggtt_invalidate(struct i915_ggtt *ggtt)
@@ -167,6 +159,13 @@ static void gmch_ggtt_invalidate(struct i915_ggtt *ggtt)
intel_gtt_chipset_flush();
}
+static u64 gen8_ggtt_pte_encode(dma_addr_t addr,
+ enum i915_cache_level level,
+ u32 flags)
+{
+ return addr | _PAGE_PRESENT;
+}
+
static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte)
{
writeq(pte, addr);
@@ -182,7 +181,7 @@ static void gen8_ggtt_insert_page(struct i915_address_space *vm,
gen8_pte_t __iomem *pte =
(gen8_pte_t __iomem *)ggtt->gsm + offset / I915_GTT_PAGE_SIZE;
- gen8_set_pte(pte, gen8_pte_encode(addr, level, 0));
+ gen8_set_pte(pte, gen8_ggtt_pte_encode(addr, level, 0));
ggtt->invalidate(ggtt);
}
@@ -195,7 +194,7 @@ static void gen8_ggtt_insert_entries(struct i915_address_space *vm,
struct i915_ggtt *ggtt = i915_vm_to_ggtt(vm);
struct sgt_iter sgt_iter;
gen8_pte_t __iomem *gtt_entries;
- const gen8_pte_t pte_encode = gen8_pte_encode(0, level, 0);
+ const gen8_pte_t pte_encode = gen8_ggtt_pte_encode(0, level, 0);
dma_addr_t addr;
/*
@@ -350,31 +349,6 @@ static void bxt_vtd_ggtt_insert_entries__BKL(struct i915_address_space *vm,
stop_machine(bxt_vtd_ggtt_insert_entries__cb, &arg, NULL);
}
-struct clear_range {
- struct i915_address_space *vm;
- u64 start;
- u64 length;
-};
-
-static int bxt_vtd_ggtt_clear_range__cb(void *_arg)
-{
- struct clear_range *arg = _arg;
-
- gen8_ggtt_clear_range(arg->vm, arg->start, arg->length);
- bxt_vtd_ggtt_wa(arg->vm);
-
- return 0;
-}
-
-static void bxt_vtd_ggtt_clear_range__BKL(struct i915_address_space *vm,
- u64 start,
- u64 length)
-{
- struct clear_range arg = { vm, start, length };
-
- stop_machine(bxt_vtd_ggtt_clear_range__cb, &arg, NULL);
-}
-
static void gen6_ggtt_clear_range(struct i915_address_space *vm,
u64 start, u64 length)
{
@@ -462,7 +436,7 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
u64 size;
int ret;
- if (!USES_GUC(ggtt->vm.i915))
+ if (!intel_uc_uses_guc(&ggtt->vm.gt->uc))
return 0;
GEM_BUG_ON(ggtt->vm.total <= GUC_GGTT_TOP);
@@ -472,7 +446,8 @@ static int ggtt_reserve_guc_top(struct i915_ggtt *ggtt)
GUC_GGTT_TOP, I915_COLOR_UNEVICTABLE,
PIN_NOEVICT);
if (ret)
- DRM_DEBUG_DRIVER("Failed to reserve top of GGTT for GuC\n");
+ drm_dbg(&ggtt->vm.i915->drm,
+ "Failed to reserve top of GGTT for GuC\n");
return ret;
}
@@ -544,8 +519,9 @@ static int init_ggtt(struct i915_ggtt *ggtt)
/* Clear any non-preallocated blocks */
drm_mm_for_each_hole(entry, &ggtt->vm.mm, hole_start, hole_end) {
- DRM_DEBUG_KMS("clearing unused GTT space: [%lx, %lx]\n",
- hole_start, hole_end);
+ drm_dbg_kms(&ggtt->vm.i915->drm,
+ "clearing unused GTT space: [%lx, %lx]\n",
+ hole_start, hole_end);
ggtt->vm.clear_range(&ggtt->vm, hole_start,
hole_end - hole_start);
}
@@ -879,8 +855,8 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
IS_CHERRYVIEW(i915) /* fails with concurrent use/update */) {
ggtt->vm.insert_entries = bxt_vtd_ggtt_insert_entries__BKL;
ggtt->vm.insert_page = bxt_vtd_ggtt_insert_page__BKL;
- if (ggtt->vm.clear_range != nop_clear_range)
- ggtt->vm.clear_range = bxt_vtd_ggtt_clear_range__BKL;
+ ggtt->vm.bind_async_flags =
+ I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND;
}
ggtt->invalidate = gen8_ggtt_invalidate;
@@ -890,7 +866,7 @@ static int gen8_gmch_probe(struct i915_ggtt *ggtt)
ggtt->vm.vma_ops.set_pages = ggtt_set_pages;
ggtt->vm.vma_ops.clear_pages = clear_pages;
- ggtt->vm.pte_encode = gen8_pte_encode;
+ ggtt->vm.pte_encode = gen8_ggtt_pte_encode;
setup_private_pat(ggtt->vm.gt->uncore);
@@ -1180,7 +1156,7 @@ void i915_ggtt_disable_guc(struct i915_ggtt *ggtt)
ggtt->invalidate(ggtt);
}
-static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
+void i915_ggtt_resume(struct i915_ggtt *ggtt)
{
struct i915_vma *vma;
bool flush = false;
@@ -1188,8 +1164,6 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
intel_gt_check_and_clear_faults(ggtt->vm.gt);
- mutex_lock(&ggtt->vm.mutex);
-
/* First fill our portion of the GTT with scratch pages */
ggtt->vm.clear_range(&ggtt->vm, 0, ggtt->vm.total);
@@ -1216,19 +1190,10 @@ static void ggtt_restore_mappings(struct i915_ggtt *ggtt)
atomic_set(&ggtt->vm.open, open);
ggtt->invalidate(ggtt);
- mutex_unlock(&ggtt->vm.mutex);
-
if (flush)
wbinvd_on_all_cpus();
-}
-
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915)
-{
- struct i915_ggtt *ggtt = &i915->ggtt;
-
- ggtt_restore_mappings(ggtt);
- if (INTEL_GEN(i915) >= 8)
+ if (INTEL_GEN(ggtt->vm.i915) >= 8)
setup_private_pat(ggtt->vm.gt->uncore);
}
@@ -1267,6 +1232,7 @@ intel_rotate_pages(struct intel_rotation_info *rot_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_rotation_info_size(rot_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -1296,8 +1262,9 @@ err_sg_alloc:
kfree(st);
err_st_alloc:
- DRM_DEBUG_DRIVER("Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rot_info->plane[0].width, rot_info->plane[0].height, size);
+ drm_dbg(&i915->drm, "Failed to create rotated mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rot_info->plane[0].width,
+ rot_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -1349,6 +1316,7 @@ intel_remap_pages(struct intel_remapped_info *rem_info,
struct drm_i915_gem_object *obj)
{
unsigned int size = intel_remapped_info_size(rem_info);
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
struct sg_table *st;
struct scatterlist *sg;
int ret = -ENOMEM;
@@ -1380,8 +1348,9 @@ err_sg_alloc:
kfree(st);
err_st_alloc:
- DRM_DEBUG_DRIVER("Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
- obj->base.size, rem_info->plane[0].width, rem_info->plane[0].height, size);
+ drm_dbg(&i915->drm, "Failed to create remapped mapping for object size %zu! (%ux%u tiles, %u pages)\n",
+ obj->base.size, rem_info->plane[0].width,
+ rem_info->plane[0].height, size);
return ERR_PTR(ret);
}
@@ -1479,8 +1448,9 @@ i915_get_ggtt_vma_pages(struct i915_vma *vma)
if (IS_ERR(vma->pages)) {
ret = PTR_ERR(vma->pages);
vma->pages = NULL;
- DRM_ERROR("Failed to get pages for VMA view type %u (%d)!\n",
- vma->ggtt_view.type, ret);
+ drm_err(&vma->vm->i915->drm,
+ "Failed to get pages for VMA view type %u (%d)!\n",
+ vma->ggtt_view.type, ret);
}
return ret;
}
diff --git a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
index 51b8718513bc..f04214a54f75 100644
--- a/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
+++ b/drivers/gpu/drm/i915/gt/intel_gpu_commands.h
@@ -292,10 +292,21 @@
#define MI_STORE_URB_MEM MI_INSTR(0x2D, 0)
#define MI_CONDITIONAL_BATCH_BUFFER_END MI_INSTR(0x36, 0)
-#define PIPELINE_SELECT ((0x3<<29)|(0x1<<27)|(0x1<<24)|(0x4<<16))
-#define GFX_OP_3DSTATE_VF_STATISTICS ((0x3<<29)|(0x1<<27)|(0x0<<24)|(0xB<<16))
-#define MEDIA_VFE_STATE ((0x3<<29)|(0x2<<27)|(0x0<<24)|(0x0<<16))
+#define STATE_BASE_ADDRESS \
+ ((0x3 << 29) | (0x0 << 27) | (0x1 << 24) | (0x1 << 16))
+#define BASE_ADDRESS_MODIFY REG_BIT(0)
+#define PIPELINE_SELECT \
+ ((0x3 << 29) | (0x1 << 27) | (0x1 << 24) | (0x4 << 16))
+#define PIPELINE_SELECT_MEDIA REG_BIT(0)
+#define GFX_OP_3DSTATE_VF_STATISTICS \
+ ((0x3 << 29) | (0x1 << 27) | (0x0 << 24) | (0xB << 16))
+#define MEDIA_VFE_STATE \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x0 << 16))
#define MEDIA_VFE_STATE_MMIO_ACCESS_MASK (0x18)
+#define MEDIA_INTERFACE_DESCRIPTOR_LOAD \
+ ((0x3 << 29) | (0x2 << 27) | (0x0 << 24) | (0x2 << 16))
+#define MEDIA_OBJECT \
+ ((0x3 << 29) | (0x2 << 27) | (0x1 << 24) | (0x0 << 16))
#define GPGPU_OBJECT ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x4<<16))
#define GPGPU_WALKER ((0x3<<29)|(0x2<<27)|(0x1<<24)|(0x5<<16))
#define GFX_OP_3DSTATE_DX9_CONSTANTF_VS \
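
The reflowed opcodes above all share the GFXPIPE command-header layout:
bits 31:29 carry the client (0x3 for GFXPIPE), bits 28:27 the command
subtype (0 common, 1 single-dword, 2 media, 3 3D), bits 26:24 the opcode
and bits 23:16 the sub-opcode. A hypothetical helper (not in the patch)
makes the decomposition explicit:

    #define GFXPIPE_CMD(subtype, op, subop) \
        ((0x3 << 29) | ((subtype) << 27) | ((op) << 24) | ((subop) << 16))

    /* e.g. GFXPIPE_CMD(0x2, 0x0, 0x2) == MEDIA_INTERFACE_DESCRIPTOR_LOAD */
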
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c
index da2b6e2ae692..d09f7596cb98 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt.c
@@ -198,16 +198,16 @@ static void gen6_check_faults(struct intel_gt *gt)
for_each_engine(engine, gt, id) {
fault = GEN6_RING_FAULT_REG_READ(engine);
if (fault & RING_FAULT_VALID) {
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08lx\n"
- "\tAddress space: %s\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- fault & PAGE_MASK,
- fault & RING_FAULT_GTTSEL_MASK ?
- "GGTT" : "PPGTT",
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ drm_dbg(&engine->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08lx\n"
+ "\tAddress space: %s\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ fault & PAGE_MASK,
+ fault & RING_FAULT_GTTSEL_MASK ?
+ "GGTT" : "PPGTT",
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
}
@@ -239,18 +239,17 @@ static void gen8_check_faults(struct intel_gt *gt)
fault_addr = ((u64)(fault_data1 & FAULT_VA_HIGH_BITS) << 44) |
((u64)fault_data0 << 12);
- DRM_DEBUG_DRIVER("Unexpected fault\n"
- "\tAddr: 0x%08x_%08x\n"
- "\tAddress space: %s\n"
- "\tEngine ID: %d\n"
- "\tSource ID: %d\n"
- "\tType: %d\n",
- upper_32_bits(fault_addr),
- lower_32_bits(fault_addr),
- fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
- GEN8_RING_FAULT_ENGINE_ID(fault),
- RING_FAULT_SRCID(fault),
- RING_FAULT_FAULT_TYPE(fault));
+ drm_dbg(&uncore->i915->drm, "Unexpected fault\n"
+ "\tAddr: 0x%08x_%08x\n"
+ "\tAddress space: %s\n"
+ "\tEngine ID: %d\n"
+ "\tSource ID: %d\n"
+ "\tType: %d\n",
+ upper_32_bits(fault_addr), lower_32_bits(fault_addr),
+ fault_data1 & FAULT_GTT_SEL ? "GGTT" : "PPGTT",
+ GEN8_RING_FAULT_ENGINE_ID(fault),
+ RING_FAULT_SRCID(fault),
+ RING_FAULT_FAULT_TYPE(fault));
}
}
@@ -345,7 +344,7 @@ static int intel_gt_init_scratch(struct intel_gt *gt, unsigned int size)
goto err_unref;
}
- ret = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ ret = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (ret)
goto err_unref;
@@ -455,6 +454,11 @@ err_rq:
if (!rq)
continue;
+ if (rq->fence.error) {
+ err = -EIO;
+ goto out;
+ }
+
GEM_BUG_ON(!test_bit(CONTEXT_ALLOC_BIT, &rq->context->flags));
state = rq->context->state;
if (!state)
@@ -538,6 +542,10 @@ static int __engines_verify_workarounds(struct intel_gt *gt)
err = -EIO;
}
+ /* Flush and restore the kernel context for safety */
+ if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME)
+ err = -EIO;
+
return err;
}
@@ -584,7 +592,9 @@ int intel_gt_init(struct intel_gt *gt)
if (err)
goto err_engines;
- intel_uc_init(&gt->uc);
+ err = intel_uc_init(&gt->uc);
+ if (err)
+ goto err_engines;
err = intel_gt_resume(gt);
if (err)
@@ -634,6 +644,13 @@ void intel_gt_driver_remove(struct intel_gt *gt)
void intel_gt_driver_unregister(struct intel_gt *gt)
{
intel_rps_driver_unregister(&gt->rps);
+
+ /*
+ * Upon unregistering the device (to prevent any new users), cancel
+ * all in-flight requests so that we can quickly unbind the active
+ * resources.
+ */
+ intel_gt_set_wedged(gt);
}
void intel_gt_driver_release(struct intel_gt *gt)
@@ -650,6 +667,9 @@ void intel_gt_driver_release(struct intel_gt *gt)
void intel_gt_driver_late_release(struct intel_gt *gt)
{
+ /* We need to wait for inflight RCU frees to release their grip */
+ rcu_barrier();
+
intel_uc_driver_late_release(&gt->uc);
intel_gt_fini_requests(gt);
intel_gt_fini_reset(gt);
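
The rcu_barrier() in intel_gt_driver_late_release() matters because
objects released with call_rcu() or kfree_rcu() may still have callbacks
queued when teardown begins; synchronize_rcu() only waits for readers,
while rcu_barrier() also waits for every queued callback to execute.
In outline (struct item is hypothetical):

    struct item {
        struct rcu_head rcu;
    };

    static void release(struct item *it)
    {
        kfree_rcu(it, rcu);    /* frees after a later grace period */
    }

    static void teardown(void)
    {
        rcu_barrier();  /* all queued RCU callbacks have now run */
        /* safe to tear down state those callbacks may touch */
    }
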
diff --git a/drivers/gpu/drm/i915/gt/intel_gt.h b/drivers/gpu/drm/i915/gt/intel_gt.h
index 1dac441cb8f4..4fac043750aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt.h
@@ -14,7 +14,7 @@ struct drm_i915_private;
#define GT_TRACE(gt, fmt, ...) do { \
const struct intel_gt *gt__ __maybe_unused = (gt); \
- GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
+ GEM_TRACE("%s " fmt, dev_name(gt__->i915->drm.dev), \
##__VA_ARGS__); \
} while (0)
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.c b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
index f796bdf1ed30..f0e7fd95165a 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.c
@@ -24,6 +24,21 @@ cs_irq_handler(struct intel_engine_cs *engine, u32 iir)
{
bool tasklet = false;
+ if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) {
+ u32 eir;
+
+ eir = ENGINE_READ(engine, RING_EIR);
+ ENGINE_TRACE(engine, "CS error: %x\n", eir);
+
+ /* Disable the error interrupt until after the reset */
+ if (likely(eir)) {
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, eir);
+ WRITE_ONCE(engine->execlists.error_interrupt, eir);
+ tasklet = true;
+ }
+ }
+
if (iir & GT_CONTEXT_SWITCH_INTERRUPT)
tasklet = true;
@@ -210,7 +225,10 @@ void gen11_gt_irq_reset(struct intel_gt *gt)
void gen11_gt_irq_postinstall(struct intel_gt *gt)
{
- const u32 irqs = GT_RENDER_USER_INTERRUPT | GT_CONTEXT_SWITCH_INTERRUPT;
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT;
struct intel_uncore *uncore = gt->uncore;
const u32 dmask = irqs << 16 | irqs;
const u32 smask = irqs << 16;
@@ -279,66 +297,56 @@ void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir)
if (gt_iir & (GT_BLT_CS_ERROR_INTERRUPT |
GT_BSD_CS_ERROR_INTERRUPT |
- GT_RENDER_CS_MASTER_ERROR_INTERRUPT))
+ GT_CS_MASTER_ERROR_INTERRUPT))
DRM_DEBUG("Command parser error, gt_iir 0x%08x\n", gt_iir);
if (gt_iir & GT_PARITY_ERROR(gt->i915))
gen7_parity_error_irq_handler(gt, gt_iir);
}
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl)
{
void __iomem * const regs = gt->uncore->regs;
+ u32 iir;
if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- gt_iir[0] = raw_reg_read(regs, GEN8_GT_IIR(0));
- if (likely(gt_iir[0]))
- raw_reg_write(regs, GEN8_GT_IIR(0), gt_iir[0]);
- }
-
- if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- gt_iir[1] = raw_reg_read(regs, GEN8_GT_IIR(1));
- if (likely(gt_iir[1]))
- raw_reg_write(regs, GEN8_GT_IIR(1), gt_iir[1]);
- }
-
- if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gt_iir[2] = raw_reg_read(regs, GEN8_GT_IIR(2));
- if (likely(gt_iir[2]))
- raw_reg_write(regs, GEN8_GT_IIR(2), gt_iir[2]);
- }
-
- if (master_ctl & GEN8_GT_VECS_IRQ) {
- gt_iir[3] = raw_reg_read(regs, GEN8_GT_IIR(3));
- if (likely(gt_iir[3]))
- raw_reg_write(regs, GEN8_GT_IIR(3), gt_iir[3]);
- }
-}
-
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4])
-{
- if (master_ctl & (GEN8_GT_RCS_IRQ | GEN8_GT_BCS_IRQ)) {
- cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
- gt_iir[0] >> GEN8_RCS_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
- gt_iir[0] >> GEN8_BCS_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(0));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[RENDER_CLASS][0],
+ iir >> GEN8_RCS_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[COPY_ENGINE_CLASS][0],
+ iir >> GEN8_BCS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(0), iir);
+ }
}
if (master_ctl & (GEN8_GT_VCS0_IRQ | GEN8_GT_VCS1_IRQ)) {
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
- gt_iir[1] >> GEN8_VCS0_IRQ_SHIFT);
- cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
- gt_iir[1] >> GEN8_VCS1_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(1));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][0],
+ iir >> GEN8_VCS0_IRQ_SHIFT);
+ cs_irq_handler(gt->engine_class[VIDEO_DECODE_CLASS][1],
+ iir >> GEN8_VCS1_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(1), iir);
+ }
}
if (master_ctl & GEN8_GT_VECS_IRQ) {
- cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
- gt_iir[3] >> GEN8_VECS_IRQ_SHIFT);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(3));
+ if (likely(iir)) {
+ cs_irq_handler(gt->engine_class[VIDEO_ENHANCEMENT_CLASS][0],
+ iir >> GEN8_VECS_IRQ_SHIFT);
+ raw_reg_write(regs, GEN8_GT_IIR(3), iir);
+ }
}
if (master_ctl & (GEN8_GT_PM_IRQ | GEN8_GT_GUC_IRQ)) {
- gen6_rps_irq_handler(&gt->rps, gt_iir[2]);
- guc_irq_handler(&gt->uc.guc, gt_iir[2] >> 16);
+ iir = raw_reg_read(regs, GEN8_GT_IIR(2));
+ if (likely(iir)) {
+ gen6_rps_irq_handler(&gt->rps, iir);
+ guc_irq_handler(&gt->uc.guc, iir >> 16);
+ raw_reg_write(regs, GEN8_GT_IIR(2), iir);
+ }
}
}
@@ -354,25 +362,18 @@ void gen8_gt_irq_reset(struct intel_gt *gt)
void gen8_gt_irq_postinstall(struct intel_gt *gt)
{
- struct intel_uncore *uncore = gt->uncore;
-
/* These are interrupts we'll toggle with the ring mask register */
- u32 gt_interrupts[] = {
- (GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_BCS_IRQ_SHIFT),
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS0_IRQ_SHIFT |
- GT_RENDER_USER_INTERRUPT << GEN8_VCS1_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VCS1_IRQ_SHIFT),
-
+ const u32 irqs =
+ GT_CS_MASTER_ERROR_INTERRUPT |
+ GT_RENDER_USER_INTERRUPT |
+ GT_CONTEXT_SWITCH_INTERRUPT;
+ const u32 gt_interrupts[] = {
+ irqs << GEN8_RCS_IRQ_SHIFT | irqs << GEN8_BCS_IRQ_SHIFT,
+ irqs << GEN8_VCS0_IRQ_SHIFT | irqs << GEN8_VCS1_IRQ_SHIFT,
0,
-
- (GT_RENDER_USER_INTERRUPT << GEN8_VECS_IRQ_SHIFT |
- GT_CONTEXT_SWITCH_INTERRUPT << GEN8_VECS_IRQ_SHIFT)
+ irqs << GEN8_VECS_IRQ_SHIFT,
};
+ struct intel_uncore *uncore = gt->uncore;
gt->pm_ier = 0x0;
gt->pm_imr = ~gt->pm_ier;
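
Folding gen8_gt_irq_ack() into gen8_gt_irq_handler() replaces the old
"snapshot all four IIR banks, then dispatch from the copies" split with
one read-dispatch-ack sequence per bank, and drops the gt_iir[4] scratch
array callers had to thread through. The per-bank shape, sketched with
bank_handler() standing in for the cs/rps/guc dispatch:

    extern void bank_handler(u32 iir);

    static void handle_bank(void __iomem *regs, i915_reg_t iir_reg)
    {
        u32 iir = raw_reg_read(regs, iir_reg);

        if (likely(iir)) {
            bank_handler(iir);                 /* dispatch while latched */
            raw_reg_write(regs, iir_reg, iir); /* ack what was handled */
        }
    }
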
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_irq.h b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
index 8f37593712c9..886c5cf408a2 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_irq.h
+++ b/drivers/gpu/drm/i915/gt/intel_gt_irq.h
@@ -36,9 +36,8 @@ void gen5_gt_enable_irq(struct intel_gt *gt, u32 mask);
void gen6_gt_irq_handler(struct intel_gt *gt, u32 gt_iir);
-void gen8_gt_irq_ack(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
+void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl);
void gen8_gt_irq_reset(struct intel_gt *gt);
-void gen8_gt_irq_handler(struct intel_gt *gt, u32 master_ctl, u32 gt_iir[4]);
void gen8_gt_irq_postinstall(struct intel_gt *gt);
#endif /* INTEL_GT_IRQ_H */
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm.c b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
index d1c2f034296a..8b653c0f5e5f 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_pm.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_pm.c
@@ -216,7 +216,7 @@ int intel_gt_resume(struct intel_gt *gt)
intel_engine_pm_get(engine);
engine->serial++; /* kernel context lost */
- err = engine->resume(engine);
+ err = intel_engine_resume(engine);
intel_engine_pm_put(engine);
if (err) {
diff --git a/drivers/gpu/drm/i915/gt/intel_gt_requests.c b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
index 7ef1d37970f6..24c99d0838af 100644
--- a/drivers/gpu/drm/i915/gt/intel_gt_requests.c
+++ b/drivers/gpu/drm/i915/gt/intel_gt_requests.c
@@ -99,6 +99,9 @@ static bool add_retire(struct intel_engine_cs *engine,
void intel_engine_add_retire(struct intel_engine_cs *engine,
struct intel_timeline *tl)
{
+ /* We don't deal well with the engine disappearing beneath us */
+ GEM_BUG_ON(intel_engine_is_virtual(engine));
+
if (add_retire(engine, tl))
schedule_work(&engine->retire_work);
}
@@ -144,24 +147,32 @@ long intel_gt_retire_requests_timeout(struct intel_gt *gt, long timeout)
fence = i915_active_fence_get(&tl->last_request);
if (fence) {
+ mutex_unlock(&tl->mutex);
+
timeout = dma_fence_wait_timeout(fence,
interruptible,
timeout);
dma_fence_put(fence);
+
+ /* Retirement is best effort */
+ if (!mutex_trylock(&tl->mutex)) {
+ active_count++;
+ goto out_active;
+ }
}
}
if (!retire_requests(tl) || flush_submission(gt))
active_count++;
+ mutex_unlock(&tl->mutex);
- spin_lock(&timelines->lock);
+out_active: spin_lock(&timelines->lock);
- /* Resume iteration after dropping lock */
+ /* Resume list iteration after reacquiring spinlock */
list_safe_reset_next(tl, tn, link);
if (atomic_dec_and_test(&tl->active_count))
list_del(&tl->link);
- mutex_unlock(&tl->mutex);
/* Defer the final release to after the spinlock */
if (refcount_dec_and_test(&tl->kref.refcount)) {
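
The retire-loop change stops intel_gt_retire_requests_timeout() from
sleeping in dma_fence_wait_timeout() while holding tl->mutex: the mutex
is dropped across the wait and re-taken with mutex_trylock(), and if
another party holds it the timeline is counted as still active and
revisited on a later pass, retirement being best effort. The general
shape, with stand-in names rather than the driver function:

    static bool wait_then_retire(struct mutex *lock, struct dma_fence *fence)
    {
        mutex_unlock(lock);          /* never sleep holding the lock */
        dma_fence_wait(fence, true); /* interruptible, may sleep */

        if (!mutex_trylock(lock))    /* best effort: don't block */
            return false;            /* caller retries later */

        return true;                 /* lock held again, safe to retire */
    }
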
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.c b/drivers/gpu/drm/i915/gt/intel_gtt.c
index 16acdc5d6734..2a72cce63fd9 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.c
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.c
@@ -171,7 +171,9 @@ void __i915_vm_close(struct i915_address_space *vm)
{
struct i915_vma *vma, *vn;
- mutex_lock(&vm->mutex);
+ if (!atomic_dec_and_mutex_lock(&vm->open, &vm->mutex))
+ return;
+
list_for_each_entry_safe(vma, vn, &vm->bound_list, vm_link) {
struct drm_i915_gem_object *obj = vma->obj;
@@ -186,6 +188,7 @@ void __i915_vm_close(struct i915_address_space *vm)
i915_gem_object_put(obj);
}
GEM_BUG_ON(!list_empty(&vm->bound_list));
+
mutex_unlock(&vm->mutex);
}
@@ -299,6 +302,25 @@ fill_page_dma(const struct i915_page_dma *p, const u64 val, unsigned int count)
kunmap_atomic(memset64(kmap_atomic(p->page), val, count));
}
+static void poison_scratch_page(struct page *page, unsigned long size)
+{
+ if (!IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ return;
+
+ GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));
+
+ do {
+ void *vaddr;
+
+ vaddr = kmap(page);
+ memset(vaddr, POISON_FREE, PAGE_SIZE);
+ kunmap(page);
+
+ page = pfn_to_page(page_to_pfn(page) + 1);
+ size -= PAGE_SIZE;
+ } while (size);
+}
+
int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
{
unsigned long size;
@@ -331,6 +353,17 @@ int setup_scratch_page(struct i915_address_space *vm, gfp_t gfp)
if (unlikely(!page))
goto skip;
+ /*
+ * Use a non-zero scratch page for debugging.
+ *
+ * We want a value that should be reasonably obvious
+ * to spot in the error state, while also causing a GPU hang
+ * if executed. We prefer using a clear page in production, so
+ * should it ever be accidentally used, the effect should be
+ * fairly benign.
+ */
+ poison_scratch_page(page, size);
+
addr = dma_map_page_attrs(vm->dma,
page, 0, size,
PCI_DMA_BIDIRECTIONAL,
@@ -448,36 +481,12 @@ void gtt_write_workarounds(struct intel_gt *gt)
intel_uncore_write(uncore,
HSW_GTT_CACHE_EN,
can_use_gtt_cache ? GTT_CACHE_EN_ALL : 0);
- WARN_ON_ONCE(can_use_gtt_cache &&
- intel_uncore_read(uncore,
- HSW_GTT_CACHE_EN) == 0);
+ drm_WARN_ON_ONCE(&i915->drm, can_use_gtt_cache &&
+ intel_uncore_read(uncore,
+ HSW_GTT_CACHE_EN) == 0);
}
}
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags)
-{
- gen8_pte_t pte = addr | _PAGE_PRESENT | _PAGE_RW;
-
- if (unlikely(flags & PTE_READ_ONLY))
- pte &= ~_PAGE_RW;
-
- switch (level) {
- case I915_CACHE_NONE:
- pte |= PPAT_UNCACHED;
- break;
- case I915_CACHE_WT:
- pte |= PPAT_DISPLAY_ELLC;
- break;
- default:
- pte |= PPAT_CACHED;
- break;
- }
-
- return pte;
-}
-
static void tgl_setup_private_ppat(struct intel_uncore *uncore)
{
/* TGL doesn't support LLC or AGE settings */
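
__i915_vm_close() now leans on atomic_dec_and_mutex_lock(), which
decrements vm->open and acquires vm->mutex only when the count reaches
zero, so the last closer tears down the bound list without racing a
concurrent opener between the decrement and the lock. The idiom in
isolation (close_resources() is hypothetical):

    extern void close_resources(void);

    static void put_last(atomic_t *count, struct mutex *lock)
    {
        if (!atomic_dec_and_mutex_lock(count, lock))
            return;         /* not the last reference */

        close_resources();  /* runs exactly once, under the lock */
        mutex_unlock(lock);
    }
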
diff --git a/drivers/gpu/drm/i915/gt/intel_gtt.h b/drivers/gpu/drm/i915/gt/intel_gtt.h
index 7da7681c20b1..b3116fe8d180 100644
--- a/drivers/gpu/drm/i915/gt/intel_gtt.h
+++ b/drivers/gpu/drm/i915/gt/intel_gtt.h
@@ -429,8 +429,7 @@ static inline void
i915_vm_close(struct i915_address_space *vm)
{
GEM_BUG_ON(!atomic_read(&vm->open));
- if (atomic_dec_and_test(&vm->open))
- __i915_vm_close(vm);
+ __i915_vm_close(vm);
i915_vm_put(vm);
}
@@ -512,12 +511,8 @@ int i915_ppgtt_init_hw(struct intel_gt *gt);
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt);
-void i915_gem_suspend_gtt_mappings(struct drm_i915_private *i915);
-void i915_gem_restore_gtt_mappings(struct drm_i915_private *i915);
-
-u64 gen8_pte_encode(dma_addr_t addr,
- enum i915_cache_level level,
- u32 flags);
+void i915_ggtt_suspend(struct i915_ggtt *gtt);
+void i915_ggtt_resume(struct i915_ggtt *ggtt);
int setup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
void cleanup_page_dma(struct i915_address_space *vm, struct i915_page_dma *p);
diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c
index ceb785b75c25..e3f637b3650e 100644
--- a/drivers/gpu/drm/i915/gt/intel_llc.c
+++ b/drivers/gpu/drm/i915/gt/intel_llc.c
@@ -50,6 +50,9 @@ static bool get_ia_constants(struct intel_llc *llc,
struct drm_i915_private *i915 = llc_to_gt(llc)->i915;
struct intel_rps *rps = &llc_to_gt(llc)->rps;
+ if (!HAS_LLC(i915) || IS_DGFX(i915))
+ return false;
+
if (rps->max_freq <= rps->min_freq)
return false;
@@ -147,8 +150,7 @@ static void gen6_update_ring_freq(struct intel_llc *llc)
void intel_llc_enable(struct intel_llc *llc)
{
- if (HAS_LLC(llc_to_gt(llc)->i915))
- gen6_update_ring_freq(llc);
+ gen6_update_ring_freq(llc);
}
void intel_llc_disable(struct intel_llc *llc)
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index 0cf0f6fae675..112531b29f59 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
+++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
@@ -176,8 +176,6 @@
/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
-#define WA_TAIL_DWORDS 2
-#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
struct virtual_engine {
struct intel_engine_cs base;
@@ -237,7 +235,8 @@ static void execlists_init_reg_state(u32 *reg_state,
bool close);
static void
__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine);
+ const struct intel_engine_cs *engine,
+ u32 head);
static void mark_eio(struct i915_request *rq)
{
@@ -246,7 +245,7 @@ static void mark_eio(struct i915_request *rq)
GEM_BUG_ON(i915_request_signaled(rq));
- dma_fence_set_error(&rq->fence, -EIO);
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -294,7 +293,7 @@ static inline struct i915_priolist *to_priolist(struct rb_node *rb)
static inline int rq_prio(const struct i915_request *rq)
{
- return rq->sched.attr.priority;
+ return READ_ONCE(rq->sched.attr.priority);
}
static int effective_prio(const struct i915_request *rq)
@@ -985,6 +984,8 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
list_move(&rq->sched.link, pl);
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
active = rq;
} else {
struct intel_engine_cs *owner = rq->context->engine;
@@ -1003,7 +1004,7 @@ __unwind_incomplete_requests(struct intel_engine_cs *engine)
i915_request_cancel_breadcrumb(rq);
spin_unlock(&rq->lock);
}
- rq->engine = owner;
+ WRITE_ONCE(rq->engine, owner);
owner->submit_request(rq);
active = NULL;
}
@@ -1184,17 +1185,58 @@ static void reset_active(struct i915_request *rq,
head = rq->tail;
else
head = active_request(ce->timeline, rq)->head;
- ce->ring->head = intel_ring_wrap(ce->ring, head);
- intel_ring_update_space(ce->ring);
+ head = intel_ring_wrap(ce->ring, head);
/* Scrub the context image to prevent replaying the previous batch */
restore_default_state(ce, engine);
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, head);
/* We've switched away, so this should be a no-op, but intent matters */
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
+static u32 intel_context_get_runtime(const struct intel_context *ce)
+{
+ /*
+ * We can use either ppHWSP[16] which is recorded before the context
+ * switch (and so excludes the cost of context switches) or use the
+ * value from the context image itself, which is saved/restored earlier
+ * and so includes the cost of the save.
+ */
+ return READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);
+}
+
+static void st_update_runtime_underflow(struct intel_context *ce, s32 dt)
+{
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+ ce->runtime.num_underflow += dt < 0;
+ ce->runtime.max_underflow = max_t(u32, ce->runtime.max_underflow, -dt);
+#endif
+}
+
+static void intel_context_update_runtime(struct intel_context *ce)
+{
+ u32 old;
+ s32 dt;
+
+ if (intel_context_is_barrier(ce))
+ return;
+
+ old = ce->runtime.last;
+ ce->runtime.last = intel_context_get_runtime(ce);
+ dt = ce->runtime.last - old;
+
+ if (unlikely(dt <= 0)) {
+ CE_TRACE(ce, "runtime underflow: last=%u, new=%u, delta=%d\n",
+ old, ce->runtime.last, dt);
+ st_update_runtime_underflow(ce, dt);
+ return;
+ }
+
+ ewma_runtime_add(&ce->runtime.avg, dt);
+ ce->runtime.total += dt;
+}
+
static inline struct intel_engine_cs *
__execlists_schedule_in(struct i915_request *rq)
{
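
intel_context_update_runtime() above samples CTX_TIMESTAMP, a 32-bit
value that wraps, so the delta is formed by unsigned subtraction and
interpreted as s32; that remains correct across wraparound whenever the
true distance is below 2^31, and a non-positive dt flags the underflow
cases the selftest counters record. The wrap-safe idiom on its own:

    /* Signed distance between two samples of a wrapping u32 counter. */
    static s32 sample_delta(u32 prev, u32 now)
    {
        return (s32)(now - prev); /* modulo-2^32 subtraction */
    }
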
@@ -1209,12 +1251,12 @@ __execlists_schedule_in(struct i915_request *rq)
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
execlists_check_context(ce, engine);
+ ce->lrc_desc &= ~GENMASK_ULL(47, 37);
if (ce->tag) {
/* Use a fixed tag for OA and friends */
ce->lrc_desc |= (u64)ce->tag << 32;
} else {
/* We don't need a strict matching tag, just different values */
- ce->lrc_desc &= ~GENMASK_ULL(47, 37);
ce->lrc_desc |=
(u64)(++engine->context_tag % NUM_CONTEXT_TAG) <<
GEN11_SW_CTX_ID_SHIFT;
@@ -1274,10 +1316,11 @@ __execlists_schedule_out(struct i915_request *rq,
* If we have just completed this context, the engine may now be
* idle and we want to re-enter powersaving.
*/
- if (list_is_last(&rq->link, &ce->timeline->requests) &&
+ if (list_is_last_rcu(&rq->link, &ce->timeline->requests) &&
i915_request_completed(rq))
intel_engine_add_retire(engine, ce->timeline);
+ intel_context_update_runtime(ce);
intel_engine_context_out(engine);
execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
intel_gt_pm_put_async(engine->gt);
@@ -1319,7 +1362,7 @@ static u64 execlists_update_context(struct i915_request *rq)
{
struct intel_context *ce = rq->context;
u64 desc = ce->lrc_desc;
- u32 tail;
+ u32 tail, prev;
/*
* WaIdleLiteRestore:bdw,skl
@@ -1332,9 +1375,15 @@ static u64 execlists_update_context(struct i915_request *rq)
* subsequent resubmissions (for lite restore). Should that fail us,
* and we try and submit the same tail again, force the context
* reload.
+ *
+ * If we need to return to a preempted context, we need to skip the
+ * lite-restore and force it to reload the RING_TAIL. Otherwise, the
+ * HW has a tendency to ignore us rewinding the TAIL to the end of
+ * an earlier request.
*/
tail = intel_ring_set_tail(rq->ring, rq->tail);
- if (unlikely(ce->lrc_reg_state[CTX_RING_TAIL] == tail))
+ prev = ce->lrc_reg_state[CTX_RING_TAIL];
+ if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0))
desc |= CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state[CTX_RING_TAIL] = tail;
rq->tail = rq->wa_tail;
@@ -1387,15 +1436,26 @@ trace_ports(const struct intel_engine_execlists *execlists,
ports[1] ? ports[1]->fence.seqno : 0);
}
+static inline bool
+reset_in_progress(const struct intel_engine_execlists *execlists)
+{
+ return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
+}
+
static __maybe_unused bool
assert_pending_valid(const struct intel_engine_execlists *execlists,
const char *msg)
{
struct i915_request * const *port, *rq;
struct intel_context *ce = NULL;
+ bool sentinel = false;
trace_ports(execlists, msg, execlists->pending);
+ /* We may be messing around with the lists during reset, lalala */
+ if (reset_in_progress(execlists))
+ return true;
+
if (!execlists->pending[0]) {
GEM_TRACE_ERR("Nothing pending for promotion!\n");
return false;
@@ -1422,6 +1482,26 @@ assert_pending_valid(const struct intel_engine_execlists *execlists,
}
ce = rq->context;
+ /*
+ * Sentinels are supposed to be lonely so they flush the
+ * current execution off the HW. Check that they are the
+ * only request in the pending submission.
+ */
+ if (sentinel) {
+ GEM_TRACE_ERR("context:%llx after sentinel in pending[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
+ sentinel = i915_request_has_sentinel(rq);
+ if (sentinel && port != execlists->pending) {
+ GEM_TRACE_ERR("sentinel context:%llx not in prime position[%zd]\n",
+ ce->timeline->fence_context,
+ port - execlists->pending);
+ return false;
+ }
+
/* Hold tightly onto the lock to prevent concurrent retires! */
if (!spin_trylock_irqsave(&rq->lock, flags))
continue;
@@ -1517,6 +1597,11 @@ static bool can_merge_ctx(const struct intel_context *prev,
return true;
}
+static unsigned long i915_request_flags(const struct i915_request *rq)
+{
+ return READ_ONCE(rq->fence.flags);
+}
+
static bool can_merge_rq(const struct i915_request *prev,
const struct i915_request *next)
{
@@ -1534,13 +1619,15 @@ static bool can_merge_rq(const struct i915_request *prev,
if (i915_request_completed(next))
return true;
- if (unlikely((prev->fence.flags ^ next->fence.flags) &
- (I915_FENCE_FLAG_NOPREEMPT | I915_FENCE_FLAG_SENTINEL)))
+ if (unlikely((i915_request_flags(prev) ^ i915_request_flags(next)) &
+ (BIT(I915_FENCE_FLAG_NOPREEMPT) |
+ BIT(I915_FENCE_FLAG_SENTINEL))))
return false;
if (!can_merge_ctx(prev->context, next->context))
return false;
+ GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno));
return true;
}
@@ -1591,16 +1678,15 @@ static void virtual_xfer_breadcrumbs(struct virtual_engine *ve,
spin_unlock(&old->breadcrumbs.irq_lock);
}
-static struct i915_request *
-last_active(const struct intel_engine_execlists *execlists)
-{
- struct i915_request * const *last = READ_ONCE(execlists->active);
+#define for_each_waiter(p__, rq__) \
+ list_for_each_entry_lockless(p__, \
+ &(rq__)->sched.waiters_list, \
+ wait_link)
- while (*last && i915_request_completed(*last))
- last++;
-
- return *last;
-}
+#define for_each_signaler(p__, rq__) \
+ list_for_each_entry_rcu(p__, \
+ &(rq__)->sched.signalers_list, \
+ signal_link)
static void defer_request(struct i915_request *rq, struct list_head * const pl)
{
@@ -1619,7 +1705,7 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
GEM_BUG_ON(i915_request_is_active(rq));
list_move_tail(&rq->sched.link, pl);
- list_for_each_entry(p, &rq->sched.waiters_list, wait_link) {
+ for_each_waiter(p, rq) {
struct i915_request *w =
container_of(p->waiter, typeof(*w), sched);
@@ -1632,8 +1718,8 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
!i915_request_completed(rq));
GEM_BUG_ON(i915_request_is_active(w));
- if (list_empty(&w->sched.link))
- continue; /* Not yet submitted; unready */
+ if (!i915_request_is_ready(w))
+ continue;
if (rq_prio(w) < rq_prio(rq))
continue;
@@ -1665,11 +1751,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
if (!intel_engine_has_timeslices(engine))
return false;
- if (list_is_last(&rq->sched.link, &engine->active.requests))
- return false;
-
- hint = max(rq_prio(list_next_entry(rq, sched.link)),
- engine->execlists.queue_priority_hint);
+ hint = engine->execlists.queue_priority_hint;
+ if (!list_is_last(&rq->sched.link, &engine->active.requests))
+ hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
return hint >= effective_prio(rq);
}
@@ -1692,12 +1776,13 @@ timeslice(const struct intel_engine_cs *engine)
static unsigned long
active_timeslice(const struct intel_engine_cs *engine)
{
- const struct i915_request *rq = *engine->execlists.active;
+ const struct intel_engine_execlists *execlists = &engine->execlists;
+ const struct i915_request *rq = *execlists->active;
if (!rq || i915_request_completed(rq))
return 0;
- if (engine->execlists.switch_priority_hint < effective_prio(rq))
+ if (READ_ONCE(execlists->switch_priority_hint) < effective_prio(rq))
return 0;
return timeslice(engine);
@@ -1711,16 +1796,29 @@ static void set_timeslice(struct intel_engine_cs *engine)
set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
}
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+ struct intel_engine_execlists *execlists = &engine->execlists;
+ int prio = queue_prio(execlists);
+
+ WRITE_ONCE(execlists->switch_priority_hint, prio);
+ if (prio == INT_MIN)
+ return;
+
+ if (timer_pending(&execlists->timer))
+ return;
+
+ set_timer_ms(&execlists->timer, timeslice(engine));
+}
+
static void record_preemption(struct intel_engine_execlists *execlists)
{
(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
}
-static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
+static unsigned long active_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
- struct i915_request *rq;
-
- rq = last_active(&engine->execlists);
if (!rq)
return 0;
@@ -1731,13 +1829,14 @@ static unsigned long active_preempt_timeout(struct intel_engine_cs *engine)
return READ_ONCE(engine->props.preempt_timeout_ms);
}
-static void set_preempt_timeout(struct intel_engine_cs *engine)
+static void set_preempt_timeout(struct intel_engine_cs *engine,
+ const struct i915_request *rq)
{
if (!intel_engine_has_preempt_reset(engine))
return;
set_timer_ms(&engine->execlists.preempt,
- active_preempt_timeout(engine));
+ active_preempt_timeout(engine, rq));
}
static inline void clear_ports(struct i915_request **ports, int count)
@@ -1750,6 +1849,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct i915_request **port = execlists->pending;
struct i915_request ** const last_port = port + execlists->port_mask;
+ struct i915_request * const *active;
struct i915_request *last;
struct rb_node *rb;
bool submit = false;
@@ -1804,7 +1904,10 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* i.e. we will retrigger preemption following the ack in case
* of trouble.
*/
- last = last_active(execlists);
+ active = READ_ONCE(execlists->active);
+ while ((last = *active) && i915_request_completed(last))
+ active++;
+
if (last) {
if (need_preempt(engine, last, rb)) {
ENGINE_TRACE(engine,
@@ -1831,14 +1934,6 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
*/
__unwind_incomplete_requests(engine);
- /*
- * If we need to return to the preempted context, we
- * need to skip the lite-restore and force it to
- * reload the RING_TAIL. Otherwise, the HW has a
- * tendency to ignore us rewinding the TAIL to the
- * end of an earlier request.
- */
- last->context->lrc_desc |= CTX_DESC_FORCE_RESTORE;
last = NULL;
} else if (need_timeslice(engine, last) &&
timer_expired(&engine->execlists.timer)) {
@@ -1882,11 +1977,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
* Even if ELSP[1] is occupied and not worthy
* of timeslices, our queue might be.
*/
- if (!execlists->timer.expires &&
- need_timeslice(engine, last))
- set_timer_ms(&execlists->timer,
- timeslice(engine));
-
+ start_timeslice(engine);
return;
}
}
@@ -1921,7 +2012,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
if (last && !can_merge_rq(last, rq)) {
spin_unlock(&ve->base.active.lock);
- return; /* leave this for another */
+ start_timeslice(engine);
+ return; /* leave this for another sibling */
}
ENGINE_TRACE(engine,
@@ -1933,13 +2025,14 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
"",
yesno(engine != ve->siblings[0]));
- ve->request = NULL;
- ve->base.execlists.queue_priority_hint = INT_MIN;
+ WRITE_ONCE(ve->request, NULL);
+ WRITE_ONCE(ve->base.execlists.queue_priority_hint,
+ INT_MIN);
rb_erase_cached(rb, &execlists->virtual);
RB_CLEAR_NODE(rb);
GEM_BUG_ON(!(rq->execution_mask & engine->mask));
- rq->engine = engine;
+ WRITE_ONCE(rq->engine, engine);
if (engine != ve->siblings[0]) {
u32 *regs = ve->context.lrc_reg_state;
@@ -2059,6 +2152,9 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
GEM_BUG_ON(last &&
!can_merge_ctx(last->context,
rq->context));
+ GEM_BUG_ON(last &&
+ i915_seqno_passed(last->fence.seqno,
+ rq->fence.seqno));
submit = true;
last = rq;
@@ -2097,7 +2193,7 @@ done:
* Skip if we ended up with exactly the same set of requests,
* e.g. trying to timeslice a pair of ordered contexts
*/
- if (!memcmp(execlists->active, execlists->pending,
+ if (!memcmp(active, execlists->pending,
(port - execlists->pending + 1) * sizeof(*port))) {
do
execlists_schedule_out(fetch_and_zero(port));
@@ -2108,7 +2204,7 @@ done:
clear_ports(port + 1, last_port - port);
execlists_submit_ports(engine);
- set_preempt_timeout(engine);
+ set_preempt_timeout(engine, *active);
} else {
skip_submit:
ring_set_paused(engine, 0);
@@ -2129,6 +2225,7 @@ cancel_port_requests(struct intel_engine_execlists * const execlists)
execlists_schedule_out(*port);
clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight));
+ smp_wmb(); /* complete the seqlock for execlists_active() */
WRITE_ONCE(execlists->active, execlists->inflight);
}
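
The smp_wmb() added here and in the promotion path of process_csb()
forms the write side of the publish sequence the comments call a
seqlock: the inflight array is written first, the barrier orders those
stores, and only then is execlists->active repointed, so a reader that
observes the new pointer also observes fully written entries. An
outline of the pairing, assuming a reader such as execlists_active()
issues the matching read barrier:

    struct pub {
        struct i915_request **active;
        struct i915_request *inflight[2];
        struct i915_request *pending[2];
    };

    static void publish(struct pub *p)
    {
        memcpy(p->inflight, p->pending, sizeof(p->inflight));
        smp_wmb();  /* entries visible before the pointer */
        WRITE_ONCE(p->active, p->inflight);
    }

    static struct i915_request *peek(struct pub *p)
    {
        struct i915_request **cur = READ_ONCE(p->active);

        smp_rmb();  /* pointer visible before the entries */
        return READ_ONCE(*cur);
    }
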
@@ -2139,12 +2236,6 @@ invalidate_csb_entries(const u32 *first, const u32 *last)
clflush((void *)last);
}
-static inline bool
-reset_in_progress(const struct intel_engine_execlists *execlists)
-{
- return unlikely(!__tasklet_is_enabled(&execlists->tasklet));
-}
-
/*
* Starting with Gen12, the status has a new format:
*
@@ -2235,7 +2326,6 @@ static void process_csb(struct intel_engine_cs *engine)
*/
head = execlists->csb_head;
tail = READ_ONCE(*execlists->csb_write);
- ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
if (unlikely(head == tail))
return;
@@ -2249,6 +2339,7 @@ static void process_csb(struct intel_engine_cs *engine)
*/
rmb();
+ ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail);
do {
bool promote;
@@ -2283,11 +2374,13 @@ static void process_csb(struct intel_engine_cs *engine)
if (promote) {
struct i915_request * const *old = execlists->active;
+ GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
+
+ ring_set_paused(engine, 0);
+
/* Point active to the new ELSP; prevent overwriting */
WRITE_ONCE(execlists->active, execlists->pending);
-
- if (!inject_preempt_hang(execlists))
- ring_set_paused(engine, 0);
+ smp_wmb(); /* notify execlists_active() */
/* cancel old inflight, prepare for switch */
trace_ports(execlists, "preempted", old);
@@ -2295,12 +2388,12 @@ static void process_csb(struct intel_engine_cs *engine)
execlists_schedule_out(*old++);
/* switch pending to inflight */
- GEM_BUG_ON(!assert_pending_valid(execlists, "promote"));
- WRITE_ONCE(execlists->active,
- memcpy(execlists->inflight,
- execlists->pending,
- execlists_num_ports(execlists) *
- sizeof(*execlists->pending)));
+ memcpy(execlists->inflight,
+ execlists->pending,
+ execlists_num_ports(execlists) *
+ sizeof(*execlists->pending));
+ smp_wmb(); /* complete the seqlock */
+ WRITE_ONCE(execlists->active, execlists->inflight);
WRITE_ONCE(execlists->pending[0], NULL);
} else {
@@ -2315,8 +2408,37 @@ static void process_csb(struct intel_engine_cs *engine)
* coherent (visible from the CPU) before the
* user interrupt and CSB is processed.
*/
- GEM_BUG_ON(!i915_request_completed(*execlists->active) &&
- !reset_in_progress(execlists));
+ if (GEM_SHOW_DEBUG() &&
+ !i915_request_completed(*execlists->active) &&
+ !reset_in_progress(execlists)) {
+ struct i915_request *rq __maybe_unused =
+ *execlists->active;
+ const u32 *regs __maybe_unused =
+ rq->context->lrc_reg_state;
+
+ ENGINE_TRACE(engine,
+ "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n",
+ ENGINE_READ(engine, RING_START),
+ ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR,
+ ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR,
+ ENGINE_READ(engine, RING_CTL),
+ ENGINE_READ(engine, RING_MI_MODE));
+ ENGINE_TRACE(engine,
+ "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ",
+ i915_ggtt_offset(rq->ring->vma),
+ rq->head, rq->tail,
+ rq->fence.context,
+ lower_32_bits(rq->fence.seqno),
+ hwsp_seqno(rq));
+ ENGINE_TRACE(engine,
+ "ctx:{start:%08x, head:%04x, tail:%04x}, ",
+ regs[CTX_RING_START],
+ regs[CTX_RING_HEAD],
+ regs[CTX_RING_TAIL]);
+
+ GEM_BUG_ON("context completed before request");
+ }
+
execlists_schedule_out(*execlists->active++);
GEM_BUG_ON(execlists->active - execlists->inflight >
@@ -2344,31 +2466,348 @@ static void process_csb(struct intel_engine_cs *engine)
static void __execlists_submission_tasklet(struct intel_engine_cs *const engine)
{
lockdep_assert_held(&engine->active.lock);
- if (!engine->execlists.pending[0]) {
+ if (!READ_ONCE(engine->execlists.pending[0])) {
rcu_read_lock(); /* protect peeking at execlists->active */
execlists_dequeue(engine);
rcu_read_unlock();
}
}
-static noinline void preempt_reset(struct intel_engine_cs *engine)
+static void __execlists_hold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ if (i915_request_is_active(rq))
+ __i915_request_unsubmit(rq);
+
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ list_move_tail(&rq->sched.link, &rq->engine->active.hold);
+ i915_request_set_hold(rq);
+ RQ_TRACE(rq, "on hold\n");
+
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_is_ready(w))
+ continue;
+
+ if (i915_request_completed(w))
+ continue;
+
+ if (i915_request_on_hold(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static bool execlists_hold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ if (i915_request_completed(rq)) { /* too late! */
+ rq = NULL;
+ goto unlock;
+ }
+
+ if (rq->engine != engine) { /* preempted virtual engine */
+ struct virtual_engine *ve = to_virtual_engine(rq->engine);
+
+ /*
+ * intel_context_inflight() is only protected by virtue
+ * of process_csb() being called only by the tasklet (or
+ * directly from inside reset while the tasklet is suspended).
+ * Assert that neither of those are allowed to run while we
+ * poke at the request queues.
+ */
+ GEM_BUG_ON(!reset_in_progress(&engine->execlists));
+
+ /*
+ * An unsubmitted request along a virtual engine will
+ * remain on the active (this) engine until we are able
+ * to process the context switch away (and so mark the
+ * context as no longer in flight). That cannot have happened
+ * yet, otherwise we would not be hanging!
+ */
+ spin_lock(&ve->base.active.lock);
+ GEM_BUG_ON(intel_context_inflight(rq->context) != engine);
+ GEM_BUG_ON(ve->request != rq);
+ ve->request = NULL;
+ spin_unlock(&ve->base.active.lock);
+ i915_request_put(rq);
+
+ rq->engine = engine;
+ }
+
+ /*
+ * Transfer this request onto the hold queue to prevent it
+ * being resubmitted to HW (and potentially completed) before we have
+ * released it. Since we may have already submitted following
+ * requests, we need to remove those as well.
+ */
+ GEM_BUG_ON(i915_request_on_hold(rq));
+ GEM_BUG_ON(rq->engine != engine);
+ __execlists_hold(rq);
+ GEM_BUG_ON(list_empty(&engine->active.hold));
+
+unlock:
+ spin_unlock_irq(&engine->active.lock);
+ return rq;
+}
+
+static bool hold_request(const struct i915_request *rq)
+{
+ struct i915_dependency *p;
+ bool result = false;
+
+ /*
+ * If one of our ancestors is on hold, we must also be on hold,
+ * otherwise we will bypass it and execute before it.
+ */
+ rcu_read_lock();
+ for_each_signaler(p, rq) {
+ const struct i915_request *s =
+ container_of(p->signaler, typeof(*s), sched);
+
+ if (s->engine != rq->engine)
+ continue;
+
+ result = i915_request_on_hold(s);
+ if (result)
+ break;
+ }
+ rcu_read_unlock();
+
+ return result;
+}
+
+static void __execlists_unhold(struct i915_request *rq)
+{
+ LIST_HEAD(list);
+
+ do {
+ struct i915_dependency *p;
+
+ RQ_TRACE(rq, "hold release\n");
+
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+ GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit));
+
+ i915_request_clear_hold(rq);
+ list_move_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(rq->engine,
+ rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+
+ /* Also release any children on this engine that are ready */
+ for_each_waiter(p, rq) {
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
+ /* Propagate any change in error status */
+ if (rq->fence.error)
+ i915_request_set_error_once(w, rq->fence.error);
+
+ if (w->engine != rq->engine)
+ continue;
+
+ if (!i915_request_on_hold(w))
+ continue;
+
+ /* Check that no other parents are also on hold */
+ if (hold_request(w))
+ continue;
+
+ list_move_tail(&w->sched.link, &list);
+ }
+
+ rq = list_first_entry_or_null(&list, typeof(*rq), sched.link);
+ } while (rq);
+}
+
+static void execlists_unhold(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ spin_lock_irq(&engine->active.lock);
+
+ /*
+ * Move this request back to the priority queue, and all of its
+ * children and grandchildren that were suspended along with it.
+ */
+ __execlists_unhold(rq);
+
+ if (rq_prio(rq) > engine->execlists.queue_priority_hint) {
+ engine->execlists.queue_priority_hint = rq_prio(rq);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
+ }
+
+ spin_unlock_irq(&engine->active.lock);
+}
+
+struct execlists_capture {
+ struct work_struct work;
+ struct i915_request *rq;
+ struct i915_gpu_coredump *error;
+};
+
+static void execlists_capture_work(struct work_struct *work)
+{
+ struct execlists_capture *cap = container_of(work, typeof(*cap), work);
+ const gfp_t gfp = GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ struct intel_engine_cs *engine = cap->rq->engine;
+ struct intel_gt_coredump *gt = cap->error->gt;
+ struct intel_engine_capture_vma *vma;
+
+ /* Compress all the objects attached to the request, slow! */
+ vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp);
+ if (vma) {
+ struct i915_vma_compress *compress =
+ i915_vma_capture_prepare(gt);
+
+ intel_engine_coredump_add_vma(gt->engine, vma, compress);
+ i915_vma_capture_finish(gt, compress);
+ }
+
+ gt->simulated = gt->engine->simulated;
+ cap->error->simulated = gt->simulated;
+
+ /* Publish the error state, and announce it to the world */
+ i915_error_state_store(cap->error);
+ i915_gpu_coredump_put(cap->error);
+
+ /* Return this request and all that depend upon it for signaling */
+ execlists_unhold(engine, cap->rq);
+ i915_request_put(cap->rq);
+
+ kfree(cap);
+}
+
+static struct execlists_capture *capture_regs(struct intel_engine_cs *engine)
+{
+ const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN;
+ struct execlists_capture *cap;
+
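+ /* Called from the reset tasklet (softirq); allocations must not sleep */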
+ cap = kmalloc(sizeof(*cap), gfp);
+ if (!cap)
+ return NULL;
+
+ cap->error = i915_gpu_coredump_alloc(engine->i915, gfp);
+ if (!cap->error)
+ goto err_cap;
+
+ cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp);
+ if (!cap->error->gt)
+ goto err_gpu;
+
+ cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp);
+ if (!cap->error->gt->engine)
+ goto err_gt;
+
+ return cap;
+
+err_gt:
+ kfree(cap->error->gt);
+err_gpu:
+ kfree(cap->error);
+err_cap:
+ kfree(cap);
+ return NULL;
+}
+
+static bool execlists_capture(struct intel_engine_cs *engine)
+{
+ struct execlists_capture *cap;
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR))
+ return true;
+
+ /*
+ * We need to _quickly_ capture the engine state before we reset.
+ * We are inside an atomic section (softirq) here and we are delaying
+ * the forced preemption event.
+ */
+ cap = capture_regs(engine);
+ if (!cap)
+ return true;
+
+ spin_lock_irq(&engine->active.lock);
+ cap->rq = execlists_active(&engine->execlists);
+ if (cap->rq) {
+ cap->rq = active_request(cap->rq->context->timeline, cap->rq);
+ cap->rq = i915_request_get_rcu(cap->rq);
+ }
+ spin_unlock_irq(&engine->active.lock);
+ if (!cap->rq)
+ goto err_free;
+
+ /*
+ * Remove the request from the execlists queue, and take ownership
+ * of the request. We pass it to our worker who will _slowly_ compress
+ * all the pages the _user_ requested for debugging their batch, after
+ * which we return it to the queue for signaling.
+ *
+ * By removing them from the execlists queue, we also remove the
+ * requests from being processed by __unwind_incomplete_requests()
+ * during the intel_engine_reset(), and so they will *not* be replayed
+ * afterwards.
+ *
+ * Note that because we have not yet reset the engine at this point,
+ * it is possible that the request we have identified as guilty did
+ * in fact complete, in which case we will then hit an arbitration
+ * point allowing the outstanding preemption to succeed. The likelihood
+ * of that is very low (as capturing of the engine registers should be
+ * fast enough to run inside an irq-off atomic section!), so we will
+ * simply hold that request accountable for being non-preemptible
+ * long enough to force the reset.
+ */
+ if (!execlists_hold(engine, cap->rq))
+ goto err_rq;
+
+ INIT_WORK(&cap->work, execlists_capture_work);
+ schedule_work(&cap->work);
+ return true;
+
+err_rq:
+ i915_request_put(cap->rq);
+err_free:
+ i915_gpu_coredump_put(cap->error);
+ kfree(cap);
+ return false;
+}
+
+static void execlists_reset(struct intel_engine_cs *engine, const char *msg)
{
const unsigned int bit = I915_RESET_ENGINE + engine->id;
unsigned long *lock = &engine->gt->reset.flags;
- if (i915_modparams.reset < 3)
+ if (!intel_has_reset_engine(engine->gt))
return;
if (test_and_set_bit(bit, lock))
return;
+ ENGINE_TRACE(engine, "reset for %s\n", msg);
+
/* Mark this tasklet as disabled to avoid waiting for it to complete */
tasklet_disable_nosync(&engine->execlists.tasklet);
- ENGINE_TRACE(engine, "preempt timeout %lu+%ums\n",
- READ_ONCE(engine->props.preempt_timeout_ms),
- jiffies_to_msecs(jiffies - engine->execlists.preempt.expires));
- intel_engine_reset(engine, "preemption time out");
+ ring_set_paused(engine, 1); /* Freeze the current request in place */
+ if (execlists_capture(engine))
+ intel_engine_reset(engine, msg);
+ else
+ ring_set_paused(engine, 0);
tasklet_enable(&engine->execlists.tasklet);
clear_and_wake_up_bit(bit, lock);
@@ -2397,6 +2836,13 @@ static void execlists_submission_tasklet(unsigned long data)
bool timeout = preempt_timeout(engine);
process_csb(engine);
+
+ if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) {
+ engine->execlists.error_interrupt = 0;
+ if (ENGINE_READ(engine, RING_ESR)) /* confirm the error */
+ execlists_reset(engine, "CS error");
+ }
+
if (!READ_ONCE(engine->execlists.pending[0]) || timeout) {
unsigned long flags;
@@ -2405,8 +2851,8 @@ static void execlists_submission_tasklet(unsigned long data)
spin_unlock_irqrestore(&engine->active.lock, flags);
/* Recheck after serialising with direct-submission */
- if (timeout && preempt_timeout(engine))
- preempt_reset(engine);
+ if (unlikely(timeout && preempt_timeout(engine)))
+ execlists_reset(engine, "preemption time out");
}
}
@@ -2430,11 +2876,12 @@ static void execlists_preempt(struct timer_list *timer)
}
static void queue_request(struct intel_engine_cs *engine,
- struct i915_sched_node *node,
- int prio)
+ struct i915_request *rq)
{
- GEM_BUG_ON(!list_empty(&node->link));
- list_add_tail(&node->link, i915_sched_lookup_priolist(engine, prio));
+ GEM_BUG_ON(!list_empty(&rq->sched.link));
+ list_add_tail(&rq->sched.link,
+ i915_sched_lookup_priolist(engine, rq_prio(rq)));
+ set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
}
static void __submit_queue_imm(struct intel_engine_cs *engine)
@@ -2462,6 +2909,13 @@ static void submit_queue(struct intel_engine_cs *engine,
__submit_queue_imm(engine);
}
+static bool ancestor_on_hold(const struct intel_engine_cs *engine,
+ const struct i915_request *rq)
+{
+ GEM_BUG_ON(i915_request_on_hold(rq));
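+ /* Only pay the cost of walking the signalers if anything is on hold */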
+ return !list_empty(&engine->active.hold) && hold_request(rq);
+}
+
static void execlists_submit_request(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -2470,12 +2924,18 @@ static void execlists_submit_request(struct i915_request *request)
/* Will be called from irq-context when using foreign fences. */
spin_lock_irqsave(&engine->active.lock, flags);
- queue_request(engine, &request->sched, rq_prio(request));
+ if (unlikely(ancestor_on_hold(engine, request))) {
+ RQ_TRACE(request, "ancestor on hold\n");
+ list_add_tail(&request->sched.link, &engine->active.hold);
+ i915_request_set_hold(request);
+ } else {
+ queue_request(engine, request);
- GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
- GEM_BUG_ON(list_empty(&request->sched.link));
+ GEM_BUG_ON(RB_EMPTY_ROOT(&engine->execlists.queue.rb_root));
+ GEM_BUG_ON(list_empty(&request->sched.link));
- submit_queue(engine, request);
+ submit_queue(engine, request);
+ }
spin_unlock_irqrestore(&engine->active.lock, flags);
}
@@ -2531,22 +2991,23 @@ static void execlists_context_unpin(struct intel_context *ce)
ce->engine);
i915_gem_object_unpin_map(ce->state->obj);
- intel_ring_reset(ce->ring, ce->ring->tail);
}
static void
__execlists_update_reg_state(const struct intel_context *ce,
- const struct intel_engine_cs *engine)
+ const struct intel_engine_cs *engine,
+ u32 head)
{
struct intel_ring *ring = ce->ring;
u32 *regs = ce->lrc_reg_state;
- GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
+ GEM_BUG_ON(!intel_ring_offset_valid(ring, head));
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
regs[CTX_RING_START] = i915_ggtt_offset(ring->vma);
- regs[CTX_RING_HEAD] = ring->head;
+ regs[CTX_RING_HEAD] = head;
regs[CTX_RING_TAIL] = ring->tail;
+ regs[CTX_RING_CTL] = RING_CTL_SIZE(ring->size) | RING_VALID;
/* RPCS */
if (engine->class == RENDER_CLASS) {
@@ -2574,7 +3035,7 @@ __execlists_context_pin(struct intel_context *ce,
ce->lrc_desc = lrc_descriptor(ce, engine) | CTX_DESC_FORCE_RESTORE;
ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, ce->ring->tail);
return 0;
}
@@ -2594,28 +3055,12 @@ static void execlists_context_reset(struct intel_context *ce)
CE_TRACE(ce, "reset\n");
GEM_BUG_ON(!intel_context_is_pinned(ce));
- /*
- * Because we emit WA_TAIL_DWORDS there may be a disparity
- * between our bookkeeping in ce->ring->head and ce->ring->tail and
- * that stored in context. As we only write new commands from
- * ce->ring->tail onwards, everything before that is junk. If the GPU
- * starts reading from its RING_HEAD from the context, it may try to
- * execute that junk and die.
- *
- * The contexts that are stilled pinned on resume belong to the
- * kernel, and are local to each engine. All other contexts will
- * have their head/tail sanitized upon pinning before use, so they
- * will never see garbage,
- *
- * So to avoid that we reset the context images upon resume. For
- * simplicity, we just zero everything out.
- */
intel_ring_reset(ce->ring, ce->ring->emit);
/* Scrub away the garbage */
execlists_init_reg_state(ce->lrc_reg_state,
ce, ce->engine, ce->ring, true);
- __execlists_update_reg_state(ce, ce->engine);
+ __execlists_update_reg_state(ce, ce->engine, ce->ring->tail);
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE;
}
@@ -2637,7 +3082,8 @@ static int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
u32 *cs;
- GEM_BUG_ON(!i915_request_timeline(rq)->has_initial_breadcrumb);
+ if (!i915_request_timeline(rq)->has_initial_breadcrumb)
+ return 0;
cs = intel_ring_begin(rq, 6);
if (IS_ERR(cs))
@@ -2930,7 +3376,7 @@ static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
goto err;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err)
goto err;
@@ -3019,6 +3465,49 @@ static int intel_init_workaround_bb(struct intel_engine_cs *engine)
return ret;
}
+static void enable_error_interrupt(struct intel_engine_cs *engine)
+{
+ u32 status;
+
+ engine->execlists.error_interrupt = 0;
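+ /* Mask all error interrupts while we clear out any stale errors */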
+ ENGINE_WRITE(engine, RING_EMR, ~0u);
+ ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */
+
+ status = ENGINE_READ(engine, RING_ESR);
+ if (unlikely(status)) {
+ dev_err(engine->i915->drm.dev,
+ "engine '%s' resumed still in error: %08x\n",
+ engine->name, status);
+ __intel_gt_reset(engine->gt, engine->mask);
+ }
+
+ /*
+ * On current gen8+, we have 2 signals to play with
+ *
+ * - I915_ERROR_INSTRUCTION (bit 0)
+ *
+ * Generate an error if the command parser encounters an invalid
+ * instruction
+ *
+ * This is a fatal error.
+ *
+ * - CP_PRIV (bit 2)
+ *
+ * Generate an error on privilege violation (where the CP replaces
+ * the instruction with a no-op). This also fires for writes into
+ * read-only scratch pages.
+ *
+ * This is a non-fatal error; parsing continues.
+ *
+ * - there are a few others defined for odd HW that we do not use
+ *
+ * Since CP_PRIV fires for cases where we have chosen to ignore the
+ * error (as the HW is validating and suppressing the mistakes), we
+ * only unmask the instruction error bit.
+ */
+ ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION);
+}
+
static void enable_execlists(struct intel_engine_cs *engine)
{
u32 mode;
@@ -3040,6 +3529,8 @@ static void enable_execlists(struct intel_engine_cs *engine)
i915_ggtt_offset(engine->status_page.vma));
ENGINE_POSTING_READ(engine, RING_HWS_PGA);
+ enable_error_interrupt(engine);
+
engine->context_tag = 0;
}
@@ -3057,9 +3548,6 @@ static bool unexpected_starting_state(struct intel_engine_cs *engine)
static int execlists_resume(struct intel_engine_cs *engine)
{
- intel_engine_apply_workarounds(engine);
- intel_engine_apply_whitelist(engine);
-
intel_mocs_init_engine(engine);
intel_engine_reset_breadcrumbs(engine);
@@ -3170,6 +3658,7 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
struct intel_engine_execlists * const execlists = &engine->execlists;
struct intel_context *ce;
struct i915_request *rq;
+ u32 head;
mb(); /* paranoia: read the CSB pointers from after the reset */
clflush(execlists->csb_write);
@@ -3189,23 +3678,24 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
if (!rq)
goto unwind;
- /* We still have requests in-flight; the engine should be active */
- GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
-
ce = rq->context;
GEM_BUG_ON(!i915_vma_is_pinned(ce->state));
if (i915_request_completed(rq)) {
/* Idle context; tidy up the ring so we can restart afresh */
- ce->ring->head = intel_ring_wrap(ce->ring, rq->tail);
+ head = intel_ring_wrap(ce->ring, rq->tail);
goto out_replay;
}
+ /* We still have requests in-flight; the engine should be active */
+ GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
+
/* Context has requests still in-flight; it should not be idle! */
GEM_BUG_ON(i915_active_is_idle(&ce->active));
+
rq = active_request(ce->timeline, rq);
- ce->ring->head = intel_ring_wrap(ce->ring, rq->head);
- GEM_BUG_ON(ce->ring->head == ce->ring->tail);
+ head = intel_ring_wrap(ce->ring, rq->head);
+ GEM_BUG_ON(head == ce->ring->tail);
/*
* If this request hasn't started yet, e.g. it is waiting on a
@@ -3250,10 +3740,9 @@ static void __execlists_reset(struct intel_engine_cs *engine, bool stalled)
out_replay:
ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n",
- ce->ring->head, ce->ring->tail);
- intel_ring_update_space(ce->ring);
+ head, ce->ring->tail);
__execlists_reset_reg_state(ce, engine);
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, head);
ce->lrc_desc |= CTX_DESC_FORCE_RESTORE; /* paranoid: GPU was reset! */
unwind:
@@ -3277,7 +3766,10 @@ static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled)
static void nop_submission_tasklet(unsigned long data)
{
+ struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
+
/* The driver is wedged; don't process any more events. */
+ WRITE_ONCE(engine->execlists.queue_priority_hint, INT_MIN);
}
static void execlists_reset_cancel(struct intel_engine_cs *engine)
@@ -3325,6 +3817,10 @@ static void execlists_reset_cancel(struct intel_engine_cs *engine)
i915_priolist_free(p);
}
+ /* On-hold requests will be flushed to timeline upon their release */
+ list_for_each_entry(rq, &engine->active.hold, sched.link)
+ mark_eio(rq);
+
/* Cancel all attached virtual engines */
while ((rb = rb_first_cached(&execlists->virtual))) {
struct virtual_engine *ve =
@@ -3669,26 +4165,6 @@ static int gen12_emit_flush_render(struct i915_request *request,
*cs++ = preparser_disable(false);
intel_ring_advance(request, cs);
-
- /*
- * Wa_1604544889:tgl
- */
- if (IS_TGL_REVID(request->i915, TGL_REVID_A0, TGL_REVID_A0)) {
- flags = 0;
- flags |= PIPE_CONTROL_CS_STALL;
- flags |= PIPE_CONTROL_HDC_PIPELINE_FLUSH;
-
- flags |= PIPE_CONTROL_STORE_DATA_INDEX;
- flags |= PIPE_CONTROL_QW_WRITE;
-
- cs = intel_ring_begin(request, 6);
- if (IS_ERR(cs))
- return PTR_ERR(cs);
-
- cs = gen8_emit_pipe_control(cs, flags,
- LRC_PPHWSP_SCRATCH_ADDR);
- intel_ring_advance(request, cs);
- }
}
return 0;
@@ -3962,6 +4438,7 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
+ engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift;
}
static void rcs_submission_override(struct intel_engine_cs *engine)
@@ -4203,8 +4680,13 @@ populate_lr_context(struct intel_context *ce,
inhibit = false;
}
- /* The second page of the context object contains some fields which must
- * be set up prior to the first execution. */
+ /* Clear the ppHWSP (inc. per-context counters) */
+ memset(vaddr, 0, PAGE_SIZE);
+
+ /*
+ * The second page of the context object contains some registers which
+ * must be set up prior to the first execution.
+ */
execlists_init_reg_state(vaddr + LRC_STATE_PN * PAGE_SIZE,
ce, engine, ring, inhibit);
@@ -4242,8 +4724,17 @@ static int __execlists_context_alloc(struct intel_context *ce,
if (!ce->timeline) {
struct intel_timeline *tl;
+ struct i915_vma *hwsp;
- tl = intel_timeline_create(engine->gt, NULL);
+ /*
+ * Use the static global HWSP for the kernel context, and
+ * a dynamically allocated cacheline for everyone else.
+ */
+ hwsp = NULL;
+ if (unlikely(intel_context_is_barrier(ce)))
+ hwsp = engine->status_page.vma;
+
+ tl = intel_timeline_create(engine->gt, hwsp);
if (IS_ERR(tl)) {
ret = PTR_ERR(tl);
goto error_deref_obj;
@@ -4412,7 +4903,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
mask = rq->execution_mask;
if (unlikely(!mask)) {
/* Invalid selection, submit to a random engine in error */
- i915_request_skip(rq, -ENODEV);
+ i915_request_set_error_once(rq, -ENODEV);
mask = ve->siblings[0]->mask;
}
@@ -4426,7 +4917,7 @@ static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve)
static void virtual_submission_tasklet(unsigned long data)
{
struct virtual_engine * const ve = (struct virtual_engine *)data;
- const int prio = ve->base.execlists.queue_priority_hint;
+ const int prio = READ_ONCE(ve->base.execlists.queue_priority_hint);
intel_engine_mask_t mask;
unsigned int n;
@@ -4822,11 +5313,15 @@ void intel_execlists_show_requests(struct intel_engine_cs *engine,
show_request(m, last, "\t\tE ");
}
- last = NULL;
- count = 0;
+ if (execlists->switch_priority_hint != INT_MIN)
+ drm_printf(m, "\t\tSwitch priority hint: %d\n",
+ READ_ONCE(execlists->switch_priority_hint));
if (execlists->queue_priority_hint != INT_MIN)
drm_printf(m, "\t\tQueue priority hint: %d\n",
- execlists->queue_priority_hint);
+ READ_ONCE(execlists->queue_priority_hint));
+
+ last = NULL;
+ count = 0;
for (rb = rb_first_cached(&execlists->queue); rb; rb = rb_next(rb)) {
struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
int i;
@@ -4892,10 +5387,7 @@ void intel_lr_context_reset(struct intel_engine_cs *engine,
restore_default_state(ce, engine);
/* Rerun the request; its payload has been neutered (if guilty). */
- ce->ring->head = head;
- intel_ring_update_space(ce->ring);
-
- __execlists_update_reg_state(ce, engine);
+ __execlists_update_reg_state(ce, engine, head);
}
bool
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
index 08a3be65f700..d39b72590e40 100644
--- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
+++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h
@@ -17,6 +17,7 @@
#define CTX_RING_CTL (0x0a + 1)
#define CTX_BB_STATE (0x10 + 1)
#define CTX_BB_PER_CTX_PTR (0x18 + 1)
+#define CTX_TIMESTAMP (0x22 + 1)
#define CTX_PDP3_UDW (0x24 + 1)
#define CTX_PDP3_LDW (0x26 + 1)
#define CTX_PDP2_UDW (0x28 + 1)
diff --git a/drivers/gpu/drm/i915/gt/intel_mocs.c b/drivers/gpu/drm/i915/gt/intel_mocs.c
index eeef90b55c64..632e08a4592b 100644
--- a/drivers/gpu/drm/i915/gt/intel_mocs.c
+++ b/drivers/gpu/drm/i915/gt/intel_mocs.c
@@ -280,9 +280,32 @@ static const struct drm_i915_mocs_entry icl_mocs_table[] = {
GEN11_MOCS_ENTRIES
};
-static bool get_mocs_settings(const struct drm_i915_private *i915,
- struct drm_i915_mocs_table *table)
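+/* Which MOCS programming mechanisms are present on this platform */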
+enum {
+ HAS_GLOBAL_MOCS = BIT(0),
+ HAS_ENGINE_MOCS = BIT(1),
+ HAS_RENDER_L3CC = BIT(2),
+};
+
+static bool has_l3cc(const struct drm_i915_private *i915)
{
+ return true;
+}
+
+static bool has_global_mocs(const struct drm_i915_private *i915)
+{
+ return HAS_GLOBAL_MOCS_REGISTERS(i915);
+}
+
+static bool has_mocs(const struct drm_i915_private *i915)
+{
+ return !IS_DGFX(i915);
+}
+
+static unsigned int get_mocs_settings(const struct drm_i915_private *i915,
+ struct drm_i915_mocs_table *table)
+{
+ unsigned int flags;
+
if (INTEL_GEN(i915) >= 12) {
table->size = ARRAY_SIZE(tgl_mocs_table);
table->table = tgl_mocs_table;
@@ -300,13 +323,13 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
table->n_entries = GEN9_NUM_MOCS_ENTRIES;
table->table = broxton_mocs_table;
} else {
- WARN_ONCE(INTEL_GEN(i915) >= 9,
- "Platform that should have a MOCS table does not.\n");
- return false;
+ drm_WARN_ONCE(&i915->drm, INTEL_GEN(i915) >= 9,
+ "Platform that should have a MOCS table does not.\n");
+ return 0;
}
if (GEM_DEBUG_WARN_ON(table->size > table->n_entries))
- return false;
+ return 0;
/* WaDisableSkipCaching:skl,bxt,kbl,glk */
if (IS_GEN(i915, 9)) {
@@ -315,10 +338,20 @@ static bool get_mocs_settings(const struct drm_i915_private *i915,
for (i = 0; i < table->size; i++)
if (GEM_DEBUG_WARN_ON(table->table[i].l3cc_value &
(L3_ESC(1) | L3_SCC(0x7))))
- return false;
+ return 0;
}
- return true;
+ flags = 0;
+ if (has_mocs(i915)) {
+ if (has_global_mocs(i915))
+ flags |= HAS_GLOBAL_MOCS;
+ else
+ flags |= HAS_ENGINE_MOCS;
+ }
+ if (has_l3cc(i915))
+ flags |= HAS_RENDER_L3CC;
+
+ return flags;
}
/*
@@ -411,18 +444,20 @@ static void init_l3cc_table(struct intel_engine_cs *engine,
void intel_mocs_init_engine(struct intel_engine_cs *engine)
{
struct drm_i915_mocs_table table;
+ unsigned int flags;
/* Called under a blanket forcewake */
assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL);
- if (!get_mocs_settings(engine->i915, &table))
+ flags = get_mocs_settings(engine->i915, &table);
+ if (!flags)
return;
/* Platforms with global MOCS do not need per-engine initialization. */
- if (!HAS_GLOBAL_MOCS_REGISTERS(engine->i915))
+ if (flags & HAS_ENGINE_MOCS)
init_mocs_table(engine, &table);
- if (engine->class == RENDER_CLASS)
+ if (flags & HAS_RENDER_L3CC && engine->class == RENDER_CLASS)
init_l3cc_table(engine, &table);
}
@@ -431,26 +466,17 @@ static u32 global_mocs_offset(void)
return i915_mmio_reg_offset(GEN12_GLOBAL_MOCS(0));
}
-static void init_global_mocs(struct intel_gt *gt)
+void intel_mocs_init(struct intel_gt *gt)
{
struct drm_i915_mocs_table table;
+ unsigned int flags;
/*
* LLC and eDRAM control values are not applicable to dgfx
*/
- if (IS_DGFX(gt->i915))
- return;
-
- if (!get_mocs_settings(gt->i915, &table))
- return;
-
- __init_mocs_table(gt->uncore, &table, global_mocs_offset());
-}
-
-void intel_mocs_init(struct intel_gt *gt)
-{
- if (HAS_GLOBAL_MOCS_REGISTERS(gt->i915))
- init_global_mocs(gt);
+ flags = get_mocs_settings(gt->i915, &table);
+ if (flags & HAS_GLOBAL_MOCS)
+ __init_mocs_table(gt->uncore, &table, global_mocs_offset());
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c
index 9e303c29d6e3..66c07c32745c 100644
--- a/drivers/gpu/drm/i915/gt/intel_rc6.c
+++ b/drivers/gpu/drm/i915/gt/intel_rc6.c
@@ -7,6 +7,7 @@
#include <linux/pm_runtime.h>
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gt.h"
#include "intel_gt_pm.h"
#include "intel_rc6.h"
@@ -226,10 +227,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6)
set(uncore, GEN6_RC_SLEEP, 0);
set(uncore, GEN6_RC1e_THRESHOLD, 1000);
- if (IS_IVYBRIDGE(i915))
- set(uncore, GEN6_RC6_THRESHOLD, 125000);
- else
- set(uncore, GEN6_RC6_THRESHOLD, 50000);
+ set(uncore, GEN6_RC6_THRESHOLD, 50000);
set(uncore, GEN6_RC6p_THRESHOLD, 150000);
set(uncore, GEN6_RC6pp_THRESHOLD, 64000); /* unused */
@@ -299,7 +297,6 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
pcbr_offset = (pcbr & ~4095) - i915->dsm.start;
pctx = i915_gem_object_create_stolen_for_preallocated(i915,
pcbr_offset,
- I915_GTT_OFFSET_NONE,
pctx_size);
if (IS_ERR(pctx))
return PTR_ERR(pctx);
@@ -323,10 +320,10 @@ static int vlv_rc6_init(struct intel_rc6 *rc6)
return PTR_ERR(pctx);
}
- GEM_BUG_ON(range_overflows_t(u64,
- i915->dsm.start,
- pctx->stolen->start,
- U32_MAX));
+ GEM_BUG_ON(range_overflows_end_t(u64,
+ i915->dsm.start,
+ pctx->stolen->start,
+ U32_MAX));
pctx_paddr = i915->dsm.start + pctx->stolen->start;
intel_uncore_write(uncore, VLV_PCBR, pctx_paddr);
@@ -542,6 +539,8 @@ void intel_rc6_init(struct intel_rc6 *rc6)
void intel_rc6_sanitize(struct intel_rc6 *rc6)
{
+ memset(rc6->prev_hw_residency, 0, sizeof(rc6->prev_hw_residency));
+
if (rc6->enabled) { /* unbalanced suspend/resume */
rpm_get(rc6);
rc6->enabled = false;
@@ -713,7 +712,7 @@ u64 intel_rc6_residency_ns(struct intel_rc6 *rc6, const i915_reg_t reg)
*/
i = (i915_mmio_reg_offset(reg) -
i915_mmio_reg_offset(GEN6_GT_GFX_RC6_LOCKED)) / sizeof(u32);
- if (WARN_ON_ONCE(i >= ARRAY_SIZE(rc6->cur_residency)))
+ if (drm_WARN_ON_ONCE(&i915->drm, i >= ARRAY_SIZE(rc6->cur_residency)))
return 0;
fw_domains = intel_uncore_forcewake_for_reg(uncore, reg, FW_REG_READ);
diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c
index beee0cf89bce..8b170c1876b3 100644
--- a/drivers/gpu/drm/i915/gt/intel_reset.c
+++ b/drivers/gpu/drm/i915/gt/intel_reset.c
@@ -48,8 +48,10 @@ static void engine_skip_context(struct i915_request *rq)
lockdep_assert_held(&engine->active.lock);
list_for_each_entry_continue(rq, &engine->active.requests, sched.link)
- if (rq->context == hung_ctx)
- i915_request_skip(rq, -EIO);
+ if (rq->context == hung_ctx) {
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
+ }
}
static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
@@ -72,9 +74,10 @@ static void client_mark_guilty(struct i915_gem_context *ctx, bool banned)
if (score) {
atomic_add(score, &file_priv->ban_score);
- DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
- ctx->name, score,
- atomic_read(&file_priv->ban_score));
+ drm_dbg(&ctx->i915->drm,
+ "client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
}
}
@@ -91,13 +94,7 @@ static bool mark_guilty(struct i915_request *rq)
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
-
- if (i915_gem_context_is_closed(ctx)) {
- intel_context_set_banned(rq->context);
- banned = true;
- goto out;
- }
+ return intel_context_is_banned(rq->context);
atomic_inc(&ctx->guilty_count);
@@ -122,8 +119,8 @@ static bool mark_guilty(struct i915_request *rq)
if (time_before(jiffies, prev_hang + CONTEXT_FAST_HANG_JIFFIES))
banned = true;
if (banned) {
- DRM_DEBUG_DRIVER("context %s: guilty %d, banned\n",
- ctx->name, atomic_read(&ctx->guilty_count));
+ drm_dbg(&ctx->i915->drm, "context %s: guilty %d, banned\n",
+ ctx->name, atomic_read(&ctx->guilty_count));
intel_context_set_banned(rq->context);
}
@@ -153,11 +150,12 @@ void __i915_request_reset(struct i915_request *rq, bool guilty)
rcu_read_lock(); /* protect the GEM context */
if (guilty) {
- i915_request_skip(rq, -EIO);
+ i915_request_set_error_once(rq, -EIO);
+ __i915_request_skip(rq);
if (mark_guilty(rq))
engine_skip_context(rq);
} else {
- dma_fence_set_error(&rq->fence, -EAGAIN);
+ i915_request_set_error_once(rq, -EAGAIN);
mark_innocent(rq);
}
rcu_read_unlock();
@@ -226,7 +224,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_MEDIA | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
goto out;
}
@@ -234,7 +232,7 @@ static int g4x_do_reset(struct intel_gt *gt,
GRDOM_RENDER | GRDOM_RESET_ENABLE);
ret = wait_for_atomic(g4x_reset_complete(pdev), 50);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
goto out;
}
@@ -260,7 +258,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for render reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for render reset failed\n");
goto out;
}
@@ -271,7 +269,7 @@ static int ilk_do_reset(struct intel_gt *gt, intel_engine_mask_t engine_mask,
5000, 0,
NULL);
if (ret) {
- DRM_DEBUG_DRIVER("Wait for media reset failed\n");
+ drm_dbg(&gt->i915->drm, "Wait for media reset failed\n");
goto out;
}
@@ -300,8 +298,9 @@ static int gen6_hw_domain_reset(struct intel_gt *gt, u32 hw_domain_mask)
500, 0,
NULL);
if (err)
- DRM_DEBUG_DRIVER("Wait for 0x%08x engines reset failed\n",
- hw_domain_mask);
+ drm_dbg(&gt->i915->drm,
+ "Wait for 0x%08x engines reset failed\n",
+ hw_domain_mask);
return err;
}
@@ -401,7 +400,8 @@ static int gen11_lock_sfc(struct intel_engine_cs *engine, u32 *hw_mask)
return 0;
if (ret) {
- DRM_DEBUG_DRIVER("Wait for SFC forced lock ack failed\n");
+ drm_dbg(&engine->i915->drm,
+ "Wait for SFC forced lock ack failed\n");
return ret;
}
@@ -515,9 +515,10 @@ static int gen8_engine_reset_prepare(struct intel_engine_cs *engine)
ret = __intel_wait_for_register_fw(uncore, reg, mask, ack,
700, 0, NULL);
if (ret)
- DRM_ERROR("%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
- engine->name, request,
- intel_uncore_read_fw(uncore, reg));
+ drm_err(&engine->i915->drm,
+ "%s reset request timed out: {request: %08x, RESET_CTL: %08x}\n",
+ engine->name, request,
+ intel_uncore_read_fw(uncore, reg));
return ret;
}
@@ -781,7 +782,7 @@ static void nop_submit_request(struct i915_request *request)
unsigned long flags;
RQ_TRACE(request, "-EIO\n");
- dma_fence_set_error(&request->fence, -EIO);
+ i915_request_set_error_once(request, -EIO);
spin_lock_irqsave(&engine->active.lock, flags);
__i915_request_submit(request);
@@ -800,13 +801,6 @@ static void __intel_gt_set_wedged(struct intel_gt *gt)
if (test_bit(I915_WEDGED, &gt->reset.flags))
return;
- if (GEM_SHOW_DEBUG() && !intel_engines_are_idle(gt)) {
- struct drm_printer p = drm_debug_printer(__func__);
-
- for_each_engine(engine, gt, id)
- intel_engine_dump(engine, &p, "%s\n", engine->name);
- }
-
GT_TRACE(gt, "start\n");
/*
@@ -845,10 +839,30 @@ void intel_gt_set_wedged(struct intel_gt *gt)
{
intel_wakeref_t wakeref;
+ if (test_bit(I915_WEDGED, &gt->reset.flags))
+ return;
+
+ wakeref = intel_runtime_pm_get(gt->uncore->rpm);
mutex_lock(&gt->reset.mutex);
- with_intel_runtime_pm(gt->uncore->rpm, wakeref)
- __intel_gt_set_wedged(gt);
+
+ if (GEM_SHOW_DEBUG()) {
+ struct drm_printer p = drm_debug_printer(__func__);
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ drm_printf(&p, "called from %pS\n", (void *)_RET_IP_);
+ for_each_engine(engine, gt, id) {
+ if (intel_engine_is_idle(engine))
+ continue;
+
+ intel_engine_dump(engine, &p, "%s\n", engine->name);
+ }
+ }
+
+ __intel_gt_set_wedged(gt);
+
mutex_unlock(&gt->reset.mutex);
+ intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
static bool __intel_gt_unset_wedged(struct intel_gt *gt)
@@ -969,7 +983,7 @@ static int resume(struct intel_gt *gt)
int ret;
for_each_engine(engine, gt, id) {
- ret = engine->resume(engine);
+ ret = intel_engine_resume(engine);
if (ret)
return ret;
}
@@ -1022,7 +1036,7 @@ void intel_gt_reset(struct intel_gt *gt,
if (i915_modparams.reset)
dev_err(gt->i915->drm.dev, "GPU reset not supported\n");
else
- DRM_DEBUG_DRIVER("GPU reset disabled\n");
+ drm_dbg(&gt->i915->drm, "GPU reset disabled\n");
goto error;
}
@@ -1049,8 +1063,9 @@ void intel_gt_reset(struct intel_gt *gt,
*/
ret = intel_gt_init_hw(gt);
if (ret) {
- DRM_ERROR("Failed to initialise HW following reset (%d)\n",
- ret);
+ drm_err(&gt->i915->drm,
+ "Failed to initialise HW following reset (%d)\n",
+ ret);
goto taint;
}
@@ -1126,9 +1141,8 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
ret = intel_guc_reset_engine(&engine->gt->uc.guc, engine);
if (ret) {
/* If we fail here, we expect to fallback to a global reset */
- DRM_DEBUG_DRIVER("%sFailed to reset %s, ret=%d\n",
- uses_guc ? "GuC " : "",
- engine->name, ret);
+ drm_dbg(&gt->i915->drm, "%sFailed to reset %s, ret=%d\n",
+ uses_guc ? "GuC " : "", engine->name, ret);
goto out;
}
@@ -1144,7 +1158,7 @@ int intel_engine_reset(struct intel_engine_cs *engine, const char *msg)
* have been reset to their default values. Follow the init_ring
* process to program RING_MODE, HWSP and re-enable submission.
*/
- ret = engine->resume(engine);
+ ret = intel_engine_resume(engine);
out:
intel_engine_cancel_stop_cs(engine);
@@ -1165,7 +1179,7 @@ static void intel_gt_reset_global(struct intel_gt *gt,
kobject_uevent_env(kobj, KOBJ_CHANGE, error_event);
- DRM_DEBUG_DRIVER("resetting chip\n");
+ drm_dbg(&gt->i915->drm, "resetting chip, engines=%x\n", engine_mask);
kobject_uevent_env(kobj, KOBJ_CHANGE, reset_event);
/* Use a watchdog to ensure that our reset completes */
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c
index 374b28f13ca0..8cda1b7e17ba 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring.c
@@ -31,17 +31,15 @@ int intel_ring_pin(struct intel_ring *ring)
if (atomic_fetch_inc(&ring->pin_count))
return 0;
- flags = PIN_GLOBAL;
-
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
if (vma->obj->stolen)
flags |= PIN_MAPPABLE;
else
flags |= PIN_HIGH;
- ret = i915_vma_pin(vma, 0, 0, flags);
+ ret = i915_ggtt_pin(vma, 0, flags);
if (unlikely(ret))
goto err_unpin;
@@ -145,6 +143,7 @@ intel_engine_create_ring(struct intel_engine_cs *engine, int size)
kref_init(&ring->ref);
ring->size = size;
+ ring->wrap = BITS_PER_TYPE(ring->size) - ilog2(size);
/*
* Workaround an erratum on the i830 which causes a hang if
diff --git a/drivers/gpu/drm/i915/gt/intel_ring.h b/drivers/gpu/drm/i915/gt/intel_ring.h
index ea2839d9e044..5bdce24994aa 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring.h
@@ -56,6 +56,14 @@ static inline u32 intel_ring_wrap(const struct intel_ring *ring, u32 pos)
return pos & (ring->size - 1);
}
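+/*
+ * Compare two ring offsets: the delta is shifted up into the sign bit
+ * (ring->wrap == 32 - ilog2(size)), so the result is positive if next
+ * is ahead of prev on the ring, and negative if it is behind.
+ */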
+static inline int intel_ring_direction(const struct intel_ring *ring,
+ u32 next, u32 prev)
+{
+ typecheck(typeof(ring->size), next);
+ typecheck(typeof(ring->size), prev);
+ return (next - prev) << ring->wrap;
+}
+
static inline bool
intel_ring_offset_valid(const struct intel_ring *ring,
unsigned int pos)
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
index bc44fe8e5ffa..1424582e4a9b 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c
+++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c
@@ -29,11 +29,10 @@
#include <linux/log2.h>
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gen6_ppgtt.h"
+#include "gen7_renderclear.h"
#include "i915_drv.h"
#include "i915_trace.h"
#include "intel_context.h"
@@ -568,7 +567,8 @@ static void flush_cs_tlb(struct intel_engine_cs *engine)
return;
/* ring should be idle before issuing a sync flush*/
- WARN_ON((ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ drm_WARN_ON(&dev_priv->drm,
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
ENGINE_WRITE(engine, RING_INSTPM,
_MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
@@ -626,6 +626,27 @@ static bool stop_ring(struct intel_engine_cs *engine)
return (ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR) == 0;
}
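+/* Substitute the GGTT's aliasing ppGTT when asked for the GGTT vm */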
+static struct i915_address_space *vm_alias(struct i915_address_space *vm)
+{
+ if (i915_is_ggtt(vm))
+ vm = &i915_vm_to_ggtt(vm)->alias->vm;
+
+ return vm;
+}
+
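+/* Point the engine's PP_DIR registers at the (aliasing) ppGTT on resume */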
+static void set_pp_dir(struct intel_engine_cs *engine)
+{
+ struct i915_address_space *vm = vm_alias(engine->gt->vm);
+
+ if (vm) {
+ struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
+
+ ENGINE_WRITE(engine, RING_PP_DIR_DCLV, PP_DIR_DCLV_2G);
+ ENGINE_WRITE(engine, RING_PP_DIR_BASE,
+ px_base(ppgtt->pd)->ggtt_offset << 10);
+ }
+}
+
static int xcs_resume(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
@@ -684,6 +705,8 @@ static int xcs_resume(struct intel_engine_cs *engine)
GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
intel_ring_update_space(ring);
+ set_pp_dir(engine);
+
/* First wake the ring up to an empty/idle ring */
ENGINE_WRITE(engine, RING_HEAD, ring->head);
ENGINE_WRITE(engine, RING_TAIL, ring->head);
@@ -857,43 +880,6 @@ static int rcs_resume(struct intel_engine_cs *engine)
intel_uncore_write(uncore, ECOSKPD,
_MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE));
- /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
- if (IS_GEN_RANGE(i915, 4, 6))
- intel_uncore_write(uncore, MI_MODE,
- _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
-
- /* We need to disable the AsyncFlip performance optimisations in order
- * to use MI_WAIT_FOR_EVENT within the CS. It should already be
- * programmed to '1' on all products.
- *
- * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
- */
- if (IS_GEN_RANGE(i915, 6, 7))
- intel_uncore_write(uncore, MI_MODE,
- _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
-
- /* Required for the hardware to program scanline values for waiting */
- /* WaEnableFlushTlbInvalidationMode:snb */
- if (IS_GEN(i915, 6))
- intel_uncore_write(uncore, GFX_MODE,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
-
- /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
- if (IS_GEN(i915, 7))
- intel_uncore_write(uncore, GFX_MODE_GEN7,
- _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
- _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
-
- if (IS_GEN(i915, 6)) {
- /* From the Sandybridge PRM, volume 1 part 3, page 24:
- * "If this bit is set, STCunit will have LRA as replacement
- * policy. [...] This bit must be reset. LRA replacement
- * policy is not supported."
- */
- intel_uncore_write(uncore, CACHE_MODE_0,
- _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
- }
-
if (IS_GEN_RANGE(i915, 6, 7))
intel_uncore_write(uncore, INSTPM,
_MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
@@ -910,9 +896,7 @@ static void reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
@@ -1197,23 +1181,12 @@ static void ring_context_destroy(struct kref *ref)
intel_context_free(ce);
}
-static struct i915_address_space *vm_alias(struct intel_context *ce)
-{
- struct i915_address_space *vm;
-
- vm = ce->vm;
- if (i915_is_ggtt(vm))
- vm = &i915_vm_to_ggtt(vm)->alias->vm;
-
- return vm;
-}
-
static int __context_pin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
int err = 0;
- vm = vm_alias(ce);
+ vm = vm_alias(ce->vm);
if (vm)
err = gen6_ppgtt_pin(i915_vm_to_ppgtt((vm)));
@@ -1224,7 +1197,7 @@ static void __context_unpin_ppgtt(struct intel_context *ce)
{
struct i915_address_space *vm;
- vm = vm_alias(ce);
+ vm = vm_alias(ce->vm);
if (vm)
gen6_ppgtt_unpin(i915_vm_to_ppgtt(vm));
}
@@ -1384,7 +1357,9 @@ static int load_pd_dir(struct i915_request *rq,
return rq->engine->emit_flush(rq, EMIT_FLUSH);
}
-static inline int mi_set_context(struct i915_request *rq, u32 flags)
+static inline int mi_set_context(struct i915_request *rq,
+ struct intel_context *ce,
+ u32 flags)
{
struct drm_i915_private *i915 = rq->i915;
struct intel_engine_cs *engine = rq->engine;
@@ -1459,7 +1434,7 @@ static inline int mi_set_context(struct i915_request *rq, u32 flags)
*cs++ = MI_NOOP;
*cs++ = MI_SET_CONTEXT;
- *cs++ = i915_ggtt_offset(rq->context->state) | flags;
+ *cs++ = i915_ggtt_offset(ce->state) | flags;
/*
* w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
* WaMiSetContext_Hang:snb,ivb,vlv
@@ -1574,21 +1549,64 @@ static int switch_mm(struct i915_request *rq, struct i915_address_space *vm)
return rq->engine->emit_flush(rq, EMIT_INVALIDATE);
}
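+/*
+ * Scrub the state left behind by the previous context: switch to the
+ * kernel context and run the w/a batch that clears the EU/GPR residuals
+ * before the engine is handed over to the next context.
+ */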
+static int clear_residuals(struct i915_request *rq)
+{
+ struct intel_engine_cs *engine = rq->engine;
+ int ret;
+
+ ret = switch_mm(rq, vm_alias(engine->kernel_context->vm));
+ if (ret)
+ return ret;
+
+ if (engine->kernel_context->state) {
+ ret = mi_set_context(rq,
+ engine->kernel_context,
+ MI_MM_SPACE_GTT | MI_RESTORE_INHIBIT);
+ if (ret)
+ return ret;
+ }
+
+ ret = engine->emit_bb_start(rq,
+ engine->wa_ctx.vma->node.start, 0,
+ 0);
+ if (ret)
+ return ret;
+
+ ret = engine->emit_flush(rq, EMIT_FLUSH);
+ if (ret)
+ return ret;
+
+ /* Always invalidate before the next switch_mm() */
+ return engine->emit_flush(rq, EMIT_INVALIDATE);
+}
+
static int switch_context(struct i915_request *rq)
{
+ struct intel_engine_cs *engine = rq->engine;
struct intel_context *ce = rq->context;
+ void **residuals = NULL;
int ret;
GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
- ret = switch_mm(rq, vm_alias(ce));
+ if (engine->wa_ctx.vma && ce != engine->kernel_context) {
+ if (engine->wa_ctx.vma->private != ce) {
+ ret = clear_residuals(rq);
+ if (ret)
+ return ret;
+
+ residuals = &engine->wa_ctx.vma->private;
+ }
+ }
+
+ ret = switch_mm(rq, vm_alias(ce->vm));
if (ret)
return ret;
if (ce->state) {
u32 flags;
- GEM_BUG_ON(rq->engine->id != RCS0);
+ GEM_BUG_ON(engine->id != RCS0);
/* For resource streamer on HSW+ and power context elsewhere */
BUILD_BUG_ON(HSW_MI_RS_SAVE_STATE_EN != MI_SAVE_EXT_STATE_EN);
@@ -1600,7 +1618,7 @@ static int switch_context(struct i915_request *rq)
else
flags |= MI_RESTORE_INHIBIT;
- ret = mi_set_context(rq, flags);
+ ret = mi_set_context(rq, ce, flags);
if (ret)
return ret;
}
@@ -1609,6 +1627,20 @@ static int switch_context(struct i915_request *rq)
if (ret)
return ret;
+ /*
+ * Now past the point of no return, this request _will_ be emitted.
+ *
+ * Or at least this preamble will be emitted; the request may still be
+ * interrupted prior to submitting the user payload. If so, we
+ * still submit the "empty" request in order to preserve global
+ * state tracking such as this, our tracking of the current
+ * dirty context.
+ */
+ if (residuals) {
+ intel_context_put(*residuals);
+ *residuals = intel_context_get(ce);
+ }
+
return 0;
}
@@ -1662,7 +1694,8 @@ static void gen6_bsd_submit_request(struct i915_request *request)
GEN6_BSD_SLEEP_INDICATOR,
0,
1000, 0, NULL))
- DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
+ drm_err(&uncore->i915->drm,
+ "timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
i9xx_submit_request(request);
@@ -1787,11 +1820,16 @@ static void ring_release(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
+ drm_WARN_ON(&dev_priv->drm, INTEL_GEN(dev_priv) > 2 &&
+ (ENGINE_READ(engine, RING_MI_MODE) & MODE_IDLE) == 0);
intel_engine_cleanup_common(engine);
+ if (engine->wa_ctx.vma) {
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, 0);
+ }
+
intel_ring_unpin(engine->legacy.ring);
intel_ring_put(engine->legacy.ring);
@@ -1939,6 +1977,64 @@ static void setup_vecs(struct intel_engine_cs *engine)
engine->emit_fini_breadcrumb = gen7_xcs_emit_breadcrumb;
}
+static int gen7_ctx_switch_bb_setup(struct intel_engine_cs * const engine,
+ struct i915_vma * const vma)
+{
+ return gen7_setup_clear_gpr_bb(engine, vma);
+}
+
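+/*
+ * Allocate and pin the w/a batch used by clear_residuals(): first probe
+ * the required size (vma == NULL), then emit the batch into the vma.
+ */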
+static int gen7_ctx_switch_bb_init(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int size;
+ int err;
+
+ size = gen7_ctx_switch_bb_setup(engine, NULL /* probe size */);
+ if (size <= 0)
+ return size;
+
+ size = ALIGN(size, PAGE_SIZE);
+ obj = i915_gem_object_create_internal(engine->i915, size);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ err = PTR_ERR(vma);
+ goto err_obj;
+ }
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ err = PTR_ERR(vma->private);
+ goto err_obj;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err)
+ goto err_private;
+
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_unpin;
+
+ err = gen7_ctx_switch_bb_setup(engine, vma);
+ if (err)
+ goto err_unpin;
+
+ engine->wa_ctx.vma = vma;
+ return 0;
+
+err_unpin:
+ i915_vma_unpin(vma);
+err_private:
+ intel_context_put(vma->private);
+err_obj:
+ i915_gem_object_put(obj);
+ return err;
+}
+
int intel_ring_submission_setup(struct intel_engine_cs *engine)
{
struct intel_timeline *timeline;
@@ -1992,11 +2088,19 @@ int intel_ring_submission_setup(struct intel_engine_cs *engine)
GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
+ if (IS_GEN(engine->i915, 7) && engine->class == RENDER_CLASS) {
+ err = gen7_ctx_switch_bb_init(engine);
+ if (err)
+ goto err_ring_unpin;
+ }
+
/* Finally, take ownership and responsibility for cleanup! */
engine->release = ring_release;
return 0;
+err_ring_unpin:
+ intel_ring_unpin(ring);
err_ring:
intel_ring_put(ring);
err_timeline_unpin:
@@ -2007,3 +2111,7 @@ err:
intel_engine_cleanup_common(engine);
return err;
}
+
+#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
+#include "selftest_ring_submission.c"
+#endif
diff --git a/drivers/gpu/drm/i915/gt/intel_ring_types.h b/drivers/gpu/drm/i915/gt/intel_ring_types.h
index d9f17f38e0cc..1a189ea00fd8 100644
--- a/drivers/gpu/drm/i915/gt/intel_ring_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_ring_types.h
@@ -39,12 +39,13 @@ struct intel_ring {
*/
atomic_t pin_count;
- u32 head;
- u32 tail;
- u32 emit;
+ u32 head; /* updated during retire, loosely tracks RING_HEAD */
+ u32 tail; /* updated on submission, used for RING_TAIL */
+ u32 emit; /* updated during request construction */
u32 space;
u32 size;
+ u32 wrap; /* used by intel_ring_direction() to sign a ring delta */
u32 effective_size;
};
diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c
index d2a3d935d186..87f9638d2cbf 100644
--- a/drivers/gpu/drm/i915/gt/intel_rps.c
+++ b/drivers/gpu/drm/i915/gt/intel_rps.c
@@ -4,6 +4,8 @@
* Copyright © 2019 Intel Corporation
*/
+#include <drm/i915_drm.h>
+
#include "i915_drv.h"
#include "intel_gt.h"
#include "intel_gt_irq.h"
@@ -55,7 +57,7 @@ static u32 rps_pm_mask(struct intel_rps *rps, u8 val)
if (val < rps->max_freq_softlimit)
mask |= GEN6_PM_RP_UP_EI_EXPIRED | GEN6_PM_RP_UP_THRESHOLD;
- mask &= rps->pm_events;
+ mask &= READ_ONCE(rps->pm_events);
return rps_pm_sanitize_mask(rps, ~mask);
}
@@ -68,17 +70,19 @@ static void rps_reset_ei(struct intel_rps *rps)
static void rps_enable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
rps_reset_ei(rps);
if (IS_VALLEYVIEW(gt->i915))
/* WaGsvRC0ResidencyMethod:vlv */
- rps->pm_events = GEN6_PM_RP_UP_EI_EXPIRED;
+ events = GEN6_PM_RP_UP_EI_EXPIRED;
else
- rps->pm_events = (GEN6_PM_RP_UP_THRESHOLD |
- GEN6_PM_RP_DOWN_THRESHOLD |
- GEN6_PM_RP_DOWN_TIMEOUT);
+ events = (GEN6_PM_RP_UP_THRESHOLD |
+ GEN6_PM_RP_DOWN_THRESHOLD |
+ GEN6_PM_RP_DOWN_TIMEOUT);
+ WRITE_ONCE(rps->pm_events, events);
spin_lock_irq(&gt->irq_lock);
gen6_gt_pm_enable_irq(gt, rps->pm_events);
spin_unlock_irq(&gt->irq_lock);
@@ -115,8 +119,7 @@ static void rps_disable_interrupts(struct intel_rps *rps)
{
struct intel_gt *gt = rps_to_gt(rps);
- rps->pm_events = 0;
-
+ WRITE_ONCE(rps->pm_events, 0);
set(gt->uncore, GEN6_PMINTRMSK, rps_pm_sanitize_mask(rps, ~0u));
spin_lock_irq(&gt->irq_lock);
@@ -642,7 +645,7 @@ void intel_rps_mark_interactive(struct intel_rps *rps, bool interactive)
{
mutex_lock(&rps->power.mutex);
if (interactive) {
- if (!rps->power.interactive++ && rps->active)
+ if (!rps->power.interactive++ && READ_ONCE(rps->active))
rps_set_power(rps, HIGH_POWER);
} else {
GEM_BUG_ON(!rps->power.interactive);
@@ -719,11 +722,15 @@ void intel_rps_unpark(struct intel_rps *rps)
* performance, jump directly to RPe as our starting frequency.
*/
mutex_lock(&rps->lock);
- rps->active = true;
+
+ WRITE_ONCE(rps->active, true);
+
+ freq = max(rps->cur_freq, rps->efficient_freq);
freq = clamp(freq, rps->min_freq_softlimit, rps->max_freq_softlimit);
intel_rps_set(rps, freq);
+
rps->last_adj = 0;
+
mutex_unlock(&rps->lock);
if (INTEL_GEN(rps_to_i915(rps)) >= 6)
@@ -743,7 +750,7 @@ void intel_rps_park(struct intel_rps *rps)
if (INTEL_GEN(i915) >= 6)
rps_disable_interrupts(rps);
- rps->active = false;
+ WRITE_ONCE(rps->active, false);
if (rps->last_freq <= rps->idle_freq)
return;
@@ -767,10 +774,10 @@ void intel_rps_park(struct intel_rps *rps)
void intel_rps_boost(struct i915_request *rq)
{
- struct intel_rps *rps = &rq->engine->gt->rps;
+ struct intel_rps *rps = &READ_ONCE(rq->engine)->gt->rps;
unsigned long flags;
- if (i915_request_signaled(rq) || !rps->active)
+ if (i915_request_signaled(rq) || !READ_ONCE(rps->active))
return;
/* Serializes with i915_request_retire() */
@@ -1026,7 +1033,8 @@ static bool chv_rps_enable(struct intel_rps *rps)
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1123,7 +1131,8 @@ static bool vlv_rps_enable(struct intel_rps *rps)
vlv_punit_put(i915);
/* RPS code assumes GPLL is used */
- WARN_ONCE((val & GPLLENABLE) == 0, "GPLL not enabled\n");
+ drm_WARN_ONCE(&i915->drm, (val & GPLLENABLE) == 0,
+ "GPLL not enabled\n");
DRM_DEBUG_DRIVER("GPLL enabled? %s\n", yesno(val & GPLLENABLE));
DRM_DEBUG_DRIVER("GPU status: 0x%08x\n", val);
@@ -1191,11 +1200,11 @@ void intel_rps_enable(struct intel_rps *rps)
if (!rps->enabled)
return;
- WARN_ON(rps->max_freq < rps->min_freq);
- WARN_ON(rps->idle_freq > rps->max_freq);
+ drm_WARN_ON(&i915->drm, rps->max_freq < rps->min_freq);
+ drm_WARN_ON(&i915->drm, rps->idle_freq > rps->max_freq);
- WARN_ON(rps->efficient_freq < rps->min_freq);
- WARN_ON(rps->efficient_freq > rps->max_freq);
+ drm_WARN_ON(&i915->drm, rps->efficient_freq < rps->min_freq);
+ drm_WARN_ON(&i915->drm, rps->efficient_freq > rps->max_freq);
}
static void gen6_rps_disable(struct intel_rps *rps)
@@ -1390,9 +1399,9 @@ static void chv_rps_init(struct intel_rps *rps)
BIT(VLV_IOSF_SB_NC) |
BIT(VLV_IOSF_SB_CCK));
- WARN_ONCE((rps->max_freq | rps->efficient_freq | rps->rp1_freq |
- rps->min_freq) & 1,
- "Odd GPU freq values\n");
+ drm_WARN_ONCE(&i915->drm, (rps->max_freq | rps->efficient_freq |
+ rps->rp1_freq | rps->min_freq) & 1,
+ "Odd GPU freq values\n");
}
static void vlv_c0_read(struct intel_uncore *uncore, struct intel_rps_ei *ei)
@@ -1451,12 +1460,12 @@ static void rps_work(struct work_struct *work)
u32 pm_iir = 0;
spin_lock_irq(&gt->irq_lock);
- pm_iir = fetch_and_zero(&rps->pm_iir);
+ pm_iir = fetch_and_zero(&rps->pm_iir) & READ_ONCE(rps->pm_events);
client_boost = atomic_read(&rps->num_waiters);
spin_unlock_irq(&gt->irq_lock);
/* Make sure we didn't queue anything we're not going to process. */
- if ((pm_iir & rps->pm_events) == 0 && !client_boost)
+ if (!pm_iir && !client_boost)
goto out;
mutex_lock(&rps->lock);
@@ -1552,11 +1561,15 @@ void gen11_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
void gen6_rps_irq_handler(struct intel_rps *rps, u32 pm_iir)
{
struct intel_gt *gt = rps_to_gt(rps);
+ u32 events;
- if (pm_iir & rps->pm_events) {
+ events = pm_iir & READ_ONCE(rps->pm_events);
+ if (events) {
spin_lock(&gt->irq_lock);
- gen6_gt_pm_mask_irq(gt, pm_iir & rps->pm_events);
- rps->pm_iir |= pm_iir & rps->pm_events;
+
+ gen6_gt_pm_mask_irq(gt, events);
+ rps->pm_iir |= events;
+
schedule_work(&rps->work);
spin_unlock(&gt->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/gt/intel_timeline.c b/drivers/gpu/drm/i915/gt/intel_timeline.c
index 87716529cd2f..91debbc97c9a 100644
--- a/drivers/gpu/drm/i915/gt/intel_timeline.c
+++ b/drivers/gpu/drm/i915/gt/intel_timeline.c
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
static void cacheline_free(struct intel_timeline_cacheline *cl)
{
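+ /* Free immediately if idle; otherwise defer until the last user retires */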
+ if (!i915_active_acquire_if_busy(&cl->active)) {
+ __idle_cacheline_free(cl);
+ return;
+ }
+
GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
- if (i915_active_is_idle(&cl->active))
- __idle_cacheline_free(cl);
+ i915_active_release(&cl->active);
}
int intel_timeline_init(struct intel_timeline *timeline,
@@ -308,7 +312,7 @@ int intel_timeline_pin(struct intel_timeline *tl)
if (atomic_add_unless(&tl->pin_count, 1, 0))
return 0;
- err = i915_vma_pin(tl->hwsp_ggtt, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(tl->hwsp_ggtt, 0, PIN_HIGH);
if (err)
return err;
@@ -406,6 +410,8 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
void *vaddr;
int err;
+ might_lock(&tl->gt->ggtt->vm.mutex);
+
/*
* If there is an outstanding GPU reference to this cacheline,
* such as it being sampled by a HW semaphore on another timeline,
@@ -431,7 +437,7 @@ __intel_timeline_get_seqno(struct intel_timeline *tl,
goto err_rollback;
}
- err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
+ err = i915_ggtt_pin(vma, 0, PIN_HIGH);
if (err) {
__idle_hwsp_free(vma->private, cacheline);
goto err_rollback;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c
index 4e292d4bf7b9..5176ad1a3976 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c
@@ -116,17 +116,17 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
} else {
wa_ = &wal->list[mid];
- if ((wa->mask & ~wa_->mask) == 0) {
- DRM_ERROR("Discarding overwritten w/a for reg %04x (mask: %08x, value: %08x)\n",
+ if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
+ DRM_ERROR("Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
i915_mmio_reg_offset(wa_->reg),
- wa_->mask, wa_->val);
+ wa_->clr, wa_->set);
- wa_->val &= ~wa->mask;
+ wa_->set &= ~wa->clr;
}
wal->wa_count++;
- wa_->val |= wa->val;
- wa_->mask |= wa->mask;
+ wa_->set |= wa->set;
+ wa_->clr |= wa->clr;
wa_->read |= wa->read;
return;
}
@@ -147,13 +147,13 @@ static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
}
}
-static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val, u32 read_mask)
+static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
+ u32 clear, u32 set, u32 read_mask)
{
struct i915_wa wa = {
.reg = reg,
- .mask = mask,
- .val = val,
+ .clr = clear,
+ .set = set,
.read = read_mask,
};
@@ -161,38 +161,43 @@ static void wa_add(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
}
static void
-wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 mask,
- u32 val)
+wa_write_masked_or(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
{
- wa_add(wal, reg, mask, val, mask);
+ wa_add(wal, reg, clear, set, clear);
}
static void
-wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
+{
+ wa_write_masked_or(wal, reg, ~0, set);
+}
+
+static void
+wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
{
- wa_write_masked_or(wal, reg, val, _MASKED_BIT_ENABLE(val));
+ wa_write_masked_or(wal, reg, set, set);
}
static void
-wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_write_masked_or(wal, reg, ~0, val);
+ wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val);
}
static void
-wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
+wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
{
- wa_write_masked_or(wal, reg, val, val);
+ wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val);
}
#define WA_SET_BIT_MASKED(addr, mask) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_ENABLE(mask))
+ wa_masked_en(wal, (addr), (mask))
#define WA_CLR_BIT_MASKED(addr, mask) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_BIT_DISABLE(mask))
+ wa_masked_dis(wal, (addr), (mask))
#define WA_SET_FIELD_MASKED(addr, mask, value) \
- wa_write_masked_or(wal, (addr), (mask), _MASKED_FIELD((mask), (value)))
+ wa_write_masked_or(wal, (addr), 0, _MASKED_FIELD((mask), (value)))
static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
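
The masked helpers above target registers where the high 16 bits of a
write select which of the low 16 bits take effect; simplified forms of
the encoding macros (the i915_reg.h originals add build-time checks):

	#define _MASKED_FIELD(mask, value)	(((mask) << 16) | (value))
	#define _MASKED_BIT_ENABLE(a)		_MASKED_FIELD((a), (a))
	#define _MASKED_BIT_DISABLE(a)		_MASKED_FIELD((a), 0)

This is why wa_masked_en()/wa_masked_dis() can pass clr == 0: the mask
travels inside the value written, so no read-modify-write is needed.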
@@ -570,29 +575,46 @@ static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
/* allow headerless messages for preemptible GPGPU context */
WA_SET_BIT_MASKED(GEN10_SAMPLER_MODE,
GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
+
+ /* Wa_1604278689:icl,ehl */
+ wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
+ wa_write_masked_or(wal, IVB_FBC_RT_BASE_UPPER,
+ 0, /* write-only register; skip validation */
+ 0xFFFFFFFF);
+
+ /* Wa_1406306137:icl,ehl */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
}
static void tgl_ctx_workarounds_init(struct intel_engine_cs *engine,
struct i915_wa_list *wal)
{
- u32 val;
-
- /* Wa_1409142259:tgl */
+ /*
+ * Wa_1409142259:tgl
+ * Wa_1409347922:tgl
+ * Wa_1409252684:tgl
+ * Wa_1409217633:tgl
+ * Wa_1409207793:tgl
+ * Wa_1409178076:tgl
+ * Wa_1408979724:tgl
+ */
WA_SET_BIT_MASKED(GEN11_COMMON_SLICE_CHICKEN3,
GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
- /* Wa_1604555607:tgl */
- val = intel_uncore_read(engine->uncore, FF_MODE2);
- val &= ~FF_MODE2_TDS_TIMER_MASK;
- val |= FF_MODE2_TDS_TIMER_128;
/*
- * FIXME: FF_MODE2 register is not readable till TGL B0. We can
- * enable verification of WA from the later steppings, which enables
- * the read of FF_MODE2.
+ * Wa_1604555607:gen12 and Wa_1608008084:gen12
+ * FF_MODE2 register will return the wrong value when read. The default
+ * value for this register is zero for all fields and there are no bit
+ * masks. So instead of doing an RMW we should just write the TDS timer
+ * value for Wa_1604555607.
*/
- wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK, val,
- IS_TGL_REVID(engine->i915, TGL_REVID_A0, TGL_REVID_A0) ? 0 :
- FF_MODE2_TDS_TIMER_MASK);
+ wa_add(wal, FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
+ FF_MODE2_TDS_TIMER_128, 0);
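+	/* read_mask of 0: skip wa_verify(), as FF_MODE2 reads back wrong */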
+
+ /* WaDisableGPGPUMidThreadPreemption:tgl */
+ WA_SET_FIELD_MASKED(GEN8_CS_CHICKEN1,
+ GEN9_PREEMPT_GPGPU_LEVEL_MASK,
+ GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
}
static void
@@ -662,7 +684,7 @@ int intel_engine_emit_ctx_wa(struct i915_request *rq)
*cs++ = MI_LOAD_REGISTER_IMM(wal->count);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
*cs++ = i915_mmio_reg_offset(wa->reg);
- *cs++ = wa->val;
+ *cs++ = wa->set;
}
*cs++ = MI_NOOP;
@@ -827,7 +849,7 @@ wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
DRM_WARN("No common index found between subslice mask %x and L3 bank mask %x!\n",
intel_sseu_get_subslices(sseu, slice), l3_en);
subslice = fls(l3_en);
- WARN_ON(!subslice);
+ drm_WARN_ON(&i915->drm, !subslice);
}
subslice--;
@@ -898,11 +920,6 @@ icl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SLICE_UNIT_LEVEL_CLKGATE,
MSCUNIT_CLKGATE_DIS);
- /* Wa_1406680159:icl */
- wa_write_or(wal,
- SUBSLICE_UNIT_LEVEL_CLKGATE,
- GWUNIT_CLKGATE_DIS);
-
/* Wa_1406838659:icl (pre-prod) */
if (IS_ICL_REVID(i915, ICL_REVID_A0, ICL_REVID_B0))
wa_write_or(wal,
@@ -931,7 +948,7 @@ tgl_gt_workarounds_init(struct drm_i915_private *i915, struct i915_wa_list *wal)
SUBSLICE_UNIT_LEVEL_CLKGATE2,
CPSSUNIT_CLKGATE_DIS);
- /* Wa_1409180338:tgl */
+ /* Wa_1607087056:tgl also known as BUG:1409180338 */
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0))
wa_write_or(wal,
SLICE_UNIT_LEVEL_CLKGATE,
@@ -991,11 +1008,10 @@ wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
static bool
wa_verify(const struct i915_wa *wa, u32 cur, const char *name, const char *from)
{
- if ((cur ^ wa->val) & wa->read) {
- DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x, mask=%x)\n",
+ if ((cur ^ wa->set) & wa->read) {
+ DRM_ERROR("%s workaround lost on %s! (%x=%x/%x, expected %x)\n",
name, from, i915_mmio_reg_offset(wa->reg),
- cur, cur & wa->read,
- wa->val, wa->mask);
+ cur, cur & wa->read, wa->set);
return false;
}
@@ -1020,7 +1036,10 @@ wa_list_apply(struct intel_uncore *uncore, const struct i915_wa_list *wal)
intel_uncore_forcewake_get__locked(uncore, fw);
for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
- intel_uncore_rmw_fw(uncore, wa->reg, wa->mask, wa->val);
+ if (wa->clr)
+ intel_uncore_rmw_fw(uncore, wa->reg, wa->clr, wa->set);
+ else
+ intel_uncore_write_fw(uncore, wa->reg, wa->set);
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
wa_verify(wa,
intel_uncore_read_fw(uncore, wa->reg),
@@ -1244,6 +1263,7 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
case RENDER_CLASS:
/*
* WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
+ * Wa_1408556865:tgl
*
* This covers 4 registers which are next to one another :
* - PS_INVOCATION_COUNT
@@ -1254,6 +1274,12 @@ static void tgl_whitelist_build(struct intel_engine_cs *engine)
whitelist_reg_ext(w, PS_INVOCATION_COUNT,
RING_FORCE_TO_NONPRIV_ACCESS_RD |
RING_FORCE_TO_NONPRIV_RANGE_4);
+
+ /* Wa_1808121037:tgl */
+ whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
+
+ /* Wa_1806527549:tgl */
+ whitelist_reg(w, HIZ_CHICKEN);
break;
default:
break;
@@ -1320,19 +1346,21 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
struct drm_i915_private *i915 = engine->i915;
if (IS_TGL_REVID(i915, TGL_REVID_A0, TGL_REVID_A0)) {
- /* Wa_1606700617:tgl */
- wa_masked_en(wal,
- GEN9_CS_DEBUG_MODE1,
- FF_DOP_CLOCK_GATE_DISABLE);
-
- /* Wa_1607138336:tgl */
+ /*
+ * Wa_1607138336:tgl
+ * Wa_1607063988:tgl
+ */
wa_write_or(wal,
GEN9_CTX_PREEMPT_REG,
GEN12_DISABLE_POSH_BUSY_FF_DOP_CG);
- /* Wa_1607030317:tgl */
- /* Wa_1607186500:tgl */
- /* Wa_1607297627:tgl */
+ /*
+ * Wa_1607030317:tgl
+ * Wa_1607186500:tgl
+ * Wa_1607297627:tgl there are 3 entries for this WA on BSpec; 2
+ * of them say it is fixed on B0, the other one says it is
+ * permanent
+ */
wa_masked_en(wal,
GEN6_RC_SLEEP_PSMI_CONTROL,
GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
@@ -1345,6 +1373,35 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
wa_write_or(wal,
GEN7_SARCHKMD,
GEN7_DISABLE_SAMPLER_PREFETCH);
+
+ /* Wa_1407928979:tgl */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
+
+ /*
+ * Wa_1409085225:tgl
+ * Wa_14010229206:tgl
+ */
+ wa_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
+
+ /* Wa_1408615072:tgl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ VSUNIT_CLKGATE_DIS_TGL);
+ }
+
+ if (IS_TIGERLAKE(i915)) {
+ /* Wa_1606931601:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
+
+ /* Wa_1409804808:tgl */
+ wa_masked_en(wal, GEN7_ROW_CHICKEN2,
+ GEN12_PUSH_CONST_DEREF_HOLD_DIS);
+
+ /* Wa_1606700617:tgl */
+ wa_masked_en(wal,
+ GEN9_CS_DEBUG_MODE1,
+ FF_DOP_CLOCK_GATE_DISABLE);
}
if (IS_GEN(i915, 11)) {
@@ -1410,10 +1467,38 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN11_SCRATCH2,
GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
0);
+
+ /* WaEnable32PlaneMode:icl */
+ wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
+ GEN11_ENABLE_32_PLANE_MODE);
+
+ /*
+ * Wa_1408615072:icl,ehl (vsunit)
+ * Wa_1407596294:icl,ehl (hsunit)
+ */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
+ VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
+
+ /* Wa_1407352427:icl,ehl */
+ wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
+ PSDUNIT_CLKGATE_DIS);
+
+ /* Wa_1406680159:icl,ehl */
+ wa_write_or(wal,
+ SUBSLICE_UNIT_LEVEL_CLKGATE,
+ GWUNIT_CLKGATE_DIS);
+
+ /*
+ * Wa_1408767742:icl[a2..forever],ehl[all]
+ * Wa_1605460711:icl[a0..c0]
+ */
+ wa_write_or(wal,
+ GEN7_FF_THREAD_MODE,
+ GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
}
- if (IS_GEN_RANGE(i915, 9, 11)) {
- /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl */
+ if (IS_GEN_RANGE(i915, 9, 12)) {
+ /* FtrPerCtxtPreemptionGranularityControl:skl,bxt,kbl,cfl,cnl,icl,tgl */
wa_masked_en(wal,
GEN7_FF_SLICE_CS_CHICKEN1,
GEN9_FFSC_PERCTX_PREEMPT_CTRL);
@@ -1457,6 +1542,52 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
GEN8_L3SQCREG4,
GEN8_LQSC_FLUSH_COHERENT_LINES);
}
+
+ if (IS_GEN(i915, 7))
+ /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
+ wa_masked_en(wal,
+ GFX_MODE_GEN7,
+ GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
+
+ if (IS_GEN_RANGE(i915, 6, 7))
+ /*
+ * We need to disable the AsyncFlip performance optimisations in
+ * order to use MI_WAIT_FOR_EVENT within the CS. It should
+ * already be programmed to '1' on all products.
+ *
+ * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+ */
+ wa_masked_en(wal,
+ MI_MODE,
+ ASYNC_FLIP_PERF_DISABLE);
+
+ if (IS_GEN(i915, 6)) {
+ /*
+ * Required for the hardware to program scanline values for
+ * waiting
+ * WaEnableFlushTlbInvalidationMode:snb
+ */
+ wa_masked_en(wal,
+ GFX_MODE,
+ GFX_TLB_INVALIDATE_EXPLICIT);
+
+ /*
+ * From the Sandybridge PRM, volume 1 part 3, page 24:
+ * "If this bit is set, STCunit will have LRA as replacement
+ * policy. [...] This bit must be reset. LRA replacement
+ * policy is not supported."
+ */
+ wa_masked_dis(wal,
+ CACHE_MODE_0,
+ CM0_STC_EVICT_DISABLE_LRA_SNB);
+ }
+
+ if (IS_GEN_RANGE(i915, 4, 6))
+ /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+ wa_add(wal, MI_MODE,
+ 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
+ /* XXX bit doesn't stick on Broadwater */
+ IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH);
}
static void
@@ -1475,7 +1606,7 @@ xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
static void
engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
{
- if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 8))
+ if (I915_SELFTEST_ONLY(INTEL_GEN(engine->i915) < 4))
return;
if (engine->class == RENDER_CLASS)
@@ -1488,7 +1619,7 @@ void intel_engine_init_workarounds(struct intel_engine_cs *engine)
{
struct i915_wa_list *wal = &engine->wa_list;
- if (INTEL_GEN(engine->i915) < 8)
+ if (INTEL_GEN(engine->i915) < 4)
return;
wa_init_start(wal, "engine", engine->name);
@@ -1534,15 +1665,34 @@ err_obj:
return ERR_PTR(err);
}
+static const struct {
+ u32 start;
+ u32 end;
+} mcr_ranges_gen8[] = {
+ { .start = 0x5500, .end = 0x55ff },
+ { .start = 0x7000, .end = 0x7fff },
+ { .start = 0x9400, .end = 0x97ff },
+ { .start = 0xb000, .end = 0xb3ff },
+ { .start = 0xe000, .end = 0xe7ff },
+ {},
+};
+
static bool mcr_range(struct drm_i915_private *i915, u32 offset)
{
+ int i;
+
+ if (INTEL_GEN(i915) < 8)
+ return false;
+
/*
- * Registers in this range are affected by the MCR selector
+ * Registers in these ranges are affected by the MCR selector
* which only controls CPU initiated MMIO. Routing does not
* work for CS access so we cannot verify them on this path.
*/
- if (INTEL_GEN(i915) >= 8 && (offset >= 0xb000 && offset <= 0xb4ff))
- return true;
+ for (i = 0; mcr_ranges_gen8[i].start; i++)
+ if (offset >= mcr_ranges_gen8[i].start &&
+ offset <= mcr_ranges_gen8[i].end)
+ return true;
return false;
}
@@ -1612,6 +1762,16 @@ static int engine_wa_list_verify(struct intel_context *ce,
goto err_vma;
}
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(vma);
+ if (err) {
+ i915_request_add(rq);
+ goto err_vma;
+ }
+
err = wa_list_srm(rq, wal, vma);
if (err)
goto err_vma;
diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
index e27ab1b710b3..d166a7145720 100644
--- a/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
+++ b/drivers/gpu/drm/i915/gt/intel_workarounds_types.h
@@ -13,8 +13,8 @@
struct i915_wa {
i915_reg_t reg;
- u32 mask;
- u32 val;
+ u32 clr;
+ u32 set;
u32 read;
};
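
A minimal sketch (not part of the patch) of how a clr/set pair is
consumed, matching the rmw path in wa_list_apply() above:

	static inline u32 wa_apply(u32 old, const struct i915_wa *wa)
	{
		return (old & ~wa->clr) | wa->set;
	}

clr doubles as a record of which bits the workaround owns, which is
what the overwrite check in _wa_add() compares against.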
diff --git a/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
new file mode 100644
index 000000000000..610ca7687735
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/ivb_clear_kernel.c
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ *
+ * Generated by: IGT Gpu Tools on Fri 21 Feb 2020 05:29:32 AM UTC
+ */
+
+static const u32 ivb_clear_kernel[] = {
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000040, 0x20280c21, 0x00000028, 0x00000001,
+ 0x01000010, 0x20000c20, 0x0000002c, 0x00000000,
+ 0x00010220, 0x34001c00, 0x00001400, 0x0000002c,
+ 0x00600001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00000008, 0x20601c85, 0x00000e00, 0x0000000c,
+ 0x00000005, 0x20601ca5, 0x00000060, 0x00000001,
+ 0x00000008, 0x20641c85, 0x00000e00, 0x0000000d,
+ 0x00000005, 0x20641ca5, 0x00000064, 0x00000003,
+ 0x00000041, 0x207424a5, 0x00000064, 0x00000034,
+ 0x00000040, 0x206014a5, 0x00000060, 0x00000074,
+ 0x00000008, 0x20681c85, 0x00000e00, 0x00000008,
+ 0x00000005, 0x20681ca5, 0x00000068, 0x0000000f,
+ 0x00000041, 0x20701ca5, 0x00000060, 0x00000010,
+ 0x00000040, 0x206814a5, 0x00000068, 0x00000070,
+ 0x00600001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00000005, 0x206c1c85, 0x00000e00, 0x00000007,
+ 0x00000041, 0x206c1ca5, 0x0000006c, 0x00000004,
+ 0x00600001, 0x20800021, 0x008d0000, 0x00000000,
+ 0x00000001, 0x20800021, 0x0000006c, 0x00000000,
+ 0x00000001, 0x20840021, 0x00000068, 0x00000000,
+ 0x00000001, 0x20880061, 0x00000000, 0x00000003,
+ 0x00000005, 0x208c0d21, 0x00000086, 0xffffffff,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x02190001,
+ 0x00000040, 0x20a01ca5, 0x000000a0, 0x00000001,
+ 0x05600032, 0x20a00fa1, 0x008d0080, 0x040a8001,
+ 0x02000040, 0x20281c21, 0x00000028, 0xffffffff,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffffc,
+ 0x00000001, 0x26020128, 0x00000024, 0x00000000,
+ 0x00000001, 0x220010e4, 0x00000000, 0x00000000,
+ 0x00000001, 0x220831ec, 0x00000000, 0x007f007f,
+ 0x00600001, 0x20400021, 0x008d0000, 0x00000000,
+ 0x00600001, 0x2fe00021, 0x008d0000, 0x00000000,
+ 0x00200001, 0x20400121, 0x00450020, 0x00000000,
+ 0x00000001, 0x20480061, 0x00000000, 0x000f000f,
+ 0x00000005, 0x204c0d21, 0x00000046, 0xffffffef,
+ 0x00800001, 0x20600061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20800061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20a00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20c00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x20e00061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21000061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21200061, 0x00000000, 0x00000000,
+ 0x00800001, 0x21400061, 0x00000000, 0x00000000,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x00000040, 0x20402d21, 0x00000020, 0x00100010,
+ 0x05600032, 0x20000fa0, 0x008d0040, 0x120a8000,
+ 0x02000040, 0x22083d8c, 0x00000208, 0xffffffff,
+ 0x00800001, 0xa0000109, 0x00000602, 0x00000000,
+ 0x00000040, 0x22001c84, 0x00000200, 0x00000020,
+ 0x00010220, 0x34001c00, 0x00001400, 0xfffffff8,
+ 0x07600032, 0x20000fa0, 0x008d0fe0, 0x82000010,
+};
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index a560b7eee2cd..4a53ded7c2dd 100644
--- a/drivers/gpu/drm/i915/gt/mock_engine.c
+++ b/drivers/gpu/drm/i915/gt/mock_engine.c
@@ -59,11 +59,29 @@ static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
ring->vaddr = (void *)(ring + 1);
atomic_set(&ring->pin_count, 1);
+ ring->vma = i915_vma_alloc();
+ if (!ring->vma) {
+ kfree(ring);
+ return NULL;
+ }
+ i915_active_init(&ring->vma->active, NULL, NULL);
+ __set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
+ __set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
+ ring->vma->node.size = sz;
+
intel_ring_update_space(ring);
return ring;
}
+static void mock_ring_free(struct intel_ring *ring)
+{
+ i915_active_fini(&ring->vma->active);
+ i915_vma_free(ring->vma);
+
+ kfree(ring);
+}
+
static struct i915_request *first_request(struct mock_engine *engine)
{
return list_first_entry_or_null(&engine->hw_queue,
@@ -121,7 +139,7 @@ static void mock_context_destroy(struct kref *ref)
GEM_BUG_ON(intel_context_is_pinned(ce));
if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
- kfree(ce->ring);
+ mock_ring_free(ce->ring);
mock_timeline_unpin(ce->timeline);
}
@@ -226,9 +244,7 @@ static void mock_reset_cancel(struct intel_engine_cs *engine)
/* Mark all submitted requests as skipped. */
list_for_each_entry(request, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(request))
- dma_fence_set_error(&request->fence, -EIO);
-
+ i915_request_set_error_once(request, -EIO);
i915_request_mark_complete(request);
}
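
The open-coded "set -EIO unless already signaled" above is folded into
i915_request_set_error_once(); a hedged sketch of the first-error-wins
semantics (the real helper loops with try_cmpxchg()):

	static void set_error_once(struct dma_fence *fence, int error)
	{
		if (!READ_ONCE(fence->error))
			dma_fence_set_error(fence, error);
	}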
diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
index 43d4d589749f..697114dd1f47 100644
--- a/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
+++ b/drivers/gpu/drm/i915/gt/selftest_engine_heartbeat.c
@@ -142,6 +142,24 @@ out:
return err;
}
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
static int live_idle_flush(void *arg)
{
struct intel_gt *gt = arg;
@@ -152,9 +170,11 @@ static int live_idle_flush(void *arg)
/* Check that we can flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_flush_barriers);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err)
break;
}
@@ -172,9 +192,11 @@ static int live_idle_pulse(void *arg)
/* Check that heartbeat pulses flush the idle barriers */
for_each_engine(engine, gt, id) {
- intel_engine_pm_get(engine);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
err = __live_idle_pulse(engine, intel_engine_pulse);
- intel_engine_pm_put(engine);
+ engine_heartbeat_enable(engine, heartbeat);
if (err && err != -ENODEV)
break;
diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
index 3e5e6c86e843..2b2efff6e19d 100644
--- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
+++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c
@@ -268,7 +268,7 @@ hang_create_request(struct hang *h, struct intel_engine_cs *engine)
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
@@ -1640,7 +1640,7 @@ static int igt_reset_engines_atomic(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
igt_global_reset_lock(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c
index fd3770e48ac7..a912159693fd 100644
--- a/drivers/gpu/drm/i915/gt/selftest_llc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_llc.c
@@ -18,10 +18,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc)
wakeref = intel_runtime_pm_get(llc_to_gt(llc)->uncore->rpm);
- if (!get_ia_constants(llc, &consts)) {
- err = -ENODEV;
+ if (!get_ia_constants(llc, &consts))
goto out_rpm;
- }
for (gpu_freq = consts.min_gpu_freq;
gpu_freq <= consts.max_gpu_freq;
@@ -71,10 +69,5 @@ out_rpm:
int st_llc_verify(struct intel_llc *llc)
{
- int err = 0;
-
- if (HAS_LLC(llc_to_gt(llc)->i915))
- err = gen6_verify_ring_freq(llc);
-
- return err;
+ return gen6_verify_ring_freq(llc);
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_lrc.c b/drivers/gpu/drm/i915/gt/selftest_lrc.c
index 15cda024e3e4..6f06ba750a0a 100644
--- a/drivers/gpu/drm/i915/gt/selftest_lrc.c
+++ b/drivers/gpu/drm/i915/gt/selftest_lrc.c
@@ -68,6 +68,71 @@ static void engine_heartbeat_enable(struct intel_engine_cs *engine,
engine->props.heartbeat_interval_ms = saved;
}
+static int wait_for_submit(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_is_active(rq))
+ return 0;
+
+ if (i915_request_started(rq)) /* that was quick! */
+ return 0;
+ } while (time_before(jiffies, timeout));
+
+ return -ETIME;
+}
+
+static int wait_for_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq,
+ unsigned long timeout)
+{
+ timeout += jiffies;
+
+ do {
+ cond_resched();
+ intel_engine_flush_submission(engine);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ continue;
+
+ if (i915_request_completed(rq))
+ break;
+
+ if (READ_ONCE(rq->fence.error))
+ break;
+ } while (time_before(jiffies, timeout));
+
+ flush_scheduled_work();
+
+ if (rq->fence.error != -EIO) {
+ pr_err("%s: hanging request %llx:%lld not reset\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -EINVAL;
+ }
+
+ /* Give the request a jiffie to complete after flushing the worker */
+ if (i915_request_wait(rq, 0,
+ max(0l, (long)(timeout - jiffies)) + 1) < 0) {
+ pr_err("%s: hanging request %llx:%lld did not complete\n",
+ engine->name,
+ rq->fence.context,
+ rq->fence.seqno);
+ return -ETIME;
+ }
+
+ return 0;
+}
+
static int live_sanitycheck(void *arg)
{
struct intel_gt *gt = arg;
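
The two pollers above are paired throughout the reworked tests below:
wait_for_submit() confirms a request has reached the ELSP, after which
the test provokes an engine reset and wait_for_reset() watches for the
-EIO stamped on the guilty fence. A hedged usage sketch, with the
spinner helpers as used elsewhere in this file:

	rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
	i915_request_get(rq);
	i915_request_add(rq);

	err = wait_for_submit(engine, rq, HZ / 2);
	if (!err) {
		/* ... test triggers preemption/reset of rq here ... */
		err = wait_for_reset(engine, rq, HZ / 2);
	}
	i915_request_put(rq);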
@@ -186,7 +251,7 @@ static int live_unlite_restore(struct intel_gt *gt, int prio)
}
GEM_BUG_ON(!ce[1]->ring->size);
intel_ring_reset(ce[1]->ring, ce[1]->ring->size / 2);
- __execlists_update_reg_state(ce[1], engine);
+ __execlists_update_reg_state(ce[1], engine, ce[1]->ring->head);
rq[0] = igt_spinner_create_request(&spin, ce[0], MI_ARB_CHECK);
if (IS_ERR(rq[0])) {
@@ -285,6 +350,331 @@ static int live_unlite_preempt(void *arg)
return live_unlite_restore(arg, I915_USER_PRIORITY(I915_PRIORITY_MAX));
}
+static int live_pin_rewind(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * We have to be careful not to trust intel_ring too much, for example
+ * ring->head is updated upon retire which is out of sync with pinning
+ * the context. Thus we cannot use ring->head to set CTX_RING_HEAD,
+ * or else we risk writing an older, stale value.
+ *
+ * To simulate this, let's apply a bit of deliberate sabotage.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+ struct intel_ring *ring;
+ struct igt_live_test t;
+
+ if (igt_live_test_begin(&t, gt->i915, __func__, engine->name)) {
+ err = -EIO;
+ break;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ err = intel_context_pin(ce);
+ if (err) {
+ intel_context_put(ce);
+ break;
+ }
+
+ /* Keep the context awake while we play games */
+ err = i915_active_acquire(&ce->active);
+ if (err) {
+ intel_context_unpin(ce);
+ intel_context_put(ce);
+ break;
+ }
+ ring = ce->ring;
+
+ /* Poison the ring, and offset the next request from HEAD */
+ memset32(ring->vaddr, STACK_MAGIC, ring->size / sizeof(u32));
+ ring->emit = ring->size / 2;
+ ring->tail = ring->emit;
+ GEM_BUG_ON(ring->head);
+
+ intel_context_unpin(ce);
+
+ /* Submit a simple nop request */
+ GEM_BUG_ON(intel_context_is_pinned(ce));
+ rq = intel_context_create_request(ce);
+ i915_active_release(&ce->active); /* e.g. async retire */
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ break;
+ }
+ GEM_BUG_ON(!rq->head);
+ i915_request_add(rq);
+
+ /* Expect not to hang! */
+ if (igt_live_test_end(&t)) {
+ err = -EIO;
+ break;
+ }
+ }
+
+ return err;
+}
+
+static int live_hold_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ struct igt_spinner spin;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ if (igt_spinner_init(&spin, gt))
+ return -ENOMEM;
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ break;
+ }
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ rq = igt_spinner_create_request(&spin, ce, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out;
+ }
+
+ /* We have our request executing, now remove it and reset */
+
+ if (test_and_set_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ i915_request_get(rq);
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + id,
+ &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ i915_request_put(rq);
+ err = -EIO;
+ goto out;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+ i915_request_put(rq);
+
+out:
+ engine_heartbeat_enable(engine, heartbeat);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ igt_spinner_fini(&spin);
+ return err;
+}
+
+static const char *error_repr(int err)
+{
+ return err ? "bad" : "good";
+}
+
+static int live_error_interrupt(void *arg)
+{
+ static const struct error_phase {
+ enum { GOOD = 0, BAD = -EIO } error[2];
+ } phases[] = {
+ { { BAD, GOOD } },
+ { { BAD, BAD } },
+ { { BAD, GOOD } },
+ { { GOOD, GOOD } }, /* sentinel */
+ };
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * We hook up the CS_MASTER_ERROR_INTERRUPT to have forewarning
+ * of invalid commands in user batches that will cause a GPU hang.
+ * This is a faster mechanism than using hangcheck/heartbeats, but
+ * only detects problems the HW knows about -- it will not warn when
+ * we kill the HW!
+ *
+ * To verify our detection and reset, we throw some invalid commands
+ * at the HW and wait for the interrupt.
+ */
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ const struct error_phase *p;
+ unsigned long heartbeat;
+ int err = 0;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ for (p = phases; p->error[0] != GOOD; p++) {
+ struct i915_request *client[ARRAY_SIZE(phases->error)];
+ u32 *cs;
+ int i;
+
+ memset(client, 0, sizeof(client));
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ struct intel_context *ce;
+ struct i915_request *rq;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
+ rq = intel_context_create_request(ce);
+ intel_context_put(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out;
+ }
+
+ if (rq->engine->emit_init_breadcrumb) {
+ err = rq->engine->emit_init_breadcrumb(rq);
+ if (err) {
+ i915_request_add(rq);
+ goto out;
+ }
+ }
+
+ cs = intel_ring_begin(rq, 2);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ err = PTR_ERR(cs);
+ goto out;
+ }
+
+ if (p->error[i]) {
+ *cs++ = 0xdeadbeef;
+ *cs++ = 0xdeadbeef;
+ } else {
+ *cs++ = MI_NOOP;
+ *cs++ = MI_NOOP;
+ }
+
+ client[i] = i915_request_get(rq);
+ i915_request_add(rq);
+ }
+
+ err = wait_for_submit(engine, client[0], HZ / 2);
+ if (err) {
+ pr_err("%s: first request did not start within time!\n",
+ engine->name);
+ err = -ETIME;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(client); i++) {
+ if (i915_request_wait(client[i], 0, HZ / 5) < 0)
+ pr_debug("%s: %s request incomplete!\n",
+ engine->name,
+ error_repr(p->error[i]));
+
+ if (!i915_request_started(client[i])) {
+ pr_debug("%s: %s request not started!\n",
+ engine->name,
+ error_repr(p->error[i]));
+ err = -ETIME;
+ goto out;
+ }
+
+ /* Kick the tasklet to process the error */
+ intel_engine_flush_submission(engine);
+ if (client[i]->fence.error != p->error[i]) {
+ pr_err("%s: %s request completed with wrong error code: %d\n",
+ engine->name,
+ error_repr(p->error[i]),
+ client[i]->fence.error);
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(client); i++)
+ if (client[i])
+ i915_request_put(client[i]);
+ if (err) {
+ pr_err("%s: failed at phase[%zd] { %d, %d }\n",
+ engine->name, p - phases,
+ p->error[0], p->error[1]);
+ break;
+ }
+ }
+
+ engine_heartbeat_enable(engine, heartbeat);
+ if (err) {
+ intel_gt_set_wedged(gt);
+ return err;
+ }
+ }
+
+ return 0;
+}
+
static int
emit_semaphore_chain(struct i915_request *rq, struct i915_vma *vma, int idx)
{
@@ -479,6 +869,10 @@ static int live_timeslice_preempt(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
for_each_prime_number_from(count, 1, 16) {
struct intel_engine_cs *engine;
enum intel_engine_id id;
@@ -513,33 +907,227 @@ err_obj:
return err;
}
-static struct i915_request *nop_request(struct intel_engine_cs *engine)
+static struct i915_request *
+create_rewinder(struct intel_context *ce,
+ struct i915_request *wait,
+ void *slot, int idx)
{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
struct i915_request *rq;
+ u32 *cs;
+ int err;
- rq = intel_engine_create_kernel_request(engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return rq;
+ if (wait) {
+ err = i915_request_await_dma_fence(rq, &wait->fence);
+ if (err)
+ goto err;
+ }
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
i915_request_get(rq);
i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
return rq;
}
-static int wait_for_submit(struct intel_engine_cs *engine,
- struct i915_request *rq,
- unsigned long timeout)
+static int live_timeslice_rewind(void *arg)
{
- timeout += jiffies;
- do {
- cond_resched();
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * The usual presumption on timeslice expiration is that we replace
+ * the active context with another. However, given a chain of
+ * dependencies we may end up replacing the context with itself, but
+ * resubmitting only a few of its requests, forcing us to rewind the
+ * RING_TAIL of the original request.
+ */
+ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ enum { A1, A2, B1 };
+ enum { X = 1, Y, Z };
+ struct i915_request *rq[3] = {};
+ struct intel_context *ce;
+ unsigned long heartbeat;
+ unsigned long timeslice;
+ int i, err = 0;
+ u32 *slot;
+
+ if (!intel_engine_has_timeslices(engine))
+ continue;
+
+ /*
+ * A:rq1 -- semaphore wait, timestamp X
+ * A:rq2 -- write timestamp Y
+ *
+ * B:rq1 [await A:rq1] -- write timestamp Z
+ *
+ * Force timeslice, release semaphore.
+ *
+ * Expect execution/evaluation order XZY
+ */
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ timeslice = xchg(&engine->props.timeslice_duration_ms, 1);
+
+ slot = memset32(engine->status_page.addr + 1000, 0, 4);
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[0] = create_rewinder(ce, NULL, slot, 1);
+ if (IS_ERR(rq[0])) {
+ intel_context_put(ce);
+ goto err;
+ }
+
+ rq[1] = create_rewinder(ce, NULL, slot, 2);
+ intel_context_put(ce);
+ if (IS_ERR(rq[1]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[1], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit first context\n",
+ engine->name);
+ goto err;
+ }
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto err;
+ }
+
+ rq[2] = create_rewinder(ce, rq[0], slot, 3);
+ intel_context_put(ce);
+ if (IS_ERR(rq[2]))
+ goto err;
+
+ err = wait_for_submit(engine, rq[2], HZ / 2);
+ if (err) {
+ pr_err("%s: failed to submit second context\n",
+ engine->name);
+ goto err;
+ }
+ GEM_BUG_ON(!timer_pending(&engine->execlists.timer));
+
+ /* ELSP[] = { { A:rq1, A:rq2 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[A2]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+
+ /* Wait for the timeslice to kick in */
+ del_timer(&engine->execlists.timer);
+ tasklet_hi_schedule(&engine->execlists.tasklet);
intel_engine_flush_submission(engine);
- if (i915_request_is_active(rq))
- return 0;
- } while (time_before(jiffies, timeout));
- return -ETIME;
+ /* -> ELSP[] = { { A:rq1 }, { B:rq1 } } */
+ GEM_BUG_ON(!i915_request_is_active(rq[A1]));
+ GEM_BUG_ON(!i915_request_is_active(rq[B1]));
+ GEM_BUG_ON(i915_request_is_active(rq[A2]));
+
+ /* Release the hounds! */
+ slot[0] = 1;
+ wmb(); /* "pairs" with GPU; paranoid kick of internal CPU$ */
+
+ for (i = 1; i <= 3; i++) {
+ unsigned long timeout = jiffies + HZ / 2;
+
+ while (!READ_ONCE(slot[i]) &&
+ time_before(jiffies, timeout))
+ ;
+
+ if (!time_before(jiffies, timeout)) {
+ pr_err("%s: rq[%d] timed out\n",
+ engine->name, i - 1);
+ err = -ETIME;
+ goto err;
+ }
+
+ pr_debug("%s: slot[%d]:%x\n", engine->name, i, slot[i]);
+ }
+
+ /* XZY: XZ < XY */
+ if (slot[Z] - slot[X] >= slot[Y] - slot[X]) {
+ pr_err("%s: timeslicing did not run context B [%u] before A [%u]!\n",
+ engine->name,
+ slot[Z] - slot[X],
+ slot[Y] - slot[X]);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(&slot[0], -1, 4);
+ wmb();
+
+ engine->props.timeslice_duration_ms = timeslice;
+ engine_heartbeat_enable(engine, heartbeat);
+ for (i = 0; i < 3; i++)
+ i915_request_put(rq[i]);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_request *nop_request(struct intel_engine_cs *engine)
+{
+ struct i915_request *rq;
+
+ rq = intel_engine_create_kernel_request(engine);
+ if (IS_ERR(rq))
+ return rq;
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ return rq;
}
static long timeslice_threshold(const struct intel_engine_cs *engine)
@@ -587,6 +1175,10 @@ static int live_timeslice_queue(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_pin;
+
for_each_engine(engine, gt, id) {
struct i915_sched_attr attr = {
.priority = I915_USER_PRIORITY(I915_PRIORITY_MAX),
@@ -673,6 +1265,7 @@ err_heartbeat:
break;
}
+err_pin:
i915_vma_unpin(vma);
err_map:
i915_gem_object_unpin_map(obj);
@@ -731,6 +1324,10 @@ static int live_busywait_preempt(void *arg)
if (err)
goto err_map;
+ err = i915_vma_sync(vma);
+ if (err)
+ goto err_vma;
+
for_each_engine(engine, gt, id) {
struct i915_request *lo, *hi;
struct igt_live_test t;
@@ -1251,14 +1848,9 @@ static int __cancel_active0(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1316,10 +1908,9 @@ static int __cancel_active1(struct live_preempt_cancel *arg)
goto out;
igt_spinner_end(&arg->a.spin);
- if (i915_request_wait(rq[1], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[1], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != 0) {
pr_err("Normal inflight0 request did not complete\n");
@@ -1399,10 +1990,9 @@ static int __cancel_queued(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
- err = -EIO;
+ err = wait_for_reset(arg->engine, rq[2], HZ / 2);
+ if (err)
goto out;
- }
if (rq[0]->fence.error != -EIO) {
pr_err("Cancelled inflight0 request did not report -EIO\n");
@@ -1460,14 +2050,9 @@ static int __cancel_hostile(struct live_preempt_cancel *arg)
if (err)
goto out;
- if (i915_request_wait(rq, 0, HZ / 5) < 0) {
- err = -EIO;
- goto out;
- }
-
- if (rq->fence.error != -EIO) {
- pr_err("Cancelled inflight0 request did not report -EIO\n");
- err = -EINVAL;
+ err = wait_for_reset(arg->engine, rq, HZ / 2);
+ if (err) {
+ pr_err("Cancelled inflight0 request did not reset\n");
goto out;
}
@@ -1555,7 +2140,7 @@ static int live_suppress_self_preempt(void *arg)
if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0; /* presume black box */
if (intel_vgpu_active(gt->i915))
@@ -2178,117 +2763,6 @@ static int live_preempt_gang(void *arg)
return 0;
}
-static int live_preempt_hang(void *arg)
-{
- struct intel_gt *gt = arg;
- struct i915_gem_context *ctx_hi, *ctx_lo;
- struct igt_spinner spin_hi, spin_lo;
- struct intel_engine_cs *engine;
- enum intel_engine_id id;
- int err = -ENOMEM;
-
- if (!HAS_LOGICAL_RING_PREEMPTION(gt->i915))
- return 0;
-
- if (!intel_has_reset_engine(gt))
- return 0;
-
- if (igt_spinner_init(&spin_hi, gt))
- return -ENOMEM;
-
- if (igt_spinner_init(&spin_lo, gt))
- goto err_spin_hi;
-
- ctx_hi = kernel_context(gt->i915);
- if (!ctx_hi)
- goto err_spin_lo;
- ctx_hi->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MAX_USER_PRIORITY);
-
- ctx_lo = kernel_context(gt->i915);
- if (!ctx_lo)
- goto err_ctx_hi;
- ctx_lo->sched.priority =
- I915_USER_PRIORITY(I915_CONTEXT_MIN_USER_PRIORITY);
-
- for_each_engine(engine, gt, id) {
- struct i915_request *rq;
-
- if (!intel_engine_has_preemption(engine))
- continue;
-
- rq = spinner_create_request(&spin_lo, ctx_lo, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- i915_request_add(rq);
- if (!igt_wait_for_spinner(&spin_lo, rq)) {
- GEM_TRACE("lo spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- rq = spinner_create_request(&spin_hi, ctx_hi, engine,
- MI_ARB_CHECK);
- if (IS_ERR(rq)) {
- igt_spinner_end(&spin_lo);
- err = PTR_ERR(rq);
- goto err_ctx_lo;
- }
-
- init_completion(&engine->execlists.preempt_hang.completion);
- engine->execlists.preempt_hang.inject_hang = true;
-
- i915_request_add(rq);
-
- if (!wait_for_completion_timeout(&engine->execlists.preempt_hang.completion,
- HZ / 10)) {
- pr_err("Preemption did not occur within timeout!");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- set_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
- intel_engine_reset(engine, NULL);
- clear_bit(I915_RESET_ENGINE + id, &gt->reset.flags);
-
- engine->execlists.preempt_hang.inject_hang = false;
-
- if (!igt_wait_for_spinner(&spin_hi, rq)) {
- GEM_TRACE("hi spinner failed to start\n");
- GEM_TRACE_DUMP();
- intel_gt_set_wedged(gt);
- err = -EIO;
- goto err_ctx_lo;
- }
-
- igt_spinner_end(&spin_hi);
- igt_spinner_end(&spin_lo);
- if (igt_flush_test(gt->i915)) {
- err = -EIO;
- goto err_ctx_lo;
- }
- }
-
- err = 0;
-err_ctx_lo:
- kernel_context_close(ctx_lo);
-err_ctx_hi:
- kernel_context_close(ctx_hi);
-err_spin_lo:
- igt_spinner_fini(&spin_lo);
-err_spin_hi:
- igt_spinner_fini(&spin_hi);
- return err;
-}
-
static int live_preempt_timeout(void *arg)
{
struct intel_gt *gt = arg;
@@ -2781,7 +3255,7 @@ static int live_virtual_engine(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for_each_engine(engine, gt, id) {
@@ -2914,7 +3388,7 @@ static int live_virtual_mask(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -2954,6 +3428,10 @@ static int preserved_virtual_engine(struct intel_gt *gt,
if (IS_ERR(scratch))
return PTR_ERR(scratch);
+ err = i915_vma_sync(scratch);
+ if (err)
+ goto out_scratch;
+
ve = intel_execlists_create_virtual(siblings, nsibling);
if (IS_ERR(ve)) {
err = PTR_ERR(ve);
@@ -3052,7 +3530,7 @@ static int live_virtual_preserved(void *arg)
* are preserved.
*/
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
/* As we use CS_GPR we cannot run before they existed on all engines. */
@@ -3142,15 +3620,21 @@ static int bond_virtual_engine(struct intel_gt *gt,
rq[0] = ERR_PTR(-ENOMEM);
for_each_engine(master, gt, id) {
struct i915_sw_fence fence = {};
+ struct intel_context *ce;
if (master->class == class)
continue;
+ ce = intel_context_create(master);
+ if (IS_ERR(ce)) {
+ err = PTR_ERR(ce);
+ goto out;
+ }
+
memset_p((void *)rq, ERR_PTR(-EINVAL), ARRAY_SIZE(rq));
- rq[0] = igt_spinner_create_request(&spin,
- master->kernel_context,
- MI_NOOP);
+ rq[0] = igt_spinner_create_request(&spin, ce, MI_NOOP);
+ intel_context_put(ce);
if (IS_ERR(rq[0])) {
err = PTR_ERR(rq[0]);
goto out;
@@ -3276,7 +3760,7 @@ static int live_virtual_bond(void *arg)
unsigned int class, inst;
int err;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
@@ -3309,13 +3793,172 @@ static int live_virtual_bond(void *arg)
return 0;
}
+static int reset_virtual_engine(struct intel_gt *gt,
+ struct intel_engine_cs **siblings,
+ unsigned int nsibling)
+{
+ struct intel_engine_cs *engine;
+ struct intel_context *ve;
+ unsigned long *heartbeat;
+ struct igt_spinner spin;
+ struct i915_request *rq;
+ unsigned int n;
+ int err = 0;
+
+ /*
+ * In order to support offline error capture for fast preempt reset,
+ * we need to decouple the guilty request and ensure that it and its
+ * descendants are not executed while the capture is in progress.
+ */
+
+ heartbeat = kmalloc_array(nsibling, sizeof(*heartbeat), GFP_KERNEL);
+ if (!heartbeat)
+ return -ENOMEM;
+
+ if (igt_spinner_init(&spin, gt)) {
+ err = -ENOMEM;
+ goto out_free;
+ }
+
+ ve = intel_execlists_create_virtual(siblings, nsibling);
+ if (IS_ERR(ve)) {
+ err = PTR_ERR(ve);
+ goto out_spin;
+ }
+
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_disable(siblings[n], &heartbeat[n]);
+
+ rq = igt_spinner_create_request(&spin, ve, MI_ARB_CHECK);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto out_heartbeat;
+ }
+ i915_request_add(rq);
+
+ if (!igt_wait_for_spinner(&spin, rq)) {
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ goto out_heartbeat;
+ }
+
+ engine = rq->engine;
+ GEM_BUG_ON(engine == ve->engine);
+
+ /* Take ownership of the reset and tasklet */
+ if (test_and_set_bit(I915_RESET_ENGINE + engine->id,
+ &gt->reset.flags)) {
+ intel_gt_set_wedged(gt);
+ err = -EBUSY;
+ goto out_heartbeat;
+ }
+ tasklet_disable(&engine->execlists.tasklet);
+
+ engine->execlists.tasklet.func(engine->execlists.tasklet.data);
+ GEM_BUG_ON(execlists_active(&engine->execlists) != rq);
+
+ /* Fake a preemption event; one that fails, of course */
+ spin_lock_irq(&engine->active.lock);
+ __unwind_incomplete_requests(engine);
+ spin_unlock_irq(&engine->active.lock);
+ GEM_BUG_ON(rq->engine != ve->engine);
+
+ /* Reset the engine while keeping our active request on hold */
+ execlists_hold(engine, rq);
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ intel_engine_reset(engine, NULL);
+ GEM_BUG_ON(rq->fence.error != -EIO);
+
+ /* Release our grasp on the engine, letting CS flow again */
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(I915_RESET_ENGINE + engine->id, &gt->reset.flags);
+
+ /* Check that we do not resubmit the held request */
+ i915_request_get(rq);
+ if (!i915_request_wait(rq, 0, HZ / 5)) {
+ pr_err("%s: on hold request completed!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -EIO;
+ goto out_rq;
+ }
+ GEM_BUG_ON(!i915_request_on_hold(rq));
+
+ /* But is resubmitted on release */
+ execlists_unhold(engine, rq);
+ if (i915_request_wait(rq, 0, HZ / 5) < 0) {
+ pr_err("%s: held request did not complete!\n",
+ engine->name);
+ intel_gt_set_wedged(gt);
+ err = -ETIME;
+ }
+
+out_rq:
+ i915_request_put(rq);
+out_heartbeat:
+ for (n = 0; n < nsibling; n++)
+ engine_heartbeat_enable(siblings[n], heartbeat[n]);
+
+ intel_context_put(ve);
+out_spin:
+ igt_spinner_fini(&spin);
+out_free:
+ kfree(heartbeat);
+ return err;
+}
+
+static int live_virtual_reset(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *siblings[MAX_ENGINE_INSTANCE + 1];
+ unsigned int class, inst;
+
+ /*
+ * Check that we handle a reset event within a virtual engine.
+ * Only the physical engine is reset, but we have to check the flow
+ * of the virtual requests around the reset, and make sure none of
+ * them is forgotten.
+ */
+
+ if (intel_uc_uses_guc_submission(&gt->uc))
+ return 0;
+
+ if (!intel_has_reset_engine(gt))
+ return 0;
+
+ for (class = 0; class <= MAX_ENGINE_CLASS; class++) {
+ int nsibling, err;
+
+ nsibling = 0;
+ for (inst = 0; inst <= MAX_ENGINE_INSTANCE; inst++) {
+ if (!gt->engine_class[class][inst])
+ continue;
+
+ siblings[nsibling++] = gt->engine_class[class][inst];
+ }
+ if (nsibling < 2)
+ continue;
+
+ err = reset_virtual_engine(gt, siblings, nsibling);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
int intel_execlists_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_sanitycheck),
SUBTEST(live_unlite_switch),
SUBTEST(live_unlite_preempt),
+ SUBTEST(live_pin_rewind),
+ SUBTEST(live_hold_reset),
+ SUBTEST(live_error_interrupt),
SUBTEST(live_timeslice_preempt),
+ SUBTEST(live_timeslice_rewind),
SUBTEST(live_timeslice_queue),
SUBTEST(live_busywait_preempt),
SUBTEST(live_preempt),
@@ -3326,13 +3969,13 @@ int intel_execlists_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_suppress_wait_preempt),
SUBTEST(live_chain_preempt),
SUBTEST(live_preempt_gang),
- SUBTEST(live_preempt_hang),
SUBTEST(live_preempt_timeout),
SUBTEST(live_preempt_smoke),
SUBTEST(live_virtual_engine),
SUBTEST(live_virtual_mask),
SUBTEST(live_virtual_preserved),
SUBTEST(live_virtual_bond),
+ SUBTEST(live_virtual_reset),
};
if (!HAS_EXECLISTS(i915))
@@ -3373,6 +4016,62 @@ static void hexdump(const void *buf, size_t len)
}
}
+static int emit_semaphore_signal(struct intel_context *ce, void *slot)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ cs = intel_ring_begin(rq, 4);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return PTR_ERR(cs);
+ }
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = offset;
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+ i915_request_add(rq);
+ return 0;
+}
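+
+/*
+ * Note: the MI_STORE_DWORD_IMM above pairs with the MI_SEMAPHORE_WAIT
+ * (SAD_NEQ_SDD against 0) emitted by __gpr_read() and
+ * create_timestamp() below; writing a nonzero value into the shared
+ * status-page slot releases those waiters.
+ */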
+
+static int context_flush(struct intel_context *ce, long timeout)
+{
+ struct i915_request *rq;
+ struct dma_fence *fence;
+ int err = 0;
+
+ rq = intel_engine_create_kernel_request(ce->engine);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ fence = i915_active_fence_get(&ce->timeline->last_request);
+ if (fence) {
+ i915_request_await_dma_fence(rq, fence);
+ dma_fence_put(fence);
+ }
+
+ rq = i915_request_get(rq);
+ i915_request_add(rq);
+ if (i915_request_wait(rq, 0, timeout) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ rmb(); /* We know the request is written, make sure all state is too! */
+ return err;
+}
+
static int live_lrc_layout(void *arg)
{
struct intel_gt *gt = arg;
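
context_flush() above drains a context by chaining a kernel request
behind the context's last_request fence and waiting on it; a hedged
usage sketch, as in __lrc_timestamp() further below:

	/* wait for the switch back to the kernel context, saving ce */
	err = context_flush(ce, HZ / 2);
	if (err)
		return err;
	/* the rmb() in context_flush() orders the state reads after it */
	timestamp = READ_ONCE(ce->lrc_reg_state[CTX_TIMESTAMP]);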
@@ -3539,6 +4238,11 @@ static int live_lrc_fixed(void *arg)
CTX_BB_STATE - 1,
"BB_STATE"
},
+ {
+ i915_mmio_reg_offset(RING_CTX_TIMESTAMP(engine->mmio_base)),
+ CTX_TIMESTAMP - 1,
+ "RING_CTX_TIMESTAMP"
+ },
{ },
}, *t;
u32 *hw;
@@ -3622,8 +4326,16 @@ static int __live_lrc_state(struct intel_engine_cs *engine,
*cs++ = i915_ggtt_offset(scratch) + RING_TAIL_IDX * sizeof(u32);
*cs++ = 0;
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
i915_request_get(rq);
i915_request_add(rq);
+ if (err)
+ goto err_rq;
intel_engine_flush_submission(engine);
expected[RING_TAIL_IDX] = ce->ring->tail;
@@ -3689,13 +4401,13 @@ static int live_lrc_state(void *arg)
return err;
}
-static int gpr_make_dirty(struct intel_engine_cs *engine)
+static int gpr_make_dirty(struct intel_context *ce)
{
struct i915_request *rq;
u32 *cs;
int n;
- rq = intel_engine_create_kernel_request(engine);
+ rq = intel_context_create_request(ce);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -3707,20 +4419,79 @@ static int gpr_make_dirty(struct intel_engine_cs *engine)
*cs++ = MI_LOAD_REGISTER_IMM(NUM_GPR_DW);
for (n = 0; n < NUM_GPR_DW; n++) {
- *cs++ = CS_GPR(engine, n);
+ *cs++ = CS_GPR(ce->engine, n);
*cs++ = STACK_MAGIC;
}
*cs++ = MI_NOOP;
intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
i915_request_add(rq);
return 0;
}
-static int __live_gpr_clear(struct intel_engine_cs *engine,
- struct i915_vma *scratch)
+static struct i915_request *
+__gpr_read(struct intel_context *ce, struct i915_vma *scratch, u32 *slot)
{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+ int n;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 6 + 4 * NUM_GPR_DW);
+ if (IS_ERR(cs)) {
+ i915_request_add(rq);
+ return ERR_CAST(cs);
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ for (n = 0; n < NUM_GPR_DW; n++) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = CS_GPR(ce->engine, n);
+ *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
+ *cs++ = 0;
+ }
+
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (!err)
+ err = i915_vma_move_to_active(scratch, rq, EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ rq = ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+static int __live_lrc_gpr(struct intel_engine_cs *engine,
+ struct i915_vma *scratch,
+ bool preempt)
+{
+ u32 *slot = memset32(engine->status_page.addr + 1000, 0, 4);
struct intel_context *ce;
struct i915_request *rq;
u32 *cs;
@@ -3730,7 +4501,7 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
if (INTEL_GEN(engine->i915) < 9 && engine->class != RENDER_CLASS)
return 0; /* GPR only on rcs0 for gen8 */
- err = gpr_make_dirty(engine);
+ err = gpr_make_dirty(engine->kernel_context);
if (err)
return err;
@@ -3738,28 +4509,28 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
if (IS_ERR(ce))
return PTR_ERR(ce);
- rq = intel_context_create_request(ce);
+ rq = __gpr_read(ce, scratch, slot);
if (IS_ERR(rq)) {
err = PTR_ERR(rq);
goto err_put;
}
- cs = intel_ring_begin(rq, 4 * NUM_GPR_DW);
- if (IS_ERR(cs)) {
- err = PTR_ERR(cs);
- i915_request_add(rq);
- goto err_put;
- }
+ err = wait_for_submit(engine, rq, HZ / 2);
+ if (err)
+ goto err_rq;
- for (n = 0; n < NUM_GPR_DW; n++) {
- *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
- *cs++ = CS_GPR(engine, n);
- *cs++ = i915_ggtt_offset(scratch) + n * sizeof(u32);
- *cs++ = 0;
- }
+ if (preempt) {
+ err = gpr_make_dirty(engine->kernel_context);
+ if (err)
+ goto err_rq;
- i915_request_get(rq);
- i915_request_add(rq);
+ err = emit_semaphore_signal(engine->kernel_context, slot);
+ if (err)
+ goto err_rq;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
if (i915_request_wait(rq, 0, HZ / 5) < 0) {
err = -ETIME;
@@ -3786,13 +4557,15 @@ static int __live_gpr_clear(struct intel_engine_cs *engine,
i915_gem_object_unpin_map(scratch->obj);
err_rq:
+ memset32(&slot[0], -1, 4);
+ wmb();
i915_request_put(rq);
err_put:
intel_context_put(ce);
return err;
}
-static int live_gpr_clear(void *arg)
+static int live_lrc_gpr(void *arg)
{
struct intel_gt *gt = arg;
struct intel_engine_cs *engine;
@@ -3810,7 +4583,971 @@ static int live_gpr_clear(void *arg)
return PTR_ERR(scratch);
for_each_engine(engine, gt, id) {
- err = __live_gpr_clear(engine, scratch);
+ unsigned long heartbeat;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+
+ err = __live_lrc_gpr(engine, scratch, false);
+ if (err)
+ goto err;
+
+ err = __live_lrc_gpr(engine, scratch, true);
+ if (err)
+ goto err;
+
+err:
+ engine_heartbeat_enable(engine, heartbeat);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ break;
+ }
+
+ i915_vma_unpin_and_release(&scratch, 0);
+ return err;
+}
+
+static struct i915_request *
+create_timestamp(struct intel_context *ce, void *slot, int idx)
+{
+ const u32 offset =
+ i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(slot);
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return rq;
+
+ cs = intel_ring_begin(rq, 10);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err;
+ }
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = offset;
+ *cs++ = 0;
+
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8 | MI_USE_GGTT;
+ *cs++ = i915_mmio_reg_offset(RING_CTX_TIMESTAMP(rq->engine->mmio_base));
+ *cs++ = offset + idx * sizeof(u32);
+ *cs++ = 0;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_MASK;
+ err = 0;
+err:
+ i915_request_get(rq);
+ i915_request_add(rq);
+ if (err) {
+ i915_request_put(rq);
+ return ERR_PTR(err);
+ }
+
+ return rq;
+}
+
+struct lrc_timestamp {
+ struct intel_engine_cs *engine;
+ struct intel_context *ce[2];
+ u32 poison;
+};
+
+static bool timestamp_advanced(u32 start, u32 end)
+{
+ return (s32)(end - start) > 0;
+}
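+
+/*
+ * timestamp_advanced() above is wraparound-safe serial arithmetic:
+ * e.g. start = U32_MAX, end = 2 gives (s32)(end - start) == 3 > 0,
+ * so the timestamp still counts as advanced across a 32bit rollover.
+ */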
+
+static int __lrc_timestamp(const struct lrc_timestamp *arg, bool preempt)
+{
+ u32 *slot = memset32(arg->engine->status_page.addr + 1000, 0, 4);
+ struct i915_request *rq;
+ u32 timestamp;
+ int err = 0;
+
+ arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP] = arg->poison;
+ rq = create_timestamp(arg->ce[0], slot, 1);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ err = wait_for_submit(rq->engine, rq, HZ / 2);
+ if (err)
+ goto err;
+
+ if (preempt) {
+ arg->ce[1]->lrc_reg_state[CTX_TIMESTAMP] = 0xdeadbeef;
+ err = emit_semaphore_signal(arg->ce[1], slot);
+ if (err)
+ goto err;
+ } else {
+ slot[0] = 1;
+ wmb();
+ }
+
+ /* And wait for switch to kernel (to save our context to memory) */
+ err = context_flush(arg->ce[0], HZ / 2);
+ if (err)
+ goto err;
+
+ if (!timestamp_advanced(arg->poison, slot[1])) {
+ pr_err("%s(%s): invalid timestamp on restore, context:%x, request:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ arg->poison, slot[1]);
+ err = -EINVAL;
+ }
+
+ timestamp = READ_ONCE(arg->ce[0]->lrc_reg_state[CTX_TIMESTAMP]);
+ if (!timestamp_advanced(slot[1], timestamp)) {
+ pr_err("%s(%s): invalid timestamp on save, request:%x, context:%x\n",
+ arg->engine->name, preempt ? "preempt" : "simple",
+ slot[1], timestamp);
+ err = -EINVAL;
+ }
+
+err:
+ memset32(slot, -1, 4);
+ i915_request_put(rq);
+ return err;
+}
+
+static int live_lrc_timestamp(void *arg)
+{
+ struct lrc_timestamp data = {};
+ struct intel_gt *gt = arg;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ 0,
+ S32_MAX,
+ (u32)S32_MAX + 1,
+ U32_MAX,
+ };
+
+ /*
+	 * We want to verify that the timestamp is saved and restored across
+	 * context switches and remains monotonic.
+ *
+ * So we do this with a little bit of LRC poisoning to check various
+ * boundary conditions, and see what happens if we preempt the context
+ * with a second request (carrying more poison into the timestamp).
+ */
+
+ for_each_engine(data.engine, gt, id) {
+ unsigned long heartbeat;
+ int i, err = 0;
+
+ engine_heartbeat_disable(data.engine, &heartbeat);
+
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ struct intel_context *tmp;
+
+ tmp = intel_context_create(data.engine);
+ if (IS_ERR(tmp)) {
+ err = PTR_ERR(tmp);
+ goto err;
+ }
+
+ err = intel_context_pin(tmp);
+ if (err) {
+ intel_context_put(tmp);
+ goto err;
+ }
+
+ data.ce[i] = tmp;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ data.poison = poison[i];
+
+ err = __lrc_timestamp(&data, false);
+ if (err)
+ break;
+
+ err = __lrc_timestamp(&data, true);
+ if (err)
+ break;
+ }
+
+err:
+ engine_heartbeat_enable(data.engine, heartbeat);
+ for (i = 0; i < ARRAY_SIZE(data.ce); i++) {
+ if (!data.ce[i])
+ break;
+
+ intel_context_unpin(data.ce[i]);
+ intel_context_put(data.ce[i]);
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static struct i915_vma *
+create_user_vma(struct i915_address_space *vm, unsigned long size)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ int err;
+
+ obj = i915_gem_object_create_internal(vm->i915, size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ return vma;
+}
+
+static struct i915_vma *
+store_context(struct intel_context *ce, struct i915_vma *scratch)
+{
+ struct i915_vma *batch;
+ u32 dw, x, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ x = 0;
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
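+	/*
+	 * Walk the LRI packets (MI_INSTR(0x22, 0)) in the default context
+	 * image and emit an SRM for each register they load, capturing a
+	 * snapshot of the per-context register set into scratch.
+	 */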
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
+ *cs++ = MI_STORE_REGISTER_MEM_GEN8;
+ *cs++ = hw[dw];
+ *cs++ = lower_32_bits(scratch->node.start + x);
+ *cs++ = upper_32_bits(scratch->node.start + x);
+
+ dw += 2;
+ x += 4;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int move_to_active(struct i915_request *rq,
+ struct i915_vma *vma,
+ unsigned int flags)
+{
+ int err;
+
+ i915_vma_lock(vma);
+ err = i915_request_await_object(rq, vma->obj, flags);
+ if (!err)
+ err = i915_vma_move_to_active(vma, rq, flags);
+ i915_vma_unlock(vma);
+
+ return err;
+}
+
+static struct i915_request *
+record_registers(struct intel_context *ce,
+ struct i915_vma *before,
+ struct i915_vma *after,
+ u32 *sema)
+{
+ struct i915_vma *b_before, *b_after;
+ struct i915_request *rq;
+ u32 *cs;
+ int err;
+
+ b_before = store_context(ce, before);
+ if (IS_ERR(b_before))
+ return ERR_CAST(b_before);
+
+ b_after = store_context(ce, after);
+ if (IS_ERR(b_after)) {
+ rq = ERR_CAST(b_after);
+ goto err_before;
+ }
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ goto err_after;
+
+ err = move_to_active(rq, before, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_before, 0);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, after, EXEC_OBJECT_WRITE);
+ if (err)
+ goto err_rq;
+
+ err = move_to_active(rq, b_after, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 14);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
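+	/*
+	 * Dump the registers once before the semaphore (b_before), stall on
+	 * the semaphore so the poisoner can run, then dump them again
+	 * (b_after); arbitration is disabled around each batch so the reads
+	 * themselves cannot be preempted.
+	 */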
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_before->node.start);
+ *cs++ = upper_32_bits(b_before->node.start);
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
+ *cs++ = MI_SEMAPHORE_WAIT |
+ MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_POLL |
+ MI_SEMAPHORE_SAD_NEQ_SDD;
+ *cs++ = 0;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(b_after->node.start);
+ *cs++ = upper_32_bits(b_after->node.start);
+
+ intel_ring_advance(rq, cs);
+
+ WRITE_ONCE(*sema, 0);
+ i915_request_get(rq);
+ i915_request_add(rq);
+err_after:
+ i915_vma_put(b_after);
+err_before:
+ i915_vma_put(b_before);
+ return rq;
+
+err_rq:
+ i915_request_add(rq);
+ rq = ERR_PTR(err);
+ goto err_after;
+}
+
+static struct i915_vma *load_context(struct intel_context *ce, u32 poison)
+{
+ struct i915_vma *batch;
+ u32 dw, *cs, *hw;
+
+ batch = create_user_vma(ce->vm, SZ_64K);
+ if (IS_ERR(batch))
+ return batch;
+
+ cs = i915_gem_object_pin_map(batch->obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_vma_put(batch);
+ return ERR_CAST(cs);
+ }
+
+ dw = 0;
+ hw = ce->engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
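+	/*
+	 * Replay each LRI from the default context image, but substitute
+	 * the poison value for every register payload.
+	 */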
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ *cs++ = MI_LOAD_REGISTER_IMM(len);
+ while (len--) {
+ *cs++ = hw[dw];
+ *cs++ = poison;
+ dw += 2;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ *cs++ = MI_BATCH_BUFFER_END;
+
+ i915_gem_object_flush_map(batch->obj);
+ i915_gem_object_unpin_map(batch->obj);
+
+ return batch;
+}
+
+static int poison_registers(struct intel_context *ce, u32 poison, u32 *sema)
+{
+ struct i915_request *rq;
+ struct i915_vma *batch;
+ u32 *cs;
+ int err;
+
+ batch = load_context(ce, poison);
+ if (IS_ERR(batch))
+ return PTR_ERR(batch);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_batch;
+ }
+
+ err = move_to_active(rq, batch, 0);
+ if (err)
+ goto err_rq;
+
+ cs = intel_ring_begin(rq, 8);
+ if (IS_ERR(cs)) {
+ err = PTR_ERR(cs);
+ goto err_rq;
+ }
+
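+	/*
+	 * Run the poison batch with arbitration disabled, then signal the
+	 * semaphore from the ring so the victim context resumes only after
+	 * the poisoning is complete.
+	 */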
+ *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | BIT(8);
+ *cs++ = lower_32_bits(batch->node.start);
+ *cs++ = upper_32_bits(batch->node.start);
+
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = i915_ggtt_offset(ce->engine->status_page.vma) +
+ offset_in_page(sema);
+ *cs++ = 0;
+ *cs++ = 1;
+
+ intel_ring_advance(rq, cs);
+
+ rq->sched.attr.priority = I915_PRIORITY_BARRIER;
+err_rq:
+ i915_request_add(rq);
+err_batch:
+ i915_vma_put(batch);
+ return err;
+}
+
+static bool is_moving(u32 a, u32 b)
+{
+ return a != b;
+}
+
+static int compare_isolation(struct intel_engine_cs *engine,
+ struct i915_vma *ref[2],
+ struct i915_vma *result[2],
+ struct intel_context *ce,
+ u32 poison)
+{
+ u32 x, dw, *hw, *lrc;
+ u32 *A[2], *B[2];
+ int err = 0;
+
+ A[0] = i915_gem_object_pin_map(ref[0]->obj, I915_MAP_WC);
+ if (IS_ERR(A[0]))
+ return PTR_ERR(A[0]);
+
+ A[1] = i915_gem_object_pin_map(ref[1]->obj, I915_MAP_WC);
+ if (IS_ERR(A[1])) {
+ err = PTR_ERR(A[1]);
+ goto err_A0;
+ }
+
+ B[0] = i915_gem_object_pin_map(result[0]->obj, I915_MAP_WC);
+ if (IS_ERR(B[0])) {
+ err = PTR_ERR(B[0]);
+ goto err_A1;
+ }
+
+ B[1] = i915_gem_object_pin_map(result[1]->obj, I915_MAP_WC);
+ if (IS_ERR(B[1])) {
+ err = PTR_ERR(B[1]);
+ goto err_B0;
+ }
+
+ lrc = i915_gem_object_pin_map(ce->state->obj,
+ i915_coherent_map_type(engine->i915));
+ if (IS_ERR(lrc)) {
+ err = PTR_ERR(lrc);
+ goto err_B1;
+ }
+ lrc += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+
+ x = 0;
+ dw = 0;
+ hw = engine->pinned_default_state;
+ hw += LRC_STATE_PN * PAGE_SIZE / sizeof(*hw);
+ do {
+ u32 len = hw[dw] & 0x7f;
+
+ if (hw[dw] == 0) {
+ dw++;
+ continue;
+ }
+
+ if ((hw[dw] & GENMASK(31, 23)) != MI_INSTR(0x22, 0)) {
+ dw += len + 2;
+ continue;
+ }
+
+ dw++;
+ len = (len + 1) / 2;
+ while (len--) {
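+			/*
+			 * A mismatch is only interesting if the register was
+			 * stable across both reference reads yet changed in
+			 * the result; registers that move on their own (and
+			 * RING_HEAD/TAIL below) are expected to differ.
+			 */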
+ if (!is_moving(A[0][x], A[1][x]) &&
+ (A[0][x] != B[0][x] || A[1][x] != B[1][x])) {
+ switch (hw[dw] & 4095) {
+ case 0x30: /* RING_HEAD */
+ case 0x34: /* RING_TAIL */
+ break;
+
+ default:
+ pr_err("%s[%d]: Mismatch for register %4x, default %08x, reference %08x, result (%08x, %08x), poison %08x, context %08x\n",
+ engine->name, dw,
+ hw[dw], hw[dw + 1],
+ A[0][x], B[0][x], B[1][x],
+ poison, lrc[dw + 1]);
+ err = -EINVAL;
+ break;
+ }
+ }
+ dw += 2;
+ x++;
+ }
+ } while (dw < PAGE_SIZE / sizeof(u32) &&
+ (hw[dw] & ~BIT(0)) != MI_BATCH_BUFFER_END);
+
+ i915_gem_object_unpin_map(ce->state->obj);
+err_B1:
+ i915_gem_object_unpin_map(result[1]->obj);
+err_B0:
+ i915_gem_object_unpin_map(result[0]->obj);
+err_A1:
+ i915_gem_object_unpin_map(ref[1]->obj);
+err_A0:
+ i915_gem_object_unpin_map(ref[0]->obj);
+ return err;
+}
+
+static int __lrc_isolation(struct intel_engine_cs *engine, u32 poison)
+{
+ u32 *sema = memset32(engine->status_page.addr + 1000, 0, 1);
+ struct i915_vma *ref[2], *result[2];
+ struct intel_context *A, *B;
+ struct i915_request *rq;
+ int err;
+
+ A = intel_context_create(engine);
+ if (IS_ERR(A))
+ return PTR_ERR(A);
+
+ B = intel_context_create(engine);
+ if (IS_ERR(B)) {
+ err = PTR_ERR(B);
+ goto err_A;
+ }
+
+ ref[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[0])) {
+ err = PTR_ERR(ref[0]);
+ goto err_B;
+ }
+
+ ref[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(ref[1])) {
+ err = PTR_ERR(ref[1]);
+ goto err_ref0;
+ }
+
+ rq = record_registers(A, ref[0], ref[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_ref1;
+ }
+
+ WRITE_ONCE(*sema, 1);
+ wmb();
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_ref1;
+ }
+ i915_request_put(rq);
+
+ result[0] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[0])) {
+ err = PTR_ERR(result[0]);
+ goto err_ref1;
+ }
+
+ result[1] = create_user_vma(A->vm, SZ_64K);
+ if (IS_ERR(result[1])) {
+ err = PTR_ERR(result[1]);
+ goto err_result0;
+ }
+
+ rq = record_registers(A, result[0], result[1], sema);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_result1;
+ }
+
+ err = poison_registers(B, poison, sema);
+ if (err) {
+ WRITE_ONCE(*sema, -1);
+ i915_request_put(rq);
+ goto err_result1;
+ }
+
+ if (i915_request_wait(rq, 0, HZ / 2) < 0) {
+ i915_request_put(rq);
+ err = -ETIME;
+ goto err_result1;
+ }
+ i915_request_put(rq);
+
+ err = compare_isolation(engine, ref, result, A, poison);
+
+err_result1:
+ i915_vma_put(result[1]);
+err_result0:
+ i915_vma_put(result[0]);
+err_ref1:
+ i915_vma_put(ref[1]);
+err_ref0:
+ i915_vma_put(ref[0]);
+err_B:
+ intel_context_put(B);
+err_A:
+ intel_context_put(A);
+ return err;
+}
+
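+/*
+ * These engine/gen combinations are presumed to lack full register
+ * isolation; skip them unless CONFIG_DRM_I915_SELFTEST_BROKEN explicitly
+ * asks us to exercise known failures.
+ */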
+static bool skip_isolation(const struct intel_engine_cs *engine)
+{
+ if (engine->class == COPY_ENGINE_CLASS && INTEL_GEN(engine->i915) == 9)
+ return true;
+
+ if (engine->class == RENDER_CLASS && INTEL_GEN(engine->i915) == 11)
+ return true;
+
+ return false;
+}
+
+static int live_lrc_isolation(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ const u32 poison[] = {
+ STACK_MAGIC,
+ 0x3a3a3a3a,
+ 0x5c5c5c5c,
+ 0xffffffff,
+ 0xffff0000,
+ };
+
+ /*
+	 * Our goal is to verify that per-context state cannot be
+ * tampered with by another non-privileged client.
+ *
+ * We take the list of context registers from the LRI in the default
+ * context image and attempt to modify that list from a remote context.
+ */
+
+ for_each_engine(engine, gt, id) {
+ int err = 0;
+ int i;
+
+ /* Just don't even ask */
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN) &&
+ skip_isolation(engine))
+ continue;
+
+ intel_engine_pm_get(engine);
+ if (engine->pinned_default_state) {
+ for (i = 0; i < ARRAY_SIZE(poison); i++) {
+ err = __lrc_isolation(engine, poison[i]);
+ if (err)
+ break;
+
+ err = __lrc_isolation(engine, ~poison[i]);
+ if (err)
+ break;
+ }
+ }
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
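+/*
+ * Manually reset the engine under the reset lock, with the submission
+ * tasklet disabled, unless the hanging request has already been flagged
+ * with a fence error by a concurrent reset.
+ */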
+static void garbage_reset(struct intel_engine_cs *engine,
+ struct i915_request *rq)
+{
+ const unsigned int bit = I915_RESET_ENGINE + engine->id;
+ unsigned long *lock = &engine->gt->reset.flags;
+
+ if (test_and_set_bit(bit, lock))
+ return;
+
+ tasklet_disable(&engine->execlists.tasklet);
+
+ if (!rq->fence.error)
+ intel_engine_reset(engine, NULL);
+
+ tasklet_enable(&engine->execlists.tasklet);
+ clear_and_wake_up_bit(bit, lock);
+}
+
+static struct i915_request *garbage(struct intel_context *ce,
+ struct rnd_state *prng)
+{
+ struct i915_request *rq;
+ int err;
+
+ err = intel_context_pin(ce);
+ if (err)
+ return ERR_PTR(err);
+
+ prandom_bytes_state(prng,
+ ce->lrc_reg_state,
+ ce->engine->context_size -
+ LRC_STATE_PN * PAGE_SIZE);
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_unpin;
+ }
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+ return rq;
+
+err_unpin:
+ intel_context_unpin(ce);
+ return ERR_PTR(err);
+}
+
+static int __lrc_garbage(struct intel_engine_cs *engine, struct rnd_state *prng)
+{
+ struct intel_context *ce;
+ struct i915_request *hang;
+ int err = 0;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ hang = garbage(ce, prng);
+ if (IS_ERR(hang)) {
+ err = PTR_ERR(hang);
+ goto err_ce;
+ }
+
+ if (wait_for_submit(engine, hang, HZ / 2)) {
+ i915_request_put(hang);
+ err = -ETIME;
+ goto err_ce;
+ }
+
+ intel_context_set_banned(ce);
+ garbage_reset(engine, hang);
+
+ intel_engine_flush_submission(engine);
+ if (!hang->fence.error) {
+ i915_request_put(hang);
+ pr_err("%s: corrupted context was not reset\n",
+ engine->name);
+ err = -EINVAL;
+ goto err_ce;
+ }
+
+ if (i915_request_wait(hang, 0, HZ / 2) < 0) {
+ pr_err("%s: corrupted context did not recover\n",
+ engine->name);
+ i915_request_put(hang);
+ err = -EIO;
+ goto err_ce;
+ }
+ i915_request_put(hang);
+
+err_ce:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_lrc_garbage(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+	 * Verify that we can recover if one context's state is completely
+ * corrupted.
+ */
+
+ if (!IS_ENABLED(CONFIG_DRM_I915_SELFTEST_BROKEN))
+ return 0;
+
+ for_each_engine(engine, gt, id) {
+ I915_RND_STATE(prng);
+ int err = 0, i;
+
+ if (!intel_has_reset_engine(engine->gt))
+ continue;
+
+ intel_engine_pm_get(engine);
+ for (i = 0; i < 3; i++) {
+ err = __lrc_garbage(engine, &prng);
+ if (err)
+ break;
+ }
+ intel_engine_pm_put(engine);
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+static int __live_pphwsp_runtime(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ struct i915_request *rq;
+ IGT_TIMEOUT(end_time);
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ ce->runtime.num_underflow = 0;
+ ce->runtime.max_underflow = 0;
+
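+	/*
+	 * Submit requests in batches of 1024, keeping a reference only to
+	 * the last request of each batch, until the selftest timeout.
+	 */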
+ do {
+ unsigned int loop = 1024;
+
+ while (loop) {
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq)) {
+ err = PTR_ERR(rq);
+ goto err_rq;
+ }
+
+ if (--loop == 0)
+ i915_request_get(rq);
+
+ i915_request_add(rq);
+ }
+
+ if (__igt_timeout(end_time, NULL))
+ break;
+
+ i915_request_put(rq);
+ } while (1);
+
+ err = i915_request_wait(rq, 0, HZ / 5);
+ if (err < 0) {
+ pr_err("%s: request not completed!\n", engine->name);
+ goto err_wait;
+ }
+
+ igt_flush_test(engine->i915);
+
+ pr_info("%s: pphwsp runtime %lluns, average %lluns\n",
+ engine->name,
+ intel_context_get_total_runtime_ns(ce),
+ intel_context_get_avg_runtime_ns(ce));
+
+ err = 0;
+ if (ce->runtime.num_underflow) {
+ pr_err("%s: pphwsp underflow %u time(s), max %u cycles!\n",
+ engine->name,
+ ce->runtime.num_underflow,
+ ce->runtime.max_underflow);
+ GEM_TRACE_DUMP();
+ err = -EOVERFLOW;
+ }
+
+err_wait:
+ i915_request_put(rq);
+err_rq:
+ intel_context_put(ce);
+ return err;
+}
+
+static int live_pphwsp_runtime(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+	 * Check that the cumulative context runtime, as stored in the
+	 * pphwsp[16], is monotonic.
+ */
+
+ for_each_engine(engine, gt, id) {
+ err = __live_pphwsp_runtime(engine);
if (err)
break;
}
@@ -3818,7 +5555,6 @@ static int live_gpr_clear(void *arg)
if (igt_flush_test(gt->i915))
err = -EIO;
- i915_vma_unpin_and_release(&scratch, 0);
return err;
}
@@ -3828,7 +5564,11 @@ int intel_lrc_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_lrc_layout),
SUBTEST(live_lrc_fixed),
SUBTEST(live_lrc_state),
- SUBTEST(live_gpr_clear),
+ SUBTEST(live_lrc_gpr),
+ SUBTEST(live_lrc_isolation),
+ SUBTEST(live_lrc_timestamp),
+ SUBTEST(live_lrc_garbage),
+ SUBTEST(live_pphwsp_runtime),
};
if (!HAS_LOGICAL_RING_CONTEXTS(i915))
diff --git a/drivers/gpu/drm/i915/gt/selftest_mocs.c b/drivers/gpu/drm/i915/gt/selftest_mocs.c
index de1f83100fb6..8831ffee2061 100644
--- a/drivers/gpu/drm/i915/gt/selftest_mocs.c
+++ b/drivers/gpu/drm/i915/gt/selftest_mocs.c
@@ -12,7 +12,8 @@
#include "selftests/igt_spinner.h"
struct live_mocs {
- struct drm_i915_mocs_table table;
+ struct drm_i915_mocs_table mocs;
+ struct drm_i915_mocs_table l3cc;
struct i915_vma *scratch;
void *vaddr;
};
@@ -70,11 +71,22 @@ static struct i915_vma *create_scratch(struct intel_gt *gt)
static int live_mocs_init(struct live_mocs *arg, struct intel_gt *gt)
{
+ struct drm_i915_mocs_table table;
+ unsigned int flags;
int err;
- if (!get_mocs_settings(gt->i915, &arg->table))
+ memset(arg, 0, sizeof(*arg));
+
+ flags = get_mocs_settings(gt->i915, &table);
+ if (!flags)
return -EINVAL;
+ if (flags & HAS_RENDER_L3CC)
+ arg->l3cc = table;
+
+ if (flags & (HAS_GLOBAL_MOCS | HAS_ENGINE_MOCS))
+ arg->mocs = table;
+
arg->scratch = create_scratch(gt);
if (IS_ERR(arg->scratch))
return PTR_ERR(arg->scratch);
@@ -223,9 +235,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Read the mocs tables back using SRM */
offset = i915_ggtt_offset(vma);
if (!err)
- err = read_mocs_table(rq, &arg->table, &offset);
+ err = read_mocs_table(rq, &arg->mocs, &offset);
if (!err && ce->engine->class == RENDER_CLASS)
- err = read_l3cc_table(rq, &arg->table, &offset);
+ err = read_l3cc_table(rq, &arg->l3cc, &offset);
offset -= i915_ggtt_offset(vma);
GEM_BUG_ON(offset > PAGE_SIZE);
@@ -236,9 +248,9 @@ static int check_mocs_engine(struct live_mocs *arg,
/* Compare the results against the expected tables */
vaddr = arg->vaddr;
if (!err)
- err = check_mocs_table(ce->engine, &arg->table, &vaddr);
+ err = check_mocs_table(ce->engine, &arg->mocs, &vaddr);
if (!err && ce->engine->class == RENDER_CLASS)
- err = check_l3cc_table(ce->engine, &arg->table, &vaddr);
+ err = check_l3cc_table(ce->engine, &arg->l3cc, &vaddr);
if (err)
return err;
diff --git a/drivers/gpu/drm/i915/gt/selftest_rc6.c b/drivers/gpu/drm/i915/gt/selftest_rc6.c
index 8cc55a0e9e06..5f7e2dcf5686 100644
--- a/drivers/gpu/drm/i915/gt/selftest_rc6.c
+++ b/drivers/gpu/drm/i915/gt/selftest_rc6.c
@@ -56,9 +56,10 @@ int live_rc6_manual(void *arg)
res[1] = intel_rc6_residency_ns(rc6, GEN6_GT_GFX_RC6);
if (res[1] == res[0]) {
- pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x\n",
+ pr_err("Did not enter RC6! RC6_STATE=%08x, RC6_CONTROL=%08x, residency=%lld\n",
intel_uncore_read_fw(gt->uncore, GEN6_RC_STATE),
- intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL));
+ intel_uncore_read_fw(gt->uncore, GEN6_RC_CONTROL),
+ res[0]);
err = -EINVAL;
}
diff --git a/drivers/gpu/drm/i915/gt/selftest_reset.c b/drivers/gpu/drm/i915/gt/selftest_reset.c
index 6ad6aca315f6..35406ecdf0b2 100644
--- a/drivers/gpu/drm/i915/gt/selftest_reset.c
+++ b/drivers/gpu/drm/i915/gt/selftest_reset.c
@@ -115,7 +115,7 @@ static int igt_atomic_engine_reset(void *arg)
if (!intel_has_reset_engine(gt))
return 0;
- if (USES_GUC_SUBMISSION(gt->i915))
+ if (intel_uc_uses_guc_submission(&gt->uc))
return 0;
intel_gt_pm_get(gt);
diff --git a/drivers/gpu/drm/i915/gt/selftest_ring_submission.c b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
new file mode 100644
index 000000000000..9995faadd7e8
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/selftest_ring_submission.c
@@ -0,0 +1,296 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "intel_engine_pm.h"
+#include "selftests/igt_flush_test.h"
+
+static struct i915_vma *create_wally(struct intel_engine_cs *engine)
+{
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+ u32 *cs;
+ int err;
+
+ obj = i915_gem_object_create_internal(engine->i915, 4096);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ vma = i915_vma_instance(obj, engine->gt->vm, NULL);
+ if (IS_ERR(vma)) {
+ i915_gem_object_put(obj);
+ return vma;
+ }
+
+ err = i915_vma_pin(vma, 0, 0, PIN_USER | PIN_HIGH);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ err = i915_vma_sync(vma);
+ if (err) {
+ i915_gem_object_put(obj);
+ return ERR_PTR(err);
+ }
+
+ cs = i915_gem_object_pin_map(obj, I915_MAP_WC);
+ if (IS_ERR(cs)) {
+ i915_gem_object_put(obj);
+ return ERR_CAST(cs);
+ }
+
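+	/*
+	 * Emit the gen-appropriate MI_STORE_DWORD_IMM so that the wa batch
+	 * leaves a STACK_MAGIC marker at offset 4000 within its own page.
+	 */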
+ if (INTEL_GEN(engine->i915) >= 6) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4;
+ *cs++ = 0;
+ } else if (INTEL_GEN(engine->i915) >= 4) {
+ *cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
+ *cs++ = 0;
+ } else {
+ *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
+ }
+ *cs++ = vma->node.start + 4000;
+ *cs++ = STACK_MAGIC;
+
+ *cs++ = MI_BATCH_BUFFER_END;
+ i915_gem_object_unpin_map(obj);
+
+ vma->private = intel_context_create(engine); /* dummy residuals */
+ if (IS_ERR(vma->private)) {
+ vma = ERR_CAST(vma->private);
+ i915_gem_object_put(obj);
+ }
+
+ return vma;
+}
+
+static int context_sync(struct intel_context *ce)
+{
+ struct i915_request *rq;
+ int err = 0;
+
+ rq = intel_context_create_request(ce);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_get(rq);
+ i915_request_add(rq);
+
+ if (i915_request_wait(rq, 0, HZ / 5) < 0)
+ err = -ETIME;
+ i915_request_put(rq);
+
+ return err;
+}
+
+static int new_context_sync(struct intel_engine_cs *engine)
+{
+ struct intel_context *ce;
+ int err;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = context_sync(ce);
+ intel_context_put(ce);
+
+ return err;
+}
+
+static int mixed_contexts_sync(struct intel_engine_cs *engine, u32 *result)
+{
+ int pass;
+ int err;
+
+ for (pass = 0; pass < 2; pass++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(engine->kernel_context);
+ if (err || READ_ONCE(*result)) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb emitted for the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted after the kernel context\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+
+ WRITE_ONCE(*result, 0);
+ err = new_context_sync(engine);
+ if (READ_ONCE(*result) != STACK_MAGIC) {
+ if (!err) {
+ pr_err("pass[%d] wa_bb *NOT* emitted for the user context switch\n",
+ pass);
+ err = -EINVAL;
+ }
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+static int double_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int kernel_context_sync_00(struct intel_engine_cs *engine, u32 *result)
+{
+ struct intel_context *ce;
+ int err, i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ for (i = 0; i < 2; i++) {
+ WRITE_ONCE(*result, 0);
+ err = context_sync(ce);
+ if (err)
+ break;
+
+ err = context_sync(engine->kernel_context);
+ if (err)
+ break;
+ }
+ intel_context_put(ce);
+ if (err)
+ return err;
+
+ if (READ_ONCE(*result)) {
+ pr_err("wa_bb emitted between the same user context [with intervening kernel]\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __live_ctx_switch_wa(struct intel_engine_cs *engine)
+{
+ struct i915_vma *bb;
+ u32 *result;
+ int err;
+
+ bb = create_wally(engine);
+ if (IS_ERR(bb))
+ return PTR_ERR(bb);
+
+ result = i915_gem_object_pin_map(bb->obj, I915_MAP_WC);
+ if (IS_ERR(result)) {
+ intel_context_put(bb->private);
+ i915_vma_unpin_and_release(&bb, 0);
+ return PTR_ERR(result);
+ }
+ result += 1000;
+
+ engine->wa_ctx.vma = bb;
+
+ err = mixed_contexts_sync(engine, result);
+ if (err)
+ goto out;
+
+ err = double_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+ err = kernel_context_sync_00(engine, result);
+ if (err)
+ goto out;
+
+out:
+ intel_context_put(engine->wa_ctx.vma->private);
+ i915_vma_unpin_and_release(&engine->wa_ctx.vma, I915_VMA_RELEASE_MAP);
+ return err;
+}
+
+static int live_ctx_switch_wa(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+
+ /*
+ * Exercise the inter-context wa batch.
+ *
+ * Between each user context we run a wa batch, and since it may
+ * have implications for user visible state, we have to check that
+ * we do actually execute it.
+ *
+ * The trick we use is to replace the normal wa batch with a custom
+ * one that writes to a marker within it, and we can then look for
+ * that marker to confirm if the batch was run when we expect it,
+	 * and, equally important, that it wasn't run when we don't!
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_vma *saved_wa;
+ int err;
+
+ if (!intel_engine_can_store_dword(engine))
+ continue;
+
+ if (IS_GEN_RANGE(gt->i915, 4, 5))
+ continue; /* MI_STORE_DWORD is privileged! */
+
+ saved_wa = fetch_and_zero(&engine->wa_ctx.vma);
+
+ intel_engine_pm_get(engine);
+ err = __live_ctx_switch_wa(engine);
+ intel_engine_pm_put(engine);
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ engine->wa_ctx.vma = saved_wa;
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+int intel_ring_submission_live_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(live_ctx_switch_wa),
+ };
+
+ if (HAS_EXECLISTS(i915))
+ return 0;
+
+ return intel_gt_live_subtests(tests, &i915->gt);
+}
diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c
index e2d78cc22fb4..c2578a0f2f14 100644
--- a/drivers/gpu/drm/i915/gt/selftest_timeline.c
+++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c
@@ -6,6 +6,8 @@
#include <linux/prime_numbers.h>
+#include "intel_context.h"
+#include "intel_engine_heartbeat.h"
#include "intel_engine_pm.h"
#include "intel_gt.h"
#include "intel_gt_requests.h"
@@ -604,7 +606,6 @@ static int live_hwsp_alternate(void *arg)
tl = checked_intel_timeline_create(gt);
if (IS_ERR(tl)) {
- intel_engine_pm_put(engine);
err = PTR_ERR(tl);
goto out;
}
@@ -750,6 +751,189 @@ out_free:
return err;
}
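+/*
+ * Temporarily park the heartbeat (while holding an engine-pm wakeref) so
+ * that background kernel-context requests do not interfere with the
+ * timeline seqno manipulation in the tests below.
+ */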
+static void engine_heartbeat_disable(struct intel_engine_cs *engine,
+ unsigned long *saved)
+{
+ *saved = engine->props.heartbeat_interval_ms;
+ engine->props.heartbeat_interval_ms = 0;
+
+ intel_engine_pm_get(engine);
+ intel_engine_park_heartbeat(engine);
+}
+
+static void engine_heartbeat_enable(struct intel_engine_cs *engine,
+ unsigned long saved)
+{
+ intel_engine_pm_put(engine);
+
+ engine->props.heartbeat_interval_ms = saved;
+}
+
+static int live_hwsp_rollover_kernel(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Run the host for long enough, and even the kernel context will
+ * see a seqno rollover.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct intel_context *ce = engine->kernel_context;
+ struct intel_timeline *tl = ce->timeline;
+ struct i915_request *rq[3] = {};
+ unsigned long heartbeat;
+ int i;
+
+ engine_heartbeat_disable(engine, &heartbeat);
+ if (intel_gt_wait_for_idle(gt, HZ / 2)) {
+ err = -EIO;
+ goto out;
+ }
+
+ GEM_BUG_ON(i915_active_fence_isset(&tl->last_request));
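+		/*
+		 * Rewind the timeline so that the requests created below
+		 * straddle the u32 seqno wrap point.
+		 */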
+ tl->seqno = 0;
+ timeline_rollback(tl);
+ timeline_rollback(tl);
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = i915_request_create(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+			pr_debug("%s: create fence.seqno:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ engine_heartbeat_enable(engine, heartbeat);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
+static int live_hwsp_rollover_user(void *arg)
+{
+ struct intel_gt *gt = arg;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
+ int err = 0;
+
+ /*
+ * Simulate a long running user context, and force the seqno wrap
+ * on the user's timeline.
+ */
+
+ for_each_engine(engine, gt, id) {
+ struct i915_request *rq[3] = {};
+ struct intel_timeline *tl;
+ struct intel_context *ce;
+ int i;
+
+ ce = intel_context_create(engine);
+ if (IS_ERR(ce))
+ return PTR_ERR(ce);
+
+ err = intel_context_alloc_state(ce);
+ if (err)
+ goto out;
+
+ tl = ce->timeline;
+ if (!tl->has_initial_breadcrumb || !tl->hwsp_cacheline)
+ goto out;
+
+ timeline_rollback(tl);
+ timeline_rollback(tl);
+ WRITE_ONCE(*(u32 *)tl->hwsp_seqno, tl->seqno);
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ struct i915_request *this;
+
+ this = intel_context_create_request(ce);
+ if (IS_ERR(this)) {
+ err = PTR_ERR(this);
+ goto out;
+ }
+
+			pr_debug("%s: create fence.seqno:%d\n",
+ engine->name,
+ lower_32_bits(this->fence.seqno));
+
+ GEM_BUG_ON(rcu_access_pointer(this->timeline) != tl);
+
+ rq[i] = i915_request_get(this);
+ i915_request_add(this);
+ }
+
+ /* We expected a wrap! */
+ GEM_BUG_ON(rq[2]->fence.seqno > rq[0]->fence.seqno);
+
+ if (i915_request_wait(rq[2], 0, HZ / 5) < 0) {
+ pr_err("Wait for timeline wrap timed out!\n");
+ err = -EIO;
+ goto out;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(rq); i++) {
+ if (!i915_request_completed(rq[i])) {
+ pr_err("Pre-wrap request not completed!\n");
+ err = -EINVAL;
+ goto out;
+ }
+ }
+
+out:
+ for (i = 0; i < ARRAY_SIZE(rq); i++)
+ i915_request_put(rq[i]);
+ intel_context_put(ce);
+ if (err)
+ break;
+ }
+
+ if (igt_flush_test(gt->i915))
+ err = -EIO;
+
+ return err;
+}
+
static int live_hwsp_recycle(void *arg)
{
struct intel_gt *gt = arg;
@@ -827,6 +1011,8 @@ int intel_timeline_live_selftests(struct drm_i915_private *i915)
SUBTEST(live_hwsp_engine),
SUBTEST(live_hwsp_alternate),
SUBTEST(live_hwsp_wrap),
+ SUBTEST(live_hwsp_rollover_kernel),
+ SUBTEST(live_hwsp_rollover_user),
};
if (intel_gt_is_wedged(&i915->gt))
diff --git a/drivers/gpu/drm/i915/gt/selftest_workarounds.c b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
index ac1921854cbf..5ed323254ee1 100644
--- a/drivers/gpu/drm/i915/gt/selftest_workarounds.c
+++ b/drivers/gpu/drm/i915/gt/selftest_workarounds.c
@@ -583,6 +583,15 @@ static int check_dirty_whitelist(struct intel_context *ce)
if (err)
goto err_request;
+ i915_vma_lock(scratch);
+ err = i915_request_await_object(rq, scratch->obj, true);
+ if (err == 0)
+ err = i915_vma_move_to_active(scratch, rq,
+ EXEC_OBJECT_WRITE);
+ i915_vma_unlock(scratch);
+ if (err)
+ goto err_request;
+
err = engine->emit_bb_start(rq,
batch->node.start, PAGE_SIZE,
0);
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.c b/drivers/gpu/drm/i915/gt/sysfs_engines.c
new file mode 100644
index 000000000000..8f9b2f33dbaf
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.c
@@ -0,0 +1,445 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kobject.h>
+#include <linux/sysfs.h>
+
+#include "i915_drv.h"
+#include "intel_engine.h"
+#include "intel_engine_heartbeat.h"
+#include "sysfs_engines.h"
+
+struct kobj_engine {
+ struct kobject base;
+ struct intel_engine_cs *engine;
+};
+
+static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
+{
+ return container_of(kobj, struct kobj_engine, base)->engine;
+}
+
+static ssize_t
+name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
+}
+
+static struct kobj_attribute name_attr =
+__ATTR(name, 0444, name_show, NULL);
+
+static ssize_t
+class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
+}
+
+static struct kobj_attribute class_attr =
+__ATTR(class, 0444, class_show, NULL);
+
+static ssize_t
+inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
+}
+
+static struct kobj_attribute inst_attr =
+__ATTR(instance, 0444, inst_show, NULL);
+
+static ssize_t
+mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
+}
+
+static struct kobj_attribute mmio_attr =
+__ATTR(mmio_base, 0444, mmio_show, NULL);
+
+static const char * const vcs_caps[] = {
+ [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static const char * const vecs_caps[] = {
+ [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
+};
+
+static ssize_t repr_trim(char *buf, ssize_t len)
+{
+ /* Trim off the trailing space and replace with a newline */
+ if (len > PAGE_SIZE)
+ len = PAGE_SIZE;
+ if (len > 0)
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t
+__caps_show(struct intel_engine_cs *engine,
+ u32 caps, char *buf, bool show_unknown)
+{
+ const char * const *repr;
+ int count, n;
+ ssize_t len;
+
+ BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));
+
+ switch (engine->class) {
+ case VIDEO_DECODE_CLASS:
+ repr = vcs_caps;
+ count = ARRAY_SIZE(vcs_caps);
+ break;
+
+ case VIDEO_ENHANCEMENT_CLASS:
+ repr = vecs_caps;
+ count = ARRAY_SIZE(vecs_caps);
+ break;
+
+ default:
+ repr = NULL;
+ count = 0;
+ break;
+ }
+ GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));
+
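+	/*
+	 * Print the name of each set capability bit; with show_unknown,
+	 * set bits without a known name are reported as "[index]".
+	 */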
+ len = 0;
+ for_each_set_bit(n,
+ (unsigned long *)&caps,
+ show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
+ if (n >= count || !repr[n]) {
+ if (GEM_WARN_ON(show_unknown))
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "[%x] ", n);
+ } else {
+ len += snprintf(buf + len, PAGE_SIZE - len,
+ "%s ", repr[n]);
+ }
+ if (GEM_WARN_ON(len >= PAGE_SIZE))
+ break;
+ }
+ return repr_trim(buf, len);
+}
+
+static ssize_t
+caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return __caps_show(engine, engine->uabi_capabilities, buf, true);
+}
+
+static struct kobj_attribute caps_attr =
+__ATTR(capabilities, 0444, caps_show, NULL);
+
+static ssize_t
+all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ return __caps_show(kobj_to_engine(kobj), -1, buf, false);
+}
+
+static struct kobj_attribute all_caps_attr =
+__ATTR(known_capabilities, 0444, all_caps_show, NULL);
+
+static ssize_t
+max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+	 * When waiting for a request, if it is currently being executed
+ * on the GPU, we busywait for a short while before sleeping. The
+ * premise is that most requests are short, and if it is already
+ * executing then there is a good chance that it will complete
+ * before we can setup the interrupt handler and go to sleep.
+ * We try to offset the cost of going to sleep, by first spinning
+ * on the request -- if it completed in less time than it would take
+	 * to go to sleep, process the interrupt and return to the client,
+ * then we have saved the client some latency, albeit at the cost
+ * of spinning on an expensive CPU core.
+ *
+ * While we try to avoid waiting at all for a request that is unlikely
+	 * to complete, deciding how long it is worth spinning for is an
+ * arbitrary decision: trading off power vs latency.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_nsecs(2))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);
+
+ return count;
+}
+
+static ssize_t
+max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
+}
+
+static struct kobj_attribute max_spin_attr =
+__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);
+
+static ssize_t
+timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+ * Execlists uses a scheduling quantum (a timeslice) to alternate
+ * execution between ready-to-run contexts of equal priority. This
+	 * ensures that all users (though only if they are of equal importance)
+ * have the opportunity to run and prevents livelocks where contexts
+ * may have implicit ordering due to userspace semaphores.
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.timeslice_duration_ms, duration);
+
+ if (execlists_active(&engine->execlists))
+ set_timer_ms(&engine->execlists.timer, duration);
+
+ return count;
+}
+
+static ssize_t
+timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
+}
+
+static struct kobj_attribute timeslice_duration_attr =
+__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);
+
+static ssize_t
+stop_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long duration;
+ int err;
+
+ /*
+	 * Allowing ourselves to sleep before a GPU reset after disabling
+	 * submission, even for a few milliseconds, gives an innocent context
+	 * the opportunity to clear the GPU before the reset occurs. However,
+ * how long to sleep depends on the typical non-preemptible duration
+ * (a similar problem to determining the ideal preempt-reset timeout
+ * or even the heartbeat interval).
+ */
+
+ err = kstrtoull(buf, 0, &duration);
+ if (err)
+ return err;
+
+ if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.stop_timeout_ms, duration);
+ return count;
+}
+
+static ssize_t
+stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
+}
+
+static struct kobj_attribute stop_timeout_attr =
+__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);
+
+static ssize_t
+preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long timeout;
+ int err;
+
+ /*
+ * After initialising a preemption request, we give the current
+ * resident a small amount of time to vacate the GPU. The preemption
+ * request is for a higher priority context and should be immediate to
+ * maintain high quality of service (and avoid priority inversion).
+ * However, the preemption granularity of the GPU can be quite coarse
+ * and so we need a compromise.
+ */
+
+ err = kstrtoull(buf, 0, &timeout);
+ if (err)
+ return err;
+
+ if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);
+
+ if (READ_ONCE(engine->execlists.pending[0]))
+ set_timer_ms(&engine->execlists.preempt, timeout);
+
+ return count;
+}
+
+static ssize_t
+preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
+ char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
+}
+
+static struct kobj_attribute preempt_timeout_attr =
+__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);
+
+static ssize_t
+heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+ unsigned long long delay;
+ int err;
+
+ /*
+ * We monitor the health of the system via periodic heartbeat pulses.
+ * The pulses also provide the opportunity to perform garbage
+ * collection. However, we interpret an incomplete pulse (a missed
+ * heartbeat) as an indication that the system is no longer responsive,
+ * i.e. hung, and perform an engine or full GPU reset. Given that the
+ * preemption granularity can be very coarse on a system, the optimal
+ * value for any workload is unknowable!
+ */
+
+ err = kstrtoull(buf, 0, &delay);
+ if (err)
+ return err;
+
+ if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
+ return -EINVAL;
+
+ err = intel_engine_set_heartbeat(engine, delay);
+ if (err)
+ return err;
+
+ return count;
+}
+
+static ssize_t
+heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
+{
+ struct intel_engine_cs *engine = kobj_to_engine(kobj);
+
+ return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
+}
+
+static struct kobj_attribute heartbeat_interval_attr =
+__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);
+
+static void kobj_engine_release(struct kobject *kobj)
+{
+ kfree(kobj);
+}
+
+static struct kobj_type kobj_engine_type = {
+ .release = kobj_engine_release,
+ .sysfs_ops = &kobj_sysfs_ops
+};
+
+static struct kobject *
+kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
+{
+ struct kobj_engine *ke;
+
+ ke = kzalloc(sizeof(*ke), GFP_KERNEL);
+ if (!ke)
+ return NULL;
+
+ kobject_init(&ke->base, &kobj_engine_type);
+ ke->engine = engine;
+
+ if (kobject_add(&ke->base, dir, "%s", engine->name)) {
+ kobject_put(&ke->base);
+ return NULL;
+ }
+
+ /* xfer ownership to sysfs tree */
+ return &ke->base;
+}
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915)
+{
+ static const struct attribute *files[] = {
+ &name_attr.attr,
+ &class_attr.attr,
+ &inst_attr.attr,
+ &mmio_attr.attr,
+ &caps_attr.attr,
+ &all_caps_attr.attr,
+ &max_spin_attr.attr,
+ &stop_timeout_attr.attr,
+#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
+ &heartbeat_interval_attr.attr,
+#endif
+ NULL
+ };
+
+ struct device *kdev = i915->drm.primary->kdev;
+ struct intel_engine_cs *engine;
+ struct kobject *dir;
+
+ dir = kobject_create_and_add("engine", &kdev->kobj);
+ if (!dir)
+ return;
+
+ for_each_uabi_engine(engine, i915) {
+ struct kobject *kobj;
+
+ kobj = kobj_engine(dir, engine);
+ if (!kobj)
+ goto err_engine;
+
+ if (sysfs_create_files(kobj, files))
+ goto err_object;
+
+ if (intel_engine_has_timeslices(engine) &&
+ sysfs_create_file(kobj, &timeslice_duration_attr.attr))
+ goto err_engine;
+
+ if (intel_engine_has_preempt_reset(engine) &&
+ sysfs_create_file(kobj, &preempt_timeout_attr.attr))
+ goto err_engine;
+
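+		/*
+		 * Error unwind: the gotos above jump into this never-taken
+		 * branch so that cleanup stays at the end of the loop body.
+		 */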
+ if (0) {
+err_object:
+ kobject_put(kobj);
+err_engine:
+ dev_err(kdev, "Failed to add sysfs engine '%s'\n",
+ engine->name);
+ break;
+ }
+ }
+}
diff --git a/drivers/gpu/drm/i915/gt/sysfs_engines.h b/drivers/gpu/drm/i915/gt/sysfs_engines.h
new file mode 100644
index 000000000000..9546fffe03a7
--- /dev/null
+++ b/drivers/gpu/drm/i915/gt/sysfs_engines.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef INTEL_ENGINE_SYSFS_H
+#define INTEL_ENGINE_SYSFS_H
+
+struct drm_i915_private;
+
+void intel_engines_add_sysfs(struct drm_i915_private *i915);
+
+#endif /* INTEL_ENGINE_SYSFS_H */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
index 5d00a3b2d914..819f09ef51fc 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c
@@ -207,7 +207,7 @@ static u32 guc_ctl_feature_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (!intel_guc_is_submission_supported(guc))
+ if (!intel_guc_submission_is_used(guc))
flags |= GUC_CTL_DISABLE_SCHEDULER;
return flags;
@@ -217,7 +217,7 @@ static u32 guc_ctl_ctxinfo_flags(struct intel_guc *guc)
{
u32 flags = 0;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
u32 ctxnum, base;
base = intel_guc_ggtt_offset(guc, guc->stage_desc_pool);
@@ -333,7 +333,7 @@ int intel_guc_init(struct intel_guc *guc)
ret = intel_uc_fw_init(&guc->fw);
if (ret)
- goto err_fetch;
+ goto out;
ret = intel_guc_log_create(&guc->log);
if (ret)
@@ -348,7 +348,7 @@ int intel_guc_init(struct intel_guc *guc)
if (ret)
goto err_ads;
- if (intel_guc_is_submission_supported(guc)) {
+ if (intel_guc_submission_is_used(guc)) {
/*
* This is stuff we need to have available at fw load time
* if we are planning to enable submission later
@@ -364,6 +364,8 @@ int intel_guc_init(struct intel_guc *guc)
/* We need to notify the guc whenever we change the GGTT */
i915_ggtt_enable_guc(gt->ggtt);
+ intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
err_ct:
@@ -374,9 +376,8 @@ err_log:
intel_guc_log_destroy(&guc->log);
err_fw:
intel_uc_fw_fini(&guc->fw);
-err_fetch:
- intel_uc_fw_cleanup_fetch(&guc->fw);
- DRM_DEV_DEBUG_DRIVER(gt->i915->drm.dev, "failed with %d\n", ret);
+out:
+ i915_probe_error(gt->i915, "failed with %d\n", ret);
return ret;
}
@@ -384,12 +385,12 @@ void intel_guc_fini(struct intel_guc *guc)
{
struct intel_gt *gt = guc_to_gt(guc);
- if (!intel_uc_fw_is_available(&guc->fw))
+ if (!intel_uc_fw_is_loadable(&guc->fw))
return;
i915_ggtt_disable_guc(gt->ggtt);
- if (intel_guc_is_submission_supported(guc))
+ if (intel_guc_submission_is_used(guc))
intel_guc_submission_fini(guc);
intel_guc_ct_fini(&guc->ct);
@@ -397,9 +398,6 @@ void intel_guc_fini(struct intel_guc *guc)
intel_guc_ads_destroy(guc);
intel_guc_log_destroy(&guc->log);
intel_uc_fw_fini(&guc->fw);
- intel_uc_fw_cleanup_fetch(&guc->fw);
-
- intel_uc_fw_change_status(&guc->fw, INTEL_UC_FIRMWARE_DISABLED);
}
/*
@@ -544,7 +542,7 @@ int intel_guc_suspend(struct intel_guc *guc)
* If GuC communication is enabled but submission is not supported,
* we do not need to suspend the GuC.
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
/*
@@ -609,7 +607,7 @@ int intel_guc_resume(struct intel_guc *guc)
* we do not need to resume the GuC but we do need to enable the
* GuC communication on resume (above).
*/
- if (!intel_guc_submission_is_enabled(guc))
+ if (!intel_guc_submission_is_used(guc) || !intel_guc_is_ready(guc))
return 0;
return intel_guc_send(guc, action, ARRAY_SIZE(action));
@@ -678,8 +676,8 @@ struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size)
if (IS_ERR(vma))
goto err;
- flags = PIN_GLOBAL | PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
- ret = i915_vma_pin(vma, 0, 0, flags);
+ flags = PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
+ ret = i915_ggtt_pin(vma, 0, flags);
if (ret) {
vma = ERR_PTR(ret);
goto err;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
index 910d49590068..4594ccbeaa34 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h
@@ -39,7 +39,7 @@ struct intel_guc {
void (*disable)(struct intel_guc *guc);
} interrupts;
- bool submission_supported;
+ bool submission_selected;
struct i915_vma *ads_vma;
struct __guc_ads_blob *ads_blob;
@@ -143,29 +143,36 @@ static inline bool intel_guc_is_supported(struct intel_guc *guc)
return intel_uc_fw_is_supported(&guc->fw);
}
-static inline bool intel_guc_is_enabled(struct intel_guc *guc)
+static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
return intel_uc_fw_is_enabled(&guc->fw);
}
-static inline bool intel_guc_is_running(struct intel_guc *guc)
+static inline bool intel_guc_is_used(struct intel_guc *guc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&guc->fw);
+}
+
+static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
return intel_uc_fw_is_running(&guc->fw);
}
+static inline bool intel_guc_is_ready(struct intel_guc *guc)
+{
+ return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
+}
+
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
intel_uc_fw_sanitize(&guc->fw);
+ intel_guc_ct_sanitize(&guc->ct);
guc->mmio_msg = 0;
return 0;
}
-static inline bool intel_guc_is_submission_supported(struct intel_guc *guc)
-{
- return guc->submission_supported;
-}
-
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
spin_lock_irq(&guc->irq_lock);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
index c6f971a049f9..11742fca0e9e 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.c
@@ -5,11 +5,15 @@
#include "i915_drv.h"
#include "intel_guc_ct.h"
+#include "gt/intel_gt.h"
+#define CT_ERROR(_ct, _fmt, ...) \
+ DRM_DEV_ERROR(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#ifdef CONFIG_DRM_I915_DEBUG_GUC
-#define CT_DEBUG_DRIVER(...) DRM_DEBUG_DRIVER(__VA_ARGS__)
+#define CT_DEBUG(_ct, _fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER(ct_to_dev(_ct), "CT: " _fmt, ##__VA_ARGS__)
#else
-#define CT_DEBUG_DRIVER(...) do { } while (0)
+#define CT_DEBUG(...) do { } while (0)
#endif
struct ct_request {
@@ -48,6 +52,21 @@ static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
return container_of(ct, struct intel_guc, ct);
}
+static inline struct intel_gt *ct_to_gt(struct intel_guc_ct *ct)
+{
+ return guc_to_gt(ct_to_guc(ct));
+}
+
+static inline struct drm_i915_private *ct_to_i915(struct intel_guc_ct *ct)
+{
+ return ct_to_gt(ct)->i915;
+}
+
+static inline struct device *ct_to_dev(struct intel_guc_ct *ct)
+{
+ return ct_to_i915(ct)->drm.dev;
+}
+
static inline const char *guc_ct_buffer_type_to_str(u32 type)
{
switch (type) {
@@ -63,7 +82,6 @@ static inline const char *guc_ct_buffer_type_to_str(u32 type)
static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
u32 cmds_addr, u32 size)
{
- CT_DEBUG_DRIVER("CT: init addr=%#x size=%u\n", cmds_addr, size);
memset(desc, 0, sizeof(*desc));
desc->addr = cmds_addr;
desc->size = size;
@@ -72,8 +90,6 @@ static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
- CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
- desc, desc->head, desc->tail);
desc->head = 0;
desc->tail = 0;
desc->is_in_error = 0;
@@ -89,31 +105,40 @@ static int guc_action_register_ct_buffer(struct intel_guc *guc,
sizeof(struct guc_ct_buffer_desc),
type
};
- int err;
/* Can't use generic send(), CT registration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
- if (err)
- DRM_ERROR("CT: register %s buffer failed; err=%d\n",
- guc_ct_buffer_type_to_str(type), err);
+ return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_register_buffer(struct intel_guc_ct *ct, u32 desc_addr, u32 type)
+{
+ int err = guc_action_register_ct_buffer(ct_to_guc(ct), desc_addr, type);
+
+ if (unlikely(err))
+ CT_ERROR(ct, "Failed to register %s buffer (err=%d)\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
-static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
- u32 type)
+static int guc_action_deregister_ct_buffer(struct intel_guc *guc, u32 type)
{
u32 action[] = {
INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
CTB_OWNER_HOST,
type
};
- int err;
/* Can't use generic send(), CT deregistration must go over MMIO */
- err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
- if (err)
- DRM_ERROR("CT: deregister %s buffer failed; err=%d\n",
- guc_ct_buffer_type_to_str(type), err);
+ return intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
+}
+
+static int ct_deregister_buffer(struct intel_guc_ct *ct, u32 type)
+{
+ int err = guc_action_deregister_ct_buffer(ct_to_guc(ct), type);
+
+ if (unlikely(err))
+ CT_ERROR(ct, "Failed to deregister %s buffer (err=%d)\n",
+ guc_ct_buffer_type_to_str(type), err);
return err;
}
@@ -157,13 +182,12 @@ int intel_guc_ct_init(struct intel_guc_ct *ct)
*/
err = intel_guc_allocate_and_map_vma(guc, PAGE_SIZE, &ct->vma, &blob);
- if (err) {
- DRM_ERROR("CT: channel allocation failed; err=%d\n", err);
+ if (unlikely(err)) {
+ CT_ERROR(ct, "Failed to allocate CT channel (err=%d)\n", err);
return err;
}
- CT_DEBUG_DRIVER("CT: vma base=%#x\n",
- intel_guc_ggtt_offset(guc, ct->vma));
+ CT_DEBUG(ct, "vma base=%#x\n", intel_guc_ggtt_offset(guc, ct->vma));
/* store pointers to desc and cmds */
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
@@ -197,7 +221,7 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct)
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
struct intel_guc *guc = ct_to_guc(ct);
- u32 base;
+ u32 base, cmds, size;
int err;
int i;
@@ -212,23 +236,23 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
*/
for (i = 0; i < ARRAY_SIZE(ct->ctbs); i++) {
GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
- guc_ct_buffer_desc_init(ct->ctbs[i].desc,
- base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
- PAGE_SIZE/4);
+ cmds = base + PAGE_SIZE / 4 * i + PAGE_SIZE / 2;
+ size = PAGE_SIZE / 4;
+ CT_DEBUG(ct, "%d: addr=%#x size=%u\n", i, cmds, size);
+ guc_ct_buffer_desc_init(ct->ctbs[i].desc, cmds, size);
}
- /* register buffers, starting wirh RECV buffer
- * descriptors are in first half of the blob
+ /*
+	 * Register both CT buffers, starting with the RECV buffer.
+	 * Descriptors are in the first half of the blob.
*/
- err = guc_action_register_ct_buffer(guc,
- base + PAGE_SIZE/4 * CTB_RECV,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_RECV,
+ INTEL_GUC_CT_BUFFER_TYPE_RECV);
if (unlikely(err))
goto err_out;
- err = guc_action_register_ct_buffer(guc,
- base + PAGE_SIZE/4 * CTB_SEND,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ err = ct_register_buffer(ct, base + PAGE_SIZE / 4 * CTB_SEND,
+ INTEL_GUC_CT_BUFFER_TYPE_SEND);
if (unlikely(err))
goto err_deregister;
@@ -237,10 +261,9 @@ int intel_guc_ct_enable(struct intel_guc_ct *ct)
return 0;
err_deregister:
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
- DRM_ERROR("CT: can't open channel; err=%d\n", err);
+	CT_ERROR(ct, "Failed to open CT channel (err=%d)\n", err);
return err;
}
@@ -256,18 +279,16 @@ void intel_guc_ct_disable(struct intel_guc_ct *ct)
ct->enabled = false;
- if (intel_guc_is_running(guc)) {
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_SEND);
- guc_action_deregister_ct_buffer(guc,
- INTEL_GUC_CT_BUFFER_TYPE_RECV);
+ if (intel_guc_is_fw_running(guc)) {
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_SEND);
+ ct_deregister_buffer(ct, INTEL_GUC_CT_BUFFER_TYPE_RECV);
}
}
static u32 ct_get_next_fence(struct intel_guc_ct *ct)
{
/* For now it's trivial */
- return ++ct->requests.next_fence;
+ return ++ct->requests.last_fence;
}
/**
@@ -288,25 +309,33 @@ static u32 ct_get_next_fence(struct intel_guc_ct *ct)
* ^-----------------len-------------------^
*/
-static int ctb_write(struct intel_guc_ct_buffer *ctb,
- const u32 *action,
- u32 len /* in dwords */,
- u32 fence,
- bool want_response)
+static int ct_write(struct intel_guc_ct *ct,
+ const u32 *action,
+ u32 len /* in dwords */,
+ u32 fence,
+ bool want_response)
{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_SEND];
struct guc_ct_buffer_desc *desc = ctb->desc;
- u32 head = desc->head / 4; /* in dwords */
- u32 tail = desc->tail / 4; /* in dwords */
- u32 size = desc->size / 4; /* in dwords */
- u32 used; /* in dwords */
+ u32 head = desc->head;
+ u32 tail = desc->tail;
+ u32 size = desc->size;
+ u32 used;
u32 header;
u32 *cmds = ctb->cmds;
unsigned int i;
- GEM_BUG_ON(desc->size % 4);
- GEM_BUG_ON(desc->head % 4);
- GEM_BUG_ON(desc->tail % 4);
- GEM_BUG_ON(tail >= size);
+ if (unlikely(desc->is_in_error))
+ return -EPIPE;
+
+ if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ (tail | head) >= size))
+ goto corrupted;
+
+ /* later calculations will be done in dwords */
+ head /= 4;
+ tail /= 4;
+ size /= 4;
/*
* tail == head condition indicates empty. GuC FW does not support
@@ -332,9 +361,8 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
(want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
(action[0] << GUC_CT_MSG_ACTION_SHIFT);
- CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
- 4, &header, 4, &fence,
- 4 * (len - 1), &action[1]);
+ CT_DEBUG(ct, "writing %*ph %*ph %*ph\n",
+ 4, &header, 4, &fence, 4 * (len - 1), &action[1]);
cmds[tail] = header;
tail = (tail + 1) % size;
@@ -346,12 +374,17 @@ static int ctb_write(struct intel_guc_ct_buffer *ctb,
cmds[tail] = action[i];
tail = (tail + 1) % size;
}
+ GEM_BUG_ON(tail > size);
/* now update desc tail (back in bytes) */
desc->tail = tail * 4;
- GEM_BUG_ON(desc->tail > desc->size);
-
return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+ desc->addr, desc->head, desc->tail, desc->size);
+ desc->is_in_error = 1;
+ return -EPIPE;
}
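
A minimal sketch of the dword ring accounting that ct_write() (and ct_read() below) rely on, under the assumption stated in the code that tail == head means empty and the firmware cannot use the entire buffer, so one slot always stays free:

#include <stdio.h>

/* head/tail/size arrive as byte offsets in the descriptor and are divided
 * by 4 first; the reserved slot keeps "full" distinguishable from "empty". */
static unsigned int ctb_free_dwords(unsigned int head, unsigned int tail,
				    unsigned int size)
{
	unsigned int used = (tail >= head) ? tail - head
					   : tail + size - head;

	return size - used - 1;
}

int main(void)
{
	/* 1 KiB buffer = 256 dwords; writer has wrapped past the end */
	printf("free=%u\n", ctb_free_dwords(200, 10, 256)); /* used=66 -> 189 */
	return 0;
}
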
/**
@@ -469,7 +502,7 @@ static int ct_send(struct intel_guc_ct *ct,
list_add_tail(&request.link, &ct->requests.pending);
spin_unlock_irqrestore(&ct->requests.lock, flags);
- err = ctb_write(ctb, action, len, fence, !!response_buf);
+ err = ct_write(ct, action, len, fence, !!response_buf);
if (unlikely(err))
goto unlink;
@@ -526,11 +559,11 @@ int intel_guc_ct_send(struct intel_guc_ct *ct, const u32 *action, u32 len,
ret = ct_send(ct, action, len, response_buf, response_buf_size, &status);
if (unlikely(ret < 0)) {
- DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
- action[0], ret, status);
+ CT_ERROR(ct, "Sending action %#x failed (err=%d status=%#X)\n",
+ action[0], ret, status);
} else if (unlikely(ret)) {
- CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
- action[0], ret, ret);
+ CT_DEBUG(ct, "send action %#x returned %d (%#x)\n",
+ action[0], ret, ret);
}
mutex_unlock(&guc->send_mutex);
@@ -552,22 +585,29 @@ static inline bool ct_header_is_response(u32 header)
return !!(header & GUC_CT_MSG_IS_RESPONSE);
}
-static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
+static int ct_read(struct intel_guc_ct *ct, u32 *data)
{
+ struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
struct guc_ct_buffer_desc *desc = ctb->desc;
- u32 head = desc->head / 4; /* in dwords */
- u32 tail = desc->tail / 4; /* in dwords */
- u32 size = desc->size / 4; /* in dwords */
+ u32 head = desc->head;
+ u32 tail = desc->tail;
+ u32 size = desc->size;
u32 *cmds = ctb->cmds;
- s32 available; /* in dwords */
+ s32 available;
unsigned int len;
unsigned int i;
- GEM_BUG_ON(desc->size % 4);
- GEM_BUG_ON(desc->head % 4);
- GEM_BUG_ON(desc->tail % 4);
- GEM_BUG_ON(tail >= size);
- GEM_BUG_ON(head >= size);
+ if (unlikely(desc->is_in_error))
+ return -EPIPE;
+
+ if (unlikely(!IS_ALIGNED(head | tail | size, 4) ||
+ (tail | head) >= size))
+ goto corrupted;
+
+ /* later calculations will be done in dwords */
+ head /= 4;
+ tail /= 4;
+ size /= 4;
/* tail == head condition indicates empty */
available = tail - head;
@@ -577,7 +617,7 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
/* beware of buffer wrap case */
if (unlikely(available < 0))
available += size;
- CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
+ CT_DEBUG(ct, "available %d (%u:%u)\n", available, head, tail);
GEM_BUG_ON(available < 0);
data[0] = cmds[head];
@@ -586,23 +626,29 @@ static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
/* message len with header */
len = ct_header_get_len(data[0]) + 1;
if (unlikely(len > (u32)available)) {
- DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
- 4, data,
- 4 * (head + available - 1 > size ?
- size - head : available - 1), &cmds[head],
- 4 * (head + available - 1 > size ?
- available - 1 - size + head : 0), &cmds[0]);
- return -EPROTO;
+ CT_ERROR(ct, "Incomplete message %*ph %*ph %*ph\n",
+ 4, data,
+ 4 * (head + available - 1 > size ?
+ size - head : available - 1), &cmds[head],
+ 4 * (head + available - 1 > size ?
+ available - 1 - size + head : 0), &cmds[0]);
+ goto corrupted;
}
for (i = 1; i < len; i++) {
data[i] = cmds[head];
head = (head + 1) % size;
}
- CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);
+ CT_DEBUG(ct, "received %*ph\n", 4 * len, data);
desc->head = head * 4;
return 0;
+
+corrupted:
+ CT_ERROR(ct, "Corrupted descriptor addr=%#x head=%u tail=%u size=%u\n",
+ desc->addr, desc->head, desc->tail, desc->size);
+ desc->is_in_error = 1;
+ return -EPIPE;
}
/**
@@ -627,7 +673,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
u32 header = msg[0];
u32 len = ct_header_get_len(header);
- u32 msglen = len + 1; /* total message length including header */
+ u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
u32 fence;
u32 status;
u32 datalen;
@@ -639,7 +685,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
/* Response payload shall at least include fence and status */
if (unlikely(len < 2)) {
- DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
return -EPROTO;
}
@@ -649,22 +695,22 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
/* Format of the status follows RESPONSE message */
if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
- DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Corrupted response %*ph\n", msgsize, msg);
return -EPROTO;
}
- CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);
+ CT_DEBUG(ct, "response fence %u status %#x\n", fence, status);
spin_lock(&ct->requests.lock);
list_for_each_entry(req, &ct->requests.pending, link) {
if (unlikely(fence != req->fence)) {
- CT_DEBUG_DRIVER("CT: request %u awaits response\n",
- req->fence);
+ CT_DEBUG(ct, "request %u awaits response\n",
+ req->fence);
continue;
}
if (unlikely(datalen > req->response_len)) {
- DRM_ERROR("CT: response %u too long %*ph\n",
- req->fence, 4 * msglen, msg);
+ CT_ERROR(ct, "Response for %u is too long %*ph\n",
+ req->fence, msgsize, msg);
datalen = 0;
}
if (datalen)
@@ -677,7 +723,7 @@ static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
spin_unlock(&ct->requests.lock);
if (!found)
- DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Unsolicited response %*ph\n", msgsize, msg);
return 0;
}
@@ -687,7 +733,7 @@ static void ct_process_request(struct intel_guc_ct *ct,
struct intel_guc *guc = ct_to_guc(ct);
int ret;
- CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);
+ CT_DEBUG(ct, "request %x %*ph\n", action, 4 * len, payload);
switch (action) {
case INTEL_GUC_ACTION_DEFAULT:
@@ -698,8 +744,8 @@ static void ct_process_request(struct intel_guc_ct *ct,
default:
fail_unexpected:
- DRM_ERROR("CT: unexpected request %x %*ph\n",
- action, 4 * len, payload);
+ CT_ERROR(ct, "Unexpected request %x %*ph\n",
+ action, 4 * len, payload);
break;
}
}
@@ -767,18 +813,18 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
u32 header = msg[0];
u32 len = ct_header_get_len(header);
- u32 msglen = len + 1; /* total message length including header */
+ u32 msgsize = (len + 1) * sizeof(u32); /* msg size in bytes w/header */
struct ct_incoming_request *request;
unsigned long flags;
GEM_BUG_ON(ct_header_is_response(header));
- request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
+ request = kmalloc(sizeof(*request) + msgsize, GFP_ATOMIC);
if (unlikely(!request)) {
- DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
+ CT_ERROR(ct, "Dropping request %*ph\n", msgsize, msg);
return 0; /* XXX: -ENOMEM ? */
}
- memcpy(request->msg, msg, 4 * msglen);
+ memcpy(request->msg, msg, msgsize);
spin_lock_irqsave(&ct->requests.lock, flags);
list_add_tail(&request->link, &ct->requests.incoming);
@@ -794,7 +840,6 @@ static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
*/
void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
{
- struct intel_guc_ct_buffer *ctb = &ct->ctbs[CTB_RECV];
u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
int err = 0;
@@ -804,7 +849,7 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
}
do {
- err = ctb_read(ctb, msg);
+ err = ct_read(ct, msg);
if (err)
break;
@@ -813,10 +858,4 @@ void intel_guc_ct_event_handler(struct intel_guc_ct *ct)
else
err = ct_handle_request(ct, msg);
} while (!err);
-
- if (GEM_WARN_ON(err == -EPROTO)) {
- DRM_ERROR("CT: corrupted message detected!\n");
- ctb->desc->is_in_error = 1;
- }
}
-
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
index 3e7fe237cfa5..494a51a5200f 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ct.h
@@ -49,7 +49,7 @@ struct intel_guc_ct {
struct intel_guc_ct_buffer ctbs[2];
struct {
- u32 next_fence; /* fence to be used with next request to send */
+ u32 last_fence; /* last fence used to send request */
spinlock_t lock; /* protects pending requests list */
struct list_head pending; /* requests waiting for response */
@@ -65,6 +65,11 @@ void intel_guc_ct_fini(struct intel_guc_ct *ct);
int intel_guc_ct_enable(struct intel_guc_ct *ct);
void intel_guc_ct_disable(struct intel_guc_ct *ct);
+static inline void intel_guc_ct_sanitize(struct intel_guc_ct *ct)
+{
+ ct->enabled = false;
+}
+
static inline bool intel_guc_ct_enabled(struct intel_guc_ct *ct)
{
return ct->enabled;
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
index 9e42324fdecd..fe7778c28d2d 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c
@@ -456,9 +456,7 @@ static void guc_reset_cancel(struct intel_engine_cs *engine)
/* Mark all executing requests as skipped. */
list_for_each_entry(rq, &engine->active.requests, sched.link) {
- if (!i915_request_signaled(rq))
- dma_fence_set_error(&rq->fence, -EIO);
-
+ i915_request_set_error_once(rq, -EIO);
i915_request_mark_complete(rq);
}
@@ -660,12 +658,9 @@ void intel_guc_submission_disable(struct intel_guc *guc)
guc_proc_desc_fini(guc);
}
-static bool __guc_submission_support(struct intel_guc *guc)
+static bool __guc_submission_selected(struct intel_guc *guc)
{
- /* XXX: GuC submission is unavailable for now */
- return false;
-
- if (!intel_guc_is_supported(guc))
+ if (!intel_guc_submission_is_supported(guc))
return false;
return i915_modparams.enable_guc & ENABLE_GUC_SUBMISSION;
@@ -673,7 +668,7 @@ static bool __guc_submission_support(struct intel_guc *guc)
void intel_guc_submission_init_early(struct intel_guc *guc)
{
- guc->submission_supported = __guc_submission_support(guc);
+ guc->submission_selected = __guc_submission_selected(guc);
}
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine)
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
index e402a2932592..4cf9d3e50263 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.h
@@ -8,7 +8,8 @@
#include <linux/types.h>
-struct intel_guc;
+#include "intel_guc.h"
+
struct intel_engine_cs;
void intel_guc_submission_init_early(struct intel_guc *guc);
@@ -20,4 +21,20 @@ int intel_guc_preempt_work_create(struct intel_guc *guc);
void intel_guc_preempt_work_destroy(struct intel_guc *guc);
bool intel_engine_in_guc_submission_mode(const struct intel_engine_cs *engine);
+static inline bool intel_guc_submission_is_supported(struct intel_guc *guc)
+{
+ /* XXX: GuC submission is unavailable for now */
+ return false;
+}
+
+static inline bool intel_guc_submission_is_wanted(struct intel_guc *guc)
+{
+ return guc->submission_selected;
+}
+
+static inline bool intel_guc_submission_is_used(struct intel_guc *guc)
+{
+ return intel_guc_is_used(guc) && intel_guc_submission_is_wanted(guc);
+}
+
#endif
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.c b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
index 32a069841c14..a74b65694512 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.c
@@ -121,19 +121,20 @@ int intel_huc_init(struct intel_huc *huc)
if (err)
goto out_fini;
+ intel_uc_fw_change_status(&huc->fw, INTEL_UC_FIRMWARE_LOADABLE);
+
return 0;
out_fini:
intel_uc_fw_fini(&huc->fw);
out:
- intel_uc_fw_cleanup_fetch(&huc->fw);
- DRM_DEV_DEBUG_DRIVER(i915->drm.dev, "failed with %d\n", err);
+ i915_probe_error(i915, "failed with %d\n", err);
return err;
}
void intel_huc_fini(struct intel_huc *huc)
{
- if (!intel_uc_fw_is_available(&huc->fw))
+ if (!intel_uc_fw_is_loadable(&huc->fw))
return;
intel_huc_rsa_data_destroy(huc);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc.h b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
index 644c059fe01d..a40b9cfc6c22 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc.h
@@ -41,11 +41,17 @@ static inline bool intel_huc_is_supported(struct intel_huc *huc)
return intel_uc_fw_is_supported(&huc->fw);
}
-static inline bool intel_huc_is_enabled(struct intel_huc *huc)
+static inline bool intel_huc_is_wanted(struct intel_huc *huc)
{
return intel_uc_fw_is_enabled(&huc->fw);
}
+static inline bool intel_huc_is_used(struct intel_huc *huc)
+{
+ GEM_BUG_ON(__intel_uc_fw_status(&huc->fw) == INTEL_UC_FIRMWARE_SELECTED);
+ return intel_uc_fw_is_available(&huc->fw);
+}
+
static inline bool intel_huc_is_authenticated(struct intel_huc *huc)
{
return intel_uc_fw_is_running(&huc->fw);
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
index eee193bf2cc4..9cdf4cbe691c 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_huc_fw.c
@@ -20,7 +20,7 @@ void intel_huc_fw_init_early(struct intel_huc *huc)
struct drm_i915_private *i915 = gt->i915;
intel_uc_fw_init_early(&huc->fw, INTEL_UC_FW_TYPE_HUC,
- intel_uc_uses_guc(uc),
+ intel_uc_wants_guc(uc),
INTEL_INFO(i915)->platform, INTEL_REVID(i915));
}
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.c b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
index 64934a876a50..a4cbe06e06bd 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.c
@@ -48,17 +48,17 @@ static void __confirm_options(struct intel_uc *uc)
DRM_DEV_DEBUG_DRIVER(i915->drm.dev,
"enable_guc=%d (guc:%s submission:%s huc:%s)\n",
i915_modparams.enable_guc,
- yesno(intel_uc_uses_guc(uc)),
- yesno(intel_uc_uses_guc_submission(uc)),
- yesno(intel_uc_uses_huc(uc)));
+ yesno(intel_uc_wants_guc(uc)),
+ yesno(intel_uc_wants_guc_submission(uc)),
+ yesno(intel_uc_wants_huc(uc)));
if (i915_modparams.enable_guc == -1)
return;
if (i915_modparams.enable_guc == 0) {
- GEM_BUG_ON(intel_uc_uses_guc(uc));
- GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
- GEM_BUG_ON(intel_uc_uses_huc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc(uc));
+ GEM_BUG_ON(intel_uc_wants_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_wants_huc(uc));
return;
}
@@ -93,7 +93,7 @@ void intel_uc_init_early(struct intel_uc *uc)
__confirm_options(uc);
- if (intel_uc_uses_guc(uc))
+ if (intel_uc_wants_guc(uc))
uc->ops = &uc_ops_on;
else
uc->ops = &uc_ops_off;
@@ -257,13 +257,13 @@ static void __uc_fetch_firmwares(struct intel_uc *uc)
{
int err;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
err = intel_uc_fw_fetch(&uc->guc.fw);
if (err)
return;
- if (intel_uc_uses_huc(uc))
+ if (intel_uc_wants_huc(uc))
intel_uc_fw_fetch(&uc->huc.fw);
}
@@ -273,25 +273,38 @@ static void __uc_cleanup_firmwares(struct intel_uc *uc)
intel_uc_fw_cleanup_fetch(&uc->guc.fw);
}
-static void __uc_init(struct intel_uc *uc)
+static int __uc_init(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
struct intel_huc *huc = &uc->huc;
int ret;
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
+
+ if (!intel_uc_uses_guc(uc))
+ return 0;
+
+ if (i915_inject_probe_failure(uc_to_gt(uc)->i915))
+ return -ENOMEM;
/* XXX: GuC submission is unavailable for now */
- GEM_BUG_ON(intel_uc_supports_guc_submission(uc));
+ GEM_BUG_ON(intel_uc_uses_guc_submission(uc));
ret = intel_guc_init(guc);
- if (ret) {
- intel_uc_fw_cleanup_fetch(&huc->fw);
- return;
+ if (ret)
+ return ret;
+
+ if (intel_uc_uses_huc(uc)) {
+ ret = intel_huc_init(huc);
+ if (ret)
+ goto out_guc;
}
- if (intel_uc_uses_huc(uc))
- intel_huc_init(huc);
+ return 0;
+
+out_guc:
+ intel_guc_fini(guc);
+ return ret;
}
static void __uc_fini(struct intel_uc *uc)
@@ -402,12 +415,12 @@ static int __uc_init_hw(struct intel_uc *uc)
int ret, attempts;
GEM_BUG_ON(!intel_uc_supports_guc(uc));
- GEM_BUG_ON(!intel_uc_uses_guc(uc));
+ GEM_BUG_ON(!intel_uc_wants_guc(uc));
- if (!intel_uc_fw_is_available(&guc->fw)) {
+ if (!intel_uc_fw_is_loadable(&guc->fw)) {
ret = __uc_check_hw(uc) ||
intel_uc_fw_is_overridden(&guc->fw) ||
- intel_uc_supports_guc_submission(uc) ?
+ intel_uc_wants_guc_submission(uc) ?
intel_uc_fw_status_to_error(guc->fw.status) : 0;
goto err_out;
}
@@ -459,14 +472,14 @@ static int __uc_init_hw(struct intel_uc *uc)
if (ret)
goto err_communication;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_enable(guc);
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
intel_uc_fw_type_repr(INTEL_UC_FW_TYPE_GUC), guc->fw.path,
guc->fw.major_ver_found, guc->fw.minor_ver_found,
"submission",
- enableddisabled(intel_uc_supports_guc_submission(uc)));
+ enableddisabled(intel_uc_uses_guc_submission(uc)));
if (intel_uc_uses_huc(uc)) {
dev_info(i915->drm.dev, "%s firmware %s version %u.%u %s:%s\n",
@@ -505,10 +518,10 @@ static void __uc_fini_hw(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_fw_running(guc))
return;
- if (intel_uc_supports_guc_submission(uc))
+ if (intel_uc_uses_guc_submission(uc))
intel_guc_submission_disable(guc);
if (guc_communication_enabled(guc))
@@ -527,7 +540,7 @@ void intel_uc_reset_prepare(struct intel_uc *uc)
{
struct intel_guc *guc = &uc->guc;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
guc_disable_communication(guc);
@@ -539,7 +552,7 @@ void intel_uc_runtime_suspend(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
int err;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
err = intel_guc_suspend(guc);
@@ -554,7 +567,7 @@ void intel_uc_suspend(struct intel_uc *uc)
struct intel_guc *guc = &uc->guc;
intel_wakeref_t wakeref;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return;
with_intel_runtime_pm(uc_to_gt(uc)->uncore->rpm, wakeref)
@@ -566,7 +579,7 @@ static int __uc_resume(struct intel_uc *uc, bool enable_communication)
struct intel_guc *guc = &uc->guc;
int err;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_fw_running(guc))
return 0;
/* Make sure we enable communication if and only if it's disabled */
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc.h b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
index 49c913524686..5ae7b50b7dc1 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc.h
@@ -7,6 +7,7 @@
#define _INTEL_UC_H_
#include "intel_guc.h"
+#include "intel_guc_submission.h"
#include "intel_huc.h"
#include "i915_params.h"
@@ -16,7 +17,7 @@ struct intel_uc_ops {
int (*sanitize)(struct intel_uc *uc);
void (*init_fw)(struct intel_uc *uc);
void (*fini_fw)(struct intel_uc *uc);
- void (*init)(struct intel_uc *uc);
+ int (*init)(struct intel_uc *uc);
void (*fini)(struct intel_uc *uc);
int (*init_hw)(struct intel_uc *uc);
void (*fini_hw)(struct intel_uc *uc);
@@ -40,35 +41,44 @@ void intel_uc_runtime_suspend(struct intel_uc *uc);
int intel_uc_resume(struct intel_uc *uc);
int intel_uc_runtime_resume(struct intel_uc *uc);
-static inline bool intel_uc_supports_guc(struct intel_uc *uc)
-{
- return intel_guc_is_supported(&uc->guc);
-}
-
-static inline bool intel_uc_uses_guc(struct intel_uc *uc)
-{
- return intel_guc_is_enabled(&uc->guc);
-}
+/*
+ * We need to know as early as possible if we're going to use GuC or not to
+ * take the correct setup paths. Additionally, once we've started loading the
+ * GuC, it is unsafe to keep executing without it because some parts of the HW,
+ * a subset of which is not cleaned on GT reset, will start expecting the GuC FW
+ * to be running.
+ * To solve both these requirements, we commit to using the microcontrollers if
+ * the relevant modparam is set and the blobs are found on the system. At this
+ * stage, the only thing that can stop us from attempting to load the blobs on
+ * the HW and use them is a fundamental issue (e.g. no memory for our
+ * structures); if we hit such a problem during driver load we're broken even
+ * without GuC, so there is no point in trying to fall back.
+ *
+ * Given the above, we can be in one of 4 states, with the last one implying
+ * we're committed to using the microcontroller:
+ * - Not supported: not available in HW and/or firmware not defined.
+ * - Supported: available in HW and firmware defined.
+ * - Wanted: supported + enabled in modparam.
+ * - In use: wanted + firmware found on the system and successfully fetched.
+ */
-static inline bool intel_uc_supports_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
+#define __uc_state_checker(x, func, state, required) \
+static inline bool intel_uc_##state##_##func(struct intel_uc *uc) \
+{ \
+ return intel_##func##_is_##required(&uc->x); \
}
-static inline bool intel_uc_uses_guc_submission(struct intel_uc *uc)
-{
- return intel_guc_is_submission_supported(&uc->guc);
-}
+#define uc_state_checkers(x, func) \
+__uc_state_checker(x, func, supports, supported) \
+__uc_state_checker(x, func, wants, wanted) \
+__uc_state_checker(x, func, uses, used)
-static inline bool intel_uc_supports_huc(struct intel_uc *uc)
-{
- return intel_uc_supports_guc(uc);
-}
+uc_state_checkers(guc, guc);
+uc_state_checkers(huc, huc);
+uc_state_checkers(guc, guc_submission);
-static inline bool intel_uc_uses_huc(struct intel_uc *uc)
-{
- return intel_huc_is_enabled(&uc->huc);
-}
+#undef uc_state_checkers
+#undef __uc_state_checker
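
For readers tracing the token pasting: uc_state_checkers(huc, huc) expands to three one-line wrappers, hand-expanded below for illustration (the guc and guc_submission invocations follow the same shape):

static inline bool intel_uc_supports_huc(struct intel_uc *uc)
{
	return intel_huc_is_supported(&uc->huc);
}

static inline bool intel_uc_wants_huc(struct intel_uc *uc)
{
	return intel_huc_is_wanted(&uc->huc);
}

static inline bool intel_uc_uses_huc(struct intel_uc *uc)
{
	return intel_huc_is_used(&uc->huc);
}
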
#define intel_uc_ops_function(_NAME, _OPS, _TYPE, _RET) \
static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
@@ -80,7 +90,7 @@ static inline _TYPE intel_uc_##_NAME(struct intel_uc *uc) \
intel_uc_ops_function(sanitize, sanitize, int, 0);
intel_uc_ops_function(fetch_firmwares, init_fw, void, );
intel_uc_ops_function(cleanup_firmwares, fini_fw, void, );
-intel_uc_ops_function(init, init, void, );
+intel_uc_ops_function(init, init, int, 0);
intel_uc_ops_function(fini, fini, void, );
intel_uc_ops_function(init_hw, init_hw, int, 0);
intel_uc_ops_function(fini_hw, fini_hw, void, );
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
index 8ee0a0c7f447..18c755203688 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.c
@@ -43,7 +43,7 @@ void intel_uc_fw_change_status(struct intel_uc_fw *uc_fw,
* features.
*/
#define INTEL_UC_FIRMWARE_DEFS(fw_def, guc_def, huc_def) \
- fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 3)) \
+ fw_def(TIGERLAKE, 0, guc_def(tgl, 35, 2, 0), huc_def(tgl, 7, 0, 12)) \
fw_def(ELKHARTLAKE, 0, guc_def(ehl, 33, 0, 4), huc_def(ehl, 9, 0, 0)) \
fw_def(ICELAKE, 0, guc_def(icl, 33, 0, 0), huc_def(icl, 9, 0, 0)) \
fw_def(COFFEELAKE, 5, guc_def(cml, 33, 0, 0), huc_def(cml, 4, 0, 0)) \
@@ -279,7 +279,7 @@ int intel_uc_fw_fetch(struct intel_uc_fw *uc_fw)
err = i915_inject_probe_error(i915, -ENXIO);
if (err)
- return err;
+ goto fail;
__force_fw_fetch_failures(uc_fw, -EINVAL);
__force_fw_fetch_failures(uc_fw, -ESTALE);
@@ -501,7 +501,7 @@ int intel_uc_fw_upload(struct intel_uc_fw *uc_fw, u32 dst_offset, u32 dma_flags)
if (err)
return err;
- if (!intel_uc_fw_is_available(uc_fw))
+ if (!intel_uc_fw_is_loadable(uc_fw))
return -ENOEXEC;
/* Call custom loader */
@@ -544,7 +544,10 @@ int intel_uc_fw_init(struct intel_uc_fw *uc_fw)
void intel_uc_fw_fini(struct intel_uc_fw *uc_fw)
{
- intel_uc_fw_cleanup_fetch(uc_fw);
+ if (i915_gem_object_has_pinned_pages(uc_fw->obj))
+ i915_gem_object_unpin_pages(uc_fw->obj);
+
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
}
/**
diff --git a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
index 1f30543d0d2d..888ff0de0244 100644
--- a/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
+++ b/drivers/gpu/drm/i915/gt/uc/intel_uc_fw.h
@@ -29,8 +29,11 @@ struct intel_gt;
* | | SELECTED |
* +------------+- / | \ -+
* | | MISSING <--/ | \--> ERROR |
- * | fetch | | |
- * | | /------> AVAILABLE <---<-----------\ |
+ * | fetch | V |
+ * | | AVAILABLE |
+ * +------------+- | -+
+ * | init | V |
+ * | | /------> LOADABLE <----<-----------\ |
* +------------+- \ / \ \ \ -+
* | | FAIL <--< \--> TRANSFERRED \ |
* | upload | \ / \ / |
@@ -46,6 +49,7 @@ enum intel_uc_fw_status {
INTEL_UC_FIRMWARE_MISSING, /* blob not found on the system */
INTEL_UC_FIRMWARE_ERROR, /* invalid format or version */
INTEL_UC_FIRMWARE_AVAILABLE, /* blob found and copied in mem */
+ INTEL_UC_FIRMWARE_LOADABLE, /* all fw-required objects are ready */
INTEL_UC_FIRMWARE_FAIL, /* failed to xfer or init/auth the fw */
INTEL_UC_FIRMWARE_TRANSFERRED, /* dma xfer done */
INTEL_UC_FIRMWARE_RUNNING /* init/auth done */
@@ -115,6 +119,8 @@ const char *intel_uc_fw_status_repr(enum intel_uc_fw_status status)
return "ERROR";
case INTEL_UC_FIRMWARE_AVAILABLE:
return "AVAILABLE";
+ case INTEL_UC_FIRMWARE_LOADABLE:
+ return "LOADABLE";
case INTEL_UC_FIRMWARE_FAIL:
return "FAIL";
case INTEL_UC_FIRMWARE_TRANSFERRED:
@@ -143,6 +149,7 @@ static inline int intel_uc_fw_status_to_error(enum intel_uc_fw_status status)
case INTEL_UC_FIRMWARE_SELECTED:
return -ESTALE;
case INTEL_UC_FIRMWARE_AVAILABLE:
+ case INTEL_UC_FIRMWARE_LOADABLE:
case INTEL_UC_FIRMWARE_TRANSFERRED:
case INTEL_UC_FIRMWARE_RUNNING:
return 0;
@@ -184,6 +191,11 @@ static inline bool intel_uc_fw_is_available(struct intel_uc_fw *uc_fw)
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_AVAILABLE;
}
+static inline bool intel_uc_fw_is_loadable(struct intel_uc_fw *uc_fw)
+{
+ return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_LOADABLE;
+}
+
static inline bool intel_uc_fw_is_loaded(struct intel_uc_fw *uc_fw)
{
return __intel_uc_fw_status(uc_fw) >= INTEL_UC_FIRMWARE_TRANSFERRED;
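
These >= comparisons only work because the status enum is declared in pipeline order, so "reached at least state X" is a single compare. A stand-alone sketch of the idiom, with hypothetical names rather than the driver's actual enum:

enum fw_status { FW_SELECTED, FW_AVAILABLE, FW_LOADABLE,
		 FW_TRANSFERRED, FW_RUNNING };

static inline int fw_is_loadable(enum fw_status s)
{
	return s >= FW_LOADABLE; /* LOADABLE, TRANSFERRED or RUNNING */
}
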
@@ -202,7 +214,7 @@ static inline bool intel_uc_fw_is_overridden(const struct intel_uc_fw *uc_fw)
static inline void intel_uc_fw_sanitize(struct intel_uc_fw *uc_fw)
{
if (intel_uc_fw_is_loaded(uc_fw))
- intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_AVAILABLE);
+ intel_uc_fw_change_status(uc_fw, INTEL_UC_FIRMWARE_LOADABLE);
}
static inline u32 __intel_uc_fw_get_upload_size(struct intel_uc_fw *uc_fw)
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 771420453f82..8b13f091cee2 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -41,7 +41,7 @@
static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
unsigned int flags;
u64 start, end, size;
struct drm_mm_node *node;
@@ -61,14 +61,14 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
flags = PIN_MAPPABLE;
}
- mutex_lock(&dev_priv->ggtt.vm.mutex);
- mmio_hw_access_pre(dev_priv);
- ret = i915_gem_gtt_insert(&dev_priv->ggtt.vm, node,
+ mutex_lock(&gt->ggtt->vm.mutex);
+ mmio_hw_access_pre(gt);
+ ret = i915_gem_gtt_insert(&gt->ggtt->vm, node,
size, I915_GTT_PAGE_SIZE,
I915_COLOR_UNEVICTABLE,
start, end, flags);
- mmio_hw_access_post(dev_priv);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mmio_hw_access_post(gt);
+ mutex_unlock(&gt->ggtt->vm.mutex);
if (ret)
gvt_err("fail to alloc %s gm space from host\n",
high_gm ? "high" : "low");
@@ -79,7 +79,7 @@ static int alloc_gm(struct intel_vgpu *vgpu, bool high_gm)
static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_gt *gt = gvt->gt;
int ret;
ret = alloc_gm(vgpu, false);
@@ -98,20 +98,21 @@ static int alloc_vgpu_gm(struct intel_vgpu *vgpu)
return 0;
out_free_aperture:
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
return ret;
}
static void free_vgpu_gm(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ struct intel_gt *gt = gvt->gt;
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gt->ggtt->vm.mutex);
drm_mm_remove_node(&vgpu->gm.low_gm_node);
drm_mm_remove_node(&vgpu->gm.high_gm_node);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gt->ggtt->vm.mutex);
}
/**
@@ -128,28 +129,29 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
u32 fence, u64 value)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
i915_reg_t fence_reg_lo, fence_reg_hi;
- assert_rpm_wakelock_held(&dev_priv->runtime_pm);
+ assert_rpm_wakelock_held(uncore->rpm);
- if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&i915->drm, fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
- if (WARN_ON(!reg))
+ if (drm_WARN_ON(&i915->drm, !reg))
return;
fence_reg_lo = FENCE_REG_GEN6_LO(reg->id);
fence_reg_hi = FENCE_REG_GEN6_HI(reg->id);
- I915_WRITE(fence_reg_lo, 0);
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_lo, 0);
+ intel_uncore_posting_read(uncore, fence_reg_lo);
- I915_WRITE(fence_reg_hi, upper_32_bits(value));
- I915_WRITE(fence_reg_lo, lower_32_bits(value));
- POSTING_READ(fence_reg_lo);
+ intel_uncore_write(uncore, fence_reg_hi, upper_32_bits(value));
+ intel_uncore_write(uncore, fence_reg_lo, lower_32_bits(value));
+ intel_uncore_posting_read(uncore, fence_reg_lo);
}
static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
@@ -163,42 +165,43 @@ static void _clear_vgpu_fence(struct intel_vgpu *vgpu)
static void free_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
u32 i;
- if (WARN_ON(!vgpu_fence_sz(vgpu)))
+ if (drm_WARN_ON(&gvt->gt->i915->drm, !vgpu_fence_sz(vgpu)))
return;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
_clear_vgpu_fence(vgpu);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
reg = vgpu->fence.regs[i];
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
}
static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
+ struct intel_uncore *uncore = gvt->gt->uncore;
struct i915_fence_reg *reg;
+ intel_wakeref_t wakeref;
int i;
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(uncore->rpm);
/* Request fences from host */
- mutex_lock(&dev_priv->ggtt.vm.mutex);
+ mutex_lock(&gvt->gt->ggtt->vm.mutex);
for (i = 0; i < vgpu_fence_sz(vgpu); i++) {
- reg = i915_reserve_fence(&dev_priv->ggtt);
+ reg = i915_reserve_fence(gvt->gt->ggtt);
if (IS_ERR(reg))
goto out_free_fence;
@@ -207,9 +210,10 @@ static int alloc_vgpu_fence(struct intel_vgpu *vgpu)
_clear_vgpu_fence(vgpu);
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put(uncore->rpm, wakeref);
return 0;
+
out_free_fence:
gvt_vgpu_err("Failed to alloc fences\n");
/* Return fences to host, if fail */
@@ -220,8 +224,8 @@ out_free_fence:
i915_unreserve_fence(reg);
vgpu->fence.regs[i] = NULL;
}
- mutex_unlock(&dev_priv->ggtt.vm.mutex);
- intel_runtime_pm_put_unchecked(rpm);
+ mutex_unlock(&gvt->gt->ggtt->vm.mutex);
+ intel_runtime_pm_put_unchecked(uncore->rpm);
return -ENOSPC;
}
@@ -315,11 +319,11 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu)
*/
void intel_vgpu_reset_resource(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
+ intel_wakeref_t wakeref;
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- _clear_vgpu_fence(vgpu);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ with_intel_runtime_pm(gvt->gt->uncore->rpm, wakeref)
+ _clear_vgpu_fence(vgpu);
}
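
with_intel_runtime_pm() used above is a scoped acquire/release helper; a minimal user-space sketch of the same for-loop-macro idiom, with hypothetical names rather than the i915 API, showing why the body can no longer leak the reference:

#include <stdio.h>

static int ref_get(void)   { printf("get\n"); return 1; }
static void ref_put(int r) { (void)r; printf("put\n"); }

/* acquire on entry, release exactly once when the body finishes */
#define with_ref(ref) \
	for ((ref) = ref_get(); (ref); ref_put(ref), (ref) = 0)

int main(void)
{
	int ref;

	with_ref(ref)
		printf("do work while holding the reference\n");
	return 0;
}
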
/**
diff --git a/drivers/gpu/drm/i915/gvt/cfg_space.c b/drivers/gpu/drm/i915/gvt/cfg_space.c
index 19cf1bbe059d..072725a448db 100644
--- a/drivers/gpu/drm/i915/gvt/cfg_space.c
+++ b/drivers/gpu/drm/i915/gvt/cfg_space.c
@@ -106,10 +106,13 @@ static void vgpu_pci_cfg_mem_write(struct intel_vgpu *vgpu, unsigned int off,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 4))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
memcpy(p_data, vgpu_cfg_space(vgpu) + offset, bytes);
@@ -297,34 +300,36 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int ret;
- if (WARN_ON(bytes > 4))
+ if (drm_WARN_ON(&i915->drm, bytes > 4))
return -EINVAL;
- if (WARN_ON(offset + bytes > vgpu->gvt->device_info.cfg_space_size))
+ if (drm_WARN_ON(&i915->drm,
+ offset + bytes > vgpu->gvt->device_info.cfg_space_size))
return -EINVAL;
/* First check if it's PCI_COMMAND */
if (IS_ALIGNED(offset, 2) && offset == PCI_COMMAND) {
- if (WARN_ON(bytes > 2))
+ if (drm_WARN_ON(&i915->drm, bytes > 2))
return -EINVAL;
return emulate_pci_command_write(vgpu, offset, p_data, bytes);
}
switch (rounddown(offset, 4)) {
case PCI_ROM_ADDRESS:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_rom_bar_write(vgpu, offset, p_data, bytes);
case PCI_BASE_ADDRESS_0 ... PCI_BASE_ADDRESS_5:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
return emulate_pci_bar_write(vgpu, offset, p_data, bytes);
case INTEL_GVT_PCI_SWSCI:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_emulate_opregion_request(vgpu, *(u32 *)p_data);
if (ret)
@@ -332,7 +337,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
break;
case INTEL_GVT_PCI_OPREGION:
- if (WARN_ON(!IS_ALIGNED(offset, 4)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4)))
return -EINVAL;
ret = intel_vgpu_opregion_base_write_handler(vgpu,
*(u32 *)p_data);
@@ -391,9 +396,9 @@ void intel_vgpu_init_cfg_space(struct intel_vgpu *vgpu,
memset(vgpu_cfg_space(vgpu) + INTEL_GVT_PCI_OPREGION, 0, 4);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_GTTMMIO].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 0);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 0);
vgpu->cfg_space.bar[INTEL_GVT_PCI_BAR_APERTURE].size =
- pci_resource_len(gvt->dev_priv->drm.pdev, 2);
+ pci_resource_len(gvt->gt->i915->drm.pdev, 2);
memset(vgpu_cfg_space(vgpu) + PCI_ROM_ADDRESS, 0, 4);
}
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 21a176cd8acc..9e065ad0658f 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -462,7 +462,7 @@ enum {
struct parser_exec_state {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
int buf_type;
@@ -635,39 +635,42 @@ static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
},
};
-static inline u32 get_opcode(u32 cmd, int ring_id)
+static inline u32 get_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return INVALID_OP;
return cmd >> (32 - d_info->op_len);
}
-static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
- unsigned int opcode, int ring_id)
+static inline const struct cmd_info *
+find_cmd_entry(struct intel_gvt *gvt, unsigned int opcode,
+ const struct intel_engine_cs *engine)
{
struct cmd_entry *e;
hash_for_each_possible(gvt->cmd_table, e, hlist, opcode) {
- if (opcode == e->info->opcode && e->info->rings & BIT(ring_id))
+ if (opcode == e->info->opcode &&
+ e->info->rings & engine->mask)
return e->info;
}
return NULL;
}
-static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
- u32 cmd, int ring_id)
+static inline const struct cmd_info *
+get_cmd_info(struct intel_gvt *gvt, u32 cmd,
+ const struct intel_engine_cs *engine)
{
u32 opcode;
- opcode = get_opcode(cmd, ring_id);
+ opcode = get_opcode(cmd, engine);
if (opcode == INVALID_OP)
return NULL;
- return find_cmd_entry(gvt, opcode, ring_id);
+ return find_cmd_entry(gvt, opcode, engine);
}
static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
@@ -675,12 +678,12 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
return (cmd >> low) & ((1U << (hi - low + 1)) - 1);
}
-static inline void print_opcode(u32 cmd, int ring_id)
+static inline void print_opcode(u32 cmd, const struct intel_engine_cs *engine)
{
const struct decode_info *d_info;
int i;
- d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
+ d_info = ring_decode_info[engine->id][CMD_TYPE(cmd)];
if (d_info == NULL)
return;
@@ -709,10 +712,11 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
int cnt = 0;
int i;
- gvt_dbg_cmd(" vgpu%d RING%d: ring_start(%08lx) ring_end(%08lx)"
- " ring_head(%08lx) ring_tail(%08lx)\n", s->vgpu->id,
- s->ring_id, s->ring_start, s->ring_start + s->ring_size,
- s->ring_head, s->ring_tail);
+ gvt_dbg_cmd(" vgpu%d RING%s: ring_start(%08lx) ring_end(%08lx)"
+ " ring_head(%08lx) ring_tail(%08lx)\n",
+ s->vgpu->id, s->engine->name,
+ s->ring_start, s->ring_start + s->ring_size,
+ s->ring_head, s->ring_tail);
gvt_dbg_cmd(" %s %s ip_gma(%08lx) ",
s->buf_type == RING_BUFFER_INSTRUCTION ?
@@ -729,7 +733,7 @@ static void parser_exec_state_dump(struct parser_exec_state *s)
s->ip_va, cmd_val(s, 0), cmd_val(s, 1),
cmd_val(s, 2), cmd_val(s, 3));
- print_opcode(cmd_val(s, 0), s->ring_id);
+ print_opcode(cmd_val(s, 0), s->engine);
s->ip_va = (u32 *)((((u64)s->ip_va) >> 12) << 12);
@@ -840,7 +844,6 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
unsigned int data;
u32 ring_base;
u32 nopid;
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
if (!strcmp(cmd, "lri"))
data = cmd_val(s, index + 1);
@@ -850,7 +853,7 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
return -EINVAL;
}
- ring_base = dev_priv->engine[s->ring_id]->mmio_base;
+ ring_base = s->engine->mmio_base;
nopid = i915_mmio_reg_offset(RING_NOPID(ring_base));
if (!intel_gvt_in_force_nonpriv_whitelist(gvt, data) &&
@@ -926,9 +929,9 @@ static int cmd_reg_handler(struct parser_exec_state *s,
* update reg values in it into vregs, so LRIs in workload with
* inhibit context will restore with correct values
*/
- if (IS_GEN(gvt->dev_priv, 9) &&
- intel_gvt_mmio_is_in_ctx(gvt, offset) &&
- !strncmp(cmd, "lri", 3)) {
+ if (IS_GEN(s->engine->i915, 9) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
intel_gvt_hypervisor_read_gpa(s->vgpu,
s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
/* check inhibit context */
@@ -964,7 +967,6 @@ static int cmd_handler_lri(struct parser_exec_state *s)
{
int i, ret = 0;
int cmd_len = cmd_length(s);
- struct intel_gvt *gvt = s->vgpu->gvt;
u32 valid_len = CMD_LEN(1);
/*
@@ -979,8 +981,8 @@ static int cmd_handler_lri(struct parser_exec_state *s)
}
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(gvt->dev_priv) && s->ring_id != RCS0) {
- if (s->ring_id == BCS0 &&
+ if (IS_BROADWELL(s->engine->i915) && s->engine->id != RCS0) {
+ if (s->engine->id == BCS0 &&
cmd_reg(s, i) == i915_mmio_reg_offset(DERRMR))
ret |= 0;
else
@@ -1001,9 +1003,9 @@ static int cmd_handler_lrr(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len; i += 2) {
- if (IS_BROADWELL(s->vgpu->gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= ((cmd_reg_inhibit(s, i) ||
- (cmd_reg_inhibit(s, i + 1)))) ?
+ (cmd_reg_inhibit(s, i + 1)))) ?
-EBADRQC : 0;
if (ret)
break;
@@ -1029,7 +1031,7 @@ static int cmd_handler_lrm(struct parser_exec_state *s)
int cmd_len = cmd_length(s);
for (i = 1; i < cmd_len;) {
- if (IS_BROADWELL(gvt->dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
ret |= (cmd_reg_inhibit(s, i)) ? -EBADRQC : 0;
if (ret)
break;
@@ -1141,7 +1143,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 2), gma);
val = cmd_val(s, 1) & (~(1 << 21));
@@ -1155,15 +1157,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
return ret;
if (cmd_val(s, 1) & PIPE_CONTROL_NOTIFY)
- set_bit(cmd_interrupt_events[s->ring_id].pipe_control_notify,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].pipe_control_notify,
+ s->workload->pending_events);
return 0;
}
static int cmd_handler_mi_user_interrupt(struct parser_exec_state *s)
{
- set_bit(cmd_interrupt_events[s->ring_id].mi_user_interrupt,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_user_interrupt,
+ s->workload->pending_events);
patch_value(s, cmd_ptr(s, 0), MI_NOOP);
return 0;
}
@@ -1213,7 +1215,7 @@ struct plane_code_mapping {
static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct plane_code_mapping gen8_plane_code[] = {
[0] = {PIPE_A, PLANE_A, PRIMARY_A_FLIP_DONE},
[1] = {PIPE_B, PLANE_A, PRIMARY_B_FLIP_DONE},
@@ -1230,7 +1232,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
dword2 = cmd_val(s, 2);
v = (dword0 & GENMASK(21, 19)) >> 19;
- if (WARN_ON(v >= ARRAY_SIZE(gen8_plane_code)))
+ if (drm_WARN_ON(&dev_priv->drm, v >= ARRAY_SIZE(gen8_plane_code)))
return -EBADRQC;
info->pipe = gen8_plane_code[v].pipe;
@@ -1250,7 +1252,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
info->stride_reg = SPRSTRIDE(info->pipe);
info->surf_reg = SPRSURF(info->pipe);
} else {
- WARN_ON(1);
+ drm_WARN_ON(&dev_priv->drm, 1);
return -EBADRQC;
}
return 0;
@@ -1259,7 +1261,7 @@ static int gen8_decode_mi_display_flip(struct parser_exec_state *s,
static int skl_decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
u32 dword0 = cmd_val(s, 0);
u32 dword1 = cmd_val(s, 1);
@@ -1318,13 +1320,12 @@ static int skl_decode_mi_display_flip(struct parser_exec_state *s,
static int gen8_check_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
u32 stride, tile;
if (!info->async_flip)
return 0;
- if (INTEL_GEN(dev_priv) >= 9) {
+ if (INTEL_GEN(s->engine->i915) >= 9) {
stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
GENMASK(12, 10)) >> 10;
@@ -1347,7 +1348,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = s->engine->i915;
struct intel_vgpu *vgpu = s->vgpu;
set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
@@ -1378,11 +1379,9 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
static int decode_mi_display_flip(struct parser_exec_state *s,
struct mi_display_flip_command_info *info)
{
- struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
-
- if (IS_BROADWELL(dev_priv))
+ if (IS_BROADWELL(s->engine->i915))
return gen8_decode_mi_display_flip(s, info);
- if (INTEL_GEN(dev_priv) >= 9)
+ if (INTEL_GEN(s->engine->i915) >= 9)
return skl_decode_mi_display_flip(s, info);
return -ENODEV;
@@ -1667,7 +1666,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
if (ret)
return ret;
if (index_mode) {
- hws_pga = s->vgpu->hws_pga[s->ring_id];
+ hws_pga = s->vgpu->hws_pga[s->engine->id];
gma = hws_pga + gma;
patch_value(s, cmd_ptr(s, 1), gma);
val = cmd_val(s, 0) & (~(1 << 21));
@@ -1676,8 +1675,8 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
}
/* Check notify bit */
if ((cmd_val(s, 0) & (1 << 8)))
- set_bit(cmd_interrupt_events[s->ring_id].mi_flush_dw,
- s->workload->pending_events);
+ set_bit(cmd_interrupt_events[s->engine->id].mi_flush_dw,
+ s->workload->pending_events);
return ret;
}
@@ -1725,12 +1724,18 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
/* Decide privilege based on address space */
- if (cmd_val(s, 0) & (1 << 8) &&
- !(s->vgpu->scan_nonprivbb & (1 << s->ring_id)))
+ if (cmd_val(s, 0) & BIT(8) &&
+ !(s->vgpu->scan_nonprivbb & s->engine->mask))
return 0;
+
return 1;
}
+static const char *repr_addr_type(unsigned int type)
+{
+ return type == PPGTT_BUFFER ? "ppgtt" : "ggtt";
+}
+
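
The parser conversions in this file replace 1 << ring_id tests with engine->mask, which in i915 is a one-bit-per-engine value, so the membership test stays a single AND. A small illustrative sketch (hypothetical struct, not the driver's definition):

#include <stdio.h>

struct engine { unsigned int id; unsigned int mask; };

int main(void)
{
	struct engine bcs = { .id = 2, .mask = 1u << 2 };
	unsigned int scan_nonprivbb = (1u << 2) | (1u << 0);

	printf("scanned: %s\n",
	       (scan_nonprivbb & bcs.mask) ? "yes" : "no");
	return 0;
}
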
static int find_bb_size(struct parser_exec_state *s,
unsigned long *bb_size,
unsigned long *bb_end_cmd_offset)
@@ -1753,24 +1758,24 @@ static int find_bb_size(struct parser_exec_state *s,
return -EFAULT;
cmd = cmd_val(s, 0);
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
do {
if (copy_gma_to_hva(s->vgpu, mm,
- gma, gma + 4, &cmd) < 0)
+ gma, gma + 4, &cmd) < 0)
return -EFAULT;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1799,12 +1804,12 @@ static int audit_bb_end(struct parser_exec_state *s, void *va)
u32 cmd = *(u32 *)va;
const struct cmd_info *info;
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
@@ -1857,7 +1862,7 @@ static int perform_bb_shadow(struct parser_exec_state *s)
if (bb->ppgtt)
start_offset = gma & ~I915_GTT_PAGE_MASK;
- bb->obj = i915_gem_object_create_shmem(s->vgpu->gvt->dev_priv,
+ bb->obj = i915_gem_object_create_shmem(s->engine->i915,
round_up(bb_size + start_offset,
PAGE_SIZE));
if (IS_ERR(bb->obj)) {
@@ -2666,25 +2671,25 @@ static int cmd_parser_exec(struct parser_exec_state *s)
if (cmd == MI_NOOP)
info = &cmd_info[mi_noop_index];
else
- info = get_cmd_info(s->vgpu->gvt, cmd, s->ring_id);
+ info = get_cmd_info(s->vgpu->gvt, cmd, s->engine);
if (info == NULL) {
- gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %d, workload=%p\n",
- cmd, get_opcode(cmd, s->ring_id),
- (s->buf_addr_type == PPGTT_BUFFER) ?
- "ppgtt" : "ggtt", s->ring_id, s->workload);
+ gvt_vgpu_err("unknown cmd 0x%x, opcode=0x%x, addr_type=%s, ring %s, workload=%p\n",
+ cmd, get_opcode(cmd, s->engine),
+ repr_addr_type(s->buf_addr_type),
+ s->engine->name, s->workload);
return -EBADRQC;
}
s->info = info;
- trace_gvt_command(vgpu->id, s->ring_id, s->ip_gma, s->ip_va,
+ trace_gvt_command(vgpu->id, s->engine->id, s->ip_gma, s->ip_va,
cmd_length(s), s->buf_type, s->buf_addr_type,
s->workload, info->name);
if ((info->flag & F_LEN_MASK) == F_LEN_VAR_FIXED) {
ret = gvt_check_valid_cmd_length(cmd_length(s),
- info->valid_len);
+ info->valid_len);
if (ret)
return ret;
}
@@ -2781,7 +2786,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = workload->rb_start;
s.ring_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
s.ring_head = gma_head;
@@ -2790,8 +2795,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
s.workload = workload;
s.is_ctx_wa = false;
- if ((bypass_scan_mask & (1 << workload->ring_id)) ||
- gma_head == gma_tail)
+ if (bypass_scan_mask & workload->engine->mask || gma_head == gma_tail)
return 0;
ret = ip_gma_set(&s, gma_head);
@@ -2830,7 +2834,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
s.buf_type = RING_BUFFER_INSTRUCTION;
s.buf_addr_type = GTT_BUFFER;
s.vgpu = workload->vgpu;
- s.ring_id = workload->ring_id;
+ s.engine = workload->engine;
s.ring_start = wa_ctx->indirect_ctx.guest_gma;
s.ring_size = ring_size;
s.ring_head = gma_head;
@@ -2855,7 +2859,6 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
struct intel_vgpu_submission *s = &vgpu->submission;
unsigned long gma_head, gma_tail, gma_top, guest_rb_size;
void *shadow_ring_buffer_va;
- int ring_id = workload->ring_id;
int ret;
guest_rb_size = _RING_CTL_BUF_SIZE(workload->rb_ctl);
@@ -2868,21 +2871,21 @@ static int shadow_workload_ring_buffer(struct intel_vgpu_workload *workload)
gma_tail = workload->rb_start + workload->rb_tail;
gma_top = workload->rb_start + guest_rb_size;
- if (workload->rb_len > s->ring_scan_buffer_size[ring_id]) {
+ if (workload->rb_len > s->ring_scan_buffer_size[workload->engine->id]) {
void *p;
/* realloc the new ring buffer if needed */
- p = krealloc(s->ring_scan_buffer[ring_id], workload->rb_len,
- GFP_KERNEL);
+ p = krealloc(s->ring_scan_buffer[workload->engine->id],
+ workload->rb_len, GFP_KERNEL);
if (!p) {
gvt_vgpu_err("fail to re-alloc ring scan buffer\n");
return -ENOMEM;
}
- s->ring_scan_buffer[ring_id] = p;
- s->ring_scan_buffer_size[ring_id] = workload->rb_len;
+ s->ring_scan_buffer[workload->engine->id] = p;
+ s->ring_scan_buffer_size[workload->engine->id] = workload->rb_len;
}
- shadow_ring_buffer_va = s->ring_scan_buffer[ring_id];
+ shadow_ring_buffer_va = s->ring_scan_buffer[workload->engine->id];
/* get shadow ring buffer va */
workload->shadow_ring_buffer_va = shadow_ring_buffer_va;
@@ -2940,7 +2943,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
int ret = 0;
void *map;
- obj = i915_gem_object_create_shmem(workload->vgpu->gvt->dev_priv,
+ obj = i915_gem_object_create_shmem(workload->engine->i915,
roundup(ctx_size + CACHELINE_BYTES,
PAGE_SIZE));
if (IS_ERR(obj))
@@ -3029,30 +3032,14 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
return 0;
}
-static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
- unsigned int opcode, unsigned long rings)
-{
- const struct cmd_info *info = NULL;
- unsigned int ring;
-
- for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
- info = find_cmd_entry(gvt, opcode, ring);
- if (info)
- break;
- }
- return info;
-}
-
static int init_cmd_table(struct intel_gvt *gvt)
{
+ unsigned int gen_type = intel_gvt_get_device_type(gvt);
int i;
- struct cmd_entry *e;
- const struct cmd_info *info;
- unsigned int gen_type;
-
- gen_type = intel_gvt_get_device_type(gvt);
for (i = 0; i < ARRAY_SIZE(cmd_info); i++) {
+ struct cmd_entry *e;
+
if (!(cmd_info[i].devices & gen_type))
continue;
@@ -3061,23 +3048,16 @@ static int init_cmd_table(struct intel_gvt *gvt)
return -ENOMEM;
e->info = &cmd_info[i];
- info = find_cmd_entry_any_ring(gvt,
- e->info->opcode, e->info->rings);
- if (info) {
- gvt_err("%s %s duplicated\n", e->info->name,
- info->name);
- kfree(e);
- return -EEXIST;
- }
if (cmd_info[i].opcode == OP_MI_NOOP)
mi_noop_index = i;
INIT_HLIST_NODE(&e->hlist);
add_cmd_entry(gvt, e);
gvt_dbg_cmd("add %-30s op %04x flag %x devs %02x rings %02x\n",
- e->info->name, e->info->opcode, e->info->flag,
- e->info->devices, e->info->rings);
+ e->info->name, e->info->opcode, e->info->flag,
+ e->info->devices, e->info->rings);
}
+
return 0;
}
diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c
index 285f6011a537..ec47d4114554 100644
--- a/drivers/gpu/drm/i915/gvt/debugfs.c
+++ b/drivers/gpu/drm/i915/gvt/debugfs.c
@@ -58,12 +58,11 @@ static int mmio_offset_compare(void *priv,
static inline int mmio_diff_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
struct mmio_diff_param *param = data;
struct diff_mmio *node;
u32 preg, vreg;
- preg = intel_uncore_read_notrace(&i915->uncore, _MMIO(offset));
+ preg = intel_uncore_read_notrace(gvt->gt->uncore, _MMIO(offset));
vreg = vgpu_vreg(param->vgpu, offset);
if (preg != vreg) {
@@ -98,10 +97,10 @@ static int vgpu_mmio_diff_show(struct seq_file *s, void *unused)
mutex_lock(&gvt->lock);
spin_lock_bh(&gvt->scheduler.mmio_context_lock);
- mmio_hw_access_pre(gvt->dev_priv);
+ mmio_hw_access_pre(gvt->gt);
/* Recognize all the diff mmios to list. */
intel_gvt_for_each_tracked_mmio(gvt, mmio_diff_handler, &param);
- mmio_hw_access_post(gvt->dev_priv);
+ mmio_hw_access_post(gvt->gt);
spin_unlock_bh(&gvt->scheduler.mmio_context_lock);
mutex_unlock(&gvt->lock);
@@ -128,6 +127,7 @@ static int
vgpu_scan_nonprivbb_get(void *data, u64 *val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
+
*val = vgpu->scan_nonprivbb;
return 0;
}
@@ -142,42 +142,7 @@ static int
vgpu_scan_nonprivbb_set(void *data, u64 val)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- enum intel_engine_id id;
- char buf[128], *s;
- int len;
-
- val &= (1 << I915_NUM_ENGINES) - 1;
-
- if (vgpu->scan_nonprivbb == val)
- return 0;
-
- if (!val)
- goto done;
-
- len = sprintf(buf,
- "gvt: vgpu %d turns on non-privileged batch buffers scanning on Engines:",
- vgpu->id);
-
- s = buf + len;
-
- for (id = 0; id < I915_NUM_ENGINES; id++) {
- struct intel_engine_cs *engine;
-
- engine = dev_priv->engine[id];
- if (engine && (val & (1 << id))) {
- len = snprintf(s, 4, "%d, ", engine->id);
- s += len;
- } else
- val &= ~(1 << id);
- }
-
- if (val)
- sprintf(s, "low performance expected.");
-
- pr_warn("%s\n", buf);
-done:
vgpu->scan_nonprivbb = val;
return 0;
}
@@ -220,7 +185,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu)
*/
void intel_gvt_debugfs_init(struct intel_gvt *gvt)
{
- struct drm_minor *minor = gvt->dev_priv->drm.primary;
+ struct drm_minor *minor = gvt->gt->i915->drm.primary;
gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root);
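
Throughout debugfs.c (and in the gvt.h helpers further down), mmio_hw_access_pre()/post() now take an intel_gt and reach the runtime-PM reference through gt->uncore->rpm instead of dereferencing dev_priv. A minimal sketch of that bracketing pattern, userspace C with stand-in types and a fake register read in place of intel_uncore_read_notrace():

#include <stdio.h>

struct rpm { int refcount; };
struct uncore { struct rpm *rpm; };
struct intel_gt { struct uncore *uncore; };

static void rpm_get(struct rpm *rpm) { rpm->refcount++; }
static void rpm_put(struct rpm *rpm) { rpm->refcount--; }

static void mmio_hw_access_pre(struct intel_gt *gt)  { rpm_get(gt->uncore->rpm); }
static void mmio_hw_access_post(struct intel_gt *gt) { rpm_put(gt->uncore->rpm); }

static unsigned int read_hw_reg(struct intel_gt *gt, unsigned int offset)
{
	unsigned int val;

	mmio_hw_access_pre(gt);     /* take a runtime-PM wakeref first */
	val = offset ^ 0xdeadbeefu; /* stand-in for the real MMIO read */
	mmio_hw_access_post(gt);    /* drop it once the access is done */
	return val;
}

int main(void)
{
	struct rpm rpm = { 0 };
	struct uncore uncore = { &rpm };
	struct intel_gt gt = { &uncore };

	printf("val %#x, refcount back to %d\n",
	       read_hw_reg(&gt, 0x2358), rpm.refcount);
	return 0;
}
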
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index e1c313da6c00..6e5c9885d9fe 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -57,7 +57,7 @@ static int get_edp_pipe(struct intel_vgpu *vgpu)
static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
return 0;
@@ -69,9 +69,10 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
- if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
+ if (drm_WARN_ON(&dev_priv->drm,
+ pipe < PIPE_A || pipe >= I915_MAX_PIPES))
return -EINVAL;
if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
@@ -168,7 +169,7 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
int pipe;
if (IS_BROXTON(dev_priv)) {
@@ -319,9 +320,10 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
int type, unsigned int resolution)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
- if (WARN_ON(resolution >= GVT_EDID_NUM))
+ if (drm_WARN_ON(&i915->drm, resolution >= GVT_EDID_NUM))
return -EINVAL;
port->edid = kzalloc(sizeof(*(port->edid)), GFP_KERNEL);
@@ -389,7 +391,7 @@ void intel_gvt_check_vblank_emulation(struct intel_gvt *gvt)
static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_vgpu_irq *irq = &vgpu->irq;
int vblank_event[] = {
[PIPE_A] = PIPE_A_VBLANK,
@@ -421,7 +423,7 @@ static void emulate_vblank(struct intel_vgpu *vgpu)
int pipe;
mutex_lock(&vgpu->vgpu_lock);
- for_each_pipe(vgpu->gvt->dev_priv, pipe)
+ for_each_pipe(vgpu->gvt->gt->i915, pipe)
emulate_vblank_on_pipe(vgpu, pipe);
mutex_unlock(&vgpu->vgpu_lock);
}
@@ -454,10 +456,11 @@ void intel_gvt_emulate_vblank(struct intel_gvt *gvt)
*/
void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* TODO: add more platforms support */
- if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+ if (IS_SKYLAKE(i915) || IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
if (connected) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDID_DETECTED;
@@ -483,7 +486,7 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
*/
void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
IS_COFFEELAKE(dev_priv))
@@ -505,7 +508,7 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
*/
int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
intel_vgpu_init_i2c_edid(vgpu);
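
The display.c conversion also consistently replaces bare WARN_ON()/WARN() with the drm_WARN_*() variants so each splat is tied to a specific drm_device. A rough userspace approximation of the idea; the real macros live in drm_print.h and emit a full backtrace, this only models the device-prefixed message:

#include <stdio.h>

struct drm_device { const char *unique; };

/* Approximation only: the real macro also WARN()s with a backtrace. */
#define drm_WARN_ON(drm, cond)						\
	({								\
		int __ret = !!(cond);					\
		if (__ret)						\
			fprintf(stderr, "%s: WARN at %s:%d\n",		\
				(drm)->unique, __FILE__, __LINE__);	\
		__ret;							\
	})

int main(void)
{
	struct drm_device dev = { .unique = "0000:00:02.0" };
	int pipe = 5, max_pipes = 3;

	if (drm_WARN_ON(&dev, pipe >= max_pipes)) /* splat names the device */
		return 1;
	return 0;
}
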
diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c
index 2477a1e5a166..37fc460414a8 100644
--- a/drivers/gpu/drm/i915/gvt/dmabuf.c
+++ b/drivers/gpu/drm/i915/gvt/dmabuf.c
@@ -67,11 +67,11 @@ static int vgpu_gem_get_pages(
u32 page_num;
fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
- if (WARN_ON(!fb_info))
+ if (drm_WARN_ON(&dev_priv->drm, !fb_info))
return -ENODEV;
vgpu = fb_info->obj->vgpu;
- if (WARN_ON(!vgpu))
+ if (drm_WARN_ON(&dev_priv->drm, !vgpu))
return -ENODEV;
st = kmalloc(sizeof(*st), GFP_KERNEL);
@@ -151,12 +151,12 @@ static void dmabuf_gem_object_free(struct kref *kref)
dmabuf_obj = container_of(pos,
struct intel_vgpu_dmabuf_obj, list);
if (dmabuf_obj == obj) {
+ list_del(pos);
intel_gvt_hypervisor_put_vfio_device(vgpu);
idr_remove(&vgpu->object_idr,
dmabuf_obj->dmabuf_id);
kfree(dmabuf_obj->info);
kfree(dmabuf_obj);
- list_del(pos);
break;
}
}
@@ -417,7 +417,7 @@ static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct vfio_device_gfx_plane_info *gfx_plane_info = args;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct intel_vgpu_fb_info fb_info;
@@ -523,7 +523,7 @@ out:
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
- struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
+ struct drm_device *dev = &vgpu->gvt->gt->i915->drm;
struct intel_vgpu_dmabuf_obj *dmabuf_obj;
struct drm_i915_gem_object *obj;
struct dma_buf *dmabuf;
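
Besides the dev_priv conversion, the dmabuf_gem_object_free() hunk moves list_del() ahead of the kfree() calls, so the node is unlinked from the vGPU's dmabuf list before its memory is released. A self-contained sketch of the unlink-before-free ordering, with a userspace mock of the kernel's list_del():

#include <stdlib.h>

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct dmabuf_obj { struct list_head list; void *info; };

static void free_obj(struct dmabuf_obj *obj)
{
	list_del(&obj->list); /* unlink first, so no walker can reach it... */
	free(obj->info);      /* ...then release the storage */
	free(obj);
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct dmabuf_obj *obj = calloc(1, sizeof(*obj));

	if (!obj)
		return 1;
	obj->info = malloc(16);
	obj->list.next = obj->list.prev = &head;
	head.next = head.prev = &obj->list;

	free_obj(obj);
	return head.next == &head ? 0 : 1; /* list is empty again */
}
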
diff --git a/drivers/gpu/drm/i915/gvt/edid.c b/drivers/gpu/drm/i915/gvt/edid.c
index 1fe6124918f1..190651df5db1 100644
--- a/drivers/gpu/drm/i915/gvt/edid.c
+++ b/drivers/gpu/drm/i915/gvt/edid.c
@@ -135,7 +135,7 @@ static void reset_gmbus_controller(struct intel_vgpu *vgpu)
static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int port, pin_select;
memcpy(&vgpu_vreg(vgpu, offset), p_data, bytes);
@@ -147,13 +147,13 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
if (pin_select == 0)
return 0;
- if (IS_BROXTON(dev_priv))
+ if (IS_BROXTON(i915))
port = bxt_get_port_from_gmbus0(pin_select);
- else if (IS_COFFEELAKE(dev_priv))
+ else if (IS_COFFEELAKE(i915))
port = cnp_get_port_from_gmbus0(pin_select);
else
port = get_port_from_gmbus0(pin_select);
- if (WARN_ON(port < 0))
+ if (drm_WARN_ON(&i915->drm, port < 0))
return 0;
vgpu->display.i2c_edid.state = I2C_GMBUS;
@@ -276,7 +276,9 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gmbus3_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- WARN_ON(1);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ drm_WARN_ON(&i915->drm, 1);
return 0;
}
@@ -371,7 +373,9 @@ static int gmbus2_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS2))
@@ -399,7 +403,9 @@ int intel_gvt_i2c_handle_gmbus_read(struct intel_vgpu *vgpu,
int intel_gvt_i2c_handle_gmbus_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
- if (WARN_ON(bytes > 8 && (offset & (bytes - 1))))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN_ON(&i915->drm, bytes > 8 && (offset & (bytes - 1))))
return -EINVAL;
if (offset == i915_mmio_reg_offset(PCH_GMBUS0))
@@ -473,6 +479,7 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
unsigned int offset,
void *p_data)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_i2c_edid *i2c_edid = &vgpu->display.i2c_edid;
int msg_length, ret_msg_size;
int msg, addr, ctrl, op;
@@ -532,9 +539,9 @@ void intel_gvt_i2c_handle_aux_ch_write(struct intel_vgpu *vgpu,
* support the gfx driver to do EDID access.
*/
} else {
- if (WARN_ON((op & 0x1) != GVT_AUX_I2C_READ))
+ if (drm_WARN_ON(&i915->drm, (op & 0x1) != GVT_AUX_I2C_READ))
return;
- if (WARN_ON(msg_length != 4))
+ if (drm_WARN_ON(&i915->drm, msg_length != 4))
return;
if (i2c_edid->edid_available && i2c_edid->slave_selected) {
unsigned char val = edid_get_byte(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/execlist.c b/drivers/gpu/drm/i915/gvt/execlist.c
index d6e7a1189bad..dd25c3024370 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.c
+++ b/drivers/gpu/drm/i915/gvt/execlist.c
@@ -39,8 +39,7 @@
#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0
-#define execlist_ring_mmio(gvt, ring_id, offset) \
- (gvt->dev_priv->engine[ring_id]->mmio_base + (offset))
+#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))
#define valid_context(ctx) ((ctx)->valid)
#define same_context(a, b) (((a)->context_id == (b)->context_id) && \
@@ -54,12 +53,12 @@ static int context_switch_events[] = {
[VECS0] = VECS_AS_CONTEXT_SWITCH,
};
-static int ring_id_to_context_switch_event(unsigned int ring_id)
+static int to_context_switch_event(const struct intel_engine_cs *engine)
{
- if (WARN_ON(ring_id >= ARRAY_SIZE(context_switch_events)))
+ if (WARN_ON(engine->id >= ARRAY_SIZE(context_switch_events)))
return -EINVAL;
- return context_switch_events[ring_id];
+ return context_switch_events[engine->id];
}
static void switch_virtual_execlist_slot(struct intel_vgpu_execlist *execlist)
@@ -93,9 +92,8 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
struct execlist_ctx_descriptor_format *desc = execlist->running_context;
struct intel_vgpu *vgpu = execlist->vgpu;
struct execlist_status_format status;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt,
- ring_id, _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
status.ldw = vgpu_vreg(vgpu, status_reg);
status.udw = vgpu_vreg(vgpu, status_reg + 4);
@@ -124,21 +122,19 @@ static void emulate_execlist_status(struct intel_vgpu_execlist *execlist)
}
static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
- struct execlist_context_status_format *status,
- bool trigger_interrupt_later)
+ struct execlist_context_status_format *status,
+ bool trigger_interrupt_later)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 write_pointer;
u32 ctx_status_ptr_reg, ctx_status_buf_reg, offset;
unsigned long hwsp_gpa;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
- ctx_status_buf_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_BUF);
+ ctx_status_ptr_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_PTR);
+ ctx_status_buf_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS_BUF);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
@@ -161,26 +157,24 @@ static void emulate_csb_update(struct intel_vgpu_execlist *execlist,
/* Update the CSB and CSB write pointer in HWSP */
hwsp_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
- vgpu->hws_pga[ring_id]);
+ vgpu->hws_pga[execlist->engine->id]);
if (hwsp_gpa != INTEL_GVT_INVALID_ADDR) {
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 +
- write_pointer * 8,
- status, 8);
+ hwsp_gpa + I915_HWS_CSB_BUF0_INDEX * 4 + write_pointer * 8,
+ status, 8);
intel_gvt_hypervisor_write_gpa(vgpu,
- hwsp_gpa +
- intel_hws_csb_write_index(dev_priv) * 4,
- &write_pointer, 4);
+ hwsp_gpa + intel_hws_csb_write_index(execlist->engine->i915) * 4,
+ &write_pointer, 4);
}
gvt_dbg_el("vgpu%d: w pointer %u reg %x csb l %x csb h %x\n",
- vgpu->id, write_pointer, offset, status->ldw, status->udw);
+ vgpu->id, write_pointer, offset, status->ldw, status->udw);
if (trigger_interrupt_later)
return;
intel_vgpu_trigger_virtual_event(vgpu,
- ring_id_to_context_switch_event(execlist->ring_id));
+ to_context_switch_event(execlist->engine));
}
static int emulate_execlist_ctx_schedule_out(
@@ -261,9 +255,8 @@ static struct intel_vgpu_execlist_slot *get_next_execlist_slot(
struct intel_vgpu_execlist *execlist)
{
struct intel_vgpu *vgpu = execlist->vgpu;
- int ring_id = execlist->ring_id;
- u32 status_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS);
+ u32 status_reg =
+ execlist_ring_mmio(execlist->engine, _EL_OFFSET_STATUS);
struct execlist_status_format status;
status.ldw = vgpu_vreg(vgpu, status_reg);
@@ -379,7 +372,6 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
struct execlist_ctx_descriptor_format ctx[2];
- int ring_id = workload->ring_id;
int ret;
if (!workload->emulate_schedule_in)
@@ -388,7 +380,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
ctx[0] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 0);
ctx[1] = *get_desc_from_elsp_dwords(&workload->elsp_dwords, 1);
- ret = emulate_execlist_schedule_in(&s->execlist[ring_id], ctx);
+ ret = emulate_execlist_schedule_in(&s->execlist[workload->engine->id],
+ ctx);
if (ret) {
gvt_vgpu_err("fail to emulate execlist schedule in\n");
return ret;
@@ -399,21 +392,21 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
- int ring_id = workload->ring_id;
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist =
+ &s->execlist[workload->engine->id];
struct intel_vgpu_workload *next_workload;
- struct list_head *next = workload_q_head(vgpu, ring_id)->next;
+ struct list_head *next = workload_q_head(vgpu, workload->engine)->next;
bool lite_restore = false;
int ret = 0;
- gvt_dbg_el("complete workload %p status %d\n", workload,
- workload->status);
+ gvt_dbg_el("complete workload %p status %d\n",
+ workload, workload->status);
- if (workload->status || (vgpu->resetting_eng & BIT(ring_id)))
+ if (workload->status || vgpu->resetting_eng & workload->engine->mask)
goto out;
- if (!list_empty(workload_q_head(vgpu, ring_id))) {
+ if (!list_empty(workload_q_head(vgpu, workload->engine))) {
struct execlist_ctx_descriptor_format *this_desc, *next_desc;
next_workload = container_of(next,
@@ -436,14 +429,15 @@ out:
return ret;
}
-static int submit_context(struct intel_vgpu *vgpu, int ring_id,
- struct execlist_ctx_descriptor_format *desc,
- bool emulate_schedule_in)
+static int submit_context(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
+ struct execlist_ctx_descriptor_format *desc,
+ bool emulate_schedule_in)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_vgpu_workload *workload = NULL;
- workload = intel_vgpu_create_workload(vgpu, ring_id, desc);
+ workload = intel_vgpu_create_workload(vgpu, engine, desc);
if (IS_ERR(workload))
return PTR_ERR(workload);
@@ -452,19 +446,20 @@ static int submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->emulate_schedule_in = emulate_schedule_in;
if (emulate_schedule_in)
- workload->elsp_dwords = s->execlist[ring_id].elsp_dwords;
+ workload->elsp_dwords = s->execlist[engine->id].elsp_dwords;
gvt_dbg_el("workload %p emulate schedule_in %d\n", workload,
- emulate_schedule_in);
+ emulate_schedule_in);
intel_vgpu_queue_workload(workload);
return 0;
}
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_ctx_descriptor_format *desc[2];
int i, ret;
@@ -489,7 +484,7 @@ int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id)
for (i = 0; i < ARRAY_SIZE(desc); i++) {
if (!desc[i]->valid)
continue;
- ret = submit_context(vgpu, ring_id, desc[i], i == 0);
+ ret = submit_context(vgpu, engine, desc[i], i == 0);
if (ret) {
gvt_vgpu_err("failed to submit desc %d\n", i);
return ret;
@@ -504,22 +499,22 @@ inv_desc:
return -EINVAL;
}
-static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
+static void init_vgpu_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct intel_vgpu_execlist *execlist = &s->execlist[ring_id];
+ struct intel_vgpu_execlist *execlist = &s->execlist[engine->id];
struct execlist_context_status_pointer_format ctx_status_ptr;
u32 ctx_status_ptr_reg;
memset(execlist, 0, sizeof(*execlist));
execlist->vgpu = vgpu;
- execlist->ring_id = ring_id;
+ execlist->engine = engine;
execlist->slot[0].index = 0;
execlist->slot[1].index = 1;
- ctx_status_ptr_reg = execlist_ring_mmio(vgpu->gvt, ring_id,
- _EL_OFFSET_STATUS_PTR);
+ ctx_status_ptr_reg = execlist_ring_mmio(engine, _EL_OFFSET_STATUS_PTR);
ctx_status_ptr.dw = vgpu_vreg(vgpu, ctx_status_ptr_reg);
ctx_status_ptr.read_ptr = 0;
ctx_status_ptr.write_ptr = 0x7;
@@ -529,7 +524,7 @@ static void init_vgpu_execlist(struct intel_vgpu *vgpu, int ring_id)
static void clean_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_submission *s = &vgpu->submission;
intel_engine_mask_t tmp;
@@ -544,12 +539,12 @@ static void clean_execlist(struct intel_vgpu *vgpu,
static void reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
intel_engine_mask_t tmp;
for_each_engine_masked(engine, &dev_priv->gt, engine_mask, tmp)
- init_vgpu_execlist(vgpu, engine->id);
+ init_vgpu_execlist(vgpu, engine);
}
static int init_execlist(struct intel_vgpu *vgpu,
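
The execlist.c changes thread a const struct intel_engine_cs * through in place of ring_id, which lets execlist_ring_mmio() reduce to mmio_base plus a fixed displacement, with no gvt->dev_priv->engine[] lookup. Sketch below; the engine's mmio_base is made up for illustration, and the _EL_OFFSET_* values mirror the two shown in the hunk above:

#include <stdio.h>

#define _EL_OFFSET_STATUS_BUF 0x370
#define _EL_OFFSET_STATUS_PTR 0x3A0

struct intel_engine_cs { const char *name; unsigned int mmio_base; };

#define execlist_ring_mmio(e, offset) ((e)->mmio_base + (offset))

int main(void)
{
	struct intel_engine_cs rcs0 = { "rcs0", 0x2000 }; /* illustrative base */

	printf("%s CSB buf reg %#x, CSB ptr reg %#x\n", rcs0.name,
	       execlist_ring_mmio(&rcs0, _EL_OFFSET_STATUS_BUF),
	       execlist_ring_mmio(&rcs0, _EL_OFFSET_STATUS_PTR));
	return 0;
}
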
diff --git a/drivers/gpu/drm/i915/gvt/execlist.h b/drivers/gpu/drm/i915/gvt/execlist.h
index 5c0c1fd30c83..d62cd14605a3 100644
--- a/drivers/gpu/drm/i915/gvt/execlist.h
+++ b/drivers/gpu/drm/i915/gvt/execlist.h
@@ -170,16 +170,17 @@ struct intel_vgpu_execlist {
struct intel_vgpu_execlist_slot *running_slot;
struct intel_vgpu_execlist_slot *pending_slot;
struct execlist_ctx_descriptor_format *running_context;
- int ring_id;
struct intel_vgpu *vgpu;
struct intel_vgpu_elsp_dwords elsp_dwords;
+ const struct intel_engine_cs *engine;
};
void intel_vgpu_clean_execlist(struct intel_vgpu *vgpu);
int intel_vgpu_init_execlist(struct intel_vgpu *vgpu);
-int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu, int ring_id);
+int intel_vgpu_submit_execlist(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine);
void intel_vgpu_reset_execlist(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask);
diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c
index 8bb292b01271..0889ad8291b0 100644
--- a/drivers/gpu/drm/i915/gvt/fb_decoder.c
+++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c
@@ -146,7 +146,7 @@ static int skl_format_to_drm(int format, bool rgb_order, bool alpha,
static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
u32 tiled, int stride_mask, int bpp)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
u32 stride = stride_reg;
@@ -202,8 +202,8 @@ static int get_active_pipe(struct intel_vgpu *vgpu)
int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_primary_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, fmt;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
@@ -332,9 +332,9 @@ static int cursor_mode_to_drm(int mode)
int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
struct intel_vgpu_cursor_plane_format *plane)
{
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 val, mode, index;
u32 alpha_plane, alpha_force;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
int pipe;
pipe = get_active_pipe(vgpu);
diff --git a/drivers/gpu/drm/i915/gvt/firmware.c b/drivers/gpu/drm/i915/gvt/firmware.c
index 049775e8e350..990a181094e3 100644
--- a/drivers/gpu/drm/i915/gvt/firmware.c
+++ b/drivers/gpu/drm/i915/gvt/firmware.c
@@ -68,9 +68,7 @@ static struct bin_attribute firmware_attr = {
static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
{
- struct drm_i915_private *i915 = gvt->dev_priv;
-
- *(u32 *)(data + offset) = intel_uncore_read_notrace(&i915->uncore,
+ *(u32 *)(data + offset) = intel_uncore_read_notrace(gvt->gt->uncore,
_MMIO(offset));
return 0;
}
@@ -78,7 +76,7 @@ static int mmio_snapshot_handler(struct intel_gvt *gvt, u32 offset, void *data)
static int expose_firmware_sysfs(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
void *firmware;
void *p;
@@ -129,7 +127,7 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
static void clean_firmware_sysfs(struct intel_gvt *gvt)
{
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
device_remove_bin_file(&pdev->dev, &firmware_attr);
vfree(firmware_attr.private);
@@ -146,15 +144,14 @@ void intel_gvt_free_firmware(struct intel_gvt *gvt)
clean_firmware_sysfs(gvt);
kfree(gvt->firmware.cfg_space);
- kfree(gvt->firmware.mmio);
+ vfree(gvt->firmware.mmio);
}
static int verify_firmware(struct intel_gvt *gvt,
const struct firmware *fw)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct gvt_firmware_header *h;
unsigned long id, crc32_start;
const void *mem;
@@ -208,8 +205,7 @@ invalid_firmware:
int intel_gvt_load_firmware(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- struct pci_dev *pdev = dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
struct intel_gvt_firmware *firmware = &gvt->firmware;
struct gvt_firmware_header *h;
const struct firmware *fw;
@@ -229,7 +225,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
firmware->cfg_space = mem;
- mem = kmalloc(info->mmio_size, GFP_KERNEL);
+ mem = vmalloc(info->mmio_size);
if (!mem) {
kfree(path);
kfree(firmware->cfg_space);
@@ -244,7 +240,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt)
gvt_dbg_core("request hw state firmware %s...\n", path);
- ret = request_firmware(&fw, path, &dev_priv->drm.pdev->dev);
+ ret = request_firmware(&fw, path, &gvt->gt->i915->drm.pdev->dev);
kfree(path);
if (ret)
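
Note the allocator change folded into the firmware.c hunks: the MMIO snapshot buffer moves from kmalloc() to vmalloc(), with the free side switched from kfree() to vfree() to match, since a buffer of info->mmio_size only needs virtually contiguous backing. A trivial userspace sketch of the pairing, with stubs standing in for the kernel allocators:

#include <stdlib.h>

#define MMIO_SIZE (2u << 20)	/* assumed size, for illustration only */

/* Userspace stubs standing in for the kernel's vmalloc()/vfree(). */
static void *vmalloc_stub(size_t size) { return malloc(size); }
static void vfree_stub(void *addr) { free(addr); }

int main(void)
{
	void *mmio = vmalloc_stub(MMIO_SIZE); /* was kmalloc(GFP_KERNEL) */

	if (!mmio)
		return 1;
	/* ... snapshot the tracked registers into the buffer ... */
	vfree_stub(mmio); /* free must match the allocator actually used */
	return 0;
}
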
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index 34cb404ba4b7..2a4b23f8aa74 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -71,8 +71,10 @@ bool intel_gvt_ggtt_validate_range(struct intel_vgpu *vgpu, u64 addr, u32 size)
/* translate a guest gmadr to host gmadr */
int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
{
- if (WARN(!vgpu_gmadr_is_valid(vgpu, g_addr),
- "invalid guest gmadr %llx\n", g_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !vgpu_gmadr_is_valid(vgpu, g_addr),
+ "invalid guest gmadr %llx\n", g_addr))
return -EACCES;
if (vgpu_gmadr_is_aperture(vgpu, g_addr))
@@ -87,8 +89,10 @@ int intel_gvt_ggtt_gmadr_g2h(struct intel_vgpu *vgpu, u64 g_addr, u64 *h_addr)
/* translate a host gmadr to guest gmadr */
int intel_gvt_ggtt_gmadr_h2g(struct intel_vgpu *vgpu, u64 h_addr, u64 *g_addr)
{
- if (WARN(!gvt_gmadr_is_valid(vgpu->gvt, h_addr),
- "invalid host gmadr %llx\n", h_addr))
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+
+ if (drm_WARN(&i915->drm, !gvt_gmadr_is_valid(vgpu->gvt, h_addr),
+ "invalid host gmadr %llx\n", h_addr))
return -EACCES;
if (gvt_gmadr_is_aperture(vgpu->gvt, h_addr))
@@ -275,24 +279,23 @@ static inline int get_pse_type(int type)
return gtt_type_table[type].pse_entry_type;
}
-static u64 read_pte64(struct drm_i915_private *dev_priv, unsigned long index)
+static u64 read_pte64(struct i915_ggtt *ggtt, unsigned long index)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
return readq(addr);
}
-static void ggtt_invalidate(struct drm_i915_private *dev_priv)
+static void ggtt_invalidate(struct intel_gt *gt)
{
- mmio_hw_access_pre(dev_priv);
- I915_WRITE(GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_pre(gt);
+ intel_uncore_write(gt->uncore, GFX_FLSH_CNTL_GEN6, GFX_FLSH_CNTL_EN);
+ mmio_hw_access_post(gt);
}
-static void write_pte64(struct drm_i915_private *dev_priv,
- unsigned long index, u64 pte)
+static void write_pte64(struct i915_ggtt *ggtt, unsigned long index, u64 pte)
{
- void __iomem *addr = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm + index;
+ void __iomem *addr = (gen8_pte_t __iomem *)ggtt->gsm + index;
writeq(pte, addr);
}
@@ -315,7 +318,7 @@ static inline int gtt_get_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
+ e->val64 = read_pte64(vgpu->gvt->gt->ggtt, index);
} else {
e->val64 = *((u64 *)pt + index);
}
@@ -340,7 +343,7 @@ static inline int gtt_set_entry64(void *pt,
if (WARN_ON(ret))
return ret;
} else if (!pt) {
- write_pte64(vgpu->gvt->dev_priv, index, e->val64);
+ write_pte64(vgpu->gvt->gt->ggtt, index, e->val64);
} else {
*((u64 *)pt + index) = e->val64;
}
@@ -734,7 +737,7 @@ static int detach_oos_page(struct intel_vgpu *vgpu,
static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
{
- struct device *kdev = &spt->vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &spt->vgpu->gvt->gt->i915->drm.pdev->dev;
trace_spt_free(spt->vgpu->id, spt, spt->guest_page.type);
@@ -819,7 +822,7 @@ static int reclaim_one_ppgtt_mm(struct intel_gvt *gvt);
static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
struct intel_vgpu *vgpu, enum intel_gvt_gtt_type type)
{
- struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *kdev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct intel_vgpu_ppgtt_spt *spt = NULL;
dma_addr_t daddr;
int ret;
@@ -940,6 +943,7 @@ static int ppgtt_invalidate_spt(struct intel_vgpu_ppgtt_spt *spt);
static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_entry *e)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
struct intel_vgpu_ppgtt_spt *s;
enum intel_gvt_gtt_type cur_pt_type;
@@ -952,7 +956,9 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu,
if (!gtt_type_is_pt(cur_pt_type) ||
!gtt_type_is_pt(cur_pt_type + 1)) {
- WARN(1, "Invalid page table type, cur_pt_type is: %d\n", cur_pt_type);
+ drm_WARN(&i915->drm, 1,
+ "Invalid page table type, cur_pt_type is: %d\n",
+ cur_pt_type);
return -EINVAL;
}
@@ -1044,7 +1050,7 @@ fail:
static bool vgpu_ips_enabled(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
if (INTEL_GEN(dev_priv) == 9 || INTEL_GEN(dev_priv) == 10) {
u32 ips = vgpu_vreg_t(vgpu, GEN8_GAMW_ECO_DEV_RW_IA) &
@@ -1153,7 +1159,7 @@ static int is_2MB_gtt_possible(struct intel_vgpu *vgpu,
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
unsigned long pfn;
- if (!HAS_PAGE_SIZES(vgpu->gvt->dev_priv, I915_GTT_PAGE_SIZE_2M))
+ if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M))
return 0;
pfn = intel_gvt_hypervisor_gfn_to_mfn(vgpu, ops->get_pfn(entry));
@@ -1956,7 +1962,11 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
if (mm->type == INTEL_GVT_MM_PPGTT) {
list_del(&mm->ppgtt_mm.list);
+
+ mutex_lock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
list_del(&mm->ppgtt_mm.lru_list);
+ mutex_unlock(&mm->vgpu->gvt->gtt.ppgtt_mm_lock);
+
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
@@ -2310,7 +2320,7 @@ out:
ggtt_invalidate_pte(vgpu, &e);
ggtt_set_host_entry(ggtt_mm, &m, g_gtt_index);
- ggtt_invalidate(gvt->dev_priv);
+ ggtt_invalidate(gvt->gt);
return 0;
}
@@ -2343,16 +2353,18 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
static int alloc_scratch_pages(struct intel_vgpu *vgpu,
enum intel_gvt_gtt_type type)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_gtt *gtt = &vgpu->gtt;
struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
int page_entry_num = I915_GTT_PAGE_SIZE >>
vgpu->gvt->device_info.gtt_entry_size_shift;
void *scratch_pt;
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
- if (WARN_ON(type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
+ if (drm_WARN_ON(&i915->drm,
+ type < GTT_TYPE_PPGTT_PTE_PT || type >= GTT_TYPE_MAX))
return -EINVAL;
scratch_pt = (void *)get_zeroed_page(GFP_KERNEL);
@@ -2406,7 +2418,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
static int release_scratch_page_tree(struct intel_vgpu *vgpu)
{
int i;
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
@@ -2678,7 +2690,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
{
int ret;
void *page;
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr;
gvt_dbg_core("init gtt\n");
@@ -2727,7 +2739,7 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
*/
void intel_gvt_clean_gtt(struct intel_gvt *gvt)
{
- struct device *dev = &gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &gvt->gt->i915->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_mfn <<
I915_GTT_PAGE_SHIFT);
@@ -2775,7 +2787,6 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu)
void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops;
struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE};
struct intel_gvt_gtt_entry old_entry;
@@ -2805,7 +2816,7 @@ void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old)
ggtt_set_host_entry(vgpu->gtt.ggtt_mm, &entry, index++);
}
- ggtt_invalidate(dev_priv);
+ ggtt_invalidate(gvt->gt);
}
/**
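
Also tucked into the gtt.c diff is a locking fix in _intel_vgpu_mm_release(): removal from the shared ppgtt LRU now happens under ppgtt_mm_lock, while the per-vGPU ppgtt_mm.list removal stays outside it. A compilable pthread sketch of that split, field names mirroring the kernel ones but everything else mocked:

#include <pthread.h>

struct list_head { struct list_head *next, *prev; };

static void list_del(struct list_head *e)
{
	e->prev->next = e->next;
	e->next->prev = e->prev;
}

struct gvt_gtt {
	pthread_mutex_t ppgtt_mm_lock;	/* guards the shared LRU only */
	struct list_head lru_list_head;
};

struct vgpu_mm { struct list_head list, lru_list; };

static void mm_release(struct gvt_gtt *gtt, struct vgpu_mm *mm)
{
	list_del(&mm->list);	/* per-vGPU list: serialized by the caller */

	pthread_mutex_lock(&gtt->ppgtt_mm_lock);
	list_del(&mm->lru_list);	/* shared LRU: needs the global lock */
	pthread_mutex_unlock(&gtt->ppgtt_mm_lock);
}

int main(void)
{
	struct gvt_gtt gtt = { PTHREAD_MUTEX_INITIALIZER,
			       { &gtt.lru_list_head, &gtt.lru_list_head } };
	struct list_head mm_head;
	struct vgpu_mm mm;

	mm_head.next = mm_head.prev = &mm.list;
	mm.list.next = mm.list.prev = &mm_head;
	mm.lru_list.next = mm.lru_list.prev = &gtt.lru_list_head;
	gtt.lru_list_head.next = gtt.lru_list_head.prev = &mm.lru_list;

	mm_release(&gtt, &mm);
	return gtt.lru_list_head.next == &gtt.lru_list_head ? 0 : 1;
}
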
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index 8f37eefa0a02..9e1787867894 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -35,6 +35,7 @@
#include <linux/kthread.h>
#include "i915_drv.h"
+#include "intel_gvt.h"
#include "gvt.h"
#include <linux/vfio.h>
#include <linux/mdev.h>
@@ -49,15 +50,15 @@ static const char * const supported_hypervisors[] = {
static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
const char *name)
{
+ const char *driver_name =
+ dev_driver_string(&gvt->gt->i915->drm.pdev->dev);
int i;
- struct intel_vgpu_type *t;
- const char *driver_name = dev_driver_string(
- &gvt->dev_priv->drm.pdev->dev);
+ name += strlen(driver_name) + 1;
for (i = 0; i < gvt->num_types; i++) {
- t = &gvt->types[i];
- if (!strncmp(t->name, name + strlen(driver_name) + 1,
- sizeof(t->name)))
+ struct intel_vgpu_type *t = &gvt->types[i];
+
+ if (!strncmp(t->name, name, sizeof(t->name)))
return t;
}
@@ -120,10 +121,8 @@ static struct attribute_group *gvt_vgpu_type_groups[] = {
[0 ... NR_MAX_INTEL_VGPU_TYPES - 1] = NULL,
};
-static bool intel_get_gvt_attrs(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups)
+static bool intel_get_gvt_attrs(struct attribute_group ***intel_vgpu_type_groups)
{
- *type_attrs = gvt_type_attrs;
*intel_vgpu_type_groups = gvt_vgpu_type_groups;
return true;
}
@@ -191,7 +190,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
static void init_device_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
+ struct pci_dev *pdev = gvt->gt->i915->drm.pdev;
info->max_support_vgpus = 8;
info->cfg_space_size = PCI_CFG_SPACE_EXP_SIZE;
@@ -257,17 +256,17 @@ static int init_service_thread(struct intel_gvt *gvt)
/**
* intel_gvt_clean_device - clean a GVT device
- * @dev_priv: i915 private
+ * @i915: i915 private
*
* This function is called at the driver unloading stage, to free the
* resources owned by a GVT device.
*
*/
-void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
+void intel_gvt_clean_device(struct drm_i915_private *i915)
{
- struct intel_gvt *gvt = to_gvt(dev_priv);
+ struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);
- if (WARN_ON(!gvt))
+ if (drm_WARN_ON(&i915->drm, !gvt))
return;
intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
@@ -285,13 +284,12 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- kfree(dev_priv->gvt);
- dev_priv->gvt = NULL;
+ kfree(i915->gvt);
}
/**
* intel_gvt_init_device - initialize a GVT device
- * @dev_priv: drm i915 private data
+ * @i915: drm i915 private data
*
* This function is called at the initialization stage, to initialize
* necessary GVT components.
@@ -300,13 +298,13 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
* Zero on success, negative error code if failed.
*
*/
-int intel_gvt_init_device(struct drm_i915_private *dev_priv)
+int intel_gvt_init_device(struct drm_i915_private *i915)
{
struct intel_gvt *gvt;
struct intel_vgpu *vgpu;
int ret;
- if (WARN_ON(dev_priv->gvt))
+ if (drm_WARN_ON(&i915->drm, i915->gvt))
return -EEXIST;
gvt = kzalloc(sizeof(struct intel_gvt), GFP_KERNEL);
@@ -319,7 +317,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
- gvt->dev_priv = dev_priv;
+ gvt->gt = &i915->gt;
+ i915->gvt = gvt;
init_device_info(gvt);
@@ -378,8 +377,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
intel_gvt_debugfs_init(gvt);
gvt_dbg_core("gvt device initialization is done\n");
- dev_priv->gvt = gvt;
- intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+ intel_gvt_host.dev = &i915->drm.pdev->dev;
intel_gvt_host.initialized = true;
return 0;
@@ -404,6 +402,7 @@ out_clean_mmio_info:
out_clean_idr:
idr_destroy(&gvt->vgpu_idr);
kfree(gvt);
+ i915->gvt = NULL;
return ret;
}
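
intel_gvt_clean_device() now claims the pointer with fetch_and_zero(&i915->gvt), so i915->gvt is cleared before teardown proceeds instead of after the final kfree(). A userspace sketch of the idiom; the macro body is modelled on the i915 helper, not copied from it:

#include <stdlib.h>

/* Modelled on the i915 helper, not copied from it. */
#define fetch_and_zero(ptr)				\
	({						\
		__typeof__(*(ptr)) __v = *(ptr);	\
		*(ptr) = 0;				\
		__v;					\
	})

struct intel_gvt { int dummy; };
struct i915 { struct intel_gvt *gvt; };

static void clean_device(struct i915 *i915)
{
	struct intel_gvt *gvt = fetch_and_zero(&i915->gvt);

	if (!gvt) /* the real code drm_WARN_ON()s here */
		return;
	/* ... tear down gvt-owned resources ... */
	free(gvt);
}

int main(void)
{
	struct i915 i915 = { .gvt = malloc(sizeof(struct intel_gvt)) };

	clean_device(&i915); /* pointer cleared before teardown runs */
	clean_device(&i915); /* second call sees NULL and bails */
	return i915.gvt == NULL ? 0 : 1;
}
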
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index b47c6acaf9c0..58c2c7932e3f 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -196,41 +196,21 @@ struct intel_vgpu {
struct dentry *debugfs;
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
- struct {
- struct mdev_device *mdev;
- struct vfio_region *region;
- int num_regions;
- struct eventfd_ctx *intx_trigger;
- struct eventfd_ctx *msi_trigger;
-
- /*
- * Two caches are used to avoid mapping duplicated pages (eg.
- * scratch pages). This help to reduce dma setup overhead.
- */
- struct rb_root gfn_cache;
- struct rb_root dma_addr_cache;
- unsigned long nr_cache_entries;
- struct mutex cache_lock;
-
- struct notifier_block iommu_notifier;
- struct notifier_block group_notifier;
- struct kvm *kvm;
- struct work_struct release_work;
- atomic_t released;
- struct vfio_device *vfio_device;
- } vdev;
-#endif
+ /* Hypervisor-specific device state. */
+ void *vdev;
struct list_head dmabuf_obj_list_head;
struct mutex dmabuf_lock;
struct idr object_idr;
- struct completion vblank_done;
-
u32 scan_nonprivbb;
};
+static inline void *intel_vgpu_vdev(struct intel_vgpu *vgpu)
+{
+ return vgpu->vdev;
+}
+
/* validating GM healthy status*/
#define vgpu_is_vm_unhealthy(ret_val) \
(((ret_val) == -EBADRQC) || ((ret_val) == -EFAULT))
@@ -306,7 +286,7 @@ struct intel_gvt {
/* scheduler scope lock, protect gvt and vgpu schedule related data */
struct mutex sched_lock;
- struct drm_i915_private *dev_priv;
+ struct intel_gt *gt;
struct idr vgpu_idr; /* vGPU IDR pool */
struct intel_gvt_device_info device_info;
@@ -376,14 +356,15 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
+#define gvt_to_ggtt(gvt) ((gvt)->gt->ggtt)
+
/* Aperture/GM space definitions for GVT device */
-#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
-#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.gmadr.start)
+#define gvt_aperture_sz(gvt) gvt_to_ggtt(gvt)->mappable_end
+#define gvt_aperture_pa_base(gvt) gvt_to_ggtt(gvt)->gmadr.start
-#define gvt_ggtt_gm_sz(gvt) (gvt->dev_priv->ggtt.vm.total)
-#define gvt_ggtt_sz(gvt) \
- ((gvt->dev_priv->ggtt.vm.total >> PAGE_SHIFT) << 3)
-#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
+#define gvt_ggtt_gm_sz(gvt) gvt_to_ggtt(gvt)->vm.total
+#define gvt_ggtt_sz(gvt) (gvt_to_ggtt(gvt)->vm.total >> PAGE_SHIFT << 3)
+#define gvt_hidden_sz(gvt) (gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_aperture_gmadr_base(gvt) (0)
#define gvt_aperture_gmadr_end(gvt) (gvt_aperture_gmadr_base(gvt) \
@@ -394,7 +375,7 @@ int intel_gvt_load_firmware(struct intel_gvt *gvt);
#define gvt_hidden_gmadr_end(gvt) (gvt_hidden_gmadr_base(gvt) \
+ gvt_hidden_sz(gvt) - 1)
-#define gvt_fence_sz(gvt) ((gvt)->dev_priv->ggtt.num_fences)
+#define gvt_fence_sz(gvt) (gvt_to_ggtt(gvt)->num_fences)
/* Aperture/GM space definitions for vGPU */
#define vgpu_aperture_offset(vgpu) ((vgpu)->gm.low_gm_node.start)
@@ -570,8 +551,7 @@ struct intel_gvt_ops {
void (*vgpu_deactivate)(struct intel_vgpu *);
struct intel_vgpu_type *(*gvt_find_vgpu_type)(struct intel_gvt *gvt,
const char *name);
- bool (*get_gvt_attrs)(struct attribute ***type_attrs,
- struct attribute_group ***intel_vgpu_type_groups);
+ bool (*get_gvt_attrs)(struct attribute_group ***intel_vgpu_type_groups);
int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
@@ -586,14 +566,14 @@ enum {
GVT_FAILSAFE_GUEST_ERR,
};
-static inline void mmio_hw_access_pre(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_pre(struct intel_gt *gt)
{
- intel_runtime_pm_get(&dev_priv->runtime_pm);
+ intel_runtime_pm_get(gt->uncore->rpm);
}
-static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
+static inline void mmio_hw_access_post(struct intel_gt *gt)
{
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ intel_runtime_pm_put_unchecked(gt->uncore->rpm);
}
/**
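
The gvt.h macro block funnels every aperture/GM query through a single gvt_to_ggtt() accessor. A small sketch of the consolidated form, with the kernel's ggtt->vm.total flattened to a plain total field and illustrative sizes:

#include <stdio.h>

struct i915_ggtt {
	unsigned long long mappable_end;	/* aperture size */
	unsigned long long total;		/* ggtt->vm.total, flattened */
	unsigned int num_fences;
};
struct intel_gt { struct i915_ggtt *ggtt; };
struct intel_gvt { struct intel_gt *gt; };

#define gvt_to_ggtt(gvt)	((gvt)->gt->ggtt)
#define gvt_aperture_sz(gvt)	(gvt_to_ggtt(gvt)->mappable_end)
#define gvt_ggtt_gm_sz(gvt)	(gvt_to_ggtt(gvt)->total)
#define gvt_hidden_sz(gvt)	(gvt_ggtt_gm_sz(gvt) - gvt_aperture_sz(gvt))
#define gvt_fence_sz(gvt)	(gvt_to_ggtt(gvt)->num_fences)

int main(void)
{
	struct i915_ggtt ggtt = {
		.mappable_end = 256ull << 20,	/* illustrative numbers */
		.total = 1ull << 32,
		.num_fences = 32,
	};
	struct intel_gt gt = { &ggtt };
	struct intel_gvt gvt = { &gt };

	printf("hidden GM %llu MiB, %u fences\n",
	       gvt_hidden_sz(&gvt) >> 20, gvt_fence_sz(&gvt));
	return 0;
}
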
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index 6d28d72e6c7e..0182e2a5acff 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -49,15 +49,17 @@
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
- if (IS_BROADWELL(gvt->dev_priv))
+ struct drm_i915_private *i915 = gvt->gt->i915;
+
+ if (IS_BROADWELL(i915))
return D_BDW;
- else if (IS_SKYLAKE(gvt->dev_priv))
+ else if (IS_SKYLAKE(i915))
return D_SKL;
- else if (IS_KABYLAKE(gvt->dev_priv))
+ else if (IS_KABYLAKE(i915))
return D_KBL;
- else if (IS_BROXTON(gvt->dev_priv))
+ else if (IS_BROXTON(i915))
return D_BXT;
- else if (IS_COFFEELAKE(gvt->dev_priv))
+ else if (IS_COFFEELAKE(i915))
return D_CFL;
return 0;
@@ -142,25 +144,25 @@ static int new_mmio_info(struct intel_gvt *gvt,
}
/**
- * intel_gvt_render_mmio_to_ring_id - convert a mmio offset into ring id
+ * intel_gvt_render_mmio_to_engine - convert a mmio offset into the engine
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
- * Ring ID on success, negative error code if failed.
+ * The engine containing the offset within its mmio page.
*/
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int offset)
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int offset)
{
- enum intel_engine_id id;
struct intel_engine_cs *engine;
+ enum intel_engine_id id;
offset &= ~GENMASK(11, 0);
- for_each_engine(engine, gvt->dev_priv, id) {
+ for_each_engine(engine, gvt->gt, id)
if (engine->mmio_base == offset)
- return id;
- }
- return -ENODEV;
+ return engine;
+
+ return NULL;
}
#define offset_to_fence_num(offset) \
@@ -217,7 +219,7 @@ static int gamw_echo_dev_rw_ia_write(struct intel_vgpu *vgpu,
{
u32 ips = (*(u32 *)p_data) & GAMW_ECO_ENABLE_64K_IPS_FIELD;
- if (INTEL_GEN(vgpu->gvt->dev_priv) <= 10) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) <= 10) {
if (ips == GAMW_ECO_ENABLE_64K_IPS_FIELD)
gvt_dbg_core("vgpu%d: ips enabled\n", vgpu->id);
else if (!ips)
@@ -253,7 +255,7 @@ static int fence_mmio_read(struct intel_vgpu *vgpu, unsigned int off,
static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct intel_gvt *gvt = vgpu->gvt;
unsigned int fence_num = offset_to_fence_num(off);
int ret;
@@ -262,10 +264,10 @@ static int fence_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
return ret;
write_vreg(vgpu, off, p_data, bytes);
- mmio_hw_access_pre(dev_priv);
+ mmio_hw_access_pre(gvt->gt);
intel_vgpu_write_fence(vgpu, fence_num,
vgpu_vreg64(vgpu, fence_num_to_offset(fence_num)));
- mmio_hw_access_post(dev_priv);
+ mmio_hw_access_post(gvt->gt);
return 0;
}
@@ -283,7 +285,7 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
old = vgpu_vreg(vgpu, offset);
new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);
- if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(vgpu->gvt->gt->i915) >= 9) {
switch (offset) {
case FORCEWAKE_RENDER_GEN9_REG:
ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -345,7 +347,7 @@ static int gdrst_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
gvt_dbg_mmio("vgpu%d: request GUC Reset\n", vgpu->id);
vgpu_vreg_t(vgpu, GUC_STATUS) |= GS_MIA_IN_RESET;
}
- engine_mask &= INTEL_INFO(vgpu->gvt->dev_priv)->engine_mask;
+ engine_mask &= INTEL_INFO(vgpu->gvt->gt->i915)->engine_mask;
}
/* vgpu_lock already hold by emulate mmio r/w */
@@ -492,7 +494,7 @@ static i915_reg_t force_nonpriv_white_list[] = {
};
/* a simple bsearch */
-static inline bool in_whitelist(unsigned int reg)
+static inline bool in_whitelist(u32 reg)
{
int left = 0, right = ARRAY_SIZE(force_nonpriv_white_list);
i915_reg_t *array = force_nonpriv_white_list;
@@ -514,26 +516,21 @@ static int force_nonpriv_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
u32 reg_nonpriv = (*(u32 *)p_data) & REG_GENMASK(25, 2);
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
- u32 ring_base;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- int ret = -EINVAL;
-
- if ((bytes != 4) || ((offset & (bytes - 1)) != 0) || ring_id < 0) {
- gvt_err("vgpu(%d) ring %d Invalid FORCE_NONPRIV offset %x(%dB)\n",
- vgpu->id, ring_id, offset, bytes);
- return ret;
- }
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
- ring_base = dev_priv->engine[ring_id]->mmio_base;
+ if (bytes != 4 || !IS_ALIGNED(offset, bytes) || !engine) {
+ gvt_err("vgpu(%d) Invalid FORCE_NONPRIV offset %x(%dB)\n",
+ vgpu->id, offset, bytes);
+ return -EINVAL;
+ }
- if (in_whitelist(reg_nonpriv) ||
- reg_nonpriv == i915_mmio_reg_offset(RING_NOPID(ring_base))) {
- ret = intel_vgpu_default_mmio_write(vgpu, offset, p_data,
- bytes);
- } else
+ if (!in_whitelist(reg_nonpriv) &&
+ reg_nonpriv != i915_mmio_reg_offset(RING_NOPID(engine->mmio_base))) {
gvt_err("vgpu(%d) Invalid FORCE_NONPRIV write %x at offset %x\n",
- vgpu->id, *(u32 *)p_data, offset);
+ vgpu->id, reg_nonpriv, offset);
+ } else
+ intel_vgpu_default_mmio_write(vgpu, offset, p_data, bytes);
return 0;
}
@@ -756,7 +753,7 @@ static int south_chicken2_mmio_write(struct intel_vgpu *vgpu,
static int pri_surf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
u32 pipe = DSPSURF_TO_PIPE(offset);
int event = SKL_FLIP_EVENT(pipe, PLANE_PRIMARY);
@@ -797,7 +794,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data,
unsigned int bytes)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum pipe pipe = REG_50080_TO_PIPE(offset);
enum plane_id plane = REG_50080_TO_PLANE(offset);
int event = SKL_FLIP_EVENT(pipe, plane);
@@ -821,7 +818,7 @@ static int reg50080_mmio_write(struct intel_vgpu *vgpu,
static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
unsigned int reg)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
enum intel_gvt_event_type event;
if (reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_A)))
@@ -836,7 +833,7 @@ static int trigger_aux_channel_interrupt(struct intel_vgpu *vgpu,
reg == i915_mmio_reg_offset(DP_AUX_CH_CTL(AUX_CH_D)))
event = AUX_CHANNEL_D;
else {
- WARN_ON(true);
+ drm_WARN_ON(&dev_priv->drm, true);
return -EINVAL;
}
@@ -924,11 +921,11 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
write_vreg(vgpu, offset, p_data, bytes);
data = vgpu_vreg(vgpu, offset);
- if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
+ if ((INTEL_GEN(vgpu->gvt->gt->i915) >= 9)
&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
/* SKL DPB/C/D aux ctl register changed */
return 0;
- } else if (IS_BROADWELL(vgpu->gvt->dev_priv) &&
+ } else if (IS_BROADWELL(vgpu->gvt->gt->i915) &&
offset != _REG_HSW_DP_AUX_CH_CTL(port_index)) {
/* write to the data registers */
return 0;
@@ -1244,8 +1241,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct kobject *kobj = &dev_priv->drm.primary->kdev->kobj;
+ struct kobject *kobj = &vgpu->gvt->gt->i915->drm.primary->kdev->kobj;
char *env[3] = {NULL, NULL, NULL};
char vmid_str[20];
char display_ready_str[20];
@@ -1306,13 +1302,15 @@ static int pvinfo_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
static int pf_write(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 val = *(u32 *)p_data;
if ((offset == _PS_1A_CTRL || offset == _PS_2A_CTRL ||
offset == _PS_1B_CTRL || offset == _PS_2B_CTRL ||
offset == _PS_1C_CTRL) && (val & PS_PLANE_SEL_MASK) != 0) {
- WARN_ONCE(true, "VM(%d): guest is trying to scaling a plane\n",
- vgpu->id);
+ drm_WARN_ONCE(&i915->drm, true,
+ "VM(%d): guest is trying to scaling a plane\n",
+ vgpu->id);
return 0;
}
@@ -1360,13 +1358,15 @@ static int fpga_dbg_mmio_write(struct intel_vgpu *vgpu,
static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 mode;
write_vreg(vgpu, offset, p_data, bytes);
mode = vgpu_vreg(vgpu, offset);
if (GFX_MODE_BIT_SET_IN_MASK(mode, START_DMA)) {
- WARN_ONCE(1, "VM(%d): iGVT-g doesn't support GuC\n",
+ drm_WARN_ONCE(&i915->drm, 1,
+ "VM(%d): iGVT-g doesn't support GuC\n",
vgpu->id);
return 0;
}
@@ -1377,10 +1377,12 @@ static int dma_ctrl_write(struct intel_vgpu *vgpu, unsigned int offset,
static int gen9_trtte_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
u32 trtte = *(u32 *)p_data;
if ((trtte & 1) && (trtte & (1 << 1)) == 0) {
- WARN(1, "VM(%d): Use physical address for TRTT!\n",
+ drm_WARN(&i915->drm, 1,
+ "VM(%d): Use physical address for TRTT!\n",
vgpu->id);
return -EINVAL;
}
@@ -1427,9 +1429,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
switch (cmd) {
case GEN9_PCODE_READ_MEM_LATENCY:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1439,7 +1441,7 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
*data0 = 0x1e1a1100;
else
*data0 = 0x61514b3d;
- } else if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ } else if (IS_BROXTON(vgpu->gvt->gt->i915)) {
/**
* "Read memory latency" command on gen9.
* Below memory latency values are read
@@ -1452,9 +1454,9 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
}
break;
case SKL_PCODE_CDCLK_CONTROL:
- if (IS_SKYLAKE(vgpu->gvt->dev_priv)
- || IS_KABYLAKE(vgpu->gvt->dev_priv)
- || IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_SKYLAKE(vgpu->gvt->gt->i915) ||
+ IS_KABYLAKE(vgpu->gvt->gt->i915) ||
+ IS_COFFEELAKE(vgpu->gvt->gt->i915))
*data0 = SKL_CDCLK_READY_FOR_CHANGE;
break;
case GEN6_PCODE_READ_RC6VIDS:
@@ -1478,24 +1480,26 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 value = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
offset, value);
return -EINVAL;
}
+
/*
* Need to emulate all the HWSP register write to ensure host can
* update the VM CSB status correctly. Here listed registers can
* support BDW, SKL or other platforms with same HWSP registers.
*/
- if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
+ if (unlikely(!engine)) {
gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
offset);
return -EINVAL;
}
- vgpu->hws_pga[ring_id] = value;
+ vgpu->hws_pga[engine->id] = value;
gvt_dbg_mmio("VM(%d) write: 0x%x to HWSP: 0x%x\n",
vgpu->id, value, offset);
@@ -1507,7 +1511,7 @@ static int skl_power_well_ctl_write(struct intel_vgpu *vgpu,
{
u32 v = *(u32 *)p_data;
- if (IS_BROXTON(vgpu->gvt->dev_priv))
+ if (IS_BROXTON(vgpu->gvt->gt->i915))
v &= (1 << 31) | (1 << 29);
else
v &= (1 << 31) | (1 << 29) | (1 << 9) |
@@ -1654,26 +1658,24 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
unsigned int offset, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
- int ring_id;
- u32 ring_base;
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(gvt, offset);
- ring_id = intel_gvt_render_mmio_to_ring_id(gvt, offset);
/**
* Read HW reg in following case
* a. the offset isn't a ring mmio
* b. the offset's ring is running on hw.
* c. the offset is ring time stamp mmio
*/
- if (ring_id >= 0)
- ring_base = dev_priv->engine[ring_id]->mmio_base;
-
- if (ring_id < 0 || vgpu == gvt->scheduler.engine_owner[ring_id] ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP(ring_base)) ||
- offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(ring_base))) {
- mmio_hw_access_pre(dev_priv);
- vgpu_vreg(vgpu, offset) = I915_READ(_MMIO(offset));
- mmio_hw_access_post(dev_priv);
+
+ if (!engine ||
+ vgpu == gvt->scheduler.engine_owner[engine->id] ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP(engine->mmio_base)) ||
+ offset == i915_mmio_reg_offset(RING_TIMESTAMP_UDW(engine->mmio_base))) {
+ mmio_hw_access_pre(gvt->gt);
+ vgpu_vreg(vgpu, offset) =
+ intel_uncore_read(gvt->gt->uncore, _MMIO(offset));
+ mmio_hw_access_post(gvt->gt);
}
return intel_vgpu_default_mmio_read(vgpu, offset, p_data, bytes);
@@ -1682,22 +1684,23 @@ static int mmio_read_from_hw(struct intel_vgpu *vgpu,
static int elsp_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
struct intel_vgpu_execlist *execlist;
u32 data = *(u32 *)p_data;
int ret = 0;
- if (WARN_ON(ring_id < 0 || ring_id >= I915_NUM_ENGINES))
+ if (drm_WARN_ON(&i915->drm, !engine))
return -EINVAL;
- execlist = &vgpu->submission.execlist[ring_id];
+ execlist = &vgpu->submission.execlist[engine->id];
execlist->elsp_dwords.data[3 - execlist->elsp_dwords.index] = data;
if (execlist->elsp_dwords.index == 3) {
- ret = intel_vgpu_submit_execlist(vgpu, ring_id);
+ ret = intel_vgpu_submit_execlist(vgpu, engine);
if(ret)
- gvt_vgpu_err("fail submit workload on ring %d\n",
- ring_id);
+ gvt_vgpu_err("fail submit workload on ring %s\n",
+ engine->name);
}
++execlist->elsp_dwords.index;
@@ -1709,12 +1712,13 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes)
{
u32 data = *(u32 *)p_data;
- int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);
+ const struct intel_engine_cs *engine =
+ intel_gvt_render_mmio_to_engine(vgpu->gvt, offset);
bool enable_execlist;
int ret;
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(1);
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv))
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915))
(*(u32 *)p_data) &= ~_MASKED_BIT_ENABLE(2);
write_vreg(vgpu, offset, p_data, bytes);
@@ -1723,7 +1727,7 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
return 0;
}
- if (IS_COFFEELAKE(vgpu->gvt->dev_priv) &&
+ if (IS_COFFEELAKE(vgpu->gvt->gt->i915) &&
data & _MASKED_BIT_ENABLE(2)) {
enter_failsafe_mode(vgpu, GVT_FAILSAFE_UNSUPPORTED_GUEST);
return 0;
@@ -1743,16 +1747,16 @@ static int ring_mode_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
|| (data & _MASKED_BIT_DISABLE(GFX_RUN_LIST_ENABLE))) {
enable_execlist = !!(data & GFX_RUN_LIST_ENABLE);
- gvt_dbg_core("EXECLIST %s on ring %d\n",
- (enable_execlist ? "enabling" : "disabling"),
- ring_id);
+ gvt_dbg_core("EXECLIST %s on ring %s\n",
+ (enable_execlist ? "enabling" : "disabling"),
+ engine->name);
if (!enable_execlist)
return 0;
ret = intel_vgpu_select_submission_ops(vgpu,
- BIT(ring_id),
- INTEL_VGPU_EXECLIST_SUBMISSION);
+ engine->mask,
+ INTEL_VGPU_EXECLIST_SUBMISSION);
if (ret)
return ret;
@@ -1876,7 +1880,7 @@ static int csfe_chicken1_mmio_write(struct intel_vgpu *vgpu,
static int init_generic_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_RING_DFH(RING_IMR, D_ALL, F_CMD_ACCESS, NULL,
@@ -2415,9 +2419,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
MMIO_F(_MMIO(0x7144c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
MMIO_F(_MMIO(0x7244c), 0xc, 0, 0, 0, D_PRE_SKL, NULL, NULL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_A), D_ALL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_B), D_ALL);
- MMIO_D(PIPE_WM_LINETIME(PIPE_C), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_A), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_B), D_ALL);
+ MMIO_D(WM_LINETIME(PIPE_C), D_ALL);
MMIO_D(SPLL_CTL, D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL1), D_ALL);
MMIO_D(_MMIO(_WRPLL_CTL2), D_ALL);
@@ -2693,7 +2697,7 @@ static int init_generic_mmio_info(struct intel_gvt *gvt)
static int init_bdw_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(GEN8_GT_IMR(0), D_BDW_PLUS, NULL, intel_vgpu_reg_imr_handler);
@@ -2882,7 +2886,7 @@ static int init_bdw_mmio_info(struct intel_gvt *gvt)
static int init_skl_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_DH(FORCEWAKE_RENDER_GEN9, D_SKL_PLUS, NULL, mul_force_wake_write);
@@ -2902,7 +2906,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_D(HSW_PWR_WELL_CTL1, D_SKL_PLUS);
MMIO_DH(HSW_PWR_WELL_CTL2, D_SKL_PLUS, NULL, skl_power_well_ctl_write);
- MMIO_DH(DBUF_CTL, D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
+ MMIO_DH(DBUF_CTL_S(0), D_SKL_PLUS, NULL, gen9_dbuf_ctl_mmio_write);
MMIO_D(GEN9_PG_ENABLE, D_SKL_PLUS);
MMIO_D(GEN9_MEDIA_PG_IDLE_HYSTERESIS, D_SKL_PLUS);
@@ -3131,7 +3135,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
static int init_bxt_mmio_info(struct intel_gvt *gvt)
{
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *dev_priv = gvt->gt->i915;
int ret;
MMIO_F(_MMIO(0x80000), 0x3000, 0, 0, 0, D_BXT, NULL, NULL);
@@ -3367,7 +3371,7 @@ static struct gvt_mmio_block mmio_blocks[] = {
int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
{
struct intel_gvt_device_info *info = &gvt->device_info;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
+ struct drm_i915_private *i915 = gvt->gt->i915;
int size = info->mmio_size / 4 * sizeof(*gvt->mmio.mmio_attribute);
int ret;
@@ -3379,20 +3383,20 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
if (ret)
goto err;
- if (IS_BROADWELL(dev_priv)) {
+ if (IS_BROADWELL(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_SKYLAKE(dev_priv)
- || IS_KABYLAKE(dev_priv)
- || IS_COFFEELAKE(dev_priv)) {
+ } else if (IS_SKYLAKE(i915) ||
+ IS_KABYLAKE(i915) ||
+ IS_COFFEELAKE(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
ret = init_skl_mmio_info(gvt);
if (ret)
goto err;
- } else if (IS_BROXTON(dev_priv)) {
+ } else if (IS_BROXTON(i915)) {
ret = init_bdw_mmio_info(gvt);
if (ret)
goto err;
@@ -3541,13 +3545,14 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_mmio_info *mmio_info;
struct gvt_mmio_block *mmio_block;
gvt_mmio_func func;
int ret;
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
return -EINVAL;
/*
diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c
index 11accd3e1023..540017fed908 100644
--- a/drivers/gpu/drm/i915/gvt/interrupt.c
+++ b/drivers/gpu/drm/i915/gvt/interrupt.c
@@ -245,6 +245,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
struct intel_gvt_irq_info *info;
u32 ier = *(u32 *)p_data;
@@ -255,7 +256,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
vgpu_vreg(vgpu, reg) = ier;
info = regbase_to_irq_info(gvt, ier_to_regbase(reg));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
if (info->has_upstream_irq)
@@ -282,6 +283,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
void *p_data, unsigned int bytes)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq_info *info = regbase_to_irq_info(vgpu->gvt,
iir_to_regbase(reg));
u32 iir = *(u32 *)p_data;
@@ -289,7 +291,7 @@ int intel_vgpu_reg_iir_handler(struct intel_vgpu *vgpu, unsigned int reg,
trace_write_ir(vgpu->id, "IIR", reg, iir, vgpu_vreg(vgpu, reg),
(vgpu_vreg(vgpu, reg) ^ iir));
- if (WARN_ON(!info))
+ if (drm_WARN_ON(&i915->drm, !info))
return -EINVAL;
vgpu_vreg(vgpu, reg) &= ~iir;
@@ -319,6 +321,7 @@ static struct intel_gvt_irq_map gen8_irq_map[] = {
static void update_upstream_irq(struct intel_vgpu *vgpu,
struct intel_gvt_irq_info *info)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt_irq *irq = &vgpu->gvt->irq;
struct intel_gvt_irq_map *map = irq->irq_map;
struct intel_gvt_irq_info *up_irq_info = NULL;
@@ -340,7 +343,8 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
if (!up_irq_info)
up_irq_info = irq->info[map->up_irq_group];
else
- WARN_ON(up_irq_info != irq->info[map->up_irq_group]);
+ drm_WARN_ON(&i915->drm, up_irq_info !=
+ irq->info[map->up_irq_group]);
bit = map->up_irq_bit;
@@ -350,7 +354,7 @@ static void update_upstream_irq(struct intel_vgpu *vgpu,
clear_bits |= (1 << bit);
}
- if (WARN_ON(!up_irq_info))
+ if (drm_WARN_ON(&i915->drm, !up_irq_info))
return;
if (up_irq_info->group == INTEL_GVT_IRQ_INFO_MASTER) {
@@ -536,7 +540,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, VCS_MI_FLUSH_DW, INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 8, VCS_AS_CONTEXT_SWITCH, INTEL_GVT_IRQ_INFO_GT1);
- if (HAS_ENGINE(gvt->dev_priv, VCS1)) {
+ if (HAS_ENGINE(gvt->gt->i915, VCS1)) {
SET_BIT_INFO(irq, 16, VCS2_MI_USER_INTERRUPT,
INTEL_GVT_IRQ_INFO_GT1);
SET_BIT_INFO(irq, 20, VCS2_MI_FLUSH_DW,
@@ -568,7 +572,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 22, DP_C_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 23, DP_D_HOTPLUG, INTEL_GVT_IRQ_INFO_PCH);
- if (IS_BROADWELL(gvt->dev_priv)) {
+ if (IS_BROADWELL(gvt->gt->i915)) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_PCH);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_PCH);
@@ -581,7 +585,7 @@ static void gen8_init_irq(
SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
- } else if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ } else if (INTEL_GEN(gvt->gt->i915) >= 9) {
SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
@@ -618,13 +622,14 @@ static struct intel_gvt_irq_ops gen8_irq_ops = {
void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu,
enum intel_gvt_event_type event)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_gvt *gvt = vgpu->gvt;
struct intel_gvt_irq *irq = &gvt->irq;
gvt_event_virt_handler_t handler;
struct intel_gvt_irq_ops *ops = gvt->irq.ops;
handler = get_event_virt_handler(irq, event);
- WARN_ON(!handler);
+ drm_WARN_ON(&i915->drm, !handler);
handler(irq, event, vgpu);
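
The WARN_ON() -> drm_WARN_ON() conversions in this patch all follow one pattern: the warning is bound to a specific drm_device, so on multi-GPU systems the splat identifies which device tripped the check. A minimal sketch of the pattern (handler_is_valid is a hypothetical name, not from this patch):

#include <drm/drm_print.h>

/* drm_WARN_ON() evaluates and returns its condition just like
 * WARN_ON(), but prefixes the backtrace with the device name. */
static bool handler_is_valid(struct drm_i915_private *i915, void *handler)
{
	return !drm_WARN_ON(&i915->drm, !handler);
}
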
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index 3259a1fa69e1..074c4efb58eb 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -108,6 +108,36 @@ struct gvt_dma {
struct kref ref;
};
+struct kvmgt_vdev {
+ struct intel_vgpu *vgpu;
+ struct mdev_device *mdev;
+ struct vfio_region *region;
+ int num_regions;
+ struct eventfd_ctx *intx_trigger;
+ struct eventfd_ctx *msi_trigger;
+
+ /*
+ * Two caches are used to avoid mapping duplicated pages (e.g.
+ * scratch pages). This helps to reduce DMA setup overhead.
+ */
+ struct rb_root gfn_cache;
+ struct rb_root dma_addr_cache;
+ unsigned long nr_cache_entries;
+ struct mutex cache_lock;
+
+ struct notifier_block iommu_notifier;
+ struct notifier_block group_notifier;
+ struct kvm *kvm;
+ struct work_struct release_work;
+ atomic_t released;
+ struct vfio_device *vfio_device;
+};
+
+static inline struct kvmgt_vdev *kvmgt_vdev(struct intel_vgpu *vgpu)
+{
+ return intel_vgpu_vdev(vgpu);
+}
+
static inline bool handle_valid(unsigned long handle)
{
return !!(handle & ~0xff);
@@ -120,6 +150,7 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info);
static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long size)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
int total_pages;
int npage;
int ret;
@@ -129,8 +160,8 @@ static void gvt_unpin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
for (npage = 0; npage < total_pages; npage++) {
unsigned long cur_gfn = gfn + npage;
- ret = vfio_unpin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1);
- WARN_ON(ret != 1);
+ ret = vfio_unpin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1);
+ drm_WARN_ON(&i915->drm, ret != 1);
}
}
@@ -152,7 +183,7 @@ static int gvt_pin_guest_page(struct intel_vgpu *vgpu, unsigned long gfn,
unsigned long cur_gfn = gfn + npage;
unsigned long pfn;
- ret = vfio_pin_pages(mdev_dev(vgpu->vdev.mdev), &cur_gfn, 1,
+ ret = vfio_pin_pages(mdev_dev(kvmgt_vdev(vgpu)->mdev), &cur_gfn, 1,
IOMMU_READ | IOMMU_WRITE, &pfn);
if (ret != 1) {
gvt_vgpu_err("vfio_pin_pages failed for gfn 0x%lx, ret %d\n",
@@ -187,7 +218,7 @@ err:
static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t *dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
struct page *page = NULL;
int ret;
@@ -210,7 +241,7 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
dma_addr_t dma_addr, unsigned long size)
{
- struct device *dev = &vgpu->gvt->dev_priv->drm.pdev->dev;
+ struct device *dev = &vgpu->gvt->gt->i915->drm.pdev->dev;
dma_unmap_page(dev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
gvt_unpin_guest_page(vgpu, gfn, size);
@@ -219,7 +250,7 @@ static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
dma_addr_t dma_addr)
{
- struct rb_node *node = vgpu->vdev.dma_addr_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->dma_addr_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -237,7 +268,7 @@ static struct gvt_dma *__gvt_cache_find_dma_addr(struct intel_vgpu *vgpu,
static struct gvt_dma *__gvt_cache_find_gfn(struct intel_vgpu *vgpu, gfn_t gfn)
{
- struct rb_node *node = vgpu->vdev.gfn_cache.rb_node;
+ struct rb_node *node = kvmgt_vdev(vgpu)->gfn_cache.rb_node;
struct gvt_dma *itr;
while (node) {
@@ -258,6 +289,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
{
struct gvt_dma *new, *itr;
struct rb_node **link, *parent = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
new = kzalloc(sizeof(struct gvt_dma), GFP_KERNEL);
if (!new)
@@ -270,7 +302,7 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
kref_init(&new->ref);
/* gfn_cache maps gfn to struct gvt_dma. */
- link = &vgpu->vdev.gfn_cache.rb_node;
+ link = &vdev->gfn_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, gfn_node);
@@ -281,11 +313,11 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->gfn_node, parent, link);
- rb_insert_color(&new->gfn_node, &vgpu->vdev.gfn_cache);
+ rb_insert_color(&new->gfn_node, &vdev->gfn_cache);
/* dma_addr_cache maps dma addr to struct gvt_dma. */
parent = NULL;
- link = &vgpu->vdev.dma_addr_cache.rb_node;
+ link = &vdev->dma_addr_cache.rb_node;
while (*link) {
parent = *link;
itr = rb_entry(parent, struct gvt_dma, dma_addr_node);
@@ -296,46 +328,51 @@ static int __gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn,
link = &parent->rb_right;
}
rb_link_node(&new->dma_addr_node, parent, link);
- rb_insert_color(&new->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ rb_insert_color(&new->dma_addr_node, &vdev->dma_addr_cache);
- vgpu->vdev.nr_cache_entries++;
+ vdev->nr_cache_entries++;
return 0;
}
static void __gvt_cache_remove_entry(struct intel_vgpu *vgpu,
struct gvt_dma *entry)
{
- rb_erase(&entry->gfn_node, &vgpu->vdev.gfn_cache);
- rb_erase(&entry->dma_addr_node, &vgpu->vdev.dma_addr_cache);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ rb_erase(&entry->gfn_node, &vdev->gfn_cache);
+ rb_erase(&entry->dma_addr_node, &vdev->dma_addr_cache);
kfree(entry);
- vgpu->vdev.nr_cache_entries--;
+ vdev->nr_cache_entries--;
}
static void gvt_cache_destroy(struct intel_vgpu *vgpu)
{
struct gvt_dma *dma;
struct rb_node *node = NULL;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
for (;;) {
- mutex_lock(&vgpu->vdev.cache_lock);
- node = rb_first(&vgpu->vdev.gfn_cache);
+ mutex_lock(&vdev->cache_lock);
+ node = rb_first(&vdev->gfn_cache);
if (!node) {
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
break;
}
dma = rb_entry(node, struct gvt_dma, gfn_node);
gvt_dma_unmap_page(vgpu, dma->gfn, dma->dma_addr, dma->size);
__gvt_cache_remove_entry(vgpu, dma);
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
}
static void gvt_cache_init(struct intel_vgpu *vgpu)
{
- vgpu->vdev.gfn_cache = RB_ROOT;
- vgpu->vdev.dma_addr_cache = RB_ROOT;
- vgpu->vdev.nr_cache_entries = 0;
- mutex_init(&vgpu->vdev.cache_lock);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+
+ vdev->gfn_cache = RB_ROOT;
+ vdev->dma_addr_cache = RB_ROOT;
+ vdev->nr_cache_entries = 0;
+ mutex_init(&vdev->cache_lock);
}
static void kvmgt_protect_table_init(struct kvmgt_guest_info *info)
@@ -409,16 +446,18 @@ static void kvmgt_protect_table_del(struct kvmgt_guest_info *info,
static size_t intel_vgpu_reg_rw_opregion(struct intel_vgpu *vgpu, char *buf,
size_t count, loff_t *ppos, bool iswrite)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
- void *base = vgpu->vdev.region[i].data;
+ void *base = vdev->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
- if (pos >= vgpu->vdev.region[i].size || iswrite) {
+
+ if (pos >= vdev->region[i].size || iswrite) {
gvt_vgpu_err("invalid op or offset for Intel vgpu OpRegion\n");
return -EINVAL;
}
- count = min(count, (size_t)(vgpu->vdev.region[i].size - pos));
+ count = min(count, (size_t)(vdev->region[i].size - pos));
memcpy(buf, base + pos, count);
return count;
@@ -512,7 +551,7 @@ static size_t intel_vgpu_reg_rw_edid(struct intel_vgpu *vgpu, char *buf,
unsigned int i = VFIO_PCI_OFFSET_TO_INDEX(*ppos) -
VFIO_PCI_NUM_REGIONS;
struct vfio_edid_region *region =
- (struct vfio_edid_region *)vgpu->vdev.region[i].data;
+ (struct vfio_edid_region *)kvmgt_vdev(vgpu)->region[i].data;
loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
if (pos < region->vfio_edid_regs.edid_offset) {
@@ -544,32 +583,34 @@ static int intel_vgpu_register_reg(struct intel_vgpu *vgpu,
const struct intel_vgpu_regops *ops,
size_t size, u32 flags, void *data)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct vfio_region *region;
- region = krealloc(vgpu->vdev.region,
- (vgpu->vdev.num_regions + 1) * sizeof(*region),
+ region = krealloc(vdev->region,
+ (vdev->num_regions + 1) * sizeof(*region),
GFP_KERNEL);
if (!region)
return -ENOMEM;
- vgpu->vdev.region = region;
- vgpu->vdev.region[vgpu->vdev.num_regions].type = type;
- vgpu->vdev.region[vgpu->vdev.num_regions].subtype = subtype;
- vgpu->vdev.region[vgpu->vdev.num_regions].ops = ops;
- vgpu->vdev.region[vgpu->vdev.num_regions].size = size;
- vgpu->vdev.region[vgpu->vdev.num_regions].flags = flags;
- vgpu->vdev.region[vgpu->vdev.num_regions].data = data;
- vgpu->vdev.num_regions++;
+ vdev->region = region;
+ vdev->region[vdev->num_regions].type = type;
+ vdev->region[vdev->num_regions].subtype = subtype;
+ vdev->region[vdev->num_regions].ops = ops;
+ vdev->region[vdev->num_regions].size = size;
+ vdev->region[vdev->num_regions].flags = flags;
+ vdev->region[vdev->num_regions].data = data;
+ vdev->num_regions++;
return 0;
}
static int kvmgt_get_vfio_device(void *p_vgpu)
{
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- vgpu->vdev.vfio_device = vfio_device_get_from_dev(
- mdev_dev(vgpu->vdev.mdev));
- if (!vgpu->vdev.vfio_device) {
+ vdev->vfio_device = vfio_device_get_from_dev(
+ mdev_dev(vdev->mdev));
+ if (!vdev->vfio_device) {
gvt_vgpu_err("failed to get vfio device\n");
return -ENODEV;
}
@@ -637,10 +678,12 @@ static int kvmgt_set_edid(void *p_vgpu, int port_num)
static void kvmgt_put_vfio_device(void *vgpu)
{
- if (WARN_ON(!((struct intel_vgpu *)vgpu)->vdev.vfio_device))
+ struct kvmgt_vdev *vdev = kvmgt_vdev((struct intel_vgpu *)vgpu);
+
+ if (WARN_ON(!vdev->vfio_device))
return;
- vfio_device_put(((struct intel_vgpu *)vgpu)->vdev.vfio_device);
+ vfio_device_put(vdev->vfio_device);
}
static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
@@ -669,9 +712,9 @@ static int intel_vgpu_create(struct kobject *kobj, struct mdev_device *mdev)
goto out;
}
- INIT_WORK(&vgpu->vdev.release_work, intel_vgpu_release_work);
+ INIT_WORK(&kvmgt_vdev(vgpu)->release_work, intel_vgpu_release_work);
- vgpu->vdev.mdev = mdev;
+ kvmgt_vdev(vgpu)->mdev = mdev;
mdev_set_drvdata(mdev, vgpu);
gvt_dbg_core("intel_vgpu_create succeeded for mdev: %s\n",
@@ -696,9 +739,10 @@ static int intel_vgpu_remove(struct mdev_device *mdev)
static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.iommu_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ iommu_notifier);
+ struct intel_vgpu *vgpu = vdev->vgpu;
if (action == VFIO_IOMMU_NOTIFY_DMA_UNMAP) {
struct vfio_iommu_type1_dma_unmap *unmap = data;
@@ -708,7 +752,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
iov_pfn = unmap->iova >> PAGE_SHIFT;
end_iov_pfn = iov_pfn + unmap->size / PAGE_SIZE;
- mutex_lock(&vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
for (; iov_pfn < end_iov_pfn; iov_pfn++) {
entry = __gvt_cache_find_gfn(vgpu, iov_pfn);
if (!entry)
@@ -718,7 +762,7 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
entry->size);
__gvt_cache_remove_entry(vgpu, entry);
}
- mutex_unlock(&vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
return NOTIFY_OK;
@@ -727,16 +771,16 @@ static int intel_vgpu_iommu_notifier(struct notifier_block *nb,
static int intel_vgpu_group_notifier(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct intel_vgpu *vgpu = container_of(nb,
- struct intel_vgpu,
- vdev.group_notifier);
+ struct kvmgt_vdev *vdev = container_of(nb,
+ struct kvmgt_vdev,
+ group_notifier);
/* the only action we care about */
if (action == VFIO_GROUP_NOTIFY_SET_KVM) {
- vgpu->vdev.kvm = data;
+ vdev->kvm = data;
if (!data)
- schedule_work(&vgpu->vdev.release_work);
+ schedule_work(&vdev->release_work);
}
return NOTIFY_OK;
@@ -745,15 +789,16 @@ static int intel_vgpu_group_notifier(struct notifier_block *nb,
static int intel_vgpu_open(struct mdev_device *mdev)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long events;
int ret;
- vgpu->vdev.iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
- vgpu->vdev.group_notifier.notifier_call = intel_vgpu_group_notifier;
+ vdev->iommu_notifier.notifier_call = intel_vgpu_iommu_notifier;
+ vdev->group_notifier.notifier_call = intel_vgpu_group_notifier;
events = VFIO_IOMMU_NOTIFY_DMA_UNMAP;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY, &events,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for iommu failed: %d\n",
ret);
@@ -762,7 +807,7 @@ static int intel_vgpu_open(struct mdev_device *mdev)
events = VFIO_GROUP_NOTIFY_SET_KVM;
ret = vfio_register_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY, &events,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
if (ret != 0) {
gvt_vgpu_err("vfio_register_notifier for group failed: %d\n",
ret);
@@ -781,51 +826,56 @@ static int intel_vgpu_open(struct mdev_device *mdev)
intel_gvt_ops->vgpu_activate(vgpu);
- atomic_set(&vgpu->vdev.released, 0);
+ atomic_set(&vdev->released, 0);
return ret;
undo_group:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
+ &vdev->group_notifier);
undo_iommu:
vfio_unregister_notifier(mdev_dev(mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
+ &vdev->iommu_notifier);
out:
return ret;
}
static void intel_vgpu_release_msi_eventfd_ctx(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
struct eventfd_ctx *trigger;
- trigger = vgpu->vdev.msi_trigger;
+ trigger = vdev->msi_trigger;
if (trigger) {
eventfd_ctx_put(trigger);
- vgpu->vdev.msi_trigger = NULL;
+ vdev->msi_trigger = NULL;
}
}
static void __intel_vgpu_release(struct intel_vgpu *vgpu)
{
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct kvmgt_guest_info *info;
int ret;
if (!handle_valid(vgpu->handle))
return;
- if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+ if (atomic_cmpxchg(&vdev->released, 0, 1))
return;
intel_gvt_ops->vgpu_release(vgpu);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
- &vgpu->vdev.iommu_notifier);
- WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_IOMMU_NOTIFY,
+ &vdev->iommu_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for iommu failed: %d\n", ret);
- ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_GROUP_NOTIFY,
- &vgpu->vdev.group_notifier);
- WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
+ ret = vfio_unregister_notifier(mdev_dev(vdev->mdev), VFIO_GROUP_NOTIFY,
+ &vdev->group_notifier);
+ drm_WARN(&i915->drm, ret,
+ "vfio_unregister_notifier for group failed: %d\n", ret);
/* dereference module reference taken at open */
module_put(THIS_MODULE);
@@ -835,7 +885,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
- vgpu->vdev.kvm = NULL;
+ vdev->kvm = NULL;
vgpu->handle = 0;
}
@@ -848,10 +898,10 @@ static void intel_vgpu_release(struct mdev_device *mdev)
static void intel_vgpu_release_work(struct work_struct *work)
{
- struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
- vdev.release_work);
+ struct kvmgt_vdev *vdev = container_of(work, struct kvmgt_vdev,
+ release_work);
- __intel_vgpu_release(vgpu);
+ __intel_vgpu_release(vdev->vgpu);
}
static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
@@ -913,7 +963,7 @@ static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
return -EINVAL;
}
- aperture_va = io_mapping_map_wc(&vgpu->gvt->dev_priv->ggtt.iomap,
+ aperture_va = io_mapping_map_wc(&vgpu->gvt->gt->ggtt->iomap,
ALIGN_DOWN(off, PAGE_SIZE),
count + offset_in_page(off));
if (!aperture_va)
@@ -933,12 +983,13 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
size_t count, loff_t *ppos, bool is_write)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
int ret = -EINVAL;
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions) {
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions) {
gvt_vgpu_err("invalid index: %u\n", index);
return -EINVAL;
}
@@ -967,11 +1018,11 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
case VFIO_PCI_ROM_REGION_INDEX:
break;
default:
- if (index >= VFIO_PCI_NUM_REGIONS + vgpu->vdev.num_regions)
+ if (index >= VFIO_PCI_NUM_REGIONS + vdev->num_regions)
return -EINVAL;
index -= VFIO_PCI_NUM_REGIONS;
- return vgpu->vdev.region[index].ops->rw(vgpu, buf, count,
+ return vdev->region[index].ops->rw(vgpu, buf, count,
ppos, is_write);
}
@@ -1224,7 +1275,7 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
gvt_vgpu_err("eventfd_ctx_fdget failed\n");
return PTR_ERR(trigger);
}
- vgpu->vdev.msi_trigger = trigger;
+ kvmgt_vdev(vgpu)->msi_trigger = trigger;
} else if ((flags & VFIO_IRQ_SET_DATA_NONE) && !count)
intel_vgpu_release_msi_eventfd_ctx(vgpu);
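
For context, the msi_trigger eventfd consumed above reaches the kernel through the VFIO_DEVICE_SET_IRQS ioctl. A minimal userspace sketch of how a VMM might wire it up (wire_msi_eventfd is a hypothetical name; error handling is trimmed):

#include <linux/vfio.h>
#include <string.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>

static int wire_msi_eventfd(int device_fd)
{
	/* vfio_irq_set carries a flexible data[] array; embed the fd. */
	struct { struct vfio_irq_set set; int fd; } buf;
	int efd = eventfd(0, 0);

	if (efd < 0)
		return -1;

	memset(&buf, 0, sizeof(buf));
	buf.set.argsz = sizeof(buf);
	buf.set.flags = VFIO_IRQ_SET_DATA_EVENTFD |
			VFIO_IRQ_SET_ACTION_TRIGGER;
	buf.set.index = VFIO_PCI_MSI_IRQ_INDEX;
	buf.set.start = 0;
	buf.set.count = 1;
	buf.fd = efd;

	/* Lands in intel_vgpu_set_msi_trigger() via eventfd_ctx_fdget(). */
	return ioctl(device_fd, VFIO_DEVICE_SET_IRQS, &buf.set);
}
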
@@ -1276,6 +1327,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
unsigned long arg)
{
struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
unsigned long minsz;
gvt_dbg_core("vgpu%d ioctl, cmd: %d\n", vgpu->id, cmd);
@@ -1294,7 +1346,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
info.flags = VFIO_DEVICE_FLAGS_PCI;
info.flags |= VFIO_DEVICE_FLAGS_RESET;
info.num_regions = VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions;
+ vdev->num_regions;
info.num_irqs = VFIO_PCI_NUM_IRQS;
return copy_to_user((void __user *)arg, &info, minsz) ?
@@ -1385,22 +1437,22 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
.header.version = 1 };
if (info.index >= VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions)
+ vdev->num_regions)
return -EINVAL;
info.index =
array_index_nospec(info.index,
VFIO_PCI_NUM_REGIONS +
- vgpu->vdev.num_regions);
+ vdev->num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
info.offset =
VFIO_PCI_INDEX_TO_OFFSET(info.index);
- info.size = vgpu->vdev.region[i].size;
- info.flags = vgpu->vdev.region[i].flags;
+ info.size = vdev->region[i].size;
+ info.flags = vdev->region[i].flags;
- cap_type.type = vgpu->vdev.region[i].type;
- cap_type.subtype = vgpu->vdev.region[i].subtype;
+ cap_type.type = vdev->region[i].type;
+ cap_type.subtype = vdev->region[i].subtype;
ret = vfio_info_add_capability(&caps,
&cap_type.header,
@@ -1597,12 +1649,10 @@ static struct mdev_parent_ops intel_vgpu_ops = {
static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
{
- struct attribute **kvm_type_attrs;
struct attribute_group **kvm_vgpu_type_groups;
intel_gvt_ops = ops;
- if (!intel_gvt_ops->get_gvt_attrs(&kvm_type_attrs,
- &kvm_vgpu_type_groups))
+ if (!intel_gvt_ops->get_gvt_attrs(&kvm_vgpu_type_groups))
return -EFAULT;
intel_vgpu_ops.supported_type_groups = kvm_vgpu_type_groups;
@@ -1742,13 +1792,15 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct kvm *kvm;
vgpu = mdev_get_drvdata(mdev);
if (handle_valid(vgpu->handle))
return -EEXIST;
- kvm = vgpu->vdev.kvm;
+ vdev = kvmgt_vdev(vgpu);
+ kvm = vdev->kvm;
if (!kvm || kvm->mm != current->mm) {
gvt_vgpu_err("KVM is required to use Intel vGPU\n");
return -ESRCH;
@@ -1769,8 +1821,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- init_completion(&vgpu->vblank_done);
-
info->track_node.track_write = kvmgt_page_track_write;
info->track_node.track_flush_slot = kvmgt_page_track_flush_slot;
kvm_page_track_register_notifier(kvm, &info->track_node);
@@ -1778,7 +1828,7 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
info->debugfs_cache_entries = debugfs_create_ulong(
"kvmgt_nr_cache_entries",
0444, vgpu->debugfs,
- &vgpu->vdev.nr_cache_entries);
+ &vdev->nr_cache_entries);
return 0;
}
@@ -1795,9 +1845,17 @@ static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
return true;
}
-static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
+static int kvmgt_attach_vgpu(void *p_vgpu, unsigned long *handle)
{
- /* nothing to do here */
+ struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+ vgpu->vdev = kzalloc(sizeof(struct kvmgt_vdev), GFP_KERNEL);
+
+ if (!vgpu->vdev)
+ return -ENOMEM;
+
+ kvmgt_vdev(vgpu)->vgpu = vgpu;
+
return 0;
}
@@ -1805,29 +1863,34 @@ static void kvmgt_detach_vgpu(void *p_vgpu)
{
int i;
struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+ struct kvmgt_vdev *vdev = kvmgt_vdev(vgpu);
- if (!vgpu->vdev.region)
+ if (!vdev->region)
return;
- for (i = 0; i < vgpu->vdev.num_regions; i++)
- if (vgpu->vdev.region[i].ops->release)
- vgpu->vdev.region[i].ops->release(vgpu,
- &vgpu->vdev.region[i]);
- vgpu->vdev.num_regions = 0;
- kfree(vgpu->vdev.region);
- vgpu->vdev.region = NULL;
+ for (i = 0; i < vdev->num_regions; i++)
+ if (vdev->region[i].ops->release)
+ vdev->region[i].ops->release(vgpu,
+ &vdev->region[i]);
+ vdev->num_regions = 0;
+ kfree(vdev->region);
+ vdev->region = NULL;
+
+ kfree(vdev);
}
static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
{
struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
if (!handle_valid(handle))
return -ESRCH;
info = (struct kvmgt_guest_info *)handle;
vgpu = info->vgpu;
+ vdev = kvmgt_vdev(vgpu);
/*
* When the guest is powered off, msi_trigger is set to NULL, but vgpu's
@@ -1838,10 +1901,10 @@ static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
* enabled by the guest. So if msi_trigger is NULL, success is still
* returned and no interrupt is injected into the guest.
*/
- if (vgpu->vdev.msi_trigger == NULL)
+ if (vdev->msi_trigger == NULL)
return 0;
- if (eventfd_signal(vgpu->vdev.msi_trigger, 1) == 1)
+ if (eventfd_signal(vdev->msi_trigger, 1) == 1)
return 0;
return -EFAULT;
@@ -1867,26 +1930,26 @@ static unsigned long kvmgt_gfn_to_pfn(unsigned long handle, unsigned long gfn)
static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
unsigned long size, dma_addr_t *dma_addr)
{
- struct kvmgt_guest_info *info;
struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret;
if (!handle_valid(handle))
return -EINVAL;
- info = (struct kvmgt_guest_info *)handle;
- vgpu = info->vgpu;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
- entry = __gvt_cache_find_gfn(info->vgpu, gfn);
+ entry = __gvt_cache_find_gfn(vgpu, gfn);
if (!entry) {
ret = gvt_dma_map_page(vgpu, gfn, dma_addr, size);
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else if (entry->size != size) {
@@ -1898,7 +1961,7 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
if (ret)
goto err_unlock;
- ret = __gvt_cache_add(info->vgpu, gfn, *dma_addr, size);
+ ret = __gvt_cache_add(vgpu, gfn, *dma_addr, size);
if (ret)
goto err_unmap;
} else {
@@ -1906,19 +1969,20 @@ static int kvmgt_dma_map_guest_page(unsigned long handle, unsigned long gfn,
*dma_addr = entry->dma_addr;
}
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return 0;
err_unmap:
gvt_dma_unmap_page(vgpu, gfn, *dma_addr, size);
err_unlock:
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
struct kvmgt_guest_info *info;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
int ret = 0;
@@ -1926,14 +1990,15 @@ static int kvmgt_dma_pin_guest_page(unsigned long handle, dma_addr_t dma_addr)
return -ENODEV;
info = (struct kvmgt_guest_info *)handle;
+ vdev = kvmgt_vdev(info->vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
+ mutex_lock(&vdev->cache_lock);
entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
if (entry)
kref_get(&entry->ref);
else
ret = -ENOMEM;
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
return ret;
}
@@ -1949,19 +2014,21 @@ static void __gvt_dma_release(struct kref *ref)
static void kvmgt_dma_unmap_guest_page(unsigned long handle, dma_addr_t dma_addr)
{
- struct kvmgt_guest_info *info;
+ struct intel_vgpu *vgpu;
+ struct kvmgt_vdev *vdev;
struct gvt_dma *entry;
if (!handle_valid(handle))
return;
- info = (struct kvmgt_guest_info *)handle;
+ vgpu = ((struct kvmgt_guest_info *)handle)->vgpu;
+ vdev = kvmgt_vdev(vgpu);
- mutex_lock(&info->vgpu->vdev.cache_lock);
- entry = __gvt_cache_find_dma_addr(info->vgpu, dma_addr);
+ mutex_lock(&vdev->cache_lock);
+ entry = __gvt_cache_find_dma_addr(vgpu, dma_addr);
if (entry)
kref_put(&entry->ref, __gvt_dma_release);
- mutex_unlock(&info->vgpu->vdev.cache_lock);
+ mutex_unlock(&vdev->cache_lock);
}
static int kvmgt_rw_gpa(unsigned long handle, unsigned long gpa,
diff --git a/drivers/gpu/drm/i915/gvt/mmio.c b/drivers/gpu/drm/i915/gvt/mmio.c
index a55178884d67..291993615af9 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.c
+++ b/drivers/gpu/drm/i915/gvt/mmio.c
@@ -103,6 +103,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -114,15 +115,17 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
@@ -132,16 +135,16 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_read_gpa(vgpu, pa, p_data, bytes);
goto out;
}
- if (WARN_ON(!reg_is_mmio(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm, !reg_is_mmio(gvt, offset + bytes - 1)))
goto err;
if (!intel_gvt_mmio_is_unalign(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, bytes)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, bytes)))
goto err;
}
@@ -175,6 +178,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
unsigned int offset = 0;
int ret = -EINVAL;
@@ -187,15 +191,17 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
- if (WARN_ON(bytes > 8))
+ if (drm_WARN_ON(&i915->drm, bytes > 8))
goto err;
if (reg_is_gtt(gvt, offset)) {
- if (WARN_ON(!IS_ALIGNED(offset, 4) && !IS_ALIGNED(offset, 8)))
+ if (drm_WARN_ON(&i915->drm, !IS_ALIGNED(offset, 4) &&
+ !IS_ALIGNED(offset, 8)))
goto err;
- if (WARN_ON(bytes != 4 && bytes != 8))
+ if (drm_WARN_ON(&i915->drm, bytes != 4 && bytes != 8))
goto err;
- if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
+ if (drm_WARN_ON(&i915->drm,
+ !reg_is_gtt(gvt, offset + bytes - 1)))
goto err;
ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
@@ -205,7 +211,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
goto out;
}
- if (WARN_ON_ONCE(!reg_is_mmio(gvt, offset))) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !reg_is_mmio(gvt, offset))) {
ret = intel_gvt_hypervisor_write_gpa(vgpu, pa, p_data, bytes);
goto out;
}
@@ -245,7 +251,7 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
/* set the bit 0:2(Core C-State ) to C0 */
vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
- if (IS_BROXTON(vgpu->gvt->dev_priv)) {
+ if (IS_BROXTON(vgpu->gvt->gt->i915)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &=
~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index 2e68f4b02c94..cc4812648bf4 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -69,8 +69,8 @@ struct intel_gvt_mmio_info {
struct hlist_node node;
};
-int intel_gvt_render_mmio_to_ring_id(struct intel_gvt *gvt,
- unsigned int reg);
+const struct intel_engine_cs *
+intel_gvt_render_mmio_to_engine(struct intel_gvt *gvt, unsigned int reg);
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt);
bool intel_gvt_match_device(struct intel_gvt *gvt, unsigned long device);
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index aaf15916d29a..2ccaf78f96e8 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -157,12 +157,13 @@ static u32 gen9_mocs_mmio_offset_list[] = {
[VECS0] = 0xcb00,
};
-static void load_render_mocs(struct drm_i915_private *dev_priv)
+static void load_render_mocs(const struct intel_engine_cs *engine)
{
- struct intel_gvt *gvt = dev_priv->gvt;
- i915_reg_t offset;
+ struct intel_gvt *gvt = engine->i915->gvt;
+ struct intel_uncore *uncore = engine->uncore;
u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
+ i915_reg_t offset;
int ring_id, i;
/* Platform doesn't have mocs mmios. */
@@ -170,12 +171,13 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
return;
for (ring_id = 0; ring_id < cnt; ring_id++) {
- if (!HAS_ENGINE(dev_priv, ring_id))
+ if (!HAS_ENGINE(engine->i915, ring_id))
continue;
+
offset.reg = regs[ring_id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
gen9_render_mocs.control_table[ring_id][i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
}
@@ -183,7 +185,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
gen9_render_mocs.l3cc_table[i] =
- I915_READ_FW(offset);
+ intel_uncore_read_fw(uncore, offset);
offset.reg += 4;
}
gen9_render_mocs.initialized = true;
@@ -214,13 +216,11 @@ restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
*cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id ||
- !mmio->in_context)
+ if (mmio->id != ring_id || !mmio->in_context)
continue;
*cs++ = i915_mmio_reg_offset(mmio->reg);
- *cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
- (mmio->mask << 16);
+ *cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id);
}
@@ -344,10 +344,10 @@ static u32 gen8_tlb_mmio_offset_list[] = {
[VECS0] = 0x4270,
};
-static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
+static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- struct intel_uncore *uncore = &dev_priv->uncore;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s = &vgpu->submission;
u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
@@ -357,13 +357,13 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
if (!regs)
return;
- if (WARN_ON(ring_id >= cnt))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
return;
- if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
+ if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
return;
- reg = _MMIO(regs[ring_id]);
+ reg = _MMIO(regs[engine->id]);
/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
* we need to put a forcewake when invalidating RCS TLB caches,
@@ -372,30 +372,27 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
*/
fw = intel_uncore_forcewake_for_reg(uncore, reg,
FW_REG_READ | FW_REG_WRITE);
- if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
+ if (engine->id == RCS0 && INTEL_GEN(engine->i915) >= 9)
fw |= FORCEWAKE_RENDER;
intel_uncore_forcewake_get(uncore, fw);
intel_uncore_write_fw(uncore, reg, 0x1);
- if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
- gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
+ if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
+ gvt_vgpu_err("timeout in invalidate ring %s tlb\n",
+ engine->name);
else
vgpu_vreg_t(vgpu, reg) = 0;
intel_uncore_forcewake_put(uncore, fw);
- gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
+ gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
- i915_reg_t offset, l3_offset;
- u32 old_v, new_v;
-
u32 regs[] = {
[RCS0] = 0xc800,
[VCS0] = 0xc900,
@@ -403,36 +400,38 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
[BCS0] = 0xcc00,
[VECS0] = 0xcb00,
};
+ struct intel_uncore *uncore = engine->uncore;
+ i915_reg_t offset, l3_offset;
+ u32 old_v, new_v;
int i;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
+ if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
return;
- if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
+ if (engine->id == RCS0 && IS_GEN(engine->i915, 9))
return;
if (!pre && !gen9_render_mocs.initialized)
- load_render_mocs(dev_priv);
+ load_render_mocs(engine);
- offset.reg = regs[ring_id];
+ offset.reg = regs[engine->id];
for (i = 0; i < GEN9_MOCS_SIZE; i++) {
if (pre)
old_v = vgpu_vreg_t(pre, offset);
else
- old_v = gen9_render_mocs.control_table[ring_id][i];
+ old_v = gen9_render_mocs.control_table[engine->id][i];
if (next)
new_v = vgpu_vreg_t(next, offset);
else
- new_v = gen9_render_mocs.control_table[ring_id][i];
+ new_v = gen9_render_mocs.control_table[engine->id][i];
if (old_v != new_v)
- I915_WRITE_FW(offset, new_v);
+ intel_uncore_write_fw(uncore, offset, new_v);
offset.reg += 4;
}
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
l3_offset.reg = 0xb020;
for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
if (pre)
@@ -445,7 +444,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
new_v = gen9_render_mocs.l3cc_table[i];
if (old_v != new_v)
- I915_WRITE_FW(l3_offset, new_v);
+ intel_uncore_write_fw(uncore, l3_offset, new_v);
l3_offset.reg += 4;
}
@@ -467,38 +466,40 @@ bool is_inhibit_context(struct intel_context *ce)
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
struct intel_vgpu *next,
- int ring_id)
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
+ struct intel_uncore *uncore = engine->uncore;
struct intel_vgpu_submission *s;
struct engine_mmio *mmio;
u32 old_v, new_v;
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
- if (INTEL_GEN(dev_priv) >= 9)
- switch_mocs(pre, next, ring_id);
+ if (INTEL_GEN(engine->i915) >= 9)
+ switch_mocs(pre, next, engine);
- for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
+ for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->ring_id != ring_id)
+ if (mmio->id != engine->id)
continue;
/*
* No need to save or restore an mmio that is part of the context
* state image on gen9; it is initialized by an LRI command and
* saved or restored together with the context.
*/
- if (IS_GEN(dev_priv, 9) && mmio->in_context)
+ if (IS_GEN(engine->i915, 9) && mmio->in_context)
continue;
// save
if (pre) {
- vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+ vgpu_vreg_t(pre, mmio->reg) =
+ intel_uncore_read_fw(uncore, mmio->reg);
if (mmio->mask)
vgpu_vreg_t(pre, mmio->reg) &=
- ~(mmio->mask << 16);
+ ~(mmio->mask << 16);
old_v = vgpu_vreg_t(pre, mmio->reg);
- } else
- old_v = mmio->value = I915_READ_FW(mmio->reg);
+ } else {
+ old_v = mmio->value =
+ intel_uncore_read_fw(uncore, mmio->reg);
+ }
// restore
if (next) {
@@ -509,12 +510,12 @@ static void switch_mmio(struct intel_vgpu *pre,
* itself.
*/
if (mmio->in_context &&
- !is_inhibit_context(s->shadow[ring_id]))
+ !is_inhibit_context(s->shadow[engine->id]))
continue;
if (mmio->mask)
new_v = vgpu_vreg_t(next, mmio->reg) |
- (mmio->mask << 16);
+ (mmio->mask << 16);
else
new_v = vgpu_vreg_t(next, mmio->reg);
} else {
@@ -526,7 +527,7 @@ static void switch_mmio(struct intel_vgpu *pre,
new_v = mmio->value;
}
- I915_WRITE_FW(mmio->reg, new_v);
+ intel_uncore_write_fw(uncore, mmio->reg, new_v);
trace_render_mmio(pre ? pre->id : 0,
next ? next->id : 0,
@@ -536,39 +537,37 @@ static void switch_mmio(struct intel_vgpu *pre,
}
if (next)
- handle_tlb_pending_event(next, ring_id);
+ handle_tlb_pending_event(next, engine);
}
/**
* intel_gvt_switch_render_mmio - switch mmio context of specific engine
* @pre: the last vGPU that own the engine
* @next: the vGPU to switch to
- * @ring_id: specify the engine
+ * @engine: the engine
*
* If pre is null, the host owns the engine. If next is null, we are
* switching to a host workload.
*/
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id)
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- if (WARN_ON(!pre && !next))
+ if (WARN(!pre && !next, "switch ring %s from host to HOST\n",
+ engine->name))
return;
- gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
+ gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
pre ? "vGPU" : "host", next ? "vGPU" : "HOST");
- dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-
/**
* We are using the raw mmio access wrapper to improve
* performance for batch mmio read/write, so we need to
* handle forcewake manually.
*/
- intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
- switch_mmio(pre, next, ring_id);
- intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
+ switch_mmio(pre, next, engine);
+ intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}
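
The comment above is the key constraint: the *_fw accessors used by switch_mmio() skip the implicit per-register forcewake handling, so the batch has to be bracketed explicitly. A minimal sketch of the pattern (batch_rmw_example is a hypothetical name, not from this patch):

/* Take forcewake once, run a batch of raw accesses, release once;
 * much cheaper than waking the hardware for every register. */
static void batch_rmw_example(struct intel_uncore *uncore, i915_reg_t reg)
{
	u32 val;

	intel_uncore_forcewake_get(uncore, FORCEWAKE_ALL);
	val = intel_uncore_read_fw(uncore, reg);	/* no implicit wake */
	intel_uncore_write_fw(uncore, reg, val | BIT(0));
	intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL);
}
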
/**
@@ -580,7 +579,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
struct engine_mmio *mmio;
- if (INTEL_GEN(gvt->dev_priv) >= 9) {
+ if (INTEL_GEN(gvt->gt->i915) >= 9) {
gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
@@ -595,7 +594,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->in_context) {
- gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
}
}
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h
index f7eaa442403f..970704b18f23 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.h
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.h
@@ -37,7 +37,7 @@
#define __GVT_RENDER_H__
struct engine_mmio {
- int ring_id;
+ enum intel_engine_id id;
i915_reg_t reg;
u32 mask;
bool in_context;
@@ -45,7 +45,8 @@ struct engine_mmio {
};
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
- struct intel_vgpu *next, int ring_id);
+ struct intel_vgpu *next,
+ const struct intel_engine_cs *engine);
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c
index 2369d4a9af94..036b74fe9298 100644
--- a/drivers/gpu/drm/i915/gvt/sched_policy.c
+++ b/drivers/gpu/drm/i915/gvt/sched_policy.c
@@ -39,8 +39,8 @@ static bool vgpu_has_pending_workload(struct intel_vgpu *vgpu)
enum intel_engine_id i;
struct intel_engine_cs *engine;
- for_each_engine(engine, vgpu->gvt->dev_priv, i) {
- if (!list_empty(workload_q_head(vgpu, i)))
+ for_each_engine(engine, vgpu->gvt->gt, i) {
+ if (!list_empty(workload_q_head(vgpu, engine)))
return true;
}
@@ -152,8 +152,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = true;
/* still have uncompleted workload? */
- for_each_engine(engine, gvt->dev_priv, i) {
- if (scheduler->current_workload[i])
+ for_each_engine(engine, gvt->gt, i) {
+ if (scheduler->current_workload[engine->id])
return;
}
@@ -169,8 +169,8 @@ static void try_to_schedule_next_vgpu(struct intel_gvt *gvt)
scheduler->need_reschedule = false;
/* wake up workload dispatch thread */
- for_each_engine(engine, gvt->dev_priv, i)
- wake_up(&scheduler->waitq[i]);
+ for_each_engine(engine, gvt->gt, i)
+ wake_up(&scheduler->waitq[engine->id]);
}
static struct intel_vgpu *find_busy_vgpu(struct gvt_sched_data *sched_data)
@@ -444,9 +444,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
{
struct intel_gvt_workload_scheduler *scheduler =
&vgpu->gvt->scheduler;
- int ring_id;
struct vgpu_sched_data *vgpu_data = vgpu->sched_data;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
+ struct intel_engine_cs *engine;
+ enum intel_engine_id id;
if (!vgpu_data->active)
return;
@@ -467,10 +468,10 @@ void intel_vgpu_stop_schedule(struct intel_vgpu *vgpu)
intel_runtime_pm_get(&dev_priv->runtime_pm);
spin_lock_bh(&scheduler->mmio_context_lock);
- for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
- if (scheduler->engine_owner[ring_id] == vgpu) {
- intel_gvt_switch_mmio(vgpu, NULL, ring_id);
- scheduler->engine_owner[ring_id] = NULL;
+ for_each_engine(engine, vgpu->gvt->gt, id) {
+ if (scheduler->engine_owner[engine->id] == vgpu) {
+ intel_gvt_switch_mmio(vgpu, NULL, engine);
+ scheduler->engine_owner[engine->id] = NULL;
}
}
spin_unlock_bh(&scheduler->mmio_context_lock);
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 685d1e04a5ff..1c95bf8cbed0 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -84,7 +84,7 @@ static void update_shadow_pdps(struct intel_vgpu_workload *workload)
static void sr_oa_regs(struct intel_vgpu_workload *workload,
u32 *reg_state, bool save)
{
- struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = workload->vgpu->gvt->gt->i915;
u32 ctx_oactxctrl = dev_priv->perf.ctx_oactxctrl_offset;
u32 ctx_flexeu0 = dev_priv->perf.ctx_flexeu0_offset;
int i = 0;
@@ -98,7 +98,7 @@ static void sr_oa_regs(struct intel_vgpu_workload *workload,
i915_mmio_reg_offset(EU_PERF_CNTL6),
};
- if (workload->ring_id != RCS0)
+ if (workload->engine->id != RCS0)
return;
if (save) {
@@ -128,7 +128,6 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
- int ring_id = workload->ring_id;
struct drm_i915_gem_object *ctx_obj =
workload->req->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
@@ -154,7 +153,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
COPY_REG_MASKED(ctx_ctrl);
COPY_REG(ctx_timestamp);
- if (ring_id == RCS0) {
+ if (workload->engine->id == RCS0) {
COPY_REG(bb_per_ctx_ptr);
COPY_REG(rcs_indirect_ctx);
COPY_REG(rcs_indirect_ctx_offset);
@@ -175,14 +174,14 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
if (IS_RESTORE_INHIBIT(shadow_ring_context->ctx_ctrl.val))
return 0;
- gvt_dbg_sched("ring id %d workload lrca %x", ring_id,
- workload->ctx_desc.lrca);
-
- context_page_num = gvt->dev_priv->engine[ring_id]->context_size;
+ gvt_dbg_sched("ring %s workload lrca %x",
+ workload->engine->name,
+ workload->ctx_desc.lrca);
+ context_page_num = workload->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && ring_id == RCS0)
+ if (IS_BROADWELL(gvt->gt->i915) && workload->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -210,38 +209,43 @@ static inline bool is_gvt_request(struct i915_request *rq)
return intel_context_force_single_submission(rq->context);
}
-static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
+static void save_ring_hw_state(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base = dev_priv->engine[ring_id]->mmio_base;
+ struct intel_uncore *uncore = engine->uncore;
i915_reg_t reg;
- reg = RING_INSTDONE(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
- reg = RING_ACTHD_UDW(ring_base);
- vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) = I915_READ_FW(reg);
+ reg = RING_INSTDONE(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
+
+ reg = RING_ACTHD_UDW(engine->mmio_base);
+ vgpu_vreg(vgpu, i915_mmio_reg_offset(reg)) =
+ intel_uncore_read(uncore, reg);
}
static int shadow_context_status_change(struct notifier_block *nb,
unsigned long action, void *data)
{
- struct i915_request *req = data;
+ struct i915_request *rq = data;
struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
- shadow_ctx_notifier_block[req->engine->id]);
+ shadow_ctx_notifier_block[rq->engine->id]);
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- enum intel_engine_id ring_id = req->engine->id;
+ enum intel_engine_id ring_id = rq->engine->id;
struct intel_vgpu_workload *workload;
unsigned long flags;
- if (!is_gvt_request(req)) {
+ if (!is_gvt_request(rq)) {
spin_lock_irqsave(&scheduler->mmio_context_lock, flags);
if (action == INTEL_CONTEXT_SCHEDULE_IN &&
scheduler->engine_owner[ring_id]) {
/* Switch ring from vGPU to host. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- NULL, ring_id);
+ NULL, rq->engine);
scheduler->engine_owner[ring_id] = NULL;
}
spin_unlock_irqrestore(&scheduler->mmio_context_lock, flags);
@@ -259,7 +263,7 @@ static int shadow_context_status_change(struct notifier_block *nb,
if (workload->vgpu != scheduler->engine_owner[ring_id]) {
/* Switch ring from host to vGPU or vGPU to vGPU. */
intel_gvt_switch_mmio(scheduler->engine_owner[ring_id],
- workload->vgpu, ring_id);
+ workload->vgpu, rq->engine);
scheduler->engine_owner[ring_id] = workload->vgpu;
} else
gvt_dbg_sched("skip ring %d mmio switch for vgpu%d\n",
@@ -268,11 +272,11 @@ static int shadow_context_status_change(struct notifier_block *nb,
atomic_set(&workload->shadow_ctx_active, 1);
break;
case INTEL_CONTEXT_SCHEDULE_OUT:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
atomic_set(&workload->shadow_ctx_active, 0);
break;
case INTEL_CONTEXT_SCHEDULE_PREEMPTED:
- save_ring_hw_state(workload->vgpu, ring_id);
+ save_ring_hw_state(workload->vgpu, rq->engine);
break;
default:
WARN_ON(1);
@@ -391,7 +395,7 @@ intel_gvt_workload_req_alloc(struct intel_vgpu_workload *workload)
if (workload->req)
return 0;
- rq = i915_request_create(s->shadow[workload->ring_id]);
+ rq = i915_request_create(s->shadow[workload->engine->id]);
if (IS_ERR(rq)) {
gvt_vgpu_err("fail to allocate gem request\n");
return PTR_ERR(rq);
@@ -420,15 +424,16 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
if (workload->shadow)
return 0;
- if (!test_and_set_bit(workload->ring_id, s->shadow_ctx_desc_updated))
- shadow_context_descriptor_update(s->shadow[workload->ring_id],
+ if (!test_and_set_bit(workload->engine->id, s->shadow_ctx_desc_updated))
+ shadow_context_descriptor_update(s->shadow[workload->engine->id],
workload);
ret = intel_gvt_scan_and_shadow_ringbuffer(workload);
if (ret)
return ret;
- if (workload->ring_id == RCS0 && workload->wa_ctx.indirect_ctx.size) {
+ if (workload->engine->id == RCS0 &&
+ workload->wa_ctx.indirect_ctx.size) {
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err_shadow;
@@ -436,6 +441,7 @@ int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload)
workload->shadow = true;
return 0;
+
err_shadow:
release_shadow_wa_ctx(&workload->wa_ctx);
return ret;
@@ -567,12 +573,8 @@ static int prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
static void update_vreg_in_ctx(struct intel_vgpu_workload *workload)
{
- struct intel_vgpu *vgpu = workload->vgpu;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
- u32 ring_base;
-
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
- vgpu_vreg_t(vgpu, RING_START(ring_base)) = workload->rb_start;
+ vgpu_vreg_t(workload->vgpu, RING_START(workload->engine->mmio_base)) =
+ workload->rb_start;
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
@@ -608,7 +610,6 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct intel_vgpu_submission *s = &vgpu->submission;
- int ring = workload->ring_id;
int ret = 0;
ret = intel_vgpu_pin_mm(workload->shadow_mm);
@@ -625,7 +626,7 @@ static int prepare_workload(struct intel_vgpu_workload *workload)
update_shadow_pdps(workload);
- set_context_ppgtt_from_shadow(workload, s->shadow[ring]);
+ set_context_ppgtt_from_shadow(workload, s->shadow[workload->engine->id]);
ret = intel_vgpu_sync_oos_pages(workload->vgpu);
if (ret) {
@@ -677,11 +678,10 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
struct i915_request *rq;
- int ring_id = workload->ring_id;
int ret;
- gvt_dbg_sched("ring id %d prepare to dispatch workload %p\n",
- ring_id, workload);
+ gvt_dbg_sched("ring id %s prepare to dispatch workload %p\n",
+ workload->engine->name, workload);
mutex_lock(&vgpu->vgpu_lock);
@@ -710,8 +710,8 @@ out:
}
if (!IS_ERR_OR_NULL(workload->req)) {
- gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
- ring_id, workload->req);
+ gvt_dbg_sched("ring id %s submit workload to i915 %p\n",
+ workload->engine->name, workload->req);
i915_request_add(workload->req);
workload->dispatched = true;
}
@@ -722,8 +722,8 @@ err_req:
return ret;
}
-static struct intel_vgpu_workload *pick_next_workload(
- struct intel_gvt *gvt, int ring_id)
+static struct intel_vgpu_workload *
+pick_next_workload(struct intel_gvt *gvt, struct intel_engine_cs *engine)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
@@ -735,27 +735,27 @@ static struct intel_vgpu_workload *pick_next_workload(
* bail out
*/
if (!scheduler->current_vgpu) {
- gvt_dbg_sched("ring id %d stop - no current vgpu\n", ring_id);
+ gvt_dbg_sched("ring %s stop - no current vgpu\n", engine->name);
goto out;
}
if (scheduler->need_reschedule) {
- gvt_dbg_sched("ring id %d stop - will reschedule\n", ring_id);
+ gvt_dbg_sched("ring %s stop - will reschedule\n", engine->name);
goto out;
}
if (!scheduler->current_vgpu->active ||
- list_empty(workload_q_head(scheduler->current_vgpu, ring_id)))
+ list_empty(workload_q_head(scheduler->current_vgpu, engine)))
goto out;
/*
* still have current workload, maybe the workload dispatcher
* failed to submit it for some reason, resubmit it.
*/
- if (scheduler->current_workload[ring_id]) {
- workload = scheduler->current_workload[ring_id];
- gvt_dbg_sched("ring id %d still have current workload %p\n",
- ring_id, workload);
+ if (scheduler->current_workload[engine->id]) {
+ workload = scheduler->current_workload[engine->id];
+ gvt_dbg_sched("ring %s still have current workload %p\n",
+ engine->name, workload);
goto out;
}
@@ -765,13 +765,14 @@ static struct intel_vgpu_workload *pick_next_workload(
* will wait until the current workload is finished when trying to
* schedule out a vgpu.
*/
- scheduler->current_workload[ring_id] = container_of(
- workload_q_head(scheduler->current_vgpu, ring_id)->next,
- struct intel_vgpu_workload, list);
+ scheduler->current_workload[engine->id] =
+ list_first_entry(workload_q_head(scheduler->current_vgpu,
+ engine),
+ struct intel_vgpu_workload, list);
- workload = scheduler->current_workload[ring_id];
+ workload = scheduler->current_workload[engine->id];
- gvt_dbg_sched("ring id %d pick new workload %p\n", ring_id, workload);
+ gvt_dbg_sched("ring %s pick new workload %p\n", engine->name, workload);
atomic_inc(&workload->vgpu->submission.running_workload_num);
out:
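
pick_next_workload() above also replaces an open-coded container_of(head->next, ...) with list_first_entry(); the two are equivalent. A self-contained toy illustration (not the kernel's list.h):

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* list_first_entry() is container_of() applied to head->next */
#define list_first_entry(head, type, member) \
	container_of((head)->next, type, member)

struct workload {
	int id;
	struct list_head list;
};

int main(void)
{
	struct workload w = { .id = 42, .list = { NULL, NULL } };
	struct list_head q = { .next = &w.list, .prev = &w.list };

	/* the open-coded form the hunk removes ... */
	struct workload *a = container_of(q.next, struct workload, list);
	/* ... and the equivalent, more legible helper it adds */
	struct workload *b = list_first_entry(&q, struct workload, list);

	printf("%d %d\n", a->id, b->id); /* prints "42 42" */
	return 0;
}
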
@@ -783,14 +784,12 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
{
struct i915_request *rq = workload->req;
struct intel_vgpu *vgpu = workload->vgpu;
- struct intel_gvt *gvt = vgpu->gvt;
struct drm_i915_gem_object *ctx_obj = rq->context->state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
void *src;
unsigned long context_gpa, context_page_num;
int i;
- struct drm_i915_private *dev_priv = gvt->dev_priv;
u32 ring_base;
u32 head, tail;
u16 wrap_count;
@@ -811,14 +810,14 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
head = (wrap_count << RB_HEAD_WRAP_CNT_OFF) | tail;
- ring_base = dev_priv->engine[workload->ring_id]->mmio_base;
+ ring_base = rq->engine->mmio_base;
vgpu_vreg_t(vgpu, RING_TAIL(ring_base)) = tail;
vgpu_vreg_t(vgpu, RING_HEAD(ring_base)) = head;
context_page_num = rq->engine->context_size;
context_page_num = context_page_num >> PAGE_SHIFT;
- if (IS_BROADWELL(gvt->dev_priv) && rq->engine->id == RCS0)
+ if (IS_BROADWELL(rq->i915) && rq->engine->id == RCS0)
context_page_num = 19;
i = 2;
@@ -869,7 +868,7 @@ void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+ struct drm_i915_private *dev_priv = vgpu->gvt->gt->i915;
struct intel_engine_cs *engine;
struct intel_vgpu_workload *pos, *n;
intel_engine_mask_t tmp;
@@ -966,54 +965,47 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
mutex_unlock(&vgpu->vgpu_lock);
}
-struct workload_thread_param {
- struct intel_gvt *gvt;
- int ring_id;
-};
-
-static int workload_thread(void *priv)
+static int workload_thread(void *arg)
{
- struct workload_thread_param *p = (struct workload_thread_param *)priv;
- struct intel_gvt *gvt = p->gvt;
- int ring_id = p->ring_id;
+ struct intel_engine_cs *engine = arg;
+ const bool need_force_wake = INTEL_GEN(engine->i915) >= 9;
+ struct intel_gvt *gvt = engine->i915->gvt;
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload = NULL;
struct intel_vgpu *vgpu = NULL;
int ret;
- bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
DEFINE_WAIT_FUNC(wait, woken_wake_function);
- struct intel_runtime_pm *rpm = &gvt->dev_priv->runtime_pm;
-
- kfree(p);
- gvt_dbg_core("workload thread for ring %d started\n", ring_id);
+ gvt_dbg_core("workload thread for ring %s started\n", engine->name);
while (!kthread_should_stop()) {
- add_wait_queue(&scheduler->waitq[ring_id], &wait);
+ intel_wakeref_t wakeref;
+
+ add_wait_queue(&scheduler->waitq[engine->id], &wait);
do {
- workload = pick_next_workload(gvt, ring_id);
+ workload = pick_next_workload(gvt, engine);
if (workload)
break;
wait_woken(&wait, TASK_INTERRUPTIBLE,
MAX_SCHEDULE_TIMEOUT);
} while (!kthread_should_stop());
- remove_wait_queue(&scheduler->waitq[ring_id], &wait);
+ remove_wait_queue(&scheduler->waitq[engine->id], &wait);
if (!workload)
break;
- gvt_dbg_sched("ring id %d next workload %p vgpu %d\n",
- workload->ring_id, workload,
- workload->vgpu->id);
+ gvt_dbg_sched("ring %s next workload %p vgpu %d\n",
+ engine->name, workload,
+ workload->vgpu->id);
- intel_runtime_pm_get(rpm);
+ wakeref = intel_runtime_pm_get(engine->uncore->rpm);
- gvt_dbg_sched("ring id %d will dispatch workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s will dispatch workload %p\n",
+ engine->name, workload);
if (need_force_wake)
- intel_uncore_forcewake_get(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_get(engine->uncore,
+ FORCEWAKE_ALL);
/*
* Update the vReg of the vGPU which submitted this
* workload. The vGPU may use these registers for checking
@@ -1030,21 +1022,21 @@ static int workload_thread(void *priv)
goto complete;
}
- gvt_dbg_sched("ring id %d wait workload %p\n",
- workload->ring_id, workload);
+ gvt_dbg_sched("ring %s wait workload %p\n",
+ engine->name, workload);
i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
complete:
gvt_dbg_sched("will complete workload %p, status: %d\n",
- workload, workload->status);
+ workload, workload->status);
- complete_current_workload(gvt, ring_id);
+ complete_current_workload(gvt, engine->id);
if (need_force_wake)
- intel_uncore_forcewake_put(&gvt->dev_priv->uncore,
- FORCEWAKE_ALL);
+ intel_uncore_forcewake_put(engine->uncore,
+ FORCEWAKE_ALL);
- intel_runtime_pm_put_unchecked(rpm);
+ intel_runtime_pm_put(engine->uncore->rpm, wakeref);
if (ret && (vgpu_is_vm_unhealthy(ret)))
enter_failsafe_mode(vgpu, GVT_FAILSAFE_GUEST_ERR);
}
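
The rewritten workload_thread() takes the engine pointer as its thread argument directly, which is what lets the heap-allocated workload_thread_param and its error handling go away in the init hunk below. A userspace model of the same move, using pthreads purely for illustration:

#include <pthread.h>
#include <stdio.h>

struct engine {
	const char *name;
};

/* the engine pointer is the thread argument; nothing to allocate or free */
static void *worker(void *arg)
{
	struct engine *engine = arg;

	printf("workload thread for ring %s started\n", engine->name);
	return NULL;
}

int main(void)
{
	static struct engine rcs0 = { .name = "rcs0" };
	pthread_t thread;

	pthread_create(&thread, NULL, worker, &rcs0);
	pthread_join(thread, NULL);
	return 0;
}
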
@@ -1073,7 +1065,7 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
gvt_dbg_core("clean workload scheduler\n");
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
atomic_notifier_chain_unregister(
&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
@@ -1084,7 +1076,6 @@ void intel_gvt_clean_workload_scheduler(struct intel_gvt *gvt)
int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
- struct workload_thread_param *param = NULL;
struct intel_engine_cs *engine;
enum intel_engine_id i;
int ret;
@@ -1093,20 +1084,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
init_waitqueue_head(&scheduler->workload_complete_wq);
- for_each_engine(engine, gvt->dev_priv, i) {
+ for_each_engine(engine, gvt->gt, i) {
init_waitqueue_head(&scheduler->waitq[i]);
- param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (!param) {
- ret = -ENOMEM;
- goto err;
- }
-
- param->gvt = gvt;
- param->ring_id = i;
-
- scheduler->thread[i] = kthread_run(workload_thread, param,
- "gvt workload %d", i);
+ scheduler->thread[i] = kthread_run(workload_thread, engine,
+ "gvt:%s", engine->name);
if (IS_ERR(scheduler->thread[i])) {
gvt_err("fail to create workload thread\n");
ret = PTR_ERR(scheduler->thread[i]);
@@ -1118,11 +1100,11 @@ int intel_gvt_init_workload_scheduler(struct intel_gvt *gvt)
atomic_notifier_chain_register(&engine->context_status_notifier,
&gvt->shadow_ctx_notifier_block[i]);
}
+
return 0;
+
err:
intel_gvt_clean_workload_scheduler(gvt);
- kfree(param);
- param = NULL;
return ret;
}
@@ -1160,7 +1142,7 @@ void intel_vgpu_clean_submission(struct intel_vgpu *vgpu)
intel_vgpu_select_submission_ops(vgpu, ALL_ENGINES, 0);
i915_context_ppgtt_root_restore(s, i915_vm_to_ppgtt(s->shadow[0]->vm));
- for_each_engine(engine, vgpu->gvt->dev_priv, id)
+ for_each_engine(engine, vgpu->gvt->gt, id)
intel_context_unpin(s->shadow[id]);
kmem_cache_destroy(s->workloads);
@@ -1217,7 +1199,7 @@ i915_context_ppgtt_root_save(struct intel_vgpu_submission *s,
*/
int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
{
- struct drm_i915_private *i915 = vgpu->gvt->dev_priv;
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
struct intel_engine_cs *engine;
struct i915_ppgtt *ppgtt;
@@ -1230,7 +1212,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
i915_context_ppgtt_root_save(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
struct intel_context *ce;
INIT_LIST_HEAD(&s->workload_q_head[i]);
@@ -1246,7 +1228,8 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
ce->vm = i915_vm_get(&ppgtt->vm);
intel_context_set_single_submission(ce);
- if (!USES_GUC_SUBMISSION(i915)) { /* Max ring buffer size */
+ /* Max ring buffer size */
+ if (!intel_uc_wants_guc_submission(&engine->gt->uc)) {
const unsigned int ring_size = 512 * SZ_4K;
ce->ring = __intel_context_ring_size(ring_size);
@@ -1282,7 +1265,7 @@ int intel_vgpu_setup_submission(struct intel_vgpu *vgpu)
out_shadow_ctx:
i915_context_ppgtt_root_restore(s, ppgtt);
- for_each_engine(engine, i915, i) {
+ for_each_engine(engine, vgpu->gvt->gt, i) {
if (IS_ERR(s->shadow[i]))
break;
@@ -1309,6 +1292,7 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
intel_engine_mask_t engine_mask,
unsigned int interface)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
struct intel_vgpu_submission *s = &vgpu->submission;
const struct intel_vgpu_submission_ops *ops[] = {
[INTEL_VGPU_EXECLIST_SUBMISSION] =
@@ -1316,10 +1300,11 @@ int intel_vgpu_select_submission_ops(struct intel_vgpu *vgpu,
};
int ret;
- if (WARN_ON(interface >= ARRAY_SIZE(ops)))
+ if (drm_WARN_ON(&i915->drm, interface >= ARRAY_SIZE(ops)))
return -EINVAL;
- if (WARN_ON(interface == 0 && engine_mask != ALL_ENGINES))
+ if (drm_WARN_ON(&i915->drm,
+ interface == 0 && engine_mask != ALL_ENGINES))
return -EINVAL;
if (s->active)
@@ -1441,7 +1426,7 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
/**
* intel_vgpu_create_workload - create a vGPU workload
* @vgpu: a vGPU
- * @ring_id: ring index
+ * @engine: the engine
* @desc: a guest context descriptor
*
* This function is called when creating a vGPU workload.
@@ -1452,14 +1437,14 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
*
*/
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc)
{
struct intel_vgpu_submission *s = &vgpu->submission;
- struct list_head *q = workload_q_head(vgpu, ring_id);
+ struct list_head *q = workload_q_head(vgpu, engine);
struct intel_vgpu_workload *last_workload = NULL;
struct intel_vgpu_workload *workload = NULL;
- struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
u32 guest_head;
@@ -1486,10 +1471,10 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
list_for_each_entry_reverse(last_workload, q, list) {
if (same_context(&last_workload->ctx_desc, desc)) {
- gvt_dbg_el("ring id %d cur workload == last\n",
- ring_id);
+ gvt_dbg_el("ring %s cur workload == last\n",
+ engine->name);
gvt_dbg_el("ctx head %x real head %lx\n", head,
- last_workload->rb_tail);
+ last_workload->rb_tail);
/*
* cannot use guest context head pointer here,
* as it might not be updated at this time
@@ -1499,7 +1484,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("ring id %d begin a new workload\n", ring_id);
+ gvt_dbg_el("ring %s begin a new workload\n", engine->name);
/* record some ring buffer register values for scan and shadow */
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1519,7 +1504,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
if (IS_ERR(workload))
return workload;
- workload->ring_id = ring_id;
+ workload->engine = engine;
workload->ctx_desc = *desc;
workload->ring_context_gpa = ring_context_gpa;
workload->rb_head = head;
@@ -1528,7 +1513,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
workload->rb_start = start;
workload->rb_ctl = ctl;
- if (ring_id == RCS0) {
+ if (engine->id == RCS0) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
@@ -1566,8 +1551,8 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
}
}
- gvt_dbg_el("workload %p ring id %d head %x tail %x start %x ctl %x\n",
- workload, ring_id, head, tail, start, ctl);
+ gvt_dbg_el("workload %p ring %s head %x tail %x start %x ctl %x\n",
+ workload, engine->name, head, tail, start, ctl);
ret = prepare_mm(workload);
if (ret) {
@@ -1578,10 +1563,11 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
/* Only scan and shadow the first workload in the queue
* as there is only one pre-allocated buf-obj for shadow.
*/
- if (list_empty(workload_q_head(vgpu, ring_id))) {
- intel_runtime_pm_get(&dev_priv->runtime_pm);
- ret = intel_gvt_scan_and_shadow_workload(workload);
- intel_runtime_pm_put_unchecked(&dev_priv->runtime_pm);
+ if (list_empty(q)) {
+ intel_wakeref_t wakeref;
+
+ with_intel_runtime_pm(engine->gt->uncore->rpm, wakeref)
+ ret = intel_gvt_scan_and_shadow_workload(workload);
}
if (ret) {
@@ -1601,7 +1587,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload)
{
list_add_tail(&workload->list,
- workload_q_head(workload->vgpu, workload->ring_id));
+ workload_q_head(workload->vgpu, workload->engine));
intel_gvt_kick_schedule(workload->vgpu->gvt);
- wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->ring_id]);
+ wake_up(&workload->vgpu->gvt->scheduler.waitq[workload->engine->id]);
}
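
The create_workload hunk above also converts an explicit intel_runtime_pm_get()/intel_runtime_pm_put_unchecked() pair into with_intel_runtime_pm(), which scopes the wakeref to a single statement. A rough standalone model of how such a with_*() macro can be built on a for loop (an assumption about the shape, not the kernel's exact definition):

#include <stdio.h>

static int get_ref(void)
{
	puts("get");
	return 1;	/* assume acquisition always succeeds */
}

static void put_ref(int ref)
{
	(void)ref;
	puts("put");
}

/* acquire in the for-init, release in the increment, body runs once */
#define with_ref(ref) \
	for ((ref) = get_ref(); (ref); put_ref(ref), (ref) = 0)

int main(void)
{
	int ref;

	with_ref(ref)
		puts("body runs with the reference held");
	return 0;
}
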
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index c50d14a9ce85..bf7fc0ca4cb1 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -79,7 +79,7 @@ struct intel_shadow_wa_ctx {
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
- int ring_id;
+ const struct intel_engine_cs *engine;
struct i915_request *req;
/* if this workload has been dispatched to i915? */
bool dispatched;
@@ -129,8 +129,8 @@ struct intel_vgpu_shadow_bb {
bool ppgtt;
};
-#define workload_q_head(vgpu, ring_id) \
- (&(vgpu->submission.workload_q_head[ring_id]))
+#define workload_q_head(vgpu, e) \
+ (&(vgpu)->submission.workload_q_head[(e)->id])
void intel_vgpu_queue_workload(struct intel_vgpu_workload *workload);
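
The new workload_q_head() also parenthesizes both macro parameters, which matters for operator precedence once callers pass expressions. A tiny demonstration of why:

#include <stdio.h>

#define BAD_DOUBLE(x)	(x * 2)
#define GOOD_DOUBLE(x)	((x) * 2)

int main(void)
{
	printf("%d\n", BAD_DOUBLE(1 + 2));	/* 1 + 2*2 == 5 */
	printf("%d\n", GOOD_DOUBLE(1 + 2));	/* (1+2)*2 == 6 */
	return 0;
}
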
@@ -155,7 +155,8 @@ extern const struct intel_vgpu_submission_ops
intel_vgpu_execlist_submission_ops;
struct intel_vgpu_workload *
-intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
+intel_vgpu_create_workload(struct intel_vgpu *vgpu,
+ const struct intel_engine_cs *engine,
struct execlist_ctx_descriptor_format *desc);
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index 85bd9bf4f6ee..78f14f04d2ea 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -37,6 +37,7 @@
void populate_pvinfo_page(struct intel_vgpu *vgpu)
{
+ struct drm_i915_private *i915 = vgpu->gvt->gt->i915;
/* setup the ballooning information */
vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
@@ -69,7 +70,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)
vgpu_hidden_gmadr_base(vgpu), vgpu_hidden_sz(vgpu));
gvt_dbg_core("fence size %d\n", vgpu_fence_sz(vgpu));
- WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
+ drm_WARN_ON(&i915->drm, sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
#define VGPU_MAX_WEIGHT 16
@@ -148,12 +149,12 @@ int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
gvt->types[i].avail_instance = min(low_avail / vgpu_types[i].low_mm,
high_avail / vgpu_types[i].high_mm);
- if (IS_GEN(gvt->dev_priv, 8))
+ if (IS_GEN(gvt->gt->i915, 8))
sprintf(gvt->types[i].name, "GVTg_V4_%s",
- vgpu_types[i].name);
- else if (IS_GEN(gvt->dev_priv, 9))
+ vgpu_types[i].name);
+ else if (IS_GEN(gvt->gt->i915, 9))
sprintf(gvt->types[i].name, "GVTg_V5_%s",
- vgpu_types[i].name);
+ vgpu_types[i].name);
gvt_dbg_core("type[%d]: %s avail %u low %u high %u fence %u weight %u res %s\n",
i, gvt->types[i].name,
@@ -271,10 +272,11 @@ void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
{
struct intel_gvt *gvt = vgpu->gvt;
+ struct drm_i915_private *i915 = gvt->gt->i915;
mutex_lock(&vgpu->vgpu_lock);
- WARN(vgpu->active, "vGPU is still active!\n");
+ drm_WARN(&i915->drm, vgpu->active, "vGPU is still active!\n");
intel_gvt_debugfs_remove_vgpu(vgpu);
intel_vgpu_clean_sched_policy(vgpu);
@@ -426,9 +428,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
if (ret)
goto out_clean_sched_policy;
- /*TODO: add more platforms support */
- if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
- ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
+ ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D);
if (ret)
goto out_clean_sched_policy;
@@ -560,9 +560,9 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
intel_vgpu_reset_mmio(vgpu, dmlr);
populate_pvinfo_page(vgpu);
- intel_vgpu_reset_display(vgpu);
if (dmlr) {
+ intel_vgpu_reset_display(vgpu);
intel_vgpu_reset_cfg_space(vgpu);
/* only reset the failsafe mode when dmlr reset */
vgpu->failsafe = false;
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index f3da5c06f331..c4048628188a 100644
--- a/drivers/gpu/drm/i915/i915_active.c
+++ b/drivers/gpu/drm/i915/i915_active.c
@@ -7,6 +7,7 @@
#include <linux/debugobjects.h>
#include "gt/intel_context.h"
+#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"
@@ -390,13 +391,23 @@ out:
return err;
}
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
+ struct dma_fence *prev;
+
/* We expect the caller to manage the exclusive timeline ordering */
GEM_BUG_ON(i915_active_is_idle(ref));
- if (!__i915_active_fence_set(&ref->excl, f))
+ rcu_read_lock();
+ prev = __i915_active_fence_set(&ref->excl, f);
+ if (prev)
+ prev = dma_fence_get_rcu(prev);
+ else
atomic_inc(&ref->count);
+ rcu_read_unlock();
+
+ return prev;
}
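
i915_active_set_exclusive() now hands the previous fence back to the caller instead of dropping it, taking a reference under RCU before returning. A simplified single-slot model of the new contract, using a C11 atomic exchange in place of the RCU-protected fence pointer:

#include <stdatomic.h>
#include <stdio.h>

static _Atomic(int *) slot;	/* stands in for ref->excl.fence */

/* install a new value and hand the old one back to the caller */
static int *set_exclusive(int *newp)
{
	return atomic_exchange(&slot, newp);
}

int main(void)
{
	int a = 1, b = 2;
	int *prev;

	prev = set_exclusive(&a);
	printf("first prev: %p\n", (void *)prev);	/* NULL */
	prev = set_exclusive(&b);
	printf("second prev: %d\n", *prev);		/* 1: caller owns it */
	return 0;
}
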
bool i915_active_acquire_if_busy(struct i915_active *ref)
@@ -416,13 +427,15 @@ int i915_active_acquire(struct i915_active *ref)
if (err)
return err;
- if (!atomic_read(&ref->count) && ref->active)
- err = ref->active(ref);
- if (!err) {
- spin_lock_irq(&ref->tree_lock); /* vs __active_retire() */
- debug_active_activate(ref);
- atomic_inc(&ref->count);
- spin_unlock_irq(&ref->tree_lock);
+ if (likely(!i915_active_acquire_if_busy(ref))) {
+ if (ref->active)
+ err = ref->active(ref);
+ if (!err) {
+ spin_lock_irq(&ref->tree_lock); /* __active_retire() */
+ debug_active_activate(ref);
+ atomic_inc(&ref->count);
+ spin_unlock_irq(&ref->tree_lock);
+ }
}
mutex_unlock(&ref->mutex);
@@ -440,6 +453,9 @@ static void enable_signaling(struct i915_active_fence *active)
{
struct dma_fence *fence;
+ if (unlikely(is_barrier(active)))
+ return;
+
fence = i915_active_fence_get(active);
if (!fence)
return;
@@ -448,26 +464,49 @@ static void enable_signaling(struct i915_active_fence *active)
dma_fence_put(fence);
}
-int i915_active_wait(struct i915_active *ref)
+static int flush_barrier(struct active_node *it)
{
- struct active_node *it, *n;
- int err = 0;
+ struct intel_engine_cs *engine;
- might_sleep();
+ if (likely(!is_barrier(&it->base)))
+ return 0;
- if (!i915_active_acquire_if_busy(ref))
+ engine = __barrier_to_engine(it);
+ smp_rmb(); /* serialise with add_active_barriers */
+ if (!is_barrier(&it->base))
return 0;
- /* Flush lazy signals */
+ return intel_engine_flush_barriers(engine);
+}
+
+static int flush_lazy_signals(struct i915_active *ref)
+{
+ struct active_node *it, *n;
+ int err = 0;
+
enable_signaling(&ref->excl);
rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- if (is_barrier(&it->base)) /* unconnected idle barrier */
- continue;
+ err = flush_barrier(it); /* unconnected idle barrier? */
+ if (err)
+ break;
enable_signaling(&it->base);
}
- /* Any fence added after the wait begins will not be auto-signaled */
+ return err;
+}
+
+int i915_active_wait(struct i915_active *ref)
+{
+ int err;
+
+ might_sleep();
+
+ if (!i915_active_acquire_if_busy(ref))
+ return 0;
+
+ /* Any fence added after the wait begins will not be auto-signaled */
+ err = flush_lazy_signals(ref);
i915_active_release(ref);
if (err)
return err;
@@ -479,25 +518,81 @@ int i915_active_wait(struct i915_active *ref)
return 0;
}
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
+static int __await_active(struct i915_active_fence *active,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
+{
+ struct dma_fence *fence;
+
+ if (is_barrier(active)) /* XXX flush the barrier? */
+ return 0;
+
+ fence = i915_active_fence_get(active);
+ if (fence) {
+ int err;
+
+ err = fn(arg, fence);
+ dma_fence_put(fence);
+ if (err < 0)
+ return err;
+ }
+
+ return 0;
+}
+
+static int await_active(struct i915_active *ref,
+ unsigned int flags,
+ int (*fn)(void *arg, struct dma_fence *fence),
+ void *arg)
{
int err = 0;
+ /* We must always wait for the exclusive fence! */
if (rcu_access_pointer(ref->excl.fence)) {
- struct dma_fence *fence;
-
- rcu_read_lock();
- fence = dma_fence_get_rcu_safe(&ref->excl.fence);
- rcu_read_unlock();
- if (fence) {
- err = i915_request_await_dma_fence(rq, fence);
- dma_fence_put(fence);
+ err = __await_active(&ref->excl, fn, arg);
+ if (err)
+ return err;
+ }
+
+ if (flags & I915_ACTIVE_AWAIT_ALL && i915_active_acquire_if_busy(ref)) {
+ struct active_node *it, *n;
+
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
+ err = __await_active(&it->base, fn, arg);
+ if (err)
+ break;
}
+ i915_active_release(ref);
+ if (err)
+ return err;
}
- /* In the future we may choose to await on all fences */
+ return 0;
+}
- return err;
+static int rq_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_request_await_dma_fence(arg, fence);
+}
+
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, rq_await_fence, rq);
+}
+
+static int sw_await_fence(void *arg, struct dma_fence *fence)
+{
+ return i915_sw_fence_await_dma_fence(arg, fence, 0,
+ GFP_NOWAIT | __GFP_NOWARN);
+}
+
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags)
+{
+ return await_active(ref, flags, sw_await_fence, fence);
}
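
The await_active() refactor turns one request-specific waiter into a generic walker parameterized by a callback plus an opaque argument; rq_await_fence() and sw_await_fence() are then thin adaptors. A self-contained sketch of that callback shape (hypothetical fence type, not the dma_fence API):

#include <stdio.h>

struct fence { int seqno; };

/* one walker, parameterized by a callback and an opaque argument */
static int for_each_fence(int (*fn)(void *arg, struct fence *f), void *arg)
{
	struct fence fences[] = { { 1 }, { 2 }, { 3 } };
	unsigned int i;

	for (i = 0; i < 3; i++) {
		int err = fn(arg, &fences[i]);

		if (err)
			return err;
	}
	return 0;
}

/* thin adaptor, analogous in shape to rq_await_fence() */
static int print_await(void *arg, struct fence *f)
{
	printf("%s awaits fence %d\n", (const char *)arg, f->seqno);
	return 0;
}

int main(void)
{
	return for_each_fence(print_await, (void *)"rq");
}
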
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
@@ -605,7 +700,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
struct intel_engine_cs *engine)
{
intel_engine_mask_t tmp, mask = engine->mask;
- struct llist_node *pos = NULL, *next;
+ struct llist_node *first = NULL, *last = NULL;
struct intel_gt *gt = engine->gt;
int err;
@@ -621,8 +716,10 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
* We can then use the preallocated nodes in
* i915_active_acquire_barrier()
*/
+ GEM_BUG_ON(!mask);
for_each_engine_masked(engine, gt, mask, tmp) {
u64 idx = engine->kernel_context->timeline->fence_context;
+ struct llist_node *prev = first;
struct active_node *node;
node = reuse_idle_barrier(ref, idx);
@@ -656,23 +753,23 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));
GEM_BUG_ON(barrier_to_engine(node) != engine);
- next = barrier_to_ll(node);
- next->next = pos;
- if (!pos)
- pos = next;
+ first = barrier_to_ll(node);
+ first->next = prev;
+ if (!last)
+ last = first;
intel_engine_pm_get(engine);
}
GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
- llist_add_batch(next, pos, &ref->preallocated_barriers);
+ llist_add_batch(first, last, &ref->preallocated_barriers);
return 0;
unwind:
- while (pos) {
- struct active_node *node = barrier_from_ll(pos);
+ while (first) {
+ struct active_node *node = barrier_from_ll(first);
- pos = pos->next;
+ first = first->next;
atomic_dec(&ref->count);
intel_engine_pm_put(barrier_to_engine(node));
@@ -809,7 +906,6 @@ __i915_active_fence_set(struct i915_active_fence *active,
__list_del_entry(&active->cb.node);
spin_unlock(prev->lock); /* serialise with prev->cb_list */
}
- GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
list_add_tail(&active->cb.node, &fence->cb_list);
spin_unlock_irqrestore(fence->lock, flags);
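
The preallocate-barrier hunk above renames pos/next to first/last so the chain's true endpoints are tracked as nodes are pushed on the front, letting llist_add_batch() splice the whole run in one call. A standalone model of building such a batch:

#include <stdio.h>

struct node {
	struct node *next;
	int v;
};

int main(void)
{
	struct node pool[3] = { { NULL, 10 }, { NULL, 20 }, { NULL, 30 } };
	struct node *first = NULL, *last = NULL;
	struct node *n;
	int i;

	/* push on the front while remembering both endpoints */
	for (i = 0; i < 3; i++) {
		struct node *prev = first;

		first = &pool[i];
		first->next = prev;
		if (!last)
			last = first;
	}

	/* first..last now describes the whole batch for one splice */
	for (n = first; n; n = n->next)
		printf("%d\n", n->v);	/* 30 20 10 */
	return 0;
}
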
diff --git a/drivers/gpu/drm/i915/i915_active.h b/drivers/gpu/drm/i915/i915_active.h
index b571f675c795..b3282ae7913c 100644
--- a/drivers/gpu/drm/i915/i915_active.h
+++ b/drivers/gpu/drm/i915/i915_active.h
@@ -173,7 +173,8 @@ i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
return i915_active_ref(ref, i915_request_timeline(rq), &rq->fence);
}
-void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
+struct dma_fence *
+i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f);
static inline bool i915_active_has_exclusive(struct i915_active *ref)
{
@@ -182,12 +183,24 @@ static inline bool i915_active_has_exclusive(struct i915_active *ref)
int i915_active_wait(struct i915_active *ref);
-int i915_request_await_active(struct i915_request *rq, struct i915_active *ref);
+int i915_sw_fence_await_active(struct i915_sw_fence *fence,
+ struct i915_active *ref,
+ unsigned int flags);
+int i915_request_await_active(struct i915_request *rq,
+ struct i915_active *ref,
+ unsigned int flags);
+#define I915_ACTIVE_AWAIT_ALL BIT(0)
int i915_active_acquire(struct i915_active *ref);
bool i915_active_acquire_if_busy(struct i915_active *ref);
void i915_active_release(struct i915_active *ref);
+static inline void __i915_active_acquire(struct i915_active *ref)
+{
+ GEM_BUG_ON(!atomic_read(&ref->count));
+ atomic_inc(&ref->count);
+}
+
static inline bool
i915_active_is_idle(const struct i915_active *ref)
{
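
The new __i915_active_acquire() helper is the unlocked fast path: it may only bump the count while a reference is already held, which the GEM_BUG_ON() documents. A minimal model with assert() standing in for GEM_BUG_ON():

#include <assert.h>
#include <stdatomic.h>

static atomic_int count = 1;	/* caller already holds one reference */

/* legal only while active: no locking, just an unchecked increment */
static void acquire_extra(void)
{
	assert(atomic_load(&count) > 0);	/* GEM_BUG_ON() stand-in */
	atomic_fetch_add(&count, 1);
}

int main(void)
{
	acquire_extra();
	return atomic_load(&count) == 2 ? 0 : 1;
}
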
diff --git a/drivers/gpu/drm/i915/i915_buddy.c b/drivers/gpu/drm/i915/i915_buddy.c
index 66883af64ca1..20babbdb297d 100644
--- a/drivers/gpu/drm/i915/i915_buddy.c
+++ b/drivers/gpu/drm/i915/i915_buddy.c
@@ -312,7 +312,8 @@ i915_buddy_alloc(struct i915_buddy_mm *mm, unsigned int order)
return block;
out_free:
- __i915_buddy_free(mm, block);
+ if (i != order)
+ __i915_buddy_free(mm, block);
return ERR_PTR(err);
}
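
The buddy fix guards the error path so a block the loop never split off is not freed a second time. A generic model of that only-free-what-you-created shape, with made-up helpers rather than the buddy allocator's real logic:

#include <stdlib.h>

static void *try_split(void)
{
	return malloc(16);	/* NULL on failure */
}

static void *alloc_with_splits(int order, int i)
{
	void *block = NULL;

	while (i != order) {
		void *next = try_split();

		if (!next)
			goto out_free;
		free(block);	/* drop the previous intermediate */
		block = next;
		i--;
	}
	return block;

out_free:
	if (block)	/* only free what this call itself created */
		free(block);
	return NULL;
}

int main(void)
{
	free(alloc_with_splits(0, 3));
	return 0;
}
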
diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c
index a0e437aa65b7..189b573d02be 100644
--- a/drivers/gpu/drm/i915/i915_cmd_parser.c
+++ b/drivers/gpu/drm/i915/i915_cmd_parser.c
@@ -803,10 +803,11 @@ static bool validate_cmds_sorted(const struct intel_engine_cs *engine,
u32 curr = desc->cmd.value & desc->cmd.mask;
if (curr < previous) {
- DRM_ERROR("CMD: %s [%d] command table not sorted: "
- "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
- engine->name, engine->id,
- i, j, curr, previous);
+ drm_err(&engine->i915->drm,
+ "CMD: %s [%d] command table not sorted: "
+ "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, j, curr, previous);
ret = false;
}
@@ -829,10 +830,11 @@ static bool check_sorted(const struct intel_engine_cs *engine,
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) {
- DRM_ERROR("CMD: %s [%d] register table not sorted: "
- "entry=%d reg=0x%08X prev=0x%08X\n",
- engine->name, engine->id,
- i, curr, previous);
+ drm_err(&engine->i915->drm,
+ "CMD: %s [%d] register table not sorted: "
+ "entry=%d reg=0x%08X prev=0x%08X\n",
+ engine->name, engine->id,
+ i, curr, previous);
ret = false;
}
@@ -1010,18 +1012,21 @@ void intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
}
if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
- DRM_ERROR("%s: command descriptions are not sorted\n",
- engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: command descriptions are not sorted\n",
+ engine->name);
return;
}
if (!validate_regs_sorted(engine)) {
- DRM_ERROR("%s: registers are not sorted\n", engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: registers are not sorted\n", engine->name);
return;
}
ret = init_hash_table(engine, cmd_tables, cmd_table_count);
if (ret) {
- DRM_ERROR("%s: initialised failed!\n", engine->name);
+ drm_err(&engine->i915->drm,
+ "%s: initialised failed!\n", engine->name);
fini_hash_table(engine);
return;
}
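
The cmd-parser conversion from DRM_ERROR() to drm_err() ties each message to a specific drm_device, which is what distinguishes engines on multi-GPU systems. A rough model of a device-prefixed logging macro (GNU C ##__VA_ARGS__, which the kernel itself relies on):

#include <stdio.h>

struct drm_device { const char *unique; };

/* every message names the device it came from */
#define drm_err(drm, fmt, ...) \
	fprintf(stderr, "[drm:%s] *ERROR* " fmt, (drm)->unique, ##__VA_ARGS__)

int main(void)
{
	struct drm_device dev = { .unique = "0000:00:02.0" };

	drm_err(&dev, "%s: registers are not sorted\n", "rcs0");
	return 0;
}
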
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index d5a9b8a964c2..6ca797128aa1 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -30,14 +30,6 @@
#include <linux/sort.h>
#include <drm/drm_debugfs.h>
-#include <drm/drm_fourcc.h>
-
-#include "display/intel_display_types.h"
-#include "display/intel_dp.h"
-#include "display/intel_fbc.h"
-#include "display/intel_hdcp.h"
-#include "display/intel_hdmi.h"
-#include "display/intel_psr.h"
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_pm.h"
@@ -48,9 +40,9 @@
#include "gt/uc/intel_guc_submission.h"
#include "i915_debugfs.h"
+#include "i915_debugfs_params.h"
#include "i915_irq.h"
#include "i915_trace.h"
-#include "intel_csr.h"
#include "intel_pm.h"
#include "intel_sideband.h"
@@ -127,8 +119,8 @@ stringify_page_sizes(unsigned int page_sizes, char *buf, size_t len)
}
}
-static void
-describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
+void
+i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj)
{
struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
struct intel_engine_cs *engine;
@@ -673,7 +665,7 @@ static int i915_gem_fence_regs_info(struct seq_file *m, void *data)
if (!vma)
seq_puts(m, "unused");
else
- describe_obj(m, vma->obj);
+ i915_debugfs_describe_obj(m, vma->obj);
seq_putc(m, '\n');
}
rcu_read_unlock();
@@ -1004,367 +996,6 @@ static int i915_frequency_info(struct seq_file *m, void *unused)
return ret;
}
-static int ilk_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- u32 rgvmodectl, rstdbyctl;
- u16 crstandvid;
-
- rgvmodectl = intel_uncore_read(uncore, MEMMODECTL);
- rstdbyctl = intel_uncore_read(uncore, RSTDBYCTL);
- crstandvid = intel_uncore_read16(uncore, CRSTANDVID);
-
- seq_printf(m, "HD boost: %s\n", yesno(rgvmodectl & MEMMODE_BOOST_EN));
- seq_printf(m, "Boost freq: %d\n",
- (rgvmodectl & MEMMODE_BOOST_FREQ_MASK) >>
- MEMMODE_BOOST_FREQ_SHIFT);
- seq_printf(m, "HW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_HWIDLE_EN));
- seq_printf(m, "SW control enabled: %s\n",
- yesno(rgvmodectl & MEMMODE_SWMODE_EN));
- seq_printf(m, "Gated voltage change: %s\n",
- yesno(rgvmodectl & MEMMODE_RCLK_GATE));
- seq_printf(m, "Starting frequency: P%d\n",
- (rgvmodectl & MEMMODE_FSTART_MASK) >> MEMMODE_FSTART_SHIFT);
- seq_printf(m, "Max P-state: P%d\n",
- (rgvmodectl & MEMMODE_FMAX_MASK) >> MEMMODE_FMAX_SHIFT);
- seq_printf(m, "Min P-state: P%d\n", (rgvmodectl & MEMMODE_FMIN_MASK));
- seq_printf(m, "RS1 VID: %d\n", (crstandvid & 0x3f));
- seq_printf(m, "RS2 VID: %d\n", ((crstandvid >> 8) & 0x3f));
- seq_printf(m, "Render standby enabled: %s\n",
- yesno(!(rstdbyctl & RCX_SW_EXIT)));
- seq_puts(m, "Current RS state: ");
- switch (rstdbyctl & RSX_STATUS_MASK) {
- case RSX_STATUS_ON:
- seq_puts(m, "on\n");
- break;
- case RSX_STATUS_RC1:
- seq_puts(m, "RC1\n");
- break;
- case RSX_STATUS_RC1E:
- seq_puts(m, "RC1E\n");
- break;
- case RSX_STATUS_RS1:
- seq_puts(m, "RS1\n");
- break;
- case RSX_STATUS_RS2:
- seq_puts(m, "RS2 (RC6)\n");
- break;
- case RSX_STATUS_RS3:
- seq_puts(m, "RC3 (RC6+)\n");
- break;
- default:
- seq_puts(m, "unknown\n");
- break;
- }
-
- return 0;
-}
-
-static int i915_forcewake_domains(struct seq_file *m, void *data)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- struct intel_uncore *uncore = &i915->uncore;
- struct intel_uncore_forcewake_domain *fw_domain;
- unsigned int tmp;
-
- seq_printf(m, "user.bypass_count = %u\n",
- uncore->user_forcewake_count);
-
- for_each_fw_domain(fw_domain, uncore, tmp)
- seq_printf(m, "%s.wake_count = %u\n",
- intel_uncore_forcewake_domain_to_str(fw_domain->id),
- READ_ONCE(fw_domain->wake_count));
-
- return 0;
-}
-
-static void print_rc6_res(struct seq_file *m,
- const char *title,
- const i915_reg_t reg)
-{
- struct drm_i915_private *i915 = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- with_intel_runtime_pm(&i915->runtime_pm, wakeref)
- seq_printf(m, "%s %u (%llu us)\n", title,
- intel_uncore_read(&i915->uncore, reg),
- intel_rc6_residency_us(&i915->gt.rc6, reg));
-}
-
-static int vlv_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 rcctl1, pw_status;
-
- pw_status = I915_READ(VLV_GTLC_PW_STATUS);
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
-
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & (GEN7_RC_CTL_TO_MODE |
- GEN6_RC_CTL_EI_MODE(1))));
- seq_printf(m, "Render Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (pw_status & VLV_GTLC_PW_MEDIA_STATUS_MASK) ? "Up" : "Down");
-
- print_rc6_res(m, "Render RC6 residency since boot:", VLV_GT_RENDER_RC6);
- print_rc6_res(m, "Media RC6 residency since boot:", VLV_GT_MEDIA_RC6);
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int gen6_drpc_info(struct seq_file *m)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- u32 gt_core_status, rcctl1, rc6vids = 0;
- u32 gen9_powergate_enable = 0, gen9_powergate_status = 0;
-
- gt_core_status = I915_READ_FW(GEN6_GT_CORE_STATUS);
- trace_i915_reg_rw(false, GEN6_GT_CORE_STATUS, gt_core_status, 4, true);
-
- rcctl1 = I915_READ(GEN6_RC_CONTROL);
- if (INTEL_GEN(dev_priv) >= 9) {
- gen9_powergate_enable = I915_READ(GEN9_PG_ENABLE);
- gen9_powergate_status = I915_READ(GEN9_PWRGT_DOMAIN_STATUS);
- }
-
- if (INTEL_GEN(dev_priv) <= 7)
- sandybridge_pcode_read(dev_priv, GEN6_PCODE_READ_RC6VIDS,
- &rc6vids, NULL);
-
- seq_printf(m, "RC1e Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE));
- seq_printf(m, "RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6_ENABLE));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_RENDER_PG_ENABLE));
- seq_printf(m, "Media Well Gating Enabled: %s\n",
- yesno(gen9_powergate_enable & GEN9_MEDIA_PG_ENABLE));
- }
- seq_printf(m, "Deep RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6p_ENABLE));
- seq_printf(m, "Deepest RC6 Enabled: %s\n",
- yesno(rcctl1 & GEN6_RC_CTL_RC6pp_ENABLE));
- seq_puts(m, "Current RC state: ");
- switch (gt_core_status & GEN6_RCn_MASK) {
- case GEN6_RC0:
- if (gt_core_status & GEN6_CORE_CPD_STATE_MASK)
- seq_puts(m, "Core Power Down\n");
- else
- seq_puts(m, "on\n");
- break;
- case GEN6_RC3:
- seq_puts(m, "RC3\n");
- break;
- case GEN6_RC6:
- seq_puts(m, "RC6\n");
- break;
- case GEN6_RC7:
- seq_puts(m, "RC7\n");
- break;
- default:
- seq_puts(m, "Unknown\n");
- break;
- }
-
- seq_printf(m, "Core Power Down: %s\n",
- yesno(gt_core_status & GEN6_CORE_CPD_STATE_MASK));
- if (INTEL_GEN(dev_priv) >= 9) {
- seq_printf(m, "Render Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_RENDER_STATUS_MASK) ? "Up" : "Down");
- seq_printf(m, "Media Power Well: %s\n",
- (gen9_powergate_status &
- GEN9_PWRGT_MEDIA_STATUS_MASK) ? "Up" : "Down");
- }
-
- /* Not exactly sure what this is */
- print_rc6_res(m, "RC6 \"Locked to RPn\" residency since boot:",
- GEN6_GT_GFX_RC6_LOCKED);
- print_rc6_res(m, "RC6 residency since boot:", GEN6_GT_GFX_RC6);
- print_rc6_res(m, "RC6+ residency since boot:", GEN6_GT_GFX_RC6p);
- print_rc6_res(m, "RC6++ residency since boot:", GEN6_GT_GFX_RC6pp);
-
- if (INTEL_GEN(dev_priv) <= 7) {
- seq_printf(m, "RC6 voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 0) & 0xff)));
- seq_printf(m, "RC6+ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 8) & 0xff)));
- seq_printf(m, "RC6++ voltage: %dmV\n",
- GEN6_DECODE_RC6_VID(((rc6vids >> 16) & 0xff)));
- }
-
- return i915_forcewake_domains(m, NULL);
-}
-
-static int i915_drpc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- int err = -ENODEV;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- err = vlv_drpc_info(m);
- else if (INTEL_GEN(dev_priv) >= 6)
- err = gen6_drpc_info(m);
- else
- err = ilk_drpc_info(m);
- }
-
- return err;
-}
-
-static int i915_frontbuffer_tracking(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
-
- seq_printf(m, "FB tracking busy bits: 0x%08x\n",
- dev_priv->fb_tracking.busy_bits);
-
- seq_printf(m, "FB tracking flip bits: 0x%08x\n",
- dev_priv->fb_tracking.flip_bits);
-
- return 0;
-}
-
-static int i915_fbc_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_fbc *fbc = &dev_priv->fbc;
- intel_wakeref_t wakeref;
-
- if (!HAS_FBC(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&fbc->lock);
-
- if (intel_fbc_is_active(dev_priv))
- seq_puts(m, "FBC enabled\n");
- else
- seq_printf(m, "FBC disabled: %s\n", fbc->no_fbc_reason);
-
- if (intel_fbc_is_active(dev_priv)) {
- u32 mask;
-
- if (INTEL_GEN(dev_priv) >= 8)
- mask = I915_READ(IVB_FBC_STATUS2) & BDW_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 7)
- mask = I915_READ(IVB_FBC_STATUS2) & IVB_FBC_COMP_SEG_MASK;
- else if (INTEL_GEN(dev_priv) >= 5)
- mask = I915_READ(ILK_DPFC_STATUS) & ILK_DPFC_COMP_SEG_MASK;
- else if (IS_G4X(dev_priv))
- mask = I915_READ(DPFC_STATUS) & DPFC_COMP_SEG_MASK;
- else
- mask = I915_READ(FBC_STATUS) & (FBC_STAT_COMPRESSING |
- FBC_STAT_COMPRESSED);
-
- seq_printf(m, "Compressing: %s\n", yesno(mask));
- }
-
- mutex_unlock(&fbc->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_fbc_false_color_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- *val = dev_priv->fbc.false_color;
-
- return 0;
-}
-
-static int i915_fbc_false_color_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- u32 reg;
-
- if (INTEL_GEN(dev_priv) < 7 || !HAS_FBC(dev_priv))
- return -ENODEV;
-
- mutex_lock(&dev_priv->fbc.lock);
-
- reg = I915_READ(ILK_DPFC_CONTROL);
- dev_priv->fbc.false_color = val;
-
- I915_WRITE(ILK_DPFC_CONTROL, val ?
- (reg | FBC_CTL_FALSE_COLOR) :
- (reg & ~FBC_CTL_FALSE_COLOR));
-
- mutex_unlock(&dev_priv->fbc.lock);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_fbc_false_color_fops,
- i915_fbc_false_color_get, i915_fbc_false_color_set,
- "%llu\n");
-
-static int i915_ips_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
-
- if (!HAS_IPS(dev_priv))
- return -ENODEV;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "Enabled by kernel parameter: %s\n",
- yesno(i915_modparams.enable_ips));
-
- if (INTEL_GEN(dev_priv) >= 8) {
- seq_puts(m, "Currently: unknown\n");
- } else {
- if (I915_READ(IPS_CTL) & IPS_ENABLE)
- seq_puts(m, "Currently: enabled\n");
- else
- seq_puts(m, "Currently: disabled\n");
- }
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int i915_sr_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- bool sr_enabled = false;
-
- wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_INIT);
-
- if (INTEL_GEN(dev_priv) >= 9)
- /* no global SR status; inspect per-plane WM */;
- else if (HAS_PCH_SPLIT(dev_priv))
- sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
- else if (IS_I965GM(dev_priv) || IS_G4X(dev_priv) ||
- IS_I945G(dev_priv) || IS_I945GM(dev_priv))
- sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
- else if (IS_I915GM(dev_priv))
- sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
- else if (IS_PINEVIEW(dev_priv))
- sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
-
- intel_display_power_put(dev_priv, POWER_DOMAIN_INIT, wakeref);
-
- seq_printf(m, "self-refresh: %s\n", enableddisabled(sr_enabled));
-
- return 0;
-}
-
static int i915_ring_freq_table(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -1406,70 +1037,6 @@ static int i915_ring_freq_table(struct seq_file *m, void *unused)
return 0;
}
-static int i915_opregion(struct seq_file *m, void *unused)
-{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
- if (opregion->header)
- seq_write(m, opregion->header, OPREGION_SIZE);
-
- return 0;
-}
-
-static int i915_vbt(struct seq_file *m, void *unused)
-{
- struct intel_opregion *opregion = &node_to_i915(m->private)->opregion;
-
- if (opregion->vbt)
- seq_write(m, opregion->vbt, opregion->vbt_size);
-
- return 0;
-}
-
-static int i915_gem_framebuffer_info(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_framebuffer *fbdev_fb = NULL;
- struct drm_framebuffer *drm_fb;
-
-#ifdef CONFIG_DRM_FBDEV_EMULATION
- if (dev_priv->fbdev && dev_priv->fbdev->helper.fb) {
- fbdev_fb = to_intel_framebuffer(dev_priv->fbdev->helper.fb);
-
- seq_printf(m, "fbcon size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fbdev_fb->base.width,
- fbdev_fb->base.height,
- fbdev_fb->base.format->depth,
- fbdev_fb->base.format->cpp[0] * 8,
- fbdev_fb->base.modifier,
- drm_framebuffer_read_refcount(&fbdev_fb->base));
- describe_obj(m, intel_fb_obj(&fbdev_fb->base));
- seq_putc(m, '\n');
- }
-#endif
-
- mutex_lock(&dev->mode_config.fb_lock);
- drm_for_each_fb(drm_fb, dev) {
- struct intel_framebuffer *fb = to_intel_framebuffer(drm_fb);
- if (fb == fbdev_fb)
- continue;
-
- seq_printf(m, "user size: %d x %d, depth %d, %d bpp, modifier 0x%llx, refcount %d, obj ",
- fb->base.width,
- fb->base.height,
- fb->base.format->depth,
- fb->base.format->cpp[0] * 8,
- fb->base.modifier,
- drm_framebuffer_read_refcount(&fb->base));
- describe_obj(m, intel_fb_obj(&fb->base));
- seq_putc(m, '\n');
- }
- mutex_unlock(&dev->mode_config.fb_lock);
-
- return 0;
-}
-
static void describe_ctx_ring(struct seq_file *m, struct intel_ring *ring)
{
seq_printf(m, " (ringbuffer, space: %d, head: %u, tail: %u, emit: %u)",
@@ -1515,7 +1082,7 @@ static int i915_context_status(struct seq_file *m, void *unused)
if (intel_context_pin_if_active(ce)) {
seq_printf(m, "%s: ", ce->engine->name);
if (ce->state)
- describe_obj(m, ce->state->obj);
+ i915_debugfs_describe_obj(m, ce->state->obj);
describe_ctx_ring(m, ce->ring);
seq_putc(m, '\n');
intel_context_unpin(ce);
@@ -1752,10 +1319,8 @@ stringify_guc_log_type(enum guc_log_buffer_type type)
return "";
}
-static void i915_guc_log_info(struct seq_file *m,
- struct drm_i915_private *dev_priv)
+static void i915_guc_log_info(struct seq_file *m, struct intel_guc_log *log)
{
- struct intel_guc_log *log = &dev_priv->gt.uc.guc.log;
enum guc_log_buffer_type type;
if (!intel_guc_log_relay_created(log)) {
@@ -1779,11 +1344,12 @@ static void i915_guc_log_info(struct seq_file *m,
static int i915_guc_info(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- i915_guc_log_info(m, dev_priv);
+ i915_guc_log_info(m, &uc->guc.log);
/* Add more as required ... */
@@ -1793,11 +1359,11 @@ static int i915_guc_info(struct seq_file *m, void *data)
static int i915_guc_stage_pool(struct seq_file *m, void *data)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_guc *guc = &dev_priv->gt.uc.guc;
- struct guc_stage_desc *desc = guc->stage_desc_pool_vaddr;
+ struct intel_uc *uc = &dev_priv->gt.uc;
+ struct guc_stage_desc *desc = uc->guc.stage_desc_pool_vaddr;
int index;
- if (!USES_GUC_SUBMISSION(dev_priv))
+ if (!intel_uc_uses_guc_submission(uc))
return -ENODEV;
for (index = 0; index < GUC_MAX_STAGE_DESCRIPTORS; index++, desc++) {
@@ -1884,11 +1450,12 @@ static int i915_guc_log_dump(struct seq_file *m, void *data)
static int i915_guc_log_level_get(void *data, u64 *val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- *val = intel_guc_log_get_level(&dev_priv->gt.uc.guc.log);
+ *val = intel_guc_log_get_level(&uc->guc.log);
return 0;
}
@@ -1896,11 +1463,12 @@ static int i915_guc_log_level_get(void *data, u64 *val)
static int i915_guc_log_level_set(void *data, u64 val)
{
struct drm_i915_private *dev_priv = data;
+ struct intel_uc *uc = &dev_priv->gt.uc;
- if (!USES_GUC(dev_priv))
+ if (!intel_uc_uses_guc(uc))
return -ENODEV;
- return intel_guc_log_set_level(&dev_priv->gt.uc.guc.log, val);
+ return intel_guc_log_set_level(&uc->guc.log, val);
}
DEFINE_SIMPLE_ATTRIBUTE(i915_guc_log_level_fops,
@@ -1913,7 +1481,7 @@ static int i915_guc_log_relay_open(struct inode *inode, struct file *file)
struct intel_guc *guc = &i915->gt.uc.guc;
struct intel_guc_log *log = &guc->log;
- if (!intel_guc_is_running(guc))
+ if (!intel_guc_is_ready(guc))
return -ENODEV;
file->private_data = log;
@@ -1963,253 +1531,6 @@ static const struct file_operations i915_guc_log_relay_fops = {
.release = i915_guc_log_relay_release,
};
-static int i915_psr_sink_status_show(struct seq_file *m, void *data)
-{
- u8 val;
- static const char * const sink_status[] = {
- "inactive",
- "transition to active, capture and display",
- "active, display from RFB",
- "active, capture and display on sink device timings",
- "transition to inactive, capture and display, timing re-sync",
- "reserved",
- "reserved",
- "sink internal error",
- };
- struct drm_connector *connector = m->private;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- int ret;
-
- if (!CAN_PSR(dev_priv)) {
- seq_puts(m, "PSR Unsupported\n");
- return -ENODEV;
- }
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- ret = drm_dp_dpcd_readb(&intel_dp->aux, DP_PSR_STATUS, &val);
-
- if (ret == 1) {
- const char *str = "unknown";
-
- val &= DP_PSR_SINK_STATE_MASK;
- if (val < ARRAY_SIZE(sink_status))
- str = sink_status[val];
- seq_printf(m, "Sink PSR status: 0x%x [%s]\n", val, str);
- } else {
- return ret;
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_psr_sink_status);
-
-static void
-psr_source_status(struct drm_i915_private *dev_priv, struct seq_file *m)
-{
- u32 val, status_val;
- const char *status = "unknown";
-
- if (dev_priv->psr.psr2_enabled) {
- static const char * const live_status[] = {
- "IDLE",
- "CAPTURE",
- "CAPTURE_FS",
- "SLEEP",
- "BUFON_FW",
- "ML_UP",
- "SU_STANDBY",
- "FAST_SLEEP",
- "DEEP_SLEEP",
- "BUF_ON",
- "TG_ON"
- };
- val = I915_READ(EDP_PSR2_STATUS(dev_priv->psr.transcoder));
- status_val = (val & EDP_PSR2_STATUS_STATE_MASK) >>
- EDP_PSR2_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- } else {
- static const char * const live_status[] = {
- "IDLE",
- "SRDONACK",
- "SRDENT",
- "BUFOFF",
- "BUFON",
- "AUXACK",
- "SRDOFFACK",
- "SRDENT_ON",
- };
- val = I915_READ(EDP_PSR_STATUS(dev_priv->psr.transcoder));
- status_val = (val & EDP_PSR_STATUS_STATE_MASK) >>
- EDP_PSR_STATUS_STATE_SHIFT;
- if (status_val < ARRAY_SIZE(live_status))
- status = live_status[status_val];
- }
-
- seq_printf(m, "Source PSR status: %s [0x%08x]\n", status, val);
-}
-
-static int i915_edp_psr_status(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_psr *psr = &dev_priv->psr;
- intel_wakeref_t wakeref;
- const char *status;
- bool enabled;
- u32 val;
-
- if (!HAS_PSR(dev_priv))
- return -ENODEV;
-
- seq_printf(m, "Sink support: %s", yesno(psr->sink_support));
- if (psr->dp)
- seq_printf(m, " [0x%02x]", psr->dp->psr_dpcd[0]);
- seq_puts(m, "\n");
-
- if (!psr->sink_support)
- return 0;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
- mutex_lock(&psr->lock);
-
- if (psr->enabled)
- status = psr->psr2_enabled ? "PSR2 enabled" : "PSR1 enabled";
- else
- status = "disabled";
- seq_printf(m, "PSR mode: %s\n", status);
-
- if (!psr->enabled) {
- seq_printf(m, "PSR sink not reliable: %s\n",
- yesno(psr->sink_not_reliable));
-
- goto unlock;
- }
-
- if (psr->psr2_enabled) {
- val = I915_READ(EDP_PSR2_CTL(dev_priv->psr.transcoder));
- enabled = val & EDP_PSR2_ENABLE;
- } else {
- val = I915_READ(EDP_PSR_CTL(dev_priv->psr.transcoder));
- enabled = val & EDP_PSR_ENABLE;
- }
- seq_printf(m, "Source PSR ctl: %s [0x%08x]\n",
- enableddisabled(enabled), val);
- psr_source_status(dev_priv, m);
- seq_printf(m, "Busy frontbuffer bits: 0x%08x\n",
- psr->busy_frontbuffer_bits);
-
- /*
- * SKL+ Perf counter is reset to 0 everytime DC state is entered
- */
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
- val = I915_READ(EDP_PSR_PERF_CNT(dev_priv->psr.transcoder));
- val &= EDP_PSR_PERF_CNT_MASK;
- seq_printf(m, "Performance counter: %u\n", val);
- }
-
- if (psr->debug & I915_PSR_DEBUG_IRQ) {
- seq_printf(m, "Last attempted entry at: %lld\n",
- psr->last_entry_attempt);
- seq_printf(m, "Last exit at: %lld\n", psr->last_exit);
- }
-
- if (psr->psr2_enabled) {
- u32 su_frames_val[3];
- int frame;
-
- /*
- * Reading all 3 registers before hand to minimize crossing a
- * frame boundary between register reads
- */
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame += 3) {
- val = I915_READ(PSR2_SU_STATUS(dev_priv->psr.transcoder,
- frame));
- su_frames_val[frame / 3] = val;
- }
-
- seq_puts(m, "Frame:\tPSR2 SU blocks:\n");
-
- for (frame = 0; frame < PSR2_SU_STATUS_FRAMES; frame++) {
- u32 su_blocks;
-
- su_blocks = su_frames_val[frame / 3] &
- PSR2_SU_STATUS_MASK(frame);
- su_blocks = su_blocks >> PSR2_SU_STATUS_SHIFT(frame);
- seq_printf(m, "%d\t%d\n", frame, su_blocks);
- }
- }
-
-unlock:
- mutex_unlock(&psr->lock);
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static int
-i915_edp_psr_debug_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- intel_wakeref_t wakeref;
- int ret;
-
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
-
- DRM_DEBUG_KMS("Setting PSR debug to %llx\n", val);
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- ret = intel_psr_debug_set(dev_priv, val);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return ret;
-}
-
-static int
-i915_edp_psr_debug_get(void *data, u64 *val)
-{
- struct drm_i915_private *dev_priv = data;
-
- if (!CAN_PSR(dev_priv))
- return -ENODEV;
-
- *val = READ_ONCE(dev_priv->psr.debug);
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops,
- i915_edp_psr_debug_get, i915_edp_psr_debug_set,
- "%llu\n");
-
-static int i915_energy_uJ(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- unsigned long long power;
- intel_wakeref_t wakeref;
- u32 units;
-
- if (INTEL_GEN(dev_priv) < 6)
- return -ENODEV;
-
- if (rdmsrl_safe(MSR_RAPL_POWER_UNIT, &power))
- return -ENODEV;
-
- units = (power & 0x1f00) >> 8;
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref)
- power = I915_READ(MCH_SECP_NRG_STTS);
-
- power = (1000000 * power) >> units; /* convert to uJ */
- seq_printf(m, "%llu", power);
-
- return 0;
-}
-
static int i915_runtime_pm_status(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2243,452 +1564,6 @@ static int i915_runtime_pm_status(struct seq_file *m, void *unused)
return 0;
}
-static int i915_power_domain_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct i915_power_domains *power_domains = &dev_priv->power_domains;
- int i;
-
- mutex_lock(&power_domains->lock);
-
- seq_printf(m, "%-25s %s\n", "Power well/domain", "Use count");
- for (i = 0; i < power_domains->power_well_count; i++) {
- struct i915_power_well *power_well;
- enum intel_display_power_domain power_domain;
-
- power_well = &power_domains->power_wells[i];
- seq_printf(m, "%-25s %d\n", power_well->desc->name,
- power_well->count);
-
- for_each_power_domain(power_domain, power_well->desc->domains)
- seq_printf(m, " %-23s %d\n",
- intel_display_power_domain_str(power_domain),
- power_domains->domain_use_count[power_domain]);
- }
-
- mutex_unlock(&power_domains->lock);
-
- return 0;
-}
-
-static int i915_dmc_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- intel_wakeref_t wakeref;
- struct intel_csr *csr;
- i915_reg_t dc5_reg, dc6_reg = {};
-
- if (!HAS_CSR(dev_priv))
- return -ENODEV;
-
- csr = &dev_priv->csr;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- seq_printf(m, "fw loaded: %s\n", yesno(csr->dmc_payload != NULL));
- seq_printf(m, "path: %s\n", csr->fw_path);
-
- if (!csr->dmc_payload)
- goto out;
-
- seq_printf(m, "version: %d.%d\n", CSR_VERSION_MAJOR(csr->version),
- CSR_VERSION_MINOR(csr->version));
-
- if (INTEL_GEN(dev_priv) >= 12) {
- dc5_reg = TGL_DMC_DEBUG_DC5_COUNT;
- dc6_reg = TGL_DMC_DEBUG_DC6_COUNT;
- /*
- * NOTE: DMC_DEBUG3 is a general purpose reg.
- * According to B.Specs:49196 DMC f/w reuses DC5/6 counter
- * reg for DC3CO debugging and validation,
- * but TGL DMC f/w is using DMC_DEBUG3 reg for DC3CO counter.
- */
- seq_printf(m, "DC3CO count: %d\n", I915_READ(DMC_DEBUG3));
- } else {
- dc5_reg = IS_BROXTON(dev_priv) ? BXT_CSR_DC3_DC5_COUNT :
- SKL_CSR_DC3_DC5_COUNT;
- if (!IS_GEN9_LP(dev_priv))
- dc6_reg = SKL_CSR_DC5_DC6_COUNT;
- }
-
- seq_printf(m, "DC3 -> DC5 count: %d\n", I915_READ(dc5_reg));
- if (dc6_reg.reg)
- seq_printf(m, "DC5 -> DC6 count: %d\n", I915_READ(dc6_reg));
-
-out:
- seq_printf(m, "program base: 0x%08x\n", I915_READ(CSR_PROGRAM(0)));
- seq_printf(m, "ssp base: 0x%08x\n", I915_READ(CSR_SSP_BASE));
- seq_printf(m, "htp: 0x%08x\n", I915_READ(CSR_HTP_SKL));
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
-static void intel_seq_print_mode(struct seq_file *m, int tabs,
- const struct drm_display_mode *mode)
-{
- int i;
-
- for (i = 0; i < tabs; i++)
- seq_putc(m, '\t');
-
- seq_printf(m, DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_encoder_info(struct seq_file *m,
- struct intel_crtc *crtc,
- struct intel_encoder *encoder)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_connector_list_iter conn_iter;
- struct drm_connector *connector;
-
- seq_printf(m, "\t[ENCODER:%d:%s]: connectors:\n",
- encoder->base.base.id, encoder->base.name);
-
- drm_connector_list_iter_begin(&dev_priv->drm, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- const struct drm_connector_state *conn_state =
- connector->state;
-
- if (conn_state->best_encoder != &encoder->base)
- continue;
-
- seq_printf(m, "\t\t[CONNECTOR:%d:%s]\n",
- connector->base.id, connector->name);
- }
- drm_connector_list_iter_end(&conn_iter);
-}
-
-static void intel_panel_info(struct seq_file *m, struct intel_panel *panel)
-{
- const struct drm_display_mode *mode = panel->fixed_mode;
-
- seq_printf(m, "\tfixed mode: " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
-}
-
-static void intel_hdcp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- bool hdcp_cap, hdcp2_cap;
-
- hdcp_cap = intel_hdcp_capable(intel_connector);
- hdcp2_cap = intel_hdcp2_capable(intel_connector);
-
- if (hdcp_cap)
- seq_puts(m, "HDCP1.4 ");
- if (hdcp2_cap)
- seq_puts(m, "HDCP2.2 ");
-
- if (!hdcp_cap && !hdcp2_cap)
- seq_puts(m, "None");
-
- seq_puts(m, "\n");
-}
-
-static void intel_dp_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp *intel_dp = enc_to_intel_dp(intel_encoder);
-
- seq_printf(m, "\tDPCD rev: %x\n", intel_dp->dpcd[DP_DPCD_REV]);
- seq_printf(m, "\taudio support: %s\n", yesno(intel_dp->has_audio));
- if (intel_connector->base.connector_type == DRM_MODE_CONNECTOR_eDP)
- intel_panel_info(m, &intel_connector->panel);
-
- drm_dp_downstream_debug(m, intel_dp->dpcd, intel_dp->downstream_ports,
- &intel_dp->aux);
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
-}
-
-static void intel_dp_mst_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_dp_mst_encoder *intel_mst =
- enc_to_mst(intel_encoder);
- struct intel_digital_port *intel_dig_port = intel_mst->primary;
- struct intel_dp *intel_dp = &intel_dig_port->dp;
- bool has_audio = drm_dp_mst_port_has_audio(&intel_dp->mst_mgr,
- intel_connector->port);
-
- seq_printf(m, "\taudio support: %s\n", yesno(has_audio));
-}
-
-static void intel_hdmi_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- struct intel_encoder *intel_encoder = intel_connector->encoder;
- struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(intel_encoder);
-
- seq_printf(m, "\taudio support: %s\n", yesno(intel_hdmi->has_audio));
- if (intel_connector->hdcp.shim) {
- seq_puts(m, "\tHDCP version: ");
- intel_hdcp_info(m, intel_connector);
- }
-}
-
-static void intel_lvds_info(struct seq_file *m,
- struct intel_connector *intel_connector)
-{
- intel_panel_info(m, &intel_connector->panel);
-}
-
-static void intel_connector_info(struct seq_file *m,
- struct drm_connector *connector)
-{
- struct intel_connector *intel_connector = to_intel_connector(connector);
- const struct drm_connector_state *conn_state = connector->state;
- struct intel_encoder *encoder =
- to_intel_encoder(conn_state->best_encoder);
- const struct drm_display_mode *mode;
-
- seq_printf(m, "[CONNECTOR:%d:%s]: status: %s\n",
- connector->base.id, connector->name,
- drm_get_connector_status_name(connector->status));
-
- if (connector->status == connector_status_disconnected)
- return;
-
- seq_printf(m, "\tphysical dimensions: %dx%dmm\n",
- connector->display_info.width_mm,
- connector->display_info.height_mm);
- seq_printf(m, "\tsubpixel order: %s\n",
- drm_get_subpixel_order_name(connector->display_info.subpixel_order));
- seq_printf(m, "\tCEA rev: %d\n", connector->display_info.cea_rev);
-
- if (!encoder)
- return;
-
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DisplayPort:
- case DRM_MODE_CONNECTOR_eDP:
- if (encoder->type == INTEL_OUTPUT_DP_MST)
- intel_dp_mst_info(m, intel_connector);
- else
- intel_dp_info(m, intel_connector);
- break;
- case DRM_MODE_CONNECTOR_LVDS:
- if (encoder->type == INTEL_OUTPUT_LVDS)
- intel_lvds_info(m, intel_connector);
- break;
- case DRM_MODE_CONNECTOR_HDMIA:
- if (encoder->type == INTEL_OUTPUT_HDMI ||
- encoder->type == INTEL_OUTPUT_DDI)
- intel_hdmi_info(m, intel_connector);
- break;
- default:
- break;
- }
-
- seq_printf(m, "\tmodes:\n");
- list_for_each_entry(mode, &connector->modes, head)
- intel_seq_print_mode(m, 2, mode);
-}
-
-static const char *plane_type(enum drm_plane_type type)
-{
- switch (type) {
- case DRM_PLANE_TYPE_OVERLAY:
- return "OVL";
- case DRM_PLANE_TYPE_PRIMARY:
- return "PRI";
- case DRM_PLANE_TYPE_CURSOR:
- return "CUR";
- /*
- * Deliberately omitting default: to generate compiler warnings
- * when a new drm_plane_type gets added.
- */
- }
-
- return "unknown";
-}
-
-static void plane_rotation(char *buf, size_t bufsize, unsigned int rotation)
-{
-	/*
-	 * According to the docs only one DRM_MODE_ROTATE_ value is allowed,
-	 * but print them all anyway so that misuse of the bits is visible.
-	 */
- snprintf(buf, bufsize,
- "%s%s%s%s%s%s(0x%08x)",
- (rotation & DRM_MODE_ROTATE_0) ? "0 " : "",
- (rotation & DRM_MODE_ROTATE_90) ? "90 " : "",
- (rotation & DRM_MODE_ROTATE_180) ? "180 " : "",
- (rotation & DRM_MODE_ROTATE_270) ? "270 " : "",
- (rotation & DRM_MODE_REFLECT_X) ? "FLIPX " : "",
- (rotation & DRM_MODE_REFLECT_Y) ? "FLIPY " : "",
- rotation);
-}
-
-static void intel_plane_uapi_info(struct seq_file *m, struct intel_plane *plane)
-{
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- const struct drm_framebuffer *fb = plane_state->uapi.fb;
- struct drm_format_name_buf format_name;
- struct drm_rect src, dst;
- char rot_str[48];
-
- src = drm_plane_state_src(&plane_state->uapi);
- dst = drm_plane_state_dest(&plane_state->uapi);
-
- if (fb)
- drm_get_format_name(fb->format->format, &format_name);
-
- plane_rotation(rot_str, sizeof(rot_str),
- plane_state->uapi.rotation);
-
- seq_printf(m, "\t\tuapi: fb=%d,%s,%dx%d, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb ? fb->base.id : 0, fb ? format_name.str : "n/a",
- fb ? fb->width : 0, fb ? fb->height : 0,
- DRM_RECT_FP_ARG(&src),
- DRM_RECT_ARG(&dst),
- rot_str);
-}
-
-static void intel_plane_hw_info(struct seq_file *m, struct intel_plane *plane)
-{
- const struct intel_plane_state *plane_state =
- to_intel_plane_state(plane->base.state);
- const struct drm_framebuffer *fb = plane_state->hw.fb;
- struct drm_format_name_buf format_name;
- char rot_str[48];
-
- if (!fb)
- return;
-
- drm_get_format_name(fb->format->format, &format_name);
-
- plane_rotation(rot_str, sizeof(rot_str),
- plane_state->hw.rotation);
-
- seq_printf(m, "\t\thw: fb=%d,%s,%dx%d, visible=%s, src=" DRM_RECT_FP_FMT ", dst=" DRM_RECT_FMT ", rotation=%s\n",
- fb->base.id, format_name.str,
- fb->width, fb->height,
- yesno(plane_state->uapi.visible),
- DRM_RECT_FP_ARG(&plane_state->uapi.src),
- DRM_RECT_ARG(&plane_state->uapi.dst),
- rot_str);
-}
-
-static void intel_plane_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct intel_plane *plane;
-
- for_each_intel_plane_on_crtc(&dev_priv->drm, crtc, plane) {
- seq_printf(m, "\t[PLANE:%d:%s]: type=%s\n",
- plane->base.base.id, plane->base.name,
- plane_type(plane->base.type));
- intel_plane_uapi_info(m, plane);
- intel_plane_hw_info(m, plane);
- }
-}
-
-static void intel_scaler_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- int num_scalers = crtc->num_scalers;
- int i;
-
-	/* Not all platforms have a scaler */
- if (num_scalers) {
- seq_printf(m, "\tnum_scalers=%d, scaler_users=%x scaler_id=%d",
- num_scalers,
- crtc_state->scaler_state.scaler_users,
- crtc_state->scaler_state.scaler_id);
-
- for (i = 0; i < num_scalers; i++) {
- const struct intel_scaler *sc =
- &crtc_state->scaler_state.scalers[i];
-
- seq_printf(m, ", scalers[%d]: use=%s, mode=%x",
- i, yesno(sc->in_use), sc->mode);
- }
- seq_puts(m, "\n");
- } else {
- seq_puts(m, "\tNo scalers available on this platform\n");
- }
-}
-
-static void intel_crtc_info(struct seq_file *m, struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- const struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- struct intel_encoder *encoder;
-
- seq_printf(m, "[CRTC:%d:%s]:\n",
- crtc->base.base.id, crtc->base.name);
-
- seq_printf(m, "\tuapi: enable=%s, active=%s, mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->uapi.enable),
- yesno(crtc_state->uapi.active),
- DRM_MODE_ARG(&crtc_state->uapi.mode));
-
- if (crtc_state->hw.enable) {
- seq_printf(m, "\thw: active=%s, adjusted_mode=" DRM_MODE_FMT "\n",
- yesno(crtc_state->hw.active),
- DRM_MODE_ARG(&crtc_state->hw.adjusted_mode));
-
- seq_printf(m, "\tpipe src size=%dx%d, dither=%s, bpp=%d\n",
- crtc_state->pipe_src_w, crtc_state->pipe_src_h,
- yesno(crtc_state->dither), crtc_state->pipe_bpp);
-
- intel_scaler_info(m, crtc);
- }
-
- for_each_intel_encoder_mask(&dev_priv->drm, encoder,
- crtc_state->uapi.encoder_mask)
- intel_encoder_info(m, crtc, encoder);
-
- intel_plane_info(m, crtc);
-
- seq_printf(m, "\tunderrun reporting: cpu=%s pch=%s\n",
- yesno(!crtc->cpu_fifo_underrun_disabled),
- yesno(!crtc->pch_fifo_underrun_disabled));
-}
-
-static int i915_display_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- intel_wakeref_t wakeref;
-
- wakeref = intel_runtime_pm_get(&dev_priv->runtime_pm);
-
- drm_modeset_lock_all(dev);
-
- seq_printf(m, "CRTC info\n");
- seq_printf(m, "---------\n");
- for_each_intel_crtc(dev, crtc)
- intel_crtc_info(m, crtc);
-
- seq_printf(m, "\n");
- seq_printf(m, "Connector info\n");
- seq_printf(m, "--------------\n");
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter)
- intel_connector_info(m, connector);
- drm_connector_list_iter_end(&conn_iter);
-
- drm_modeset_unlock_all(dev);
-
- intel_runtime_pm_put(&dev_priv->runtime_pm, wakeref);
-
- return 0;
-}
-
static int i915_engine_info(struct seq_file *m, void *unused)
{
struct drm_i915_private *dev_priv = node_to_i915(m->private);
@@ -2733,55 +1608,6 @@ static int i915_shrinker_info(struct seq_file *m, void *unused)
return 0;
}
-static int i915_shared_dplls_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- int i;
-
- drm_modeset_lock_all(dev);
- for (i = 0; i < dev_priv->num_shared_dpll; i++) {
- struct intel_shared_dpll *pll = &dev_priv->shared_dplls[i];
-
- seq_printf(m, "DPLL%i: %s, id: %i\n", i, pll->info->name,
- pll->info->id);
- seq_printf(m, " crtc_mask: 0x%08x, active: 0x%x, on: %s\n",
- pll->state.crtc_mask, pll->active_mask, yesno(pll->on));
- seq_printf(m, " tracked hardware state:\n");
- seq_printf(m, " dpll: 0x%08x\n", pll->state.hw_state.dpll);
- seq_printf(m, " dpll_md: 0x%08x\n",
- pll->state.hw_state.dpll_md);
- seq_printf(m, " fp0: 0x%08x\n", pll->state.hw_state.fp0);
- seq_printf(m, " fp1: 0x%08x\n", pll->state.hw_state.fp1);
- seq_printf(m, " wrpll: 0x%08x\n", pll->state.hw_state.wrpll);
- seq_printf(m, " cfgcr0: 0x%08x\n", pll->state.hw_state.cfgcr0);
- seq_printf(m, " cfgcr1: 0x%08x\n", pll->state.hw_state.cfgcr1);
- seq_printf(m, " mg_refclkin_ctl: 0x%08x\n",
- pll->state.hw_state.mg_refclkin_ctl);
- seq_printf(m, " mg_clktop2_coreclkctl1: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_coreclkctl1);
- seq_printf(m, " mg_clktop2_hsclkctl: 0x%08x\n",
- pll->state.hw_state.mg_clktop2_hsclkctl);
- seq_printf(m, " mg_pll_div0: 0x%08x\n",
- pll->state.hw_state.mg_pll_div0);
- seq_printf(m, " mg_pll_div1: 0x%08x\n",
- pll->state.hw_state.mg_pll_div1);
- seq_printf(m, " mg_pll_lf: 0x%08x\n",
- pll->state.hw_state.mg_pll_lf);
- seq_printf(m, " mg_pll_frac_lock: 0x%08x\n",
- pll->state.hw_state.mg_pll_frac_lock);
- seq_printf(m, " mg_pll_ssc: 0x%08x\n",
- pll->state.hw_state.mg_pll_ssc);
- seq_printf(m, " mg_pll_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_bias);
- seq_printf(m, " mg_pll_tdc_coldst_bias: 0x%08x\n",
- pll->state.hw_state.mg_pll_tdc_coldst_bias);
- }
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
static int i915_wa_registers(struct seq_file *m, void *unused)
{
struct drm_i915_private *i915 = node_to_i915(m->private);
@@ -2802,7 +1628,7 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
for (wa = wal->list; count--; wa++)
seq_printf(m, "0x%X: 0x%08X, mask: 0x%08X\n",
i915_mmio_reg_offset(wa->reg),
- wa->val, wa->mask);
+ wa->set, wa->clr);
seq_printf(m, "\n");
}
@@ -2810,646 +1636,6 @@ static int i915_wa_registers(struct seq_file *m, void *unused)
return 0;
}
-static int i915_ipc_status_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Isochronous Priority Control: %s\n",
- yesno(dev_priv->ipc_enabled));
- return 0;
-}
-
-static int i915_ipc_status_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (!HAS_IPC(dev_priv))
- return -ENODEV;
-
- return single_open(file, i915_ipc_status_show, dev_priv);
-}
-
-static ssize_t i915_ipc_status_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- intel_wakeref_t wakeref;
- bool enable;
- int ret;
-
- ret = kstrtobool_from_user(ubuf, len, &enable);
- if (ret < 0)
- return ret;
-
- with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
- if (!dev_priv->ipc_enabled && enable)
- DRM_INFO("Enabling IPC: WM will be proper only after next commit\n");
- dev_priv->wm.distrust_bios_wm = true;
- dev_priv->ipc_enabled = enable;
- intel_enable_ipc(dev_priv);
- }
-
- return len;
-}
-
-static const struct file_operations i915_ipc_status_fops = {
- .owner = THIS_MODULE,
- .open = i915_ipc_status_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_ipc_status_write
-};
-
-static int i915_ddb_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct skl_ddb_entry *entry;
- struct intel_crtc *crtc;
-
- if (INTEL_GEN(dev_priv) < 9)
- return -ENODEV;
-
- drm_modeset_lock_all(dev);
-
- seq_printf(m, "%-15s%8s%8s%8s\n", "", "Start", "End", "Size");
-
- for_each_intel_crtc(&dev_priv->drm, crtc) {
- struct intel_crtc_state *crtc_state =
- to_intel_crtc_state(crtc->base.state);
- enum pipe pipe = crtc->pipe;
- enum plane_id plane_id;
-
- seq_printf(m, "Pipe %c\n", pipe_name(pipe));
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- entry = &crtc_state->wm.skl.plane_ddb_y[plane_id];
- seq_printf(m, " Plane%-8d%8u%8u%8u\n", plane_id + 1,
- entry->start, entry->end,
- skl_ddb_entry_size(entry));
- }
-
- entry = &crtc_state->wm.skl.plane_ddb_y[PLANE_CURSOR];
- seq_printf(m, " %-13s%8u%8u%8u\n", "Cursor", entry->start,
- entry->end, skl_ddb_entry_size(entry));
- }
-
- drm_modeset_unlock_all(dev);
-
- return 0;
-}
-
-static void drrs_status_per_crtc(struct seq_file *m,
- struct drm_device *dev,
- struct intel_crtc *intel_crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(dev);
- struct i915_drrs *drrs = &dev_priv->drrs;
- int vrefresh = 0;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->state->crtc != &intel_crtc->base)
- continue;
-
- seq_printf(m, "%s:\n", connector->name);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- if (dev_priv->vbt.drrs_type == STATIC_DRRS_SUPPORT)
- seq_puts(m, "\tVBT: DRRS_type: Static");
- else if (dev_priv->vbt.drrs_type == SEAMLESS_DRRS_SUPPORT)
- seq_puts(m, "\tVBT: DRRS_type: Seamless");
- else if (dev_priv->vbt.drrs_type == DRRS_NOT_SUPPORTED)
- seq_puts(m, "\tVBT: DRRS_type: None");
- else
- seq_puts(m, "\tVBT: DRRS_type: FIXME: Unrecognized Value");
-
- seq_puts(m, "\n\n");
-
- if (to_intel_crtc_state(intel_crtc->base.state)->has_drrs) {
- struct intel_panel *panel;
-
- mutex_lock(&drrs->mutex);
- /* DRRS Supported */
- seq_puts(m, "\tDRRS Supported: Yes\n");
-
- /* disable_drrs() will make drrs->dp NULL */
- if (!drrs->dp) {
- seq_puts(m, "Idleness DRRS: Disabled\n");
- if (dev_priv->psr.enabled)
- seq_puts(m,
- "\tAs PSR is enabled, DRRS is not enabled\n");
- mutex_unlock(&drrs->mutex);
- return;
- }
-
- panel = &drrs->dp->attached_connector->panel;
- seq_printf(m, "\t\tBusy_frontbuffer_bits: 0x%X",
- drrs->busy_frontbuffer_bits);
-
- seq_puts(m, "\n\t\t");
- if (drrs->refresh_rate_type == DRRS_HIGH_RR) {
- seq_puts(m, "DRRS_State: DRRS_HIGH_RR\n");
- vrefresh = panel->fixed_mode->vrefresh;
- } else if (drrs->refresh_rate_type == DRRS_LOW_RR) {
- seq_puts(m, "DRRS_State: DRRS_LOW_RR\n");
- vrefresh = panel->downclock_mode->vrefresh;
- } else {
- seq_printf(m, "DRRS_State: Unknown(%d)\n",
- drrs->refresh_rate_type);
- mutex_unlock(&drrs->mutex);
- return;
- }
- seq_printf(m, "\t\tVrefresh: %d", vrefresh);
-
- seq_puts(m, "\n\t\t");
- mutex_unlock(&drrs->mutex);
- } else {
-		/* DRRS not supported. Print the VBT parameter */
-		seq_puts(m, "\tDRRS Supported: No");
- }
- seq_puts(m, "\n");
-}
-
-static int i915_drrs_status(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *intel_crtc;
- int active_crtc_cnt = 0;
-
- drm_modeset_lock_all(dev);
- for_each_intel_crtc(dev, intel_crtc) {
- if (intel_crtc->base.state->active) {
- active_crtc_cnt++;
- seq_printf(m, "\nCRTC %d: ", active_crtc_cnt);
-
- drrs_status_per_crtc(m, dev, intel_crtc);
- }
- }
- drm_modeset_unlock_all(dev);
-
- if (!active_crtc_cnt)
- seq_puts(m, "No active crtc found\n");
-
- return 0;
-}
-
-static int i915_dp_mst_info(struct seq_file *m, void *unused)
-{
- struct drm_i915_private *dev_priv = node_to_i915(m->private);
- struct drm_device *dev = &dev_priv->drm;
- struct intel_encoder *intel_encoder;
- struct intel_digital_port *intel_dig_port;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- intel_encoder = intel_attached_encoder(to_intel_connector(connector));
- if (!intel_encoder || intel_encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- intel_dig_port = enc_to_dig_port(intel_encoder);
- if (!intel_dig_port->dp.can_mst)
- continue;
-
- seq_printf(m, "MST Source Port [ENCODER:%d:%s]\n",
- intel_dig_port->base.base.base.id,
- intel_dig_port->base.base.name);
- drm_dp_mst_dump_topology(m, &intel_dig_port->dp.mst_mgr);
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static ssize_t i915_displayport_test_active_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- char *input_buffer;
- int status = 0;
- struct drm_device *dev;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
- int val = 0;
-
- dev = ((struct seq_file *)file->private_data)->private;
-
- if (len == 0)
- return 0;
-
- input_buffer = memdup_user_nul(ubuf, len);
- if (IS_ERR(input_buffer))
- return PTR_ERR(input_buffer);
-
- DRM_DEBUG_DRIVER("Copied %d bytes from user\n", (unsigned int)len);
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- status = kstrtoint(input_buffer, 10, &val);
- if (status < 0)
- break;
- DRM_DEBUG_DRIVER("Got %d for test active\n", val);
- /* To prevent erroneous activation of the compliance
- * testing code, only accept an actual value of 1 here
- */
- if (val == 1)
- intel_dp->compliance.test_active = true;
- else
- intel_dp->compliance.test_active = false;
- }
- }
- drm_connector_list_iter_end(&conn_iter);
- kfree(input_buffer);
- if (status < 0)
- return status;
-
- *offp += len;
- return len;
-}
-
-static int i915_displayport_test_active_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_active)
- seq_puts(m, "1");
- else
- seq_puts(m, "0");
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-
-static int i915_displayport_test_active_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_displayport_test_active_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_displayport_test_active_fops = {
- .owner = THIS_MODULE,
- .open = i915_displayport_test_active_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_displayport_test_active_write
-};
-
-static int i915_displayport_test_data_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_EDID_READ)
- seq_printf(m, "%lx",
- intel_dp->compliance.test_data.edid);
- else if (intel_dp->compliance.test_type ==
- DP_TEST_LINK_VIDEO_PATTERN) {
- seq_printf(m, "hdisplay: %d\n",
- intel_dp->compliance.test_data.hdisplay);
- seq_printf(m, "vdisplay: %d\n",
- intel_dp->compliance.test_data.vdisplay);
- seq_printf(m, "bpc: %u\n",
- intel_dp->compliance.test_data.bpc);
- }
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_data);
-
-static int i915_displayport_test_type_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- struct drm_connector *connector;
- struct drm_connector_list_iter conn_iter;
- struct intel_dp *intel_dp;
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
-
- if (connector->connector_type !=
- DRM_MODE_CONNECTOR_DisplayPort)
- continue;
-
- encoder = to_intel_encoder(connector->encoder);
- if (encoder && encoder->type == INTEL_OUTPUT_DP_MST)
- continue;
-
- if (encoder && connector->status == connector_status_connected) {
- intel_dp = enc_to_intel_dp(encoder);
- seq_printf(m, "%02lx", intel_dp->compliance.test_type);
- } else
- seq_puts(m, "0");
- }
- drm_connector_list_iter_end(&conn_iter);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_displayport_test_type);
-
-static void wm_latency_show(struct seq_file *m, const u16 wm[8])
-{
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- int level;
- int num_levels;
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- drm_modeset_lock_all(dev);
-
- for (level = 0; level < num_levels; level++) {
- unsigned int latency = wm[level];
-
- /*
- * - WM1+ latency values in 0.5us units
- * - latencies are in us on gen9/vlv/chv
- */
- if (INTEL_GEN(dev_priv) >= 9 ||
- IS_VALLEYVIEW(dev_priv) ||
- IS_CHERRYVIEW(dev_priv) ||
- IS_G4X(dev_priv))
- latency *= 10;
- else if (level > 0)
- latency *= 5;
-
- seq_printf(m, "WM%d %u (%u.%u usec)\n",
- level, wm[level], latency / 10, latency % 10);
- }
-
- drm_modeset_unlock_all(dev);
-}
-
-static int pri_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.pri_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int spr_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.spr_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int cur_wm_latency_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- const u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.cur_latency;
-
- wm_latency_show(m, latencies);
-
- return 0;
-}
-
-static int pri_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (INTEL_GEN(dev_priv) < 5 && !IS_G4X(dev_priv))
- return -ENODEV;
-
- return single_open(file, pri_wm_latency_show, dev_priv);
-}
-
-static int spr_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, spr_wm_latency_show, dev_priv);
-}
-
-static int cur_wm_latency_open(struct inode *inode, struct file *file)
-{
- struct drm_i915_private *dev_priv = inode->i_private;
-
- if (HAS_GMCH(dev_priv))
- return -ENODEV;
-
- return single_open(file, cur_wm_latency_show, dev_priv);
-}
-
-static ssize_t wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp, u16 wm[8])
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct drm_device *dev = &dev_priv->drm;
- u16 new[8] = { 0 };
- int num_levels;
- int level;
- int ret;
- char tmp[32];
-
- if (IS_CHERRYVIEW(dev_priv))
- num_levels = 3;
- else if (IS_VALLEYVIEW(dev_priv))
- num_levels = 1;
- else if (IS_G4X(dev_priv))
- num_levels = 3;
- else
- num_levels = ilk_wm_max_level(dev_priv) + 1;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- ret = sscanf(tmp, "%hu %hu %hu %hu %hu %hu %hu %hu",
- &new[0], &new[1], &new[2], &new[3],
- &new[4], &new[5], &new[6], &new[7]);
- if (ret != num_levels)
- return -EINVAL;
-
- drm_modeset_lock_all(dev);
-
- for (level = 0; level < num_levels; level++)
- wm[level] = new[level];
-
- drm_modeset_unlock_all(dev);
-
- return len;
-}
-
-
-static ssize_t pri_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.pri_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t spr_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.spr_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static ssize_t cur_wm_latency_write(struct file *file, const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- u16 *latencies;
-
- if (INTEL_GEN(dev_priv) >= 9)
- latencies = dev_priv->wm.skl_latency;
- else
- latencies = dev_priv->wm.cur_latency;
-
- return wm_latency_write(file, ubuf, len, offp, latencies);
-}
-
-static const struct file_operations i915_pri_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = pri_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = pri_wm_latency_write
-};
-
-static const struct file_operations i915_spr_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = spr_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = spr_wm_latency_write
-};
-
-static const struct file_operations i915_cur_wm_latency_fops = {
- .owner = THIS_MODULE,
- .open = cur_wm_latency_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = cur_wm_latency_write
-};
-
static int
i915_wedged_get(void *data, u64 *val)
{
@@ -3641,7 +1827,8 @@ i915_cache_sharing_set(void *data, u64 val)
if (val > 3)
return -EINVAL;
- DRM_DEBUG_DRIVER("Manually setting uncore sharing to %llu\n", val);
+ drm_dbg(&dev_priv->drm,
+ "Manually setting uncore sharing to %llu\n", val);
with_intel_runtime_pm(&dev_priv->runtime_pm, wakeref) {
u32 snpcr;
@@ -3947,292 +2134,6 @@ static const struct file_operations i915_forcewake_fops = {
.release = i915_forcewake_release,
};
-static int i915_hpd_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
-
- /* Synchronize with everything first in case there's been an HPD
- * storm, but we haven't finished handling it in the kernel yet
- */
- intel_synchronize_irq(dev_priv);
- flush_work(&dev_priv->hotplug.dig_port_work);
- flush_delayed_work(&dev_priv->hotplug.hotplug_work);
-
- seq_printf(m, "Threshold: %d\n", hotplug->hpd_storm_threshold);
- seq_printf(m, "Detected: %s\n",
- yesno(delayed_work_pending(&hotplug->reenable_work)));
-
- return 0;
-}
-
-static ssize_t i915_hpd_storm_ctl_write(struct file *file,
- const char __user *ubuf, size_t len,
- loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
- unsigned int new_threshold;
- int i;
- char *newline;
- char tmp[16];
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- if (strcmp(tmp, "reset") == 0)
- new_threshold = HPD_STORM_DEFAULT_THRESHOLD;
- else if (kstrtouint(tmp, 10, &new_threshold) != 0)
- return -EINVAL;
-
- if (new_threshold > 0)
- DRM_DEBUG_KMS("Setting HPD storm detection threshold to %d\n",
- new_threshold);
- else
- DRM_DEBUG_KMS("Disabling HPD storm detection\n");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_storm_threshold = new_threshold;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
- return len;
-}
-
-static int i915_hpd_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_storm_ctl_show, inode->i_private);
-}
-
-static const struct file_operations i915_hpd_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_storm_ctl_write
-};
-
-static int i915_hpd_short_storm_ctl_show(struct seq_file *m, void *data)
-{
- struct drm_i915_private *dev_priv = m->private;
-
- seq_printf(m, "Enabled: %s\n",
- yesno(dev_priv->hotplug.hpd_short_storm_enabled));
-
- return 0;
-}
-
-static int
-i915_hpd_short_storm_ctl_open(struct inode *inode, struct file *file)
-{
- return single_open(file, i915_hpd_short_storm_ctl_show,
- inode->i_private);
-}
-
-static ssize_t i915_hpd_short_storm_ctl_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- struct seq_file *m = file->private_data;
- struct drm_i915_private *dev_priv = m->private;
- struct i915_hotplug *hotplug = &dev_priv->hotplug;
- char *newline;
- char tmp[16];
- int i;
- bool new_state;
-
- if (len >= sizeof(tmp))
- return -EINVAL;
-
- if (copy_from_user(tmp, ubuf, len))
- return -EFAULT;
-
- tmp[len] = '\0';
-
- /* Strip newline, if any */
- newline = strchr(tmp, '\n');
- if (newline)
- *newline = '\0';
-
- /* Reset to the "default" state for this system */
- if (strcmp(tmp, "reset") == 0)
- new_state = !HAS_DP_MST(dev_priv);
- else if (kstrtobool(tmp, &new_state) != 0)
- return -EINVAL;
-
- DRM_DEBUG_KMS("%sabling HPD short storm detection\n",
- new_state ? "En" : "Dis");
-
- spin_lock_irq(&dev_priv->irq_lock);
- hotplug->hpd_short_storm_enabled = new_state;
- /* Reset the HPD storm stats so we don't accidentally trigger a storm */
- for_each_hpd_pin(i)
- hotplug->stats[i].count = 0;
- spin_unlock_irq(&dev_priv->irq_lock);
-
- /* Re-enable hpd immediately if we were in an irq storm */
- flush_delayed_work(&dev_priv->hotplug.reenable_work);
-
- return len;
-}
-
-static const struct file_operations i915_hpd_short_storm_ctl_fops = {
- .owner = THIS_MODULE,
- .open = i915_hpd_short_storm_ctl_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_hpd_short_storm_ctl_write,
-};
-
-static int i915_drrs_ctl_set(void *data, u64 val)
-{
- struct drm_i915_private *dev_priv = data;
- struct drm_device *dev = &dev_priv->drm;
- struct intel_crtc *crtc;
-
- if (INTEL_GEN(dev_priv) < 7)
- return -ENODEV;
-
- for_each_intel_crtc(dev, crtc) {
- struct drm_connector_list_iter conn_iter;
- struct intel_crtc_state *crtc_state;
- struct drm_connector *connector;
- struct drm_crtc_commit *commit;
- int ret;
-
- ret = drm_modeset_lock_single_interruptible(&crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(crtc->base.state);
-
- if (!crtc_state->hw.active ||
- !crtc_state->has_drrs)
- goto out;
-
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (ret)
- goto out;
- }
-
- drm_connector_list_iter_begin(dev, &conn_iter);
- drm_for_each_connector_iter(connector, &conn_iter) {
- struct intel_encoder *encoder;
- struct intel_dp *intel_dp;
-
- if (!(crtc_state->uapi.connector_mask &
- drm_connector_mask(connector)))
- continue;
-
- encoder = intel_attached_encoder(to_intel_connector(connector));
- if (encoder->type != INTEL_OUTPUT_EDP)
- continue;
-
- DRM_DEBUG_DRIVER("Manually %sabling DRRS. %llu\n",
- val ? "en" : "dis", val);
-
- intel_dp = enc_to_intel_dp(encoder);
- if (val)
- intel_edp_drrs_enable(intel_dp,
- crtc_state);
- else
- intel_edp_drrs_disable(intel_dp,
- crtc_state);
- }
- drm_connector_list_iter_end(&conn_iter);
-
-out:
- drm_modeset_unlock(&crtc->base.mutex);
- if (ret)
- return ret;
- }
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(i915_drrs_ctl_fops, NULL, i915_drrs_ctl_set, "%llu\n");
-
-static ssize_t
-i915_fifo_underrun_reset_write(struct file *filp,
- const char __user *ubuf,
- size_t cnt, loff_t *ppos)
-{
- struct drm_i915_private *dev_priv = filp->private_data;
- struct intel_crtc *intel_crtc;
- struct drm_device *dev = &dev_priv->drm;
- int ret;
- bool reset;
-
- ret = kstrtobool_from_user(ubuf, cnt, &reset);
- if (ret)
- return ret;
-
- if (!reset)
- return cnt;
-
- for_each_intel_crtc(dev, intel_crtc) {
- struct drm_crtc_commit *commit;
- struct intel_crtc_state *crtc_state;
-
- ret = drm_modeset_lock_single_interruptible(&intel_crtc->base.mutex);
- if (ret)
- return ret;
-
- crtc_state = to_intel_crtc_state(intel_crtc->base.state);
- commit = crtc_state->uapi.commit;
- if (commit) {
- ret = wait_for_completion_interruptible(&commit->hw_done);
- if (!ret)
- ret = wait_for_completion_interruptible(&commit->flip_done);
- }
-
- if (!ret && crtc_state->hw.active) {
- DRM_DEBUG_KMS("Re-arming FIFO underruns on pipe %c\n",
- pipe_name(intel_crtc->pipe));
-
- intel_crtc_arm_fifo_underrun(intel_crtc, crtc_state);
- }
-
- drm_modeset_unlock(&intel_crtc->base.mutex);
-
- if (ret)
- return ret;
- }
-
- ret = intel_fbc_reset_underrun(dev_priv);
- if (ret)
- return ret;
-
- return cnt;
-}
-
-static const struct file_operations i915_fifo_underrun_reset_ops = {
- .owner = THIS_MODULE,
- .open = simple_open,
- .write = i915_fifo_underrun_reset_write,
- .llseek = default_llseek,
-};
-
static const struct drm_info_list i915_debugfs_list[] = {
{"i915_capabilities", i915_capabilities, 0},
{"i915_gem_objects", i915_gem_object_info, 0},
@@ -4245,34 +2146,16 @@ static const struct drm_info_list i915_debugfs_list[] = {
{"i915_guc_stage_pool", i915_guc_stage_pool, 0},
{"i915_huc_load_status", i915_huc_load_status_info, 0},
{"i915_frequency_info", i915_frequency_info, 0},
- {"i915_drpc_info", i915_drpc_info, 0},
{"i915_ring_freq_table", i915_ring_freq_table, 0},
- {"i915_frontbuffer_tracking", i915_frontbuffer_tracking, 0},
- {"i915_fbc_status", i915_fbc_status, 0},
- {"i915_ips_status", i915_ips_status, 0},
- {"i915_sr_status", i915_sr_status, 0},
- {"i915_opregion", i915_opregion, 0},
- {"i915_vbt", i915_vbt, 0},
- {"i915_gem_framebuffer", i915_gem_framebuffer_info, 0},
{"i915_context_status", i915_context_status, 0},
- {"i915_forcewake_domains", i915_forcewake_domains, 0},
{"i915_swizzle_info", i915_swizzle_info, 0},
{"i915_llc", i915_llc, 0},
- {"i915_edp_psr_status", i915_edp_psr_status, 0},
- {"i915_energy_uJ", i915_energy_uJ, 0},
{"i915_runtime_pm_status", i915_runtime_pm_status, 0},
- {"i915_power_domain_info", i915_power_domain_info, 0},
- {"i915_dmc_info", i915_dmc_info, 0},
- {"i915_display_info", i915_display_info, 0},
{"i915_engine_info", i915_engine_info, 0},
{"i915_rcs_topology", i915_rcs_topology, 0},
{"i915_shrinker_info", i915_shrinker_info, 0},
- {"i915_shared_dplls_info", i915_shared_dplls_info, 0},
- {"i915_dp_mst_info", i915_dp_mst_info, 0},
{"i915_wa_registers", i915_wa_registers, 0},
- {"i915_ddb_info", i915_ddb_info, 0},
{"i915_sseu_status", i915_sseu_status, 0},
- {"i915_drrs_status", i915_drrs_status, 0},
{"i915_rps_boost_info", i915_rps_boost_info, 0},
};
#define I915_DEBUGFS_ENTRIES ARRAY_SIZE(i915_debugfs_list)
@@ -4289,21 +2172,8 @@ static const struct i915_debugfs_files {
{"i915_error_state", &i915_error_state_fops},
{"i915_gpu_info", &i915_gpu_info_fops},
#endif
- {"i915_fifo_underrun_reset", &i915_fifo_underrun_reset_ops},
- {"i915_pri_wm_latency", &i915_pri_wm_latency_fops},
- {"i915_spr_wm_latency", &i915_spr_wm_latency_fops},
- {"i915_cur_wm_latency", &i915_cur_wm_latency_fops},
- {"i915_fbc_false_color", &i915_fbc_false_color_fops},
- {"i915_dp_test_data", &i915_displayport_test_data_fops},
- {"i915_dp_test_type", &i915_displayport_test_type_fops},
- {"i915_dp_test_active", &i915_displayport_test_active_fops},
{"i915_guc_log_level", &i915_guc_log_level_fops},
{"i915_guc_log_relay", &i915_guc_log_relay_fops},
- {"i915_hpd_storm_ctl", &i915_hpd_storm_ctl_fops},
- {"i915_hpd_short_storm_ctl", &i915_hpd_short_storm_ctl_fops},
- {"i915_ipc_status", &i915_ipc_status_fops},
- {"i915_drrs_ctl", &i915_drrs_ctl_fops},
- {"i915_edp_psr_debug", &i915_edp_psr_debug_fops}
};
int i915_debugfs_register(struct drm_i915_private *dev_priv)
@@ -4311,9 +2181,10 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
struct drm_minor *minor = dev_priv->drm.primary;
int i;
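+	/* expose the runtime-adjustable i915.* module parameters in debugfs */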
+ i915_debugfs_params(dev_priv);
+
debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root,
to_i915(minor->dev), &i915_forcewake_fops);
-
for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) {
debugfs_create_file(i915_debugfs_files[i].name,
S_IRUGO | S_IWUSR,
@@ -4326,254 +2197,3 @@ int i915_debugfs_register(struct drm_i915_private *dev_priv)
I915_DEBUGFS_ENTRIES,
minor->debugfs_root, minor);
}
-
-struct dpcd_block {
- /* DPCD dump start address. */
- unsigned int offset;
- /* DPCD dump end address, inclusive. If unset, .size will be used. */
- unsigned int end;
- /* DPCD dump size. Used if .end is unset. If unset, defaults to 1. */
- size_t size;
- /* Only valid for eDP. */
- bool edp;
-};
-
-static const struct dpcd_block i915_dpcd_debug[] = {
- { .offset = DP_DPCD_REV, .size = DP_RECEIVER_CAP_SIZE },
- { .offset = DP_PSR_SUPPORT, .end = DP_PSR_CAPS },
- { .offset = DP_DOWNSTREAM_PORT_0, .size = 16 },
- { .offset = DP_LINK_BW_SET, .end = DP_EDP_CONFIGURATION_SET },
- { .offset = DP_SINK_COUNT, .end = DP_ADJUST_REQUEST_LANE2_3 },
- { .offset = DP_SET_POWER },
- { .offset = DP_EDP_DPCD_REV },
- { .offset = DP_EDP_GENERAL_CAP_1, .end = DP_EDP_GENERAL_CAP_3 },
- { .offset = DP_EDP_DISPLAY_CONTROL_REGISTER, .end = DP_EDP_BACKLIGHT_FREQ_CAP_MAX_LSB },
- { .offset = DP_EDP_DBC_MINIMUM_BRIGHTNESS_SET, .end = DP_EDP_DBC_MAXIMUM_BRIGHTNESS_SET },
-};
-
-static int i915_dpcd_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- u8 buf[16];
- ssize_t err;
- int i;
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- for (i = 0; i < ARRAY_SIZE(i915_dpcd_debug); i++) {
- const struct dpcd_block *b = &i915_dpcd_debug[i];
- size_t size = b->end ? b->end - b->offset + 1 : (b->size ?: 1);
-
- if (b->edp &&
- connector->connector_type != DRM_MODE_CONNECTOR_eDP)
- continue;
-
- /* low tech for now */
- if (WARN_ON(size > sizeof(buf)))
- continue;
-
- err = drm_dp_dpcd_read(&intel_dp->aux, b->offset, buf, size);
- if (err < 0)
- seq_printf(m, "%04x: ERROR %d\n", b->offset, (int)err);
- else
- seq_printf(m, "%04x: %*ph\n", b->offset, (int)err, buf);
- }
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_dpcd);
-
-static int i915_panel_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_dp *intel_dp =
- enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
- seq_printf(m, "Panel power up delay: %d\n",
- intel_dp->panel_power_up_delay);
- seq_printf(m, "Panel power down delay: %d\n",
- intel_dp->panel_power_down_delay);
- seq_printf(m, "Backlight on delay: %d\n",
- intel_dp->backlight_on_delay);
- seq_printf(m, "Backlight off delay: %d\n",
- intel_dp->backlight_off_delay);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_panel);
-
-static int i915_hdcp_sink_capability_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct intel_connector *intel_connector = to_intel_connector(connector);
-
- if (connector->status != connector_status_connected)
- return -ENODEV;
-
-	/* HDCP must be supported by the connector */
- if (!intel_connector->hdcp.shim)
- return -EINVAL;
-
- seq_printf(m, "%s:%d HDCP version: ", connector->name,
- connector->base.id);
- intel_hdcp_info(m, intel_connector);
-
- return 0;
-}
-DEFINE_SHOW_ATTRIBUTE(i915_hdcp_sink_capability);
-
-static int i915_dsc_fec_support_show(struct seq_file *m, void *data)
-{
- struct drm_connector *connector = m->private;
- struct drm_device *dev = connector->dev;
- struct drm_crtc *crtc;
- struct intel_dp *intel_dp;
- struct drm_modeset_acquire_ctx ctx;
- struct intel_crtc_state *crtc_state = NULL;
- int ret = 0;
- bool try_again = false;
-
- drm_modeset_acquire_init(&ctx, DRM_MODESET_ACQUIRE_INTERRUPTIBLE);
-
- do {
- try_again = false;
- ret = drm_modeset_lock(&dev->mode_config.connection_mutex,
- &ctx);
- if (ret) {
- if (ret == -EDEADLK && !drm_modeset_backoff(&ctx)) {
- try_again = true;
- continue;
- }
- break;
- }
- crtc = connector->state->crtc;
- if (connector->status != connector_status_connected || !crtc) {
- ret = -ENODEV;
- break;
- }
- ret = drm_modeset_lock(&crtc->mutex, &ctx);
- if (ret == -EDEADLK) {
- ret = drm_modeset_backoff(&ctx);
- if (!ret) {
- try_again = true;
- continue;
- }
- break;
- } else if (ret) {
- break;
- }
- intel_dp = enc_to_intel_dp(intel_attached_encoder(to_intel_connector(connector)));
- crtc_state = to_intel_crtc_state(crtc->state);
- seq_printf(m, "DSC_Enabled: %s\n",
- yesno(crtc_state->dsc.compression_enable));
- seq_printf(m, "DSC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_dsc(intel_dp->dsc_dpcd)));
- seq_printf(m, "Force_DSC_Enable: %s\n",
- yesno(intel_dp->force_dsc_en));
- if (!intel_dp_is_edp(intel_dp))
- seq_printf(m, "FEC_Sink_Support: %s\n",
- yesno(drm_dp_sink_supports_fec(intel_dp->fec_capable)));
- } while (try_again);
-
- drm_modeset_drop_locks(&ctx);
- drm_modeset_acquire_fini(&ctx);
-
- return ret;
-}
-
-static ssize_t i915_dsc_fec_support_write(struct file *file,
- const char __user *ubuf,
- size_t len, loff_t *offp)
-{
- bool dsc_enable = false;
- int ret;
- struct drm_connector *connector =
- ((struct seq_file *)file->private_data)->private;
- struct intel_encoder *encoder = intel_attached_encoder(to_intel_connector(connector));
- struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
-
- if (len == 0)
- return 0;
-
- DRM_DEBUG_DRIVER("Copied %zu bytes from user to force DSC\n",
- len);
-
- ret = kstrtobool_from_user(ubuf, len, &dsc_enable);
- if (ret < 0)
- return ret;
-
- DRM_DEBUG_DRIVER("Got %s for DSC Enable\n",
- (dsc_enable) ? "true" : "false");
- intel_dp->force_dsc_en = dsc_enable;
-
- *offp += len;
- return len;
-}
-
-static int i915_dsc_fec_support_open(struct inode *inode,
- struct file *file)
-{
- return single_open(file, i915_dsc_fec_support_show,
- inode->i_private);
-}
-
-static const struct file_operations i915_dsc_fec_support_fops = {
- .owner = THIS_MODULE,
- .open = i915_dsc_fec_support_open,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = single_release,
- .write = i915_dsc_fec_support_write
-};
-
-/**
- * i915_debugfs_connector_add - add i915 specific connector debugfs files
- * @connector: pointer to a registered drm_connector
- *
- * Cleanup will be done by drm_connector_unregister() through a call to
- * drm_debugfs_connector_remove().
- *
- * Returns 0 on success, negative error codes on error.
- */
-int i915_debugfs_connector_add(struct drm_connector *connector)
-{
- struct dentry *root = connector->debugfs_entry;
- struct drm_i915_private *dev_priv = to_i915(connector->dev);
-
-	/* The connector must have been registered beforehand. */
- if (!root)
- return -ENODEV;
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP)
- debugfs_create_file("i915_dpcd", S_IRUGO, root,
- connector, &i915_dpcd_fops);
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
- debugfs_create_file("i915_panel_timings", S_IRUGO, root,
- connector, &i915_panel_fops);
- debugfs_create_file("i915_psr_sink_status", S_IRUGO, root,
- connector, &i915_psr_sink_status_fops);
- }
-
- if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIA ||
- connector->connector_type == DRM_MODE_CONNECTOR_HDMIB) {
- debugfs_create_file("i915_hdcp_sink_capability", S_IRUGO, root,
- connector, &i915_hdcp_sink_capability_fops);
- }
-
- if (INTEL_GEN(dev_priv) >= 10 &&
- (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort ||
- connector->connector_type == DRM_MODE_CONNECTOR_eDP))
- debugfs_create_file("i915_dsc_fec_support", S_IRUGO, root,
- connector, &i915_dsc_fec_support_fops);
-
- return 0;
-}
diff --git a/drivers/gpu/drm/i915/i915_debugfs.h b/drivers/gpu/drm/i915/i915_debugfs.h
index c0cd22eb916d..6da39c76ab5e 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.h
+++ b/drivers/gpu/drm/i915/i915_debugfs.h
@@ -6,15 +6,17 @@
#ifndef __I915_DEBUGFS_H__
#define __I915_DEBUGFS_H__
-struct drm_i915_private;
struct drm_connector;
+struct drm_i915_gem_object;
+struct drm_i915_private;
+struct seq_file;
#ifdef CONFIG_DEBUG_FS
int i915_debugfs_register(struct drm_i915_private *dev_priv);
-int i915_debugfs_connector_add(struct drm_connector *connector);
+void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj);
#else
static inline int i915_debugfs_register(struct drm_i915_private *dev_priv) { return 0; }
-static inline int i915_debugfs_connector_add(struct drm_connector *connector) { return 0; }
+static inline void i915_debugfs_describe_obj(struct seq_file *m, struct drm_i915_gem_object *obj) {}
#endif
#endif /* __I915_DEBUGFS_H__ */
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c
new file mode 100644
index 000000000000..62b2c5f0495d
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include "i915_debugfs_params.h"
+#include "i915_drv.h"
+#include "i915_params.h"
+
+/* int param */
+static int i915_param_int_show(struct seq_file *m, void *data)
+{
+ int *value = m->private;
+
+ seq_printf(m, "%d\n", *value);
+
+ return 0;
+}
+
+static int i915_param_int_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_int_show, inode->i_private);
+}
+
+static ssize_t i915_param_int_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ int *value = m->private;
+ int ret;
+
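+	/* base 0 lets kstrtoint auto-detect decimal, hex ("0x...") or octal input */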
+ ret = kstrtoint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations i915_param_int_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_int_open,
+ .read = seq_read,
+ .write = i915_param_int_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_int_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_int_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* unsigned int param */
+static int i915_param_uint_show(struct seq_file *m, void *data)
+{
+ unsigned int *value = m->private;
+
+ seq_printf(m, "%u\n", *value);
+
+ return 0;
+}
+
+static int i915_param_uint_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_uint_show, inode->i_private);
+}
+
+static ssize_t i915_param_uint_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ unsigned int *value = m->private;
+ int ret;
+
+ ret = kstrtouint_from_user(ubuf, len, 0, value);
+ if (ret) {
+ /* support boolean values too */
+ bool b;
+
+ ret = kstrtobool_from_user(ubuf, len, &b);
+ if (!ret)
+ *value = b;
+ }
+
+ return ret ?: len;
+}
+
+static const struct file_operations i915_param_uint_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_uint_open,
+ .read = seq_read,
+ .write = i915_param_uint_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_uint_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_uint_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+/* char * param */
+static int i915_param_charp_show(struct seq_file *m, void *data)
+{
+ const char **s = m->private;
+
+ seq_printf(m, "%s\n", *s);
+
+ return 0;
+}
+
+static int i915_param_charp_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, i915_param_charp_show, inode->i_private);
+}
+
+static ssize_t i915_param_charp_write(struct file *file,
+ const char __user *ubuf, size_t len,
+ loff_t *offp)
+{
+ struct seq_file *m = file->private_data;
+ char **s = m->private;
+ char *new, *old;
+
+ /* FIXME: remove locking after params aren't the module params */
+ kernel_param_lock(THIS_MODULE);
+
+ old = *s;
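+	/* bound the copy at one page; longer strings fail with -EINVAL */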
+ new = strndup_user(ubuf, PAGE_SIZE);
+ if (IS_ERR(new)) {
+ len = PTR_ERR(new);
+ goto out;
+ }
+
+ *s = new;
+
+ kfree(old);
+out:
+ kernel_param_unlock(THIS_MODULE);
+
+ return len;
+}
+
+static const struct file_operations i915_param_charp_fops = {
+ .owner = THIS_MODULE,
+ .open = i915_param_charp_open,
+ .read = seq_read,
+ .write = i915_param_charp_write,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
+static const struct file_operations i915_param_charp_fops_ro = {
+ .owner = THIS_MODULE,
+ .open = i915_param_charp_open,
+ .read = seq_read,
+ .llseek = default_llseek,
+ .release = single_release,
+};
+
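+/* a mode with no write permission bits needs only the read-only fops */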
+#define RO(mode) (((mode) & 0222) == 0)
+
+static struct dentry *
+i915_debugfs_create_int(const char *name, umode_t mode,
+ struct dentry *parent, int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &i915_param_int_fops_ro :
+ &i915_param_int_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_uint(const char *name, umode_t mode,
+ struct dentry *parent, unsigned int *value)
+{
+ return debugfs_create_file_unsafe(name, mode, parent, value,
+ RO(mode) ? &i915_param_uint_fops_ro :
+ &i915_param_uint_fops);
+}
+
+static struct dentry *
+i915_debugfs_create_charp(const char *name, umode_t mode,
+ struct dentry *parent, char **value)
+{
+ return debugfs_create_file(name, mode, parent, value,
+ RO(mode) ? &i915_param_charp_fops_ro :
+ &i915_param_charp_fops);
+}
+
+static __always_inline void
+_i915_param_create_file(struct dentry *parent, const char *name,
+ const char *type, int mode, void *value)
+{
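+	/*
+	 * "type" is always a string literal here, so with __always_inline
+	 * the __builtin_strcmp() chain below folds at compile time and each
+	 * caller reduces to a single debugfs create call.
+	 */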
+ if (!mode)
+ return;
+
+ if (!__builtin_strcmp(type, "bool"))
+ debugfs_create_bool(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "int"))
+ i915_debugfs_create_int(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "unsigned int"))
+ i915_debugfs_create_uint(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "unsigned long"))
+ debugfs_create_ulong(name, mode, parent, value);
+ else if (!__builtin_strcmp(type, "char *"))
+ i915_debugfs_create_charp(name, mode, parent, value);
+ else
+ WARN(1, "no debugfs fops defined for param type %s (i915.%s)\n",
+ type, name);
+}
+
+/* add a subdirectory with files for each i915 param */
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915)
+{
+ struct drm_minor *minor = i915->drm.primary;
+ struct i915_params *params = &i915_modparams;
+ struct dentry *dir;
+
+ dir = debugfs_create_dir("i915_params", minor->debugfs_root);
+ if (IS_ERR(dir))
+ return dir;
+
+ /*
+ * Note: We could create files for params needing special handling
+ * here. Set mode in params to 0 to skip the generic create file, or
+ * just let the generic create file fail silently with -EEXIST.
+ */
+
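+	/* I915_PARAMS_FOR_EACH() is an x-macro expanding REGISTER once per param */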
+#define REGISTER(T, x, unused, mode, ...) _i915_param_create_file(dir, #x, #T, mode, &params->x);
+ I915_PARAMS_FOR_EACH(REGISTER);
+#undef REGISTER
+
+ return dir;
+}
diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.h b/drivers/gpu/drm/i915/i915_debugfs_params.h
new file mode 100644
index 000000000000..66567076546b
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_debugfs_params.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2019 Intel Corporation
+ */
+
+#ifndef __I915_DEBUGFS_PARAMS__
+#define __I915_DEBUGFS_PARAMS__
+
+struct dentry;
+struct drm_i915_private;
+
+struct dentry *i915_debugfs_params(struct drm_i915_private *i915);
+
+#endif /* __I915_DEBUGFS_PARAMS__ */
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index f7385abdd74b..82d9df15b22b 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -44,18 +44,20 @@
#include <drm/drm_ioctl.h>
#include <drm/drm_irq.h>
#include <drm/drm_probe_helper.h>
-#include <drm/i915_drm.h>
#include "display/intel_acpi.h"
#include "display/intel_audio.h"
#include "display/intel_bw.h"
#include "display/intel_cdclk.h"
+#include "display/intel_csr.h"
+#include "display/intel_display_debugfs.h"
#include "display/intel_display_types.h"
#include "display/intel_dp.h"
#include "display/intel_fbdev.h"
#include "display/intel_hotplug.h"
#include "display/intel_overlay.h"
#include "display/intel_pipe_crc.h"
+#include "display/intel_psr.h"
#include "display/intel_sprite.h"
#include "display/intel_vga.h"
@@ -68,6 +70,7 @@
#include "i915_debugfs.h"
#include "i915_drv.h"
+#include "i915_ioc32.h"
#include "i915_irq.h"
#include "i915_memcpy.h"
#include "i915_perf.h"
@@ -77,74 +80,14 @@
#include "i915_sysfs.h"
#include "i915_trace.h"
#include "i915_vgpu.h"
-#include "intel_csr.h"
+#include "intel_dram.h"
+#include "intel_gvt.h"
#include "intel_memory_region.h"
#include "intel_pm.h"
+#include "vlv_suspend.h"
static struct drm_driver driver;
-struct vlv_s0ix_state {
- /* GAM */
- u32 wr_watermark;
- u32 gfx_prio_ctrl;
- u32 arb_mode;
- u32 gfx_pend_tlb0;
- u32 gfx_pend_tlb1;
- u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
- u32 media_max_req_count;
- u32 gfx_max_req_count;
- u32 render_hwsp;
- u32 ecochk;
- u32 bsd_hwsp;
- u32 blt_hwsp;
- u32 tlb_rd_addr;
-
- /* MBC */
- u32 g3dctl;
- u32 gsckgctl;
- u32 mbctl;
-
- /* GCP */
- u32 ucgctl1;
- u32 ucgctl3;
- u32 rcgctl1;
- u32 rcgctl2;
- u32 rstctl;
- u32 misccpctl;
-
- /* GPM */
- u32 gfxpause;
- u32 rpdeuhwtc;
- u32 rpdeuc;
- u32 ecobus;
- u32 pwrdwnupctl;
- u32 rp_down_timeout;
- u32 rp_deucsw;
- u32 rcubmabdtmr;
- u32 rcedata;
- u32 spare2gh;
-
- /* Display 1 CZ domain */
- u32 gt_imr;
- u32 gt_ier;
- u32 pm_imr;
- u32 pm_ier;
- u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
-
- /* GT SA CZ domain */
- u32 tilectl;
- u32 gt_fifoctl;
- u32 gtlc_wake_ctrl;
- u32 gtlc_survive;
- u32 pmwgicz;
-
- /* Display 2 CZ domain */
- u32 gu_ctl0;
- u32 gu_ctl1;
- u32 pcbr;
- u32 clock_gate_dis2;
-};
-
static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
{
int domain = pci_domain_nr(dev_priv->drm.pdev->bus);
@@ -152,7 +95,7 @@ static int i915_get_bridge_dev(struct drm_i915_private *dev_priv)
dev_priv->bridge_dev =
pci_get_domain_bus_and_slot(domain, 0, PCI_DEVFN(0, 0));
if (!dev_priv->bridge_dev) {
- DRM_ERROR("bridge device not found\n");
+ drm_err(&dev_priv->drm, "bridge device not found\n");
return -1;
}
return 0;
@@ -189,7 +132,7 @@ intel_alloc_mchbar_resource(struct drm_i915_private *dev_priv)
0, pcibios_align_resource,
dev_priv->bridge_dev);
if (ret) {
- DRM_DEBUG_DRIVER("failed bus alloc: %d\n", ret);
+ drm_dbg(&dev_priv->drm, "failed bus alloc: %d\n", ret);
dev_priv->mch_res.start = 0;
return ret;
}
@@ -272,7 +215,8 @@ intel_teardown_mchbar(struct drm_i915_private *dev_priv)
release_resource(&dev_priv->mch_res);
}
-static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+/* part #1: call before irq install */
+static int i915_driver_modeset_probe_noirq(struct drm_i915_private *i915)
{
int ret;
@@ -292,25 +236,32 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
if (ret)
goto out;
- intel_register_dsm_handler();
+ intel_power_domains_init_hw(i915, false);
+
+ intel_csr_ucode_init(i915);
- ret = i915_switcheroo_register(i915);
+ ret = intel_modeset_init_noirq(i915);
if (ret)
goto cleanup_vga_client;
- intel_power_domains_init_hw(i915, false);
+ return 0;
- intel_csr_ucode_init(i915);
+cleanup_vga_client:
+ intel_vga_unregister(i915);
+out:
+ return ret;
+}
- ret = intel_irq_install(i915);
- if (ret)
- goto cleanup_csr;
+/* part #2: call after irq install */
+static int i915_driver_modeset_probe(struct drm_i915_private *i915)
+{
+ int ret;
/* Important: The output setup functions called by modeset_init need
* working irqs for e.g. gmbus and dp aux transfers. */
ret = intel_modeset_init(i915);
if (ret)
- goto cleanup_irq;
+ goto out;
ret = i915_gem_init(i915);
if (ret)
@@ -330,6 +281,8 @@ static int i915_driver_modeset_probe(struct drm_i915_private *i915)
intel_init_ipc(i915);
+ intel_psr_set_force_mode_changed(i915->psr.dp);
+
return 0;
cleanup_gem:
@@ -337,29 +290,27 @@ cleanup_gem:
i915_gem_driver_remove(i915);
i915_gem_driver_release(i915);
cleanup_modeset:
+ /* FIXME */
intel_modeset_driver_remove(i915);
-cleanup_irq:
intel_irq_uninstall(i915);
-cleanup_csr:
- intel_csr_ucode_fini(i915);
- intel_power_domains_driver_remove(i915);
- i915_switcheroo_unregister(i915);
-cleanup_vga_client:
- intel_vga_unregister(i915);
+ intel_modeset_driver_remove_noirq(i915);
out:
return ret;
}
+/* part #1: call before irq uninstall */
static void i915_driver_modeset_remove(struct drm_i915_private *i915)
{
intel_modeset_driver_remove(i915);
+}
- intel_irq_uninstall(i915);
+/* part #2: call after irq uninstall */
+static void i915_driver_modeset_remove_noirq(struct drm_i915_private *i915)
+{
+ intel_modeset_driver_remove_noirq(i915);
intel_bios_driver_remove(i915);
- i915_switcheroo_unregister(i915);
-
intel_vga_unregister(i915);
intel_csr_ucode_fini(i915);
@@ -409,7 +360,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
out_free_wq:
destroy_workqueue(dev_priv->wq);
out_err:
- DRM_ERROR("Failed to allocate workqueues.\n");
+ drm_err(&dev_priv->drm, "Failed to allocate workqueues.\n");
return -ENOMEM;
}
@@ -438,37 +389,15 @@ static void intel_detect_preproduction_hw(struct drm_i915_private *dev_priv)
pre |= IS_SKL_REVID(dev_priv, 0, SKL_REVID_F0);
pre |= IS_BXT_REVID(dev_priv, 0, BXT_REVID_B_LAST);
pre |= IS_KBL_REVID(dev_priv, 0, KBL_REVID_A0);
+ pre |= IS_GLK_REVID(dev_priv, 0, GLK_REVID_A2);
if (pre) {
- DRM_ERROR("This is a pre-production stepping. "
+ drm_err(&dev_priv->drm, "This is a pre-production stepping. "
"It may not be fully functional.\n");
add_taint(TAINT_MACHINE_CHECK, LOCKDEP_STILL_OK);
}
}
-static int vlv_alloc_s0ix_state(struct drm_i915_private *i915)
-{
- if (!IS_VALLEYVIEW(i915))
- return 0;
-
- /* we write all the values in the struct, so no need to zero it out */
- i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
- GFP_KERNEL);
- if (!i915->vlv_s0ix_state)
- return -ENOMEM;
-
- return 0;
-}
-
-static void vlv_free_s0ix_state(struct drm_i915_private *i915)
-{
- if (!i915->vlv_s0ix_state)
- return;
-
- kfree(i915->vlv_s0ix_state);
- i915->vlv_s0ix_state = NULL;
-}
-
static void sanitize_gpu(struct drm_i915_private *i915)
{
if (!INTEL_INFO(i915)->gpu_reset_clobbers_display)
@@ -517,7 +446,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
if (ret < 0)
return ret;
- ret = vlv_alloc_s0ix_state(dev_priv);
+ ret = vlv_suspend_init(dev_priv);
if (ret < 0)
goto err_workqueues;
@@ -539,7 +468,6 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
intel_init_display_hooks(dev_priv);
intel_init_clock_gating_hooks(dev_priv);
intel_init_audio_hooks(dev_priv);
- intel_display_crc_init(dev_priv);
intel_detect_preproduction_hw(dev_priv);
@@ -548,7 +476,7 @@ static int i915_driver_early_probe(struct drm_i915_private *dev_priv)
err_gem:
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
- vlv_free_s0ix_state(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
err_workqueues:
i915_workqueues_cleanup(dev_priv);
return ret;
@@ -565,7 +493,7 @@ static void i915_driver_late_release(struct drm_i915_private *dev_priv)
intel_power_domains_cleanup(dev_priv);
i915_gem_cleanup_early(dev_priv);
intel_gt_driver_late_release(&dev_priv->gt);
- vlv_free_s0ix_state(dev_priv);
+ vlv_suspend_cleanup(dev_priv);
i915_workqueues_cleanup(dev_priv);
pm_qos_remove_request(&dev_priv->sb_qos);
@@ -638,487 +566,6 @@ static void intel_sanitize_options(struct drm_i915_private *dev_priv)
intel_gvt_sanitize_options(dev_priv);
}
-#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
-
-static const char *intel_dram_type_str(enum intel_dram_type type)
-{
- static const char * const str[] = {
- DRAM_TYPE_STR(UNKNOWN),
- DRAM_TYPE_STR(DDR3),
- DRAM_TYPE_STR(DDR4),
- DRAM_TYPE_STR(LPDDR3),
- DRAM_TYPE_STR(LPDDR4),
- };
-
- if (type >= ARRAY_SIZE(str))
- type = INTEL_DRAM_UNKNOWN;
-
- return str[type];
-}
-
-#undef DRAM_TYPE_STR
-
-static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
-{
- return dimm->ranks * 64 / (dimm->width ?: 1);
-}
-
-/* Returns total GB for the whole DIMM */
-static int skl_get_dimm_size(u16 val)
-{
- return val & SKL_DRAM_SIZE_MASK;
-}
-
-static int skl_get_dimm_width(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & SKL_DRAM_WIDTH_MASK) {
- case SKL_DRAM_WIDTH_X8:
- case SKL_DRAM_WIDTH_X16:
- case SKL_DRAM_WIDTH_X32:
- val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int skl_get_dimm_ranks(u16 val)
-{
- if (skl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-/* Returns total GB for the whole DIMM */
-static int cnl_get_dimm_size(u16 val)
-{
- return (val & CNL_DRAM_SIZE_MASK) / 2;
-}
-
-static int cnl_get_dimm_width(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- switch (val & CNL_DRAM_WIDTH_MASK) {
- case CNL_DRAM_WIDTH_X8:
- case CNL_DRAM_WIDTH_X16:
- case CNL_DRAM_WIDTH_X32:
- val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
- return 8 << val;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int cnl_get_dimm_ranks(u16 val)
-{
- if (cnl_get_dimm_size(val) == 0)
- return 0;
-
- val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
-
- return val + 1;
-}
-
-static bool
-skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
-{
- /* Convert total GB to Gb per DRAM device */
- return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
-}
-
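A quick worked example of the density arithmetic above: each rank forms a 64-bit channel out of x`width' devices, so intel_dimm_num_devices() is ranks * 64 / width, and skl_is_16gb_dimm() converts total DIMM GB to Gb per device as 8 * size / devices. In plain C, with illustrative values:

#include <assert.h>

struct dimm { unsigned int size, width, ranks; }; /* size in total GB */

/* Each rank is a 64-bit channel built from x`width' devices. */
static unsigned int num_devices(const struct dimm *d)
{
	return d->ranks * 64 / (d->width ? d->width : 1);
}

/* Convert total GB to Gb per DRAM device, as skl_is_16gb_dimm() does. */
static int is_16gb(const struct dimm *d)
{
	unsigned int n = num_devices(d);

	return 8 * d->size / (n ? n : 1) == 16;
}

int main(void)
{
	struct dimm a = { .size = 32, .width = 8, .ranks = 2 }; /* 16 devices */
	struct dimm b = { .size = 16, .width = 8, .ranks = 2 }; /* 16 devices */

	assert(is_16gb(&a));	/* 8 * 32 / 16 == 16 Gb parts */
	assert(!is_16gb(&b));	/* 8 * 16 / 16 ==  8 Gb parts */
	return 0;
}

bxt_get_dimm_info() further below applies the inverse conversion, multiplying Gb per device by the device count and dividing by 8 to get back to total GB.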
-static void
-skl_dram_get_dimm_info(struct drm_i915_private *dev_priv,
- struct dram_dimm_info *dimm,
- int channel, char dimm_name, u16 val)
-{
- if (INTEL_GEN(dev_priv) >= 10) {
- dimm->size = cnl_get_dimm_size(val);
- dimm->width = cnl_get_dimm_width(val);
- dimm->ranks = cnl_get_dimm_ranks(val);
- } else {
- dimm->size = skl_get_dimm_size(val);
- dimm->width = skl_get_dimm_width(val);
- dimm->ranks = skl_get_dimm_ranks(val);
- }
-
- DRM_DEBUG_KMS("CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
- channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
- yesno(skl_is_16gb_dimm(dimm)));
-}
-
-static int
-skl_dram_get_channel_info(struct drm_i915_private *dev_priv,
- struct dram_channel_info *ch,
- int channel, u32 val)
-{
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_l,
- channel, 'L', val & 0xffff);
- skl_dram_get_dimm_info(dev_priv, &ch->dimm_s,
- channel, 'S', val >> 16);
-
- if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
- DRM_DEBUG_KMS("CH%u not populated\n", channel);
- return -EINVAL;
- }
-
- if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
- ch->ranks = 2;
- else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
- ch->ranks = 2;
- else
- ch->ranks = 1;
-
- ch->is_16gb_dimm =
- skl_is_16gb_dimm(&ch->dimm_l) ||
- skl_is_16gb_dimm(&ch->dimm_s);
-
- DRM_DEBUG_KMS("CH%u ranks: %u, 16Gb DIMMs: %s\n",
- channel, ch->ranks, yesno(ch->is_16gb_dimm));
-
- return 0;
-}
-
-static bool
-intel_is_dram_symmetric(const struct dram_channel_info *ch0,
- const struct dram_channel_info *ch1)
-{
- return !memcmp(ch0, ch1, sizeof(*ch0)) &&
- (ch0->dimm_s.size == 0 ||
- !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
-}
-
-static int
-skl_dram_get_channels_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- struct dram_channel_info ch0 = {}, ch1 = {};
- u32 val;
- int ret;
-
- val = I915_READ(SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch0, 0, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- val = I915_READ(SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
- ret = skl_dram_get_channel_info(dev_priv, &ch1, 1, val);
- if (ret == 0)
- dram_info->num_channels++;
-
- if (dram_info->num_channels == 0) {
- DRM_INFO("Number of memory channels is zero\n");
- return -EINVAL;
- }
-
- /*
- * If any of the channels is a single-rank channel, the worst-case
- * output is the same as for single-rank memory, so treat the whole
- * configuration as single rank.
- */
- if (ch0.ranks == 1 || ch1.ranks == 1)
- dram_info->ranks = 1;
- else
- dram_info->ranks = max(ch0.ranks, ch1.ranks);
-
- if (dram_info->ranks == 0) {
- DRM_INFO("couldn't get memory rank information\n");
- return -EINVAL;
- }
-
- dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
-
- dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
-
- DRM_DEBUG_KMS("Memory configuration is symmetric? %s\n",
- yesno(dram_info->symmetric_memory));
- return 0;
-}
-
-static enum intel_dram_type
-skl_get_dram_type(struct drm_i915_private *dev_priv)
-{
- u32 val;
-
- val = I915_READ(SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
-
- switch (val & SKL_DRAM_DDR_TYPE_MASK) {
- case SKL_DRAM_DDR_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case SKL_DRAM_DDR_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case SKL_DRAM_DDR_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case SKL_DRAM_DDR_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static int
-skl_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 mem_freq_khz, val;
- int ret;
-
- dram_info->type = skl_get_dram_type(dev_priv);
- DRM_DEBUG_KMS("DRAM type: %s\n", intel_dram_type_str(dram_info->type));
-
- ret = skl_dram_get_channels_info(dev_priv);
- if (ret)
- return ret;
-
- val = I915_READ(SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
- mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
- SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_info->bandwidth_kbps = dram_info->num_channels *
- mem_freq_khz * 8;
-
- if (dram_info->bandwidth_kbps == 0) {
- DRM_INFO("Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-/* Returns Gb per DRAM device */
-static int bxt_get_dimm_size(u32 val)
-{
- switch (val & BXT_DRAM_SIZE_MASK) {
- case BXT_DRAM_SIZE_4GBIT:
- return 4;
- case BXT_DRAM_SIZE_6GBIT:
- return 6;
- case BXT_DRAM_SIZE_8GBIT:
- return 8;
- case BXT_DRAM_SIZE_12GBIT:
- return 12;
- case BXT_DRAM_SIZE_16GBIT:
- return 16;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static int bxt_get_dimm_width(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
-
- return 8 << val;
-}
-
-static int bxt_get_dimm_ranks(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return 0;
-
- switch (val & BXT_DRAM_RANK_MASK) {
- case BXT_DRAM_RANK_SINGLE:
- return 1;
- case BXT_DRAM_RANK_DUAL:
- return 2;
- default:
- MISSING_CASE(val);
- return 0;
- }
-}
-
-static enum intel_dram_type bxt_get_dimm_type(u32 val)
-{
- if (!bxt_get_dimm_size(val))
- return INTEL_DRAM_UNKNOWN;
-
- switch (val & BXT_DRAM_TYPE_MASK) {
- case BXT_DRAM_TYPE_DDR3:
- return INTEL_DRAM_DDR3;
- case BXT_DRAM_TYPE_LPDDR3:
- return INTEL_DRAM_LPDDR3;
- case BXT_DRAM_TYPE_DDR4:
- return INTEL_DRAM_DDR4;
- case BXT_DRAM_TYPE_LPDDR4:
- return INTEL_DRAM_LPDDR4;
- default:
- MISSING_CASE(val);
- return INTEL_DRAM_UNKNOWN;
- }
-}
-
-static void bxt_get_dimm_info(struct dram_dimm_info *dimm,
- u32 val)
-{
- dimm->width = bxt_get_dimm_width(val);
- dimm->ranks = bxt_get_dimm_ranks(val);
-
- /*
- * Size in register is Gb per DRAM device. Convert to total
- * GB to match the way we report this for non-LP platforms.
- */
- dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
-}
-
-static int
-bxt_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- u32 dram_channels;
- u32 mem_freq_khz, val;
- u8 num_active_channels;
- int i;
-
- val = I915_READ(BXT_P_CR_MC_BIOS_REQ_0_0_0);
- mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
- BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
-
- dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
- num_active_channels = hweight32(dram_channels);
-
- /* Each active bit represents 4-byte channel */
- dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
-
- if (dram_info->bandwidth_kbps == 0) {
- DRM_INFO("Couldn't get system memory bandwidth\n");
- return -EINVAL;
- }
-
- /*
- * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
- */
- for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
- struct dram_dimm_info dimm;
- enum intel_dram_type type;
-
- val = I915_READ(BXT_D_CR_DRP0_DUNIT(i));
- if (val == 0xFFFFFFFF)
- continue;
-
- dram_info->num_channels++;
-
- bxt_get_dimm_info(&dimm, val);
- type = bxt_get_dimm_type(val);
-
- WARN_ON(type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != INTEL_DRAM_UNKNOWN &&
- dram_info->type != type);
-
- DRM_DEBUG_KMS("CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
- i - BXT_D_CR_DRP0_DUNIT_START,
- dimm.size, dimm.width, dimm.ranks,
- intel_dram_type_str(type));
-
- /*
- * If any of the channels is a single-rank channel,
- * the worst-case output is the same as for
- * single-rank memory, so treat it as single rank.
- */
- if (dram_info->ranks == 0)
- dram_info->ranks = dimm.ranks;
- else if (dimm.ranks == 1)
- dram_info->ranks = 1;
-
- if (type != INTEL_DRAM_UNKNOWN)
- dram_info->type = type;
- }
-
- if (dram_info->type == INTEL_DRAM_UNKNOWN ||
- dram_info->ranks == 0) {
- DRM_INFO("couldn't get memory information\n");
- return -EINVAL;
- }
-
- dram_info->valid = true;
- return 0;
-}
-
-static void
-intel_get_dram_info(struct drm_i915_private *dev_priv)
-{
- struct dram_info *dram_info = &dev_priv->dram_info;
- int ret;
-
- /*
- * Assume 16Gb DIMMs are present until proven otherwise.
- * This is only used for the level 0 watermark latency
- * w/a which does not apply to bxt/glk.
- */
- dram_info->is_16gb_dimm = !IS_GEN9_LP(dev_priv);
-
- if (INTEL_GEN(dev_priv) < 9 || !HAS_DISPLAY(dev_priv))
- return;
-
- if (IS_GEN9_LP(dev_priv))
- ret = bxt_get_dram_info(dev_priv);
- else
- ret = skl_get_dram_info(dev_priv);
- if (ret)
- return;
-
- DRM_DEBUG_KMS("DRAM bandwidth: %u kBps, channels: %u\n",
- dram_info->bandwidth_kbps,
- dram_info->num_channels);
-
- DRM_DEBUG_KMS("DRAM ranks: %u, 16Gb DIMMs: %s\n",
- dram_info->ranks, yesno(dram_info->is_16gb_dimm));
-}
-
-static u32 gen9_edram_size_mb(struct drm_i915_private *dev_priv, u32 cap)
-{
- static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
- static const u8 sets[4] = { 1, 1, 2, 2 };
-
- return EDRAM_NUM_BANKS(cap) *
- ways[EDRAM_WAYS_IDX(cap)] *
- sets[EDRAM_SETS_IDX(cap)];
-}
-
-static void edram_detect(struct drm_i915_private *dev_priv)
-{
- u32 edram_cap = 0;
-
- if (!(IS_HASWELL(dev_priv) ||
- IS_BROADWELL(dev_priv) ||
- INTEL_GEN(dev_priv) >= 9))
- return;
-
- edram_cap = __raw_uncore_read32(&dev_priv->uncore, HSW_EDRAM_CAP);
-
- /* NB: We can't write IDICR yet because we don't have gt funcs set up */
-
- if (!(edram_cap & EDRAM_ENABLED))
- return;
-
- /*
- * The needed capability bits for size calculation are not there with
- * pre gen9 so return 128MB always.
- */
- if (INTEL_GEN(dev_priv) < 9)
- dev_priv->edram_size_mb = 128;
- else
- dev_priv->edram_size_mb =
- gen9_edram_size_mb(dev_priv, edram_cap);
-
- dev_info(dev_priv->drm.dev,
- "Found %uMB of eDRAM\n", dev_priv->edram_size_mb);
-}
-
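The gen9 branch of the removed edram_detect() computes the eDRAM size as a product of three small fields decoded from HSW_EDRAM_CAP: a bank count, a ways index into { 4, 8, 12, 16, ... } and a sets index into { 1, 1, 2, 2 }, yielding the size directly in MB. A self-contained sketch; the bit positions below are placeholders, the real field layout is defined in i915_reg.h:

#include <stdio.h>

/* Placeholder field extractors; not the actual HSW_EDRAM_CAP layout. */
#define EDRAM_BANKS(cap)	(((cap) >> 1) & 0xf)
#define EDRAM_WAYS_IDX(cap)	(((cap) >> 5) & 0x7)
#define EDRAM_SETS_IDX(cap)	(((cap) >> 8) & 0x3)

static unsigned int gen9_edram_size_mb(unsigned int cap)
{
	static const unsigned char ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned char sets[4] = { 1, 1, 2, 2 };

	return EDRAM_BANKS(cap) *
	       ways[EDRAM_WAYS_IDX(cap)] *
	       sets[EDRAM_SETS_IDX(cap)];
}

int main(void)
{
	/* 4 banks, ways index 3 (16 ways), sets index 0 (1 set) -> 64 MB */
	unsigned int cap = (4u << 1) | (3u << 5) | (0u << 8);

	printf("%u MB of eDRAM\n", gen9_edram_size_mb(cap));
	return 0;
}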
/**
* i915_driver_hw_probe - setup state requiring device access
* @dev_priv: device private
@@ -1162,7 +609,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
intel_sanitize_options(dev_priv);
/* needs to be done before ggtt probe */
- edram_detect(dev_priv);
+ intel_dram_edram_detect(dev_priv);
i915_perf_init(dev_priv);
@@ -1186,7 +633,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = i915_ggtt_enable_hw(dev_priv);
if (ret) {
- DRM_ERROR("failed to enable GGTT\n");
+ drm_err(&dev_priv->drm, "failed to enable GGTT\n");
goto err_mem_regions;
}
@@ -1202,7 +649,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
if (IS_GEN(dev_priv, 2)) {
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(30));
if (ret) {
- DRM_ERROR("failed to set DMA mask\n");
+ drm_err(&dev_priv->drm, "failed to set DMA mask\n");
goto err_mem_regions;
}
@@ -1220,7 +667,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
- DRM_ERROR("failed to set DMA mask\n");
+ drm_err(&dev_priv->drm, "failed to set DMA mask\n");
goto err_mem_regions;
}
@@ -1252,7 +699,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
*/
if (INTEL_GEN(dev_priv) >= 5) {
if (pci_enable_msi(pdev) < 0)
- DRM_DEBUG_DRIVER("can't enable MSI");
+ drm_dbg(&dev_priv->drm, "can't enable MSI");
}
ret = intel_gvt_init(dev_priv);
@@ -1264,7 +711,7 @@ static int i915_driver_hw_probe(struct drm_i915_private *dev_priv)
* Fill the dram structure to get the system raw bandwidth and
* dram info. This will be used for memory latency calculation.
*/
- intel_get_dram_info(dev_priv);
+ intel_dram_detect(dev_priv);
intel_bw_init_hw(dev_priv);
@@ -1313,22 +760,19 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
i915_gem_driver_register(dev_priv);
i915_pmu_register(dev_priv);
- /*
- * Notify a valid surface after modesetting,
- * when running inside a VM.
- */
- if (intel_vgpu_active(dev_priv))
- I915_WRITE(vgtif_reg(display_ready), VGT_DRV_DISPLAY_READY);
+ intel_vgpu_register(dev_priv);
/* Reveal our presence to userspace */
if (drm_dev_register(dev, 0) == 0) {
i915_debugfs_register(dev_priv);
+ intel_display_debugfs_register(dev_priv);
i915_setup_sysfs(dev_priv);
/* Depends on sysfs having been initialized */
i915_perf_register(dev_priv);
} else
- DRM_ERROR("Failed to register driver for userspace access!\n");
+ drm_err(&dev_priv->drm,
+ "Failed to register driver for userspace access!\n");
if (HAS_DISPLAY(dev_priv) && INTEL_DISPLAY_ENABLED(dev_priv)) {
/* Must be done after probing outputs */
@@ -1358,6 +802,11 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
intel_power_domains_enable(dev_priv);
intel_runtime_pm_enable(&dev_priv->runtime_pm);
+
+ intel_register_dsm_handler();
+
+ if (i915_switcheroo_register(dev_priv))
+ drm_err(&dev_priv->drm, "Failed to register vga switcheroo!\n");
}
/**
@@ -1366,6 +815,10 @@ static void i915_driver_register(struct drm_i915_private *dev_priv)
*/
static void i915_driver_unregister(struct drm_i915_private *dev_priv)
{
+ i915_switcheroo_unregister(dev_priv);
+
+ intel_unregister_dsm_handler();
+
intel_runtime_pm_disable(&dev_priv->runtime_pm);
intel_power_domains_disable(dev_priv);
@@ -1410,11 +863,12 @@ static void i915_welcome_messages(struct drm_i915_private *dev_priv)
}
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG))
- DRM_INFO("DRM_I915_DEBUG enabled\n");
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
- DRM_INFO("DRM_I915_DEBUG_GEM enabled\n");
+ drm_info(&dev_priv->drm, "DRM_I915_DEBUG_GEM enabled\n");
if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM))
- DRM_INFO("DRM_I915_DEBUG_RUNTIME_PM enabled\n");
+ drm_info(&dev_priv->drm,
+ "DRM_I915_DEBUG_RUNTIME_PM enabled\n");
}
static struct drm_i915_private *
@@ -1436,8 +890,6 @@ i915_driver_create(struct pci_dev *pdev, const struct pci_device_id *ent)
return ERR_PTR(err);
}
- i915->drm.dev_private = i915;
-
i915->drm.pdev = pdev;
pci_set_drvdata(pdev, i915);
@@ -1477,16 +929,16 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
const struct intel_device_info *match_info =
(struct intel_device_info *)ent->driver_data;
- struct drm_i915_private *dev_priv;
+ struct drm_i915_private *i915;
int ret;
- dev_priv = i915_driver_create(pdev, ent);
- if (IS_ERR(dev_priv))
- return PTR_ERR(dev_priv);
+ i915 = i915_driver_create(pdev, ent);
+ if (IS_ERR(i915))
+ return PTR_ERR(i915);
/* Disable nuclear pageflip by default on pre-ILK */
if (!i915_modparams.nuclear_pageflip && match_info->gen < 5)
- dev_priv->drm.driver_features &= ~DRIVER_ATOMIC;
+ i915->drm.driver_features &= ~DRIVER_ATOMIC;
/*
* Check if we support fake LMEM -- for now we only unleash this for
@@ -1494,13 +946,13 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
*/
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
if (IS_ENABLED(CONFIG_DRM_I915_UNSTABLE_FAKE_LMEM)) {
- if (INTEL_GEN(dev_priv) >= 9 && i915_selftest.live < 0 &&
+ if (INTEL_GEN(i915) >= 9 && i915_selftest.live < 0 &&
i915_modparams.fake_lmem_start) {
- mkwrite_device_info(dev_priv)->memory_regions =
+ mkwrite_device_info(i915)->memory_regions =
REGION_SMEM | REGION_LMEM | REGION_STOLEN;
- mkwrite_device_info(dev_priv)->is_dgfx = true;
- GEM_BUG_ON(!HAS_LMEM(dev_priv));
- GEM_BUG_ON(!IS_DGFX(dev_priv));
+ mkwrite_device_info(i915)->is_dgfx = true;
+ GEM_BUG_ON(!HAS_LMEM(i915));
+ GEM_BUG_ON(!IS_DGFX(i915));
}
}
#endif
@@ -1509,48 +961,60 @@ int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (ret)
goto out_fini;
- ret = i915_driver_early_probe(dev_priv);
+ ret = i915_driver_early_probe(i915);
if (ret < 0)
goto out_pci_disable;
- disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ disable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_detect_vgpu(dev_priv);
+ intel_vgpu_detect(i915);
- ret = i915_driver_mmio_probe(dev_priv);
+ ret = i915_driver_mmio_probe(i915);
if (ret < 0)
goto out_runtime_pm_put;
- ret = i915_driver_hw_probe(dev_priv);
+ ret = i915_driver_hw_probe(i915);
if (ret < 0)
goto out_cleanup_mmio;
- ret = i915_driver_modeset_probe(dev_priv);
+ ret = i915_driver_modeset_probe_noirq(i915);
if (ret < 0)
goto out_cleanup_hw;
- i915_driver_register(dev_priv);
+ ret = intel_irq_install(i915);
+ if (ret)
+ goto out_cleanup_modeset;
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
+ ret = i915_driver_modeset_probe(i915);
+ if (ret < 0)
+ goto out_cleanup_irq;
+
+ i915_driver_register(i915);
+
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
- i915_welcome_messages(dev_priv);
+ i915_welcome_messages(i915);
return 0;
+out_cleanup_irq:
+ intel_irq_uninstall(i915);
+out_cleanup_modeset:
+ /* FIXME */
out_cleanup_hw:
- i915_driver_hw_remove(dev_priv);
- intel_memory_regions_driver_release(dev_priv);
- i915_ggtt_driver_release(dev_priv);
+ i915_driver_hw_remove(i915);
+ intel_memory_regions_driver_release(i915);
+ i915_ggtt_driver_release(i915);
out_cleanup_mmio:
- i915_driver_mmio_release(dev_priv);
+ i915_driver_mmio_release(i915);
out_runtime_pm_put:
- enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- i915_driver_late_release(dev_priv);
+ enable_rpm_wakeref_asserts(&i915->runtime_pm);
+ i915_driver_late_release(i915);
out_pci_disable:
pci_disable_device(pdev);
out_fini:
- i915_probe_error(dev_priv, "Device initialization failed (%d)\n", ret);
- i915_driver_destroy(dev_priv);
+ i915_probe_error(i915, "Device initialization failed (%d)\n", ret);
+ i915_driver_destroy(i915);
return ret;
}
@@ -1560,13 +1024,6 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_unregister(i915);
- /*
- * After unregistering the device to prevent any new users, cancel
- * all in-flight requests so that we can quickly unbind the active
- * resources.
- */
- intel_gt_set_wedged(&i915->gt);
-
/* Flush any external code that still may be under the RCU lock */
synchronize_rcu();
@@ -1578,6 +1035,10 @@ void i915_driver_remove(struct drm_i915_private *i915)
i915_driver_modeset_remove(i915);
+ intel_irq_uninstall(i915);
+
+ i915_driver_modeset_remove_noirq(i915);
+
i915_reset_error_state(i915);
i915_gem_driver_remove(i915);
@@ -1664,10 +1125,6 @@ static void intel_suspend_encoders(struct drm_i915_private *dev_priv)
drm_modeset_unlock_all(dev);
}
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume);
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv);
-
static bool suspend_to_idle(struct drm_i915_private *dev_priv)
{
#if IS_ENABLED(CONFIG_ACPI_SLEEP)
@@ -1719,7 +1176,7 @@ static int i915_drm_suspend(struct drm_device *dev)
intel_suspend_hw(dev_priv);
- i915_gem_suspend_gtt_mappings(dev_priv);
+ i915_ggtt_suspend(&dev_priv->ggtt);
i915_save_state(dev_priv);
@@ -1754,7 +1211,7 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
struct drm_i915_private *dev_priv = to_i915(dev);
struct pci_dev *pdev = dev_priv->drm.pdev;
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
disable_rpm_wakeref_asserts(rpm);
@@ -1767,11 +1224,9 @@ static int i915_drm_suspend_late(struct drm_device *dev, bool hibernation)
intel_display_power_suspend_late(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
-
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
- DRM_ERROR("Suspend complete failed: %d\n", ret);
+ drm_err(&dev_priv->drm, "Suspend complete failed: %d\n", ret);
intel_power_domains_resume(dev_priv);
goto out;
@@ -1805,8 +1260,8 @@ int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state)
{
int error;
- if (WARN_ON_ONCE(state.event != PM_EVENT_SUSPEND &&
- state.event != PM_EVENT_FREEZE))
+ if (drm_WARN_ON_ONCE(&i915->drm, state.event != PM_EVENT_SUSPEND &&
+ state.event != PM_EVENT_FREEZE))
return -EINVAL;
if (i915->drm.switch_power_state == DRM_SWITCH_POWER_OFF)
@@ -1830,9 +1285,9 @@ static int i915_drm_resume(struct drm_device *dev)
ret = i915_ggtt_enable_hw(dev_priv);
if (ret)
- DRM_ERROR("failed to re-enable GGTT\n");
+ drm_err(&dev_priv->drm, "failed to re-enable GGTT\n");
- i915_gem_restore_gtt_mappings(dev_priv);
+ i915_ggtt_resume(&dev_priv->ggtt);
i915_gem_restore_fences(&dev_priv->ggtt);
intel_csr_ucode_resume(dev_priv);
@@ -1919,7 +1374,8 @@ static int i915_drm_resume_early(struct drm_device *dev)
*/
ret = pci_set_power_state(pdev, PCI_D0);
if (ret) {
- DRM_ERROR("failed to set PCI D0 power state (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "failed to set PCI D0 power state (%d)\n", ret);
return ret;
}
@@ -1943,11 +1399,10 @@ static int i915_drm_resume_early(struct drm_device *dev)
disable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_resume_prepare(dev_priv, false);
+ ret = vlv_resume_prepare(dev_priv, false);
if (ret)
- DRM_ERROR("Resume prepare failed: %d, continuing anyway\n",
- ret);
+ drm_err(&dev_priv->drm,
+ "Resume prepare failed: %d, continuing anyway\n", ret);
intel_uncore_resume_early(&dev_priv->uncore);
@@ -2114,391 +1569,16 @@ static int i915_pm_restore(struct device *kdev)
return i915_pm_resume(kdev);
}
-/*
- * Save all Gunit registers that may be lost after a D3 and a subsequent
- * S0i[R123] transition. The list of registers needing a save/restore is
- * defined in the VLV2_S0IXRegs document. This document marks all Gunit
- * registers in the following way:
- * - Driver: saved/restored by the driver
- * - Punit : saved/restored by the Punit firmware
- * - No, w/o marking: no need to save/restore, since the register is R/O or
- * used internally by the HW in a way that doesn't depend on
- * keeping the content across a suspend/resume.
- * - Debug : used for debugging
- *
- * We save/restore all registers marked with 'Driver', with the following
- * exceptions:
- * - Registers out of use, including also registers marked with 'Debug'.
- * These have no effect on the driver's operation, so we don't save/restore
- * them to reduce the overhead.
- * - Registers that are fully setup by an initialization function called from
- * the resume path. For example many clock gating and RPS/RC6 registers.
- * - Registers that provide the right functionality with their reset defaults.
- *
- * TODO: Except for registers that based on the above 3 criteria can be safely
- * ignored, we save/restore all others, practically treating the HW context as
- * a black-box for the driver. Further investigation is needed to reduce the
- * saved/restored registers even further, by following the same 3 criteria.
- */
-static void vlv_save_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
- int i;
-
- if (!s)
- return;
-
- /* GAM 0x4000-0x4770 */
- s->wr_watermark = I915_READ(GEN7_WR_WATERMARK);
- s->gfx_prio_ctrl = I915_READ(GEN7_GFX_PRIO_CTRL);
- s->arb_mode = I915_READ(ARB_MODE);
- s->gfx_pend_tlb0 = I915_READ(GEN7_GFX_PEND_TLB0);
- s->gfx_pend_tlb1 = I915_READ(GEN7_GFX_PEND_TLB1);
-
- for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
- s->lra_limits[i] = I915_READ(GEN7_LRA_LIMITS(i));
-
- s->media_max_req_count = I915_READ(GEN7_MEDIA_MAX_REQ_COUNT);
- s->gfx_max_req_count = I915_READ(GEN7_GFX_MAX_REQ_COUNT);
-
- s->render_hwsp = I915_READ(RENDER_HWS_PGA_GEN7);
- s->ecochk = I915_READ(GAM_ECOCHK);
- s->bsd_hwsp = I915_READ(BSD_HWS_PGA_GEN7);
- s->blt_hwsp = I915_READ(BLT_HWS_PGA_GEN7);
-
- s->tlb_rd_addr = I915_READ(GEN7_TLB_RD_ADDR);
-
- /* MBC 0x9024-0x91D0, 0x8500 */
- s->g3dctl = I915_READ(VLV_G3DCTL);
- s->gsckgctl = I915_READ(VLV_GSCKGCTL);
- s->mbctl = I915_READ(GEN6_MBCTL);
-
- /* GCP 0x9400-0x9424, 0x8100-0x810C */
- s->ucgctl1 = I915_READ(GEN6_UCGCTL1);
- s->ucgctl3 = I915_READ(GEN6_UCGCTL3);
- s->rcgctl1 = I915_READ(GEN6_RCGCTL1);
- s->rcgctl2 = I915_READ(GEN6_RCGCTL2);
- s->rstctl = I915_READ(GEN6_RSTCTL);
- s->misccpctl = I915_READ(GEN7_MISCCPCTL);
-
- /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
- s->gfxpause = I915_READ(GEN6_GFXPAUSE);
- s->rpdeuhwtc = I915_READ(GEN6_RPDEUHWTC);
- s->rpdeuc = I915_READ(GEN6_RPDEUC);
- s->ecobus = I915_READ(ECOBUS);
- s->pwrdwnupctl = I915_READ(VLV_PWRDWNUPCTL);
- s->rp_down_timeout = I915_READ(GEN6_RP_DOWN_TIMEOUT);
- s->rp_deucsw = I915_READ(GEN6_RPDEUCSW);
- s->rcubmabdtmr = I915_READ(GEN6_RCUBMABDTMR);
- s->rcedata = I915_READ(VLV_RCEDATA);
- s->spare2gh = I915_READ(VLV_SPAREG2H);
-
- /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
- s->gt_imr = I915_READ(GTIMR);
- s->gt_ier = I915_READ(GTIER);
- s->pm_imr = I915_READ(GEN6_PMIMR);
- s->pm_ier = I915_READ(GEN6_PMIER);
-
- for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
- s->gt_scratch[i] = I915_READ(GEN7_GT_SCRATCH(i));
-
- /* GT SA CZ domain, 0x100000-0x138124 */
- s->tilectl = I915_READ(TILECTL);
- s->gt_fifoctl = I915_READ(GTFIFOCTL);
- s->gtlc_wake_ctrl = I915_READ(VLV_GTLC_WAKE_CTRL);
- s->gtlc_survive = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- s->pmwgicz = I915_READ(VLV_PMWGICZ);
-
- /* Gunit-Display CZ domain, 0x182028-0x1821CF */
- s->gu_ctl0 = I915_READ(VLV_GU_CTL0);
- s->gu_ctl1 = I915_READ(VLV_GU_CTL1);
- s->pcbr = I915_READ(VLV_PCBR);
- s->clock_gate_dis2 = I915_READ(VLV_GUNIT_CLOCK_GATE2);
-
- /*
- * Not saving any of:
- * DFT, 0x9800-0x9EC0
- * SARB, 0xB000-0xB1FC
- * GAC, 0x5208-0x524C, 0x14000-0x14C000
- * PCI CFG
- */
-}
-
-static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *dev_priv)
-{
- struct vlv_s0ix_state *s = dev_priv->vlv_s0ix_state;
- u32 val;
- int i;
-
- if (!s)
- return;
-
- /* GAM 0x4000-0x4770 */
- I915_WRITE(GEN7_WR_WATERMARK, s->wr_watermark);
- I915_WRITE(GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
- I915_WRITE(ARB_MODE, s->arb_mode | (0xffff << 16));
- I915_WRITE(GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
- I915_WRITE(GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
-
- for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
- I915_WRITE(GEN7_LRA_LIMITS(i), s->lra_limits[i]);
-
- I915_WRITE(GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
- I915_WRITE(GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
-
- I915_WRITE(RENDER_HWS_PGA_GEN7, s->render_hwsp);
- I915_WRITE(GAM_ECOCHK, s->ecochk);
- I915_WRITE(BSD_HWS_PGA_GEN7, s->bsd_hwsp);
- I915_WRITE(BLT_HWS_PGA_GEN7, s->blt_hwsp);
-
- I915_WRITE(GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
-
- /* MBC 0x9024-0x91D0, 0x8500 */
- I915_WRITE(VLV_G3DCTL, s->g3dctl);
- I915_WRITE(VLV_GSCKGCTL, s->gsckgctl);
- I915_WRITE(GEN6_MBCTL, s->mbctl);
-
- /* GCP 0x9400-0x9424, 0x8100-0x810C */
- I915_WRITE(GEN6_UCGCTL1, s->ucgctl1);
- I915_WRITE(GEN6_UCGCTL3, s->ucgctl3);
- I915_WRITE(GEN6_RCGCTL1, s->rcgctl1);
- I915_WRITE(GEN6_RCGCTL2, s->rcgctl2);
- I915_WRITE(GEN6_RSTCTL, s->rstctl);
- I915_WRITE(GEN7_MISCCPCTL, s->misccpctl);
-
- /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
- I915_WRITE(GEN6_GFXPAUSE, s->gfxpause);
- I915_WRITE(GEN6_RPDEUHWTC, s->rpdeuhwtc);
- I915_WRITE(GEN6_RPDEUC, s->rpdeuc);
- I915_WRITE(ECOBUS, s->ecobus);
- I915_WRITE(VLV_PWRDWNUPCTL, s->pwrdwnupctl);
- I915_WRITE(GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
- I915_WRITE(GEN6_RPDEUCSW, s->rp_deucsw);
- I915_WRITE(GEN6_RCUBMABDTMR, s->rcubmabdtmr);
- I915_WRITE(VLV_RCEDATA, s->rcedata);
- I915_WRITE(VLV_SPAREG2H, s->spare2gh);
-
- /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
- I915_WRITE(GTIMR, s->gt_imr);
- I915_WRITE(GTIER, s->gt_ier);
- I915_WRITE(GEN6_PMIMR, s->pm_imr);
- I915_WRITE(GEN6_PMIER, s->pm_ier);
-
- for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
- I915_WRITE(GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
-
- /* GT SA CZ domain, 0x100000-0x138124 */
- I915_WRITE(TILECTL, s->tilectl);
- I915_WRITE(GTFIFOCTL, s->gt_fifoctl);
- /*
- * Preserve the GT allow wake and GFX force clock bit; they are not
- * restored here, as they are used to control the s0ix suspend/resume
- * sequence by the caller.
- */
- val = I915_READ(VLV_GTLC_WAKE_CTRL);
- val &= VLV_GTLC_ALLOWWAKEREQ;
- val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
- I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
-
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- val &= VLV_GFX_CLK_FORCE_ON_BIT;
- val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
- I915_WRITE(VLV_PMWGICZ, s->pmwgicz);
-
- /* Gunit-Display CZ domain, 0x182028-0x1821CF */
- I915_WRITE(VLV_GU_CTL0, s->gu_ctl0);
- I915_WRITE(VLV_GU_CTL1, s->gu_ctl1);
- I915_WRITE(VLV_PCBR, s->pcbr);
- I915_WRITE(VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
-}
-
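The wake-control and survivability restores just above use a bit-preserving merge: the live control bits are kept from the current register value and every other bit is taken from the snapshot. The shape of that merge, as a standalone sketch:

#include <assert.h>
#include <stdint.h>

/* Keep the `mask' bits from the live register value and take every
 * other bit from the saved snapshot. */
static uint32_t merge_preserving(uint32_t cur, uint32_t saved, uint32_t mask)
{
	return (cur & mask) | (saved & ~mask);
}

int main(void)
{
	uint32_t cur = 0x0000000f, saved = 0xabcdef00, mask = 0x00000001;

	/* bit 0 stays live, everything else comes from the snapshot */
	assert(merge_preserving(cur, saved, mask) == 0xabcdef01);
	return 0;
}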
-static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
- u32 mask, u32 val)
-{
- i915_reg_t reg = VLV_GTLC_PW_STATUS;
- u32 reg_value;
- int ret;
-
- /* The HW does not like us polling for PW_STATUS frequently, so
- * use the sleeping loop rather than risk the busy spin within
- * intel_wait_for_register().
- *
- * Transitioning between RC6 states should be at most 2ms (see
- * valleyview_enable_rps) so use a 3ms timeout.
- */
- ret = wait_for(((reg_value =
- intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
- == val, 3);
-
- /* just trace the final value */
- trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
-
- return ret;
-}
-
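vlv_wait_for_pw_status() above prefers a sleeping poll over a busy spin because the hardware dislikes frequent PW_STATUS reads; wait_for() re-checks the condition with short sleeps until the 3 ms deadline passes. A userspace sketch of the same shape, where read_status() is a stand-in for the register read:

#include <stdio.h>
#include <time.h>

static unsigned int fake_status;	/* stand-in for VLV_GTLC_PW_STATUS */
static unsigned int read_status(void) { return ++fake_status; }

/* Sleeping poll: test the condition, sleep ~1 ms, retry until timeout. */
static int wait_for_status(unsigned int mask, unsigned int val, int timeout_ms)
{
	const struct timespec tick = { .tv_sec = 0, .tv_nsec = 1000000 };
	int elapsed;

	for (elapsed = 0; elapsed <= timeout_ms; elapsed++) {
		if ((read_status() & mask) == val)
			return 0;
		nanosleep(&tick, NULL);	/* yield instead of busy-spinning */
	}
	return -110;			/* -ETIMEDOUT */
}

int main(void)
{
	printf("wait: %d\n", wait_for_status(0x3, 0x3, 3));
	return 0;
}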
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool force_on)
-{
- u32 val;
- int err;
-
- val = I915_READ(VLV_GTLC_SURVIVABILITY_REG);
- val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
- if (force_on)
- val |= VLV_GFX_CLK_FORCE_ON_BIT;
- I915_WRITE(VLV_GTLC_SURVIVABILITY_REG, val);
-
- if (!force_on)
- return 0;
-
- err = intel_wait_for_register(&dev_priv->uncore,
- VLV_GTLC_SURVIVABILITY_REG,
- VLV_GFX_CLK_STATUS_BIT,
- VLV_GFX_CLK_STATUS_BIT,
- 20);
- if (err)
- DRM_ERROR("timeout waiting for GFX clock force-on (%08x)\n",
- I915_READ(VLV_GTLC_SURVIVABILITY_REG));
-
- return err;
-}
-
-static int vlv_allow_gt_wake(struct drm_i915_private *dev_priv, bool allow)
-{
- u32 mask;
- u32 val;
- int err;
-
- val = I915_READ(VLV_GTLC_WAKE_CTRL);
- val &= ~VLV_GTLC_ALLOWWAKEREQ;
- if (allow)
- val |= VLV_GTLC_ALLOWWAKEREQ;
- I915_WRITE(VLV_GTLC_WAKE_CTRL, val);
- POSTING_READ(VLV_GTLC_WAKE_CTRL);
-
- mask = VLV_GTLC_ALLOWWAKEACK;
- val = allow ? mask : 0;
-
- err = vlv_wait_for_pw_status(dev_priv, mask, val);
- if (err)
- DRM_ERROR("timeout disabling GT waking\n");
-
- return err;
-}
-
-static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
- bool wait_for_on)
-{
- u32 mask;
- u32 val;
-
- mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
- val = wait_for_on ? mask : 0;
-
- /*
- * RC6 transitioning can be delayed up to 2 msec (see
- * valleyview_enable_rps), use 3 msec for safety.
- *
- * This can fail to turn off RC6 if the GPU is stuck after a failed
- * reset and we are trying to force the machine to sleep.
- */
- if (vlv_wait_for_pw_status(dev_priv, mask, val))
- DRM_DEBUG_DRIVER("timeout waiting for GT wells to go %s\n",
- onoff(wait_for_on));
-}
-
-static void vlv_check_no_gt_access(struct drm_i915_private *dev_priv)
-{
- if (!(I915_READ(VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
- return;
-
- DRM_DEBUG_DRIVER("GT register access while GT waking disabled\n");
- I915_WRITE(VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
-}
-
-static int vlv_suspend_complete(struct drm_i915_private *dev_priv)
-{
- u32 mask;
- int err;
-
- /*
- * Bspec defines the following GT power-well on flags as debug only, so
- * don't treat them as hard failures.
- */
- vlv_wait_for_gt_wells(dev_priv, false);
-
- mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
- WARN_ON((I915_READ(VLV_GTLC_WAKE_CTRL) & mask) != mask);
-
- vlv_check_no_gt_access(dev_priv);
-
- err = vlv_force_gfx_clock(dev_priv, true);
- if (err)
- goto err1;
-
- err = vlv_allow_gt_wake(dev_priv, false);
- if (err)
- goto err2;
-
- vlv_save_gunit_s0ix_state(dev_priv);
-
- err = vlv_force_gfx_clock(dev_priv, false);
- if (err)
- goto err2;
-
- return 0;
-
-err2:
- /* For safety always re-enable waking and disable gfx clock forcing */
- vlv_allow_gt_wake(dev_priv, true);
-err1:
- vlv_force_gfx_clock(dev_priv, false);
-
- return err;
-}
-
-static int vlv_resume_prepare(struct drm_i915_private *dev_priv,
- bool rpm_resume)
-{
- int err;
- int ret;
-
- /*
- * If any of the steps fail, just try to continue; that's the best we
- * can do at this point. Return the first error code (which will also
- * leave RPM permanently disabled).
- */
- ret = vlv_force_gfx_clock(dev_priv, true);
-
- vlv_restore_gunit_s0ix_state(dev_priv);
-
- err = vlv_allow_gt_wake(dev_priv, true);
- if (!ret)
- ret = err;
-
- err = vlv_force_gfx_clock(dev_priv, false);
- if (!ret)
- ret = err;
-
- vlv_check_no_gt_access(dev_priv);
-
- if (rpm_resume)
- intel_init_clock_gating(dev_priv);
-
- return ret;
-}
-
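The removed vlv_resume_prepare() is a run-everything, keep-first-error sequence: every recovery step executes regardless of earlier failures, but only the first non-zero status is reported (and, as the comment notes, leaves runtime PM disabled). A minimal sketch of the pattern, with simulated steps:

#include <stdio.h>

static int step_one(void)   { return 0; }
static int step_two(void)   { return -5; }	/* simulated failure */
static int step_three(void) { return -7; }	/* masked by the first error */

/* Every step runs regardless of earlier failures; only the first
 * non-zero status is reported, mirroring vlv_resume_prepare(). */
static int run_all_keep_first_error(void)
{
	int ret, err;

	ret = step_one();

	err = step_two();
	if (!ret)
		ret = err;

	err = step_three();
	if (!ret)
		ret = err;

	return ret;
}

int main(void)
{
	printf("first error: %d\n", run_all_keep_first_error());	/* -5 */
	return 0;
}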
static int intel_runtime_suspend(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- DRM_DEBUG_KMS("Suspending device\n");
+ drm_dbg_kms(&dev_priv->drm, "Suspending device\n");
disable_rpm_wakeref_asserts(rpm);
@@ -2516,11 +1596,10 @@ static int intel_runtime_suspend(struct device *kdev)
intel_display_power_suspend(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_suspend_complete(dev_priv);
-
+ ret = vlv_suspend_complete(dev_priv);
if (ret) {
- DRM_ERROR("Runtime suspend failed, disabling it (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Runtime suspend failed, disabling it (%d)\n", ret);
intel_uncore_runtime_resume(&dev_priv->uncore);
intel_runtime_pm_enable_interrupts(dev_priv);
@@ -2538,7 +1617,8 @@ static int intel_runtime_suspend(struct device *kdev)
intel_runtime_pm_driver_release(rpm);
if (intel_uncore_arm_unclaimed_mmio_detection(&dev_priv->uncore))
- DRM_ERROR("Unclaimed access detected prior to suspending\n");
+ drm_err(&dev_priv->drm,
+ "Unclaimed access detected prior to suspending\n");
rpm->suspended = true;
@@ -2570,7 +1650,7 @@ static int intel_runtime_suspend(struct device *kdev)
if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
intel_hpd_poll_init(dev_priv);
- DRM_DEBUG_KMS("Device suspended\n");
+ drm_dbg_kms(&dev_priv->drm, "Device suspended\n");
return 0;
}
@@ -2578,25 +1658,25 @@ static int intel_runtime_resume(struct device *kdev)
{
struct drm_i915_private *dev_priv = kdev_to_i915(kdev);
struct intel_runtime_pm *rpm = &dev_priv->runtime_pm;
- int ret = 0;
+ int ret;
- if (WARN_ON_ONCE(!HAS_RUNTIME_PM(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm, !HAS_RUNTIME_PM(dev_priv)))
return -ENODEV;
- DRM_DEBUG_KMS("Resuming device\n");
+ drm_dbg_kms(&dev_priv->drm, "Resuming device\n");
- WARN_ON_ONCE(atomic_read(&rpm->wakeref_count));
+ drm_WARN_ON_ONCE(&dev_priv->drm, atomic_read(&rpm->wakeref_count));
disable_rpm_wakeref_asserts(rpm);
intel_opregion_notify_adapter(dev_priv, PCI_D0);
rpm->suspended = false;
if (intel_uncore_unclaimed_mmio(&dev_priv->uncore))
- DRM_DEBUG_DRIVER("Unclaimed access during suspend, bios?\n");
+ drm_dbg(&dev_priv->drm,
+ "Unclaimed access during suspend, bios?\n");
intel_display_power_resume(dev_priv);
- if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- ret = vlv_resume_prepare(dev_priv, true);
+ ret = vlv_resume_prepare(dev_priv, true);
intel_uncore_runtime_resume(&dev_priv->uncore);
@@ -2622,9 +1702,10 @@ static int intel_runtime_resume(struct device *kdev)
enable_rpm_wakeref_asserts(rpm);
if (ret)
- DRM_ERROR("Runtime resume failed, disabling it (%d)\n", ret);
+ drm_err(&dev_priv->drm,
+ "Runtime resume failed, disabling it (%d)\n", ret);
else
- DRM_DEBUG_KMS("Device resumed\n");
+ drm_dbg_kms(&dev_priv->drm, "Device resumed\n");
return ret;
}
@@ -2672,12 +1753,12 @@ const struct dev_pm_ops i915_pm_ops = {
static const struct file_operations i915_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = drm_release_noglobal,
.unlocked_ioctl = drm_ioctl,
.mmap = i915_gem_mmap,
.poll = drm_poll,
.read = drm_read,
- .compat_ioctl = i915_compat_ioctl,
+ .compat_ioctl = i915_ioc32_compat_ioctl,
.llseek = noop_llseek,
};
@@ -2769,9 +1850,6 @@ static struct drm_driver driver = {
.gem_prime_export = i915_gem_prime_export,
.gem_prime_import = i915_gem_prime_import,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = i915_get_crtc_scanoutpos,
-
.dumb_create = i915_gem_dumb_create,
.dumb_map_offset = i915_gem_dumb_mmap_offset,
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 077af22b8340..1f5b9a584f71 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -59,7 +59,6 @@
#include <drm/drm_connector.h>
#include <drm/i915_mei_hdcp_interface.h>
-#include "i915_fixed.h"
#include "i915_params.h"
#include "i915_reg.h"
#include "i915_utils.h"
@@ -70,6 +69,7 @@
#include "display/intel_dpll_mgr.h"
#include "display/intel_dsb.h"
#include "display/intel_frontbuffer.h"
+#include "display/intel_global_state.h"
#include "display/intel_gmbus.h"
#include "display/intel_opregion.h"
@@ -104,18 +104,23 @@
#include "intel_region_lmem.h"
-#include "intel_gvt.h"
-
/* General customization:
*/
#define DRIVER_NAME "i915"
#define DRIVER_DESC "Intel Graphics"
-#define DRIVER_DATE "20200114"
-#define DRIVER_TIMESTAMP 1579001978
+#define DRIVER_DATE "20200313"
+#define DRIVER_TIMESTAMP 1584144591
struct drm_i915_gem_object;
+/*
+ * The code assumes that the hpd_pins below have consecutive values and
+ * starting with HPD_PORT_A, the HPD pin associated with any port can be
+ * retrieved by adding the corresponding port (or phy) enum value to
+ * HPD_PORT_A in most cases. For example:
+ * HPD_PORT_C = HPD_PORT_A + PHY_C - PHY_A
+ */
enum hpd_pin {
HPD_NONE = 0,
HPD_TV = HPD_NONE, /* TV is known to be unreliable */
@@ -203,9 +208,7 @@ struct drm_i915_file_private {
} mm;
struct xarray context_xa;
-
- struct idr vm_idr;
- struct mutex vm_idr_lock; /* guards vm_idr */
+ struct xarray vm_xa;
unsigned int bsd_engine;
@@ -255,18 +258,19 @@ struct sdvo_device_mapping {
struct intel_connector;
struct intel_encoder;
struct intel_atomic_state;
-struct intel_crtc_state;
+struct intel_cdclk_config;
+struct intel_cdclk_state;
+struct intel_cdclk_vals;
struct intel_initial_plane_config;
struct intel_crtc;
struct intel_limit;
struct dpll;
-struct intel_cdclk_state;
struct drm_i915_display_funcs {
void (*get_cdclk)(struct drm_i915_private *dev_priv,
- struct intel_cdclk_state *cdclk_state);
+ struct intel_cdclk_config *cdclk_config);
void (*set_cdclk)(struct drm_i915_private *dev_priv,
- const struct intel_cdclk_state *cdclk_state,
+ const struct intel_cdclk_config *cdclk_config,
enum pipe pipe);
int (*get_fifo_size)(struct drm_i915_private *dev_priv,
enum i9xx_plane_id i9xx_plane);
@@ -280,7 +284,7 @@ struct drm_i915_display_funcs {
struct intel_crtc *crtc);
int (*compute_global_watermarks)(struct intel_atomic_state *state);
void (*update_wm)(struct intel_crtc *crtc);
- int (*modeset_calc_cdclk)(struct intel_atomic_state *state);
+ int (*modeset_calc_cdclk)(struct intel_cdclk_state *state);
u8 (*calc_voltage_level)(int cdclk);
/* Returns the active state of the crtc, and if the crtc is active,
* fills out the pipe-config with the hw state. */
@@ -504,8 +508,8 @@ struct i915_psr {
u16 su_x_granularity;
bool dc3co_enabled;
u32 dc3co_exit_delay;
- struct delayed_work idle_work;
- bool initially_probed;
+ struct delayed_work dc3co_work;
+ bool force_mode_changed;
};
#define QUIRK_LVDS_SSC_DISABLE (1<<1)
@@ -732,19 +736,10 @@ enum intel_ddb_partitioning {
INTEL_DDB_PART_5_6, /* IVB+ */
};
-struct intel_wm_level {
- bool enable;
- u32 pri_val;
- u32 spr_val;
- u32 cur_val;
- u32 fbc_val;
-};
-
struct ilk_wm_values {
u32 wm_pipe[3];
u32 wm_lp[3];
u32 wm_lp_spr[3];
- u32 wm_linetime[3];
bool enable_fbc_wm;
enum intel_ddb_partitioning partitioning;
};
@@ -799,65 +794,6 @@ static inline bool skl_ddb_entry_equal(const struct skl_ddb_entry *e1,
return false;
}
-struct skl_ddb_allocation {
- u8 enabled_slices; /* GEN11 has configurable 2 slices */
-};
-
-struct skl_ddb_values {
- unsigned dirty_pipes;
- struct skl_ddb_allocation ddb;
-};
-
-struct skl_wm_level {
- u16 min_ddb_alloc;
- u16 plane_res_b;
- u8 plane_res_l;
- bool plane_en;
- bool ignore_lines;
-};
-
-/* Stores plane specific WM parameters */
-struct skl_wm_params {
- bool x_tiled, y_tiled;
- bool rc_surface;
- bool is_planar;
- u32 width;
- u8 cpp;
- u32 plane_pixel_rate;
- u32 y_min_scanlines;
- u32 plane_bytes_per_line;
- uint_fixed_16_16_t plane_blocks_per_line;
- uint_fixed_16_16_t y_tile_minimum;
- u32 linetime_us;
- u32 dbuf_block_size;
-};
-
-enum intel_pipe_crc_source {
- INTEL_PIPE_CRC_SOURCE_NONE,
- INTEL_PIPE_CRC_SOURCE_PLANE1,
- INTEL_PIPE_CRC_SOURCE_PLANE2,
- INTEL_PIPE_CRC_SOURCE_PLANE3,
- INTEL_PIPE_CRC_SOURCE_PLANE4,
- INTEL_PIPE_CRC_SOURCE_PLANE5,
- INTEL_PIPE_CRC_SOURCE_PLANE6,
- INTEL_PIPE_CRC_SOURCE_PLANE7,
- INTEL_PIPE_CRC_SOURCE_PIPE,
- /* TV/DP on pre-gen5/vlv can't use the pipe source. */
- INTEL_PIPE_CRC_SOURCE_TV,
- INTEL_PIPE_CRC_SOURCE_DP_B,
- INTEL_PIPE_CRC_SOURCE_DP_C,
- INTEL_PIPE_CRC_SOURCE_DP_D,
- INTEL_PIPE_CRC_SOURCE_AUTO,
- INTEL_PIPE_CRC_SOURCE_MAX,
-};
-
-#define INTEL_PIPE_CRC_ENTRIES_NR 128
-struct intel_pipe_crc {
- spinlock_t lock;
- int skipped;
- enum intel_pipe_crc_source source;
-};
-
struct i915_frontbuffer_tracking {
spinlock_t lock;
@@ -875,14 +811,7 @@ struct i915_virtual_gpu {
u32 caps;
};
-/* used in computing the new watermarks state */
-struct intel_wm_config {
- unsigned int num_pipes_active;
- bool sprites_enabled;
- bool sprites_scaled;
-};
-
-struct intel_cdclk_state {
+struct intel_cdclk_config {
unsigned int cdclk, vco, ref, bypass;
u8 voltage_level;
};
@@ -1002,33 +931,18 @@ struct drm_i915_private {
unsigned int max_cdclk_freq;
unsigned int max_dotclk_freq;
- unsigned int rawclk_freq;
unsigned int hpll_freq;
unsigned int fdi_pll_freq;
unsigned int czclk_freq;
- /*
- * For reading holding any crtc lock is sufficient,
- * for writing must hold all of them.
- */
struct {
- /*
- * The current logical cdclk state.
- * See intel_atomic_state.cdclk.logical
- */
- struct intel_cdclk_state logical;
- /*
- * The current actual cdclk state.
- * See intel_atomic_state.cdclk.actual
- */
- struct intel_cdclk_state actual;
- /* The current hardware cdclk state */
- struct intel_cdclk_state hw;
+ /* The current hardware cdclk configuration */
+ struct intel_cdclk_config hw;
/* cdclk, divider, and ratio table from bspec */
const struct intel_cdclk_vals *table;
- int force_min_cdclk;
+ struct intel_global_obj obj;
} cdclk;
/**
@@ -1068,31 +982,32 @@ struct drm_i915_private {
struct intel_crtc *plane_to_crtc_mapping[I915_MAX_PIPES];
struct intel_crtc *pipe_to_crtc_mapping[I915_MAX_PIPES];
-#ifdef CONFIG_DEBUG_FS
- struct intel_pipe_crc pipe_crc[I915_MAX_PIPES];
-#endif
+ /**
+ * dpll and cdclk state is protected by connection_mutex
+ * dpll.lock serializes intel_{prepare,enable,disable}_shared_dpll.
+ * Must be global rather than per dpll, because on some platforms plls
+ * share registers.
+ */
+ struct {
+ struct mutex lock;
- /* dpll and cdclk state is protected by connection_mutex */
- int num_shared_dpll;
- struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
- const struct intel_dpll_mgr *dpll_mgr;
+ int num_shared_dpll;
+ struct intel_shared_dpll shared_dplls[I915_NUM_PLLS];
+ const struct intel_dpll_mgr *mgr;
- /*
- * dpll_lock serializes intel_{prepare,enable,disable}_shared_dpll.
- * Must be global rather than per dpll, because on some platforms
- * plls share registers.
- */
- struct mutex dpll_lock;
+ struct {
+ int nssc;
+ int ssc;
+ } ref_clks;
+ } dpll;
+
+ struct list_head global_obj_list;
/*
- * For reading active_pipes, min_cdclk, min_voltage_level holding
- * any crtc lock is sufficient, for writing must hold all of them.
+ * For reading active_pipes, holding any crtc lock is
+ * sufficient; for writing, all of them must be held.
*/
u8 active_pipes;
- /* minimum acceptable cdclk for each pipe */
- int min_cdclk[I915_MAX_PIPES];
- /* minimum acceptable voltage level for each pipe */
- u8 min_voltage_level[I915_MAX_PIPES];
int dpio_phy_iosf_port[I915_NUM_PHYS_VLV];
@@ -1105,8 +1020,6 @@ struct drm_i915_private {
struct work_struct free_work;
} atomic_helper;
- u16 orig_clock;
-
bool mchbar_need_disable;
struct intel_l3_parity l3_parity;
@@ -1191,7 +1104,6 @@ struct drm_i915_private {
/* current hardware state */
union {
struct ilk_wm_values hw;
- struct skl_ddb_values skl_hw;
struct vlv_wm_values vlv;
struct g4x_wm_values g4x;
};
@@ -1213,6 +1125,8 @@ struct drm_i915_private {
bool distrust_bios_wm;
} wm;
+ u8 enabled_dbuf_slices_mask; /* GEN11 has configurable 2 slices */
+
struct dram_info {
bool valid;
bool is_16gb_dimm;
@@ -1236,7 +1150,7 @@ struct drm_i915_private {
u8 num_planes;
} max_bw[6];
- struct drm_private_obj bw_obj;
+ struct intel_global_obj bw_obj;
struct intel_runtime_pm runtime_pm;
@@ -1300,16 +1214,6 @@ struct drm_i915_private {
*/
};
-struct dram_dimm_info {
- u8 size, width, ranks;
-};
-
-struct dram_channel_info {
- struct dram_dimm_info dimm_l, dimm_s;
- u8 ranks;
- bool is_16gb_dimm;
-};
-
static inline struct drm_i915_private *to_i915(const struct drm_device *dev)
{
return container_of(dev, struct drm_i915_private, drm);
@@ -1580,6 +1484,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define GLK_REVID_A0 0x0
#define GLK_REVID_A1 0x1
+#define GLK_REVID_A2 0x2
+#define GLK_REVID_B0 0x3
#define IS_GLK_REVID(dev_priv, since, until) \
(IS_GEMINILAKE(dev_priv) && IS_REVID(dev_priv, since, until))
@@ -1718,10 +1624,6 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915,
#define HAS_GT_UC(dev_priv) (INTEL_INFO(dev_priv)->has_gt_uc)
-/* Having GuC is not the same as using GuC */
-#define USES_GUC(dev_priv) intel_uc_uses_guc(&(dev_priv)->gt.uc)
-#define USES_GUC_SUBMISSION(dev_priv) intel_uc_uses_guc_submission(&(dev_priv)->gt.uc)
-
#define HAS_POOLED_EU(dev_priv) (INTEL_INFO(dev_priv)->has_pooled_eu)
#define HAS_GLOBAL_MOCS_REGISTERS(dev_priv) (INTEL_INFO(dev_priv)->has_global_mocs)
@@ -1767,11 +1669,6 @@ intel_ggtt_update_needs_vtd_wa(struct drm_i915_private *dev_priv)
}
/* i915_drv.c */
-#ifdef CONFIG_COMPAT
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
-#else
-#define i915_compat_ioctl NULL
-#endif
extern const struct dev_pm_ops i915_pm_ops;
int i915_driver_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
@@ -1780,18 +1677,6 @@ void i915_driver_remove(struct drm_i915_private *i915);
int i915_resume_switcheroo(struct drm_i915_private *i915);
int i915_suspend_switcheroo(struct drm_i915_private *i915, pm_message_t state);
-int vlv_force_gfx_clock(struct drm_i915_private *dev_priv, bool on);
-
-static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->gvt;
-}
-
-static inline bool intel_vgpu_active(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.active;
-}
-
int i915_getparam_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
@@ -1856,12 +1741,6 @@ int i915_gem_object_unbind(struct drm_i915_gem_object *obj,
void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv);
-static inline int __must_check
-i915_mutex_lock_interruptible(struct drm_device *dev)
-{
- return mutex_lock_interruptible(&dev->struct_mutex);
-}
-
int i915_gem_dumb_create(struct drm_file *file_priv,
struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -2010,20 +1889,6 @@ int i915_reg_read_ioctl(struct drm_device *dev, void *data,
#define I915_READ_FW(reg__) __I915_REG_OP(read_fw, dev_priv, (reg__))
#define I915_WRITE_FW(reg__, val__) __I915_REG_OP(write_fw, dev_priv, (reg__), (val__))
-/* register wait wrappers for display regs */
-#define intel_de_wait_for_register(dev_priv_, reg_, mask_, value_, timeout_) \
- intel_wait_for_register(&(dev_priv_)->uncore, \
- (reg_), (mask_), (value_), (timeout_))
-
-#define intel_de_wait_for_set(dev_priv_, reg_, mask_, timeout_) ({ \
- u32 mask__ = (mask_); \
- intel_de_wait_for_register((dev_priv_), (reg_), \
- mask__, mask__, (timeout_)); \
-})
-
-#define intel_de_wait_for_clear(dev_priv_, reg_, mask_, timeout_) \
- intel_de_wait_for_register((dev_priv_), (reg_), (mask_), 0, (timeout_))
-
/* i915_mm.c */
int remap_io_mapping(struct vm_area_struct *vma,
unsigned long addr, unsigned long pfn, unsigned long size,
@@ -2046,10 +1911,4 @@ i915_coherent_map_type(struct drm_i915_private *i915)
return HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
}
-static inline bool intel_guc_submission_is_enabled(struct intel_guc *guc)
-{
- return intel_guc_is_submission_supported(guc) &&
- intel_guc_is_running(guc);
-}
-
#endif
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 94f993e4c12f..ca5420012a22 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -26,7 +26,6 @@
*/
#include <drm/drm_vma_manager.h>
-#include <drm/i915_drm.h>
#include <linux/dma-fence-array.h>
#include <linux/kthread.h>
#include <linux/dma-resv.h>
@@ -180,7 +179,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
struct drm_i915_gem_pwrite *args,
struct drm_file *file)
{
- void *vaddr = obj->phys_handle->vaddr + args->offset;
+ void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
char __user *user_data = u64_to_user_ptr(args->data_ptr);
/*
@@ -265,7 +264,10 @@ i915_gem_dumb_create(struct drm_file *file,
DRM_FORMAT_MOD_LINEAR))
args->pitch = ALIGN(args->pitch, 4096);
- args->size = args->pitch * args->height;
+ if (args->pitch < args->width)
+ return -EINVAL;
+
+ args->size = mul_u32_u32(args->pitch, args->height);
mem_type = INTEL_MEMORY_SYSTEM;
if (HAS_LMEM(to_i915(dev)))
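
The switch from a plain multiply to mul_u32_u32() matters because pitch * height can exceed 32 bits: a u32 * u32 product computed in 32 bits wraps, so a huge buffer request could come back with a tiny args->size. The kernel helper (linux/math64.h) widens before multiplying; this standalone sketch mirrors its semantics to show the difference:

#include <stdint.h>
#include <stdio.h>

/* same semantics as the kernel's mul_u32_u32(): widen, then multiply */
static inline uint64_t mul_u32_u32(uint32_t a, uint32_t b)
{
	return (uint64_t)a * b;
}

int main(void)
{
	uint32_t pitch = 1u << 20, height = 1u << 13;	/* 2^33 bytes */

	printf("wrapped: %u\n", pitch * height);	/* 0 after overflow */
	printf("widened: %llu\n",
	       (unsigned long long)mul_u32_u32(pitch, height));
	return 0;
}
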
@@ -841,10 +843,10 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
ret = i915_gem_gtt_pwrite_fast(obj, args);
if (ret == -EFAULT || ret == -ENOSPC) {
- if (obj->phys_handle)
- ret = i915_gem_phys_pwrite(obj, args, file);
- else
+ if (i915_gem_object_has_struct_page(obj))
ret = i915_gem_shmem_pwrite(obj, args);
+ else
+ ret = i915_gem_phys_pwrite(obj, args, file);
}
i915_gem_object_unpin_pages(obj);
@@ -938,9 +940,6 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
struct i915_vma *vma;
int ret;
- if (i915_gem_object_never_bind_ggtt(obj))
- return ERR_PTR(-ENODEV);
-
if (flags & PIN_MAPPABLE &&
(!view || view->type == I915_GGTT_VIEW_NORMAL)) {
/*
@@ -1006,6 +1005,12 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
if (ret)
return ERR_PTR(ret);
+ ret = i915_vma_wait_for_bind(vma);
+ if (ret) {
+ i915_vma_unpin(vma);
+ return ERR_PTR(ret);
+ }
+
return vma;
}
@@ -1150,7 +1155,7 @@ err_unlock:
/* Minimal basic recovery for KMS */
ret = i915_ggtt_enable_hw(dev_priv);
- i915_gem_restore_gtt_mappings(dev_priv);
+ i915_ggtt_resume(&dev_priv->ggtt);
i915_gem_restore_fences(&dev_priv->ggtt);
intel_init_clock_gating(dev_priv);
}
@@ -1198,7 +1203,7 @@ void i915_gem_driver_release(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
- WARN_ON(!list_empty(&dev_priv->gem.contexts.list));
+ drm_WARN_ON(&dev_priv->drm, !list_empty(&dev_priv->gem.contexts.list));
}
static void i915_gem_init__mm(struct drm_i915_private *i915)
@@ -1226,7 +1231,7 @@ void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
i915_gem_drain_freed_objects(dev_priv);
GEM_BUG_ON(!llist_empty(&dev_priv->mm.free_list));
GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
- WARN_ON(dev_priv->mm.shrink_count);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->mm.shrink_count);
}
int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -1266,7 +1271,8 @@ int i915_gem_freeze_late(struct drm_i915_private *i915)
list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
i915_gem_object_lock(obj);
- WARN_ON(i915_gem_object_set_to_cpu_domain(obj, true));
+ drm_WARN_ON(&i915->drm,
+ i915_gem_object_set_to_cpu_domain(obj, true));
i915_gem_object_unlock(obj);
}
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
index 0697bedebeef..4518b9b35c3d 100644
--- a/drivers/gpu/drm/i915/i915_gem_evict.c
+++ b/drivers/gpu/drm/i915/i915_gem_evict.c
@@ -26,8 +26,6 @@
*
*/
-#include <drm/i915_drm.h>
-
#include "gem/i915_gem_context.h"
#include "gt/intel_gt_requests.h"
@@ -292,7 +290,8 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
GEM_BUG_ON(!drm_mm_node_allocated(node));
vma = container_of(node, typeof(*vma), node);
- /* If we are using coloring to insert guard pages between
+ /*
+ * If we are using coloring to insert guard pages between
* different cache domains within the address space, we have
* to check whether the objects on either side of our range
* abut and conflict. If they are in conflict, then we evict
@@ -309,22 +308,18 @@ int i915_gem_evict_for_node(struct i915_address_space *vm,
}
}
- if (flags & PIN_NONBLOCK &&
- (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))) {
+ if (i915_vma_is_pinned(vma)) {
ret = -ENOSPC;
break;
}
- /* Overlap of objects in the same batch? */
- if (i915_vma_is_pinned(vma)) {
+ if (flags & PIN_NONBLOCK && i915_vma_is_active(vma)) {
ret = -ENOSPC;
- if (vma->exec_flags &&
- *vma->exec_flags & EXEC_OBJECT_PINNED)
- ret = -EINVAL;
break;
}
- /* Never show fear in the face of dragons!
+ /*
+ * Never show fear in the face of dragons!
*
* We cannot directly remove this node from within this
* iterator and as with i915_gem_evict_something() we employ
diff --git a/drivers/gpu/drm/i915/i915_gem_fence_reg.c b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
index d9c34a23cd67..d152b648c73c 100644
--- a/drivers/gpu/drm/i915/i915_gem_fence_reg.c
+++ b/drivers/gpu/drm/i915/i915_gem_fence_reg.c
@@ -21,10 +21,9 @@
* IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "i915_drv.h"
#include "i915_scatterlist.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -237,11 +236,12 @@ static int fence_update(struct i915_fence_reg *fence,
if (!i915_vma_is_map_and_fenceable(vma))
return -EINVAL;
- if (WARN(!i915_gem_object_get_stride(vma->obj) ||
- !i915_gem_object_get_tiling(vma->obj),
- "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
- i915_gem_object_get_stride(vma->obj),
- i915_gem_object_get_tiling(vma->obj)))
+ if (drm_WARN(&uncore->i915->drm,
+ !i915_gem_object_get_stride(vma->obj) ||
+ !i915_gem_object_get_tiling(vma->obj),
+ "bogus fence setup with stride: 0x%x, tiling mode: %i\n",
+ i915_gem_object_get_stride(vma->obj),
+ i915_gem_object_get_tiling(vma->obj)))
return -EINVAL;
ret = i915_vma_sync(vma);
@@ -713,7 +713,7 @@ static void detect_bit_6_swizzle(struct i915_ggtt *ggtt)
}
if (dcc == 0xffffffff) {
- DRM_ERROR("Couldn't read from MCHBAR. "
+ drm_err(&i915->drm, "Couldn't read from MCHBAR. "
"Disabling tiling.\n");
swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN;
swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN;
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index e039eb56900f..cb43381b0d37 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -15,8 +15,6 @@
#include <asm/set_memory.h>
#include <asm/smp.h>
-#include <drm/i915_drm.h>
-
#include "display/intel_frontbuffer.h"
#include "gt/intel_gt.h"
#include "gt/intel_gt_requests.h"
@@ -63,7 +61,8 @@ void i915_gem_gtt_finish_pages(struct drm_i915_gem_object *obj,
/* XXX This does not prevent more requests being submitted! */
if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
-MAX_SCHEDULE_TIMEOUT)) {
- DRM_ERROR("Failed to wait for idle; VT'd may hang.\n");
+ drm_err(&dev_priv->drm,
+ "Failed to wait for idle; VT'd may hang.\n");
/* Wait a bit, in hopes it avoids the hang */
udelay(10);
}
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 4c1836f0a991..2a4cd0ba5464 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -37,6 +37,7 @@
#include <drm/drm_print.h>
#include "display/intel_atomic.h"
+#include "display/intel_csr.h"
#include "display/intel_overlay.h"
#include "gem/i915_gem_context.h"
@@ -47,7 +48,6 @@
#include "i915_gpu_error.h"
#include "i915_memcpy.h"
#include "i915_scatterlist.h"
-#include "intel_csr.h"
#define ALLOW_FAIL (GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN)
#define ATOMIC_MAYFAIL (GFP_ATOMIC | __GFP_NOWARN)
@@ -450,6 +450,14 @@ static void error_print_instdone(struct drm_i915_error_state_buf *m,
err_printf(m, " ROW_INSTDONE[%d][%d]: 0x%08x\n",
slice, subslice,
ee->instdone.row[slice][subslice]);
+
+ if (INTEL_GEN(m->i915) < 12)
+ return;
+
+ err_printf(m, " SC_INSTDONE_EXTRA: 0x%08x\n",
+ ee->instdone.slice_common_extra[0]);
+ err_printf(m, " SC_INSTDONE_EXTRA2: 0x%08x\n",
+ ee->instdone.slice_common_extra[1]);
}
static void error_print_request(struct drm_i915_error_state_buf *m,
@@ -473,9 +481,13 @@ static void error_print_context(struct drm_i915_error_state_buf *m,
const char *header,
const struct i915_gem_context_coredump *ctx)
{
- err_printf(m, "%s%s[%d] prio %d, guilty %d active %d\n",
+ const u32 period = RUNTIME_INFO(m->i915)->cs_timestamp_period_ns;
+
+ err_printf(m, "%s%s[%d] prio %d, guilty %d active %d, runtime total %lluns, avg %lluns\n",
header, ctx->comm, ctx->pid, ctx->sched_attr.priority,
- ctx->guilty, ctx->active);
+ ctx->guilty, ctx->active,
+ ctx->total_runtime * period,
+ mul_u32_u32(ctx->avg_runtime, period));
}
static struct i915_vma_coredump *
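
The new runtime fields arrive in CS timestamp ticks and are scaled by the timestamp period only at print time; the average itself is maintained as an exponentially weighted moving average. A minimal sketch of that kind of average (the kernel's DECLARE_EWMA parameterizes precision and weight; the factor of 8 here is purely illustrative):

#include <stdint.h>
#include <stdio.h>

/* new = old - old/8 + sample/8: recent samples dominate over time */
static uint32_t ewma_update(uint32_t avg, uint32_t sample)
{
	return avg - (avg >> 3) + (sample >> 3);
}

int main(void)
{
	uint32_t avg = 0;
	const uint32_t samples[] = { 800, 800, 1600, 800 };

	for (unsigned int i = 0; i < 4; i++) {
		avg = ewma_update(avg, samples[i]);
		printf("avg=%u\n", avg);
	}
	return 0;
}
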
@@ -515,6 +527,7 @@ static void error_print_engine(struct drm_i915_error_state_buf *m,
(u32)(ee->acthd>>32), (u32)ee->acthd);
err_printf(m, " IPEIR: 0x%08x\n", ee->ipeir);
err_printf(m, " IPEHR: 0x%08x\n", ee->ipehr);
+ err_printf(m, " ESR: 0x%08x\n", ee->esr);
error_print_instdone(m, ee);
@@ -1102,6 +1115,7 @@ static void engine_record_registers(struct intel_engine_coredump *ee)
}
if (INTEL_GEN(i915) >= 4) {
+ ee->esr = ENGINE_READ(engine, RING_ESR);
ee->faddr = ENGINE_READ(engine, RING_DMA_FADD);
ee->ipeir = ENGINE_READ(engine, RING_IPEIR);
ee->ipehr = ENGINE_READ(engine, RING_IPEHR);
@@ -1228,7 +1242,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
{
struct i915_gem_context *ctx;
struct task_struct *task;
- bool capture;
+ bool simulated;
rcu_read_lock();
ctx = rcu_dereference(rq->context->gem_context);
@@ -1236,7 +1250,7 @@ static bool record_context(struct i915_gem_context_coredump *e,
ctx = NULL;
rcu_read_unlock();
if (!ctx)
- return false;
+ return true;
rcu_read_lock();
task = pid_task(ctx->pid, PIDTYPE_PID);
@@ -1250,10 +1264,13 @@ static bool record_context(struct i915_gem_context_coredump *e,
e->guilty = atomic_read(&ctx->guilty_count);
e->active = atomic_read(&ctx->active_count);
- capture = i915_gem_context_no_error_capture(ctx);
+ e->total_runtime = rq->context->runtime.total;
+ e->avg_runtime = ewma_runtime_read(&rq->context->runtime.avg);
+
+ simulated = i915_gem_context_no_error_capture(ctx);
i915_gem_context_put(ctx);
- return capture;
+ return simulated;
}
struct intel_engine_capture_vma {
@@ -1681,7 +1698,7 @@ static const char *error_msg(struct i915_gpu_coredump *error)
"GPU HANG: ecode %d:%x:%08x",
INTEL_GEN(error->i915), engines,
generate_ecode(first));
- if (first) {
+ if (first && first->context.pid) {
/* Just show the first executing process, more is confusing */
len += scnprintf(error->error_msg + len,
sizeof(error->error_msg) - len,
@@ -1852,7 +1869,8 @@ void i915_error_state_store(struct i915_gpu_coredump *error)
if (!xchg(&warned, true) &&
ktime_get_real_seconds() - DRIVER_TIMESTAMP < DAY_AS_SECONDS(180)) {
pr_info("GPU hangs can indicate a bug anywhere in the entire gfx stack, including userspace.\n");
- pr_info("Please file a _new_ bug report on bugs.freedesktop.org against DRI -> DRM/Intel\n");
+ pr_info("Please file a _new_ bug report at https://gitlab.freedesktop.org/drm/intel/issues/new.\n");
+ pr_info("Please see https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs for details.\n");
pr_info("drm/i915 developers can then reassign to the right component if it's not a kernel issue.\n");
pr_info("The GPU crash dump is required to analyze GPU hangs, so please always attach it.\n");
pr_info("GPU crash dump saved to /sys/class/drm/card%d/error\n",
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.h b/drivers/gpu/drm/i915/i915_gpu_error.h
index 9109004956bd..0d1f6c8ff355 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.h
+++ b/drivers/gpu/drm/i915/i915_gpu_error.h
@@ -75,6 +75,7 @@ struct intel_engine_coredump {
u32 hws;
u32 ipeir;
u32 ipehr;
+ u32 esr;
u32 bbstate;
u32 instpm;
u32 instps;
@@ -87,6 +88,10 @@ struct intel_engine_coredump {
struct i915_gem_context_coredump {
char comm[TASK_COMM_LEN];
+
+ u64 total_runtime;
+ u32 avg_runtime;
+
pid_t pid;
int active;
int guilty;
@@ -314,8 +319,11 @@ i915_vma_capture_finish(struct intel_gt_coredump *gt,
}
static inline void
-i915_error_state_store(struct drm_i915_private *i915,
- struct i915_gpu_coredump *error)
+i915_error_state_store(struct i915_gpu_coredump *error)
+{
+}
+
+static inline void i915_gpu_coredump_put(struct i915_gpu_coredump *gpu)
{
}
diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
index c1007245f46d..8e45ca3d2ede 100644
--- a/drivers/gpu/drm/i915/i915_ioc32.c
+++ b/drivers/gpu/drm/i915/i915_ioc32.c
@@ -28,9 +28,10 @@
*/
#include <linux/compat.h>
-#include <drm/i915_drm.h>
#include <drm/drm_ioctl.h>
+
#include "i915_drv.h"
+#include "i915_ioc32.h"
struct drm_i915_getparam32 {
s32 param;
@@ -67,7 +68,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
};
/**
- * i915_compat_ioctl - handle the mistakes of the past
+ * i915_ioc32_compat_ioctl - handle the mistakes of the past
* @filp: the file pointer
* @cmd: the ioctl command (and encoded flags)
* @arg: the ioctl argument (from userspace)
@@ -75,7 +76,7 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
* Called whenever a 32-bit process running under a 64-bit kernel
* performs an ioctl on /dev/dri/card<n>.
*/
-long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
unsigned int nr = DRM_IOCTL_NR(cmd);
drm_ioctl_compat_t *fn = NULL;
diff --git a/drivers/gpu/drm/i915/i915_ioc32.h b/drivers/gpu/drm/i915/i915_ioc32.h
new file mode 100644
index 000000000000..40dcd55ca213
--- /dev/null
+++ b/drivers/gpu/drm/i915/i915_ioc32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __I915_IOC32_H__
+#define __I915_IOC32_H__
+
+#ifdef CONFIG_COMPAT
+struct file;
+long i915_ioc32_compat_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+#else
+#define i915_ioc32_compat_ioctl NULL
+#endif
+
+#endif /* __I915_IOC32_H__ */
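
The new header keeps callers configuration-agnostic: with CONFIG_COMPAT the real handler is declared, without it the name collapses to NULL, so an ops-table initializer compiles either way. A userspace sketch of the same guard pattern (HAVE_COMPAT and struct ops are hypothetical stand-ins, not kernel API):

#include <stdio.h>

#ifdef HAVE_COMPAT
static long compat_handler(unsigned int cmd, unsigned long arg)
{
	(void)cmd;
	(void)arg;
	return 0;
}
#else
#define compat_handler NULL	/* name still usable in initializers */
#endif

struct ops {
	long (*compat)(unsigned int cmd, unsigned long arg);
};

static const struct ops file_ops = {
	.compat = compat_handler,	/* NULL when HAVE_COMPAT is unset */
};

int main(void)
{
	printf("compat %s\n", file_ops.compat ? "available" : "stubbed");
	return 0;
}
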
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index afc6aad9bf8c..9f0653cf0510 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -34,7 +34,6 @@
#include <drm/drm_drv.h>
#include <drm/drm_irq.h>
-#include <drm/i915_drm.h>
#include "display/intel_display_types.h"
#include "display/intel_fifo_underrun.h"
@@ -79,7 +78,7 @@ static const u32 hpd_ibx[HPD_NUM_PINS] = {
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG,
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG,
};
static const u32 hpd_cpt[HPD_NUM_PINS] = {
@@ -87,7 +86,7 @@ static const u32 hpd_cpt[HPD_NUM_PINS] = {
[HPD_SDVO_B] = SDE_SDVOB_HOTPLUG_CPT,
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
- [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT
+ [HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
};
static const u32 hpd_spt[HPD_NUM_PINS] = {
@@ -95,7 +94,7 @@ static const u32 hpd_spt[HPD_NUM_PINS] = {
[HPD_PORT_B] = SDE_PORTB_HOTPLUG_CPT,
[HPD_PORT_C] = SDE_PORTC_HOTPLUG_CPT,
[HPD_PORT_D] = SDE_PORTD_HOTPLUG_CPT,
- [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT
+ [HPD_PORT_E] = SDE_PORTE_HOTPLUG_SPT,
};
static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
@@ -104,7 +103,7 @@ static const u32 hpd_mask_i915[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_EN,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_EN,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_EN,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_EN,
};
static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
@@ -113,7 +112,7 @@ static const u32 hpd_status_g4x[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_G4X,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
static const u32 hpd_status_i915[HPD_NUM_PINS] = {
@@ -122,21 +121,21 @@ static const u32 hpd_status_i915[HPD_NUM_PINS] = {
[HPD_SDVO_C] = SDVOC_HOTPLUG_INT_STATUS_I915,
[HPD_PORT_B] = PORTB_HOTPLUG_INT_STATUS,
[HPD_PORT_C] = PORTC_HOTPLUG_INT_STATUS,
- [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS
+ [HPD_PORT_D] = PORTD_HOTPLUG_INT_STATUS,
};
/* BXT hpd list */
static const u32 hpd_bxt[HPD_NUM_PINS] = {
[HPD_PORT_A] = BXT_DE_PORT_HP_DDIA,
[HPD_PORT_B] = BXT_DE_PORT_HP_DDIB,
- [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC
+ [HPD_PORT_C] = BXT_DE_PORT_HP_DDIC,
};
static const u32 hpd_gen11[HPD_NUM_PINS] = {
[HPD_PORT_C] = GEN11_TC1_HOTPLUG | GEN11_TBT1_HOTPLUG,
[HPD_PORT_D] = GEN11_TC2_HOTPLUG | GEN11_TBT2_HOTPLUG,
[HPD_PORT_E] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
- [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG
+ [HPD_PORT_F] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
};
static const u32 hpd_gen12[HPD_NUM_PINS] = {
@@ -145,7 +144,7 @@ static const u32 hpd_gen12[HPD_NUM_PINS] = {
[HPD_PORT_F] = GEN11_TC3_HOTPLUG | GEN11_TBT3_HOTPLUG,
[HPD_PORT_G] = GEN11_TC4_HOTPLUG | GEN11_TBT4_HOTPLUG,
[HPD_PORT_H] = GEN12_TC5_HOTPLUG | GEN12_TBT5_HOTPLUG,
- [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG
+ [HPD_PORT_I] = GEN12_TC6_HOTPLUG | GEN12_TBT6_HOTPLUG,
};
static const u32 hpd_icp[HPD_NUM_PINS] = {
@@ -169,6 +168,14 @@ static const u32 hpd_tgp[HPD_NUM_PINS] = {
[HPD_PORT_I] = SDE_TC_HOTPLUG_ICP(PORT_TC6),
};
+static void
+intel_handle_vblank(struct drm_i915_private *dev_priv, enum pipe pipe)
+{
+ struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+
+ drm_crtc_handle_vblank(&crtc->base);
+}
+
void gen3_irq_reset(struct intel_uncore *uncore, i915_reg_t imr,
i915_reg_t iir, i915_reg_t ier)
{
@@ -208,8 +215,9 @@ static void gen3_assert_iir_is_zero(struct intel_uncore *uncore, i915_reg_t reg)
if (val == 0)
return;
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(reg), val);
+ drm_WARN(&uncore->i915->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(reg), val);
intel_uncore_write(uncore, reg, 0xffffffff);
intel_uncore_posting_read(uncore, reg);
intel_uncore_write(uncore, reg, 0xffffffff);
@@ -223,8 +231,9 @@ static void gen2_assert_iir_is_zero(struct intel_uncore *uncore)
if (val == 0)
return;
- WARN(1, "Interrupt register 0x%x is not zero: 0x%08x\n",
- i915_mmio_reg_offset(GEN2_IIR), val);
+ drm_WARN(&uncore->i915->drm, 1,
+ "Interrupt register 0x%x is not zero: 0x%08x\n",
+ i915_mmio_reg_offset(GEN2_IIR), val);
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
intel_uncore_posting_read16(uncore, GEN2_IIR);
intel_uncore_write16(uncore, GEN2_IIR, 0xffff);
@@ -262,7 +271,7 @@ i915_hotplug_interrupt_update_locked(struct drm_i915_private *dev_priv,
u32 val;
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(bits & ~mask);
+ drm_WARN_ON(&dev_priv->drm, bits & ~mask);
val = I915_READ(PORT_HOTPLUG_EN);
val &= ~mask;
@@ -305,9 +314,9 @@ void ilk_update_display_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->irq_mask;
@@ -336,9 +345,9 @@ static void bdw_update_port_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
old_val = I915_READ(GEN8_DE_PORT_IMR);
@@ -369,9 +378,9 @@ void bdw_update_pipe_irq(struct drm_i915_private *dev_priv,
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
new_val = dev_priv->de_irq_mask[pipe];
@@ -399,11 +408,11 @@ void ibx_display_interrupt_update(struct drm_i915_private *dev_priv,
sdeimr &= ~interrupt_mask;
sdeimr |= (~enabled_irq_mask & interrupt_mask);
- WARN_ON(enabled_irq_mask & ~interrupt_mask);
+ drm_WARN_ON(&dev_priv->drm, enabled_irq_mask & ~interrupt_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- if (WARN_ON(!intel_irqs_enabled(dev_priv)))
+ if (drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv)))
return;
I915_WRITE(SDEIMR, sdeimr);
@@ -425,13 +434,15 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
* On pipe A we don't support the PSR interrupt yet,
* on pipe B and C the same bit MBZ.
*/
- if (WARN_ON_ONCE(status_mask & PIPE_A_PSR_STATUS_VLV))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ status_mask & PIPE_A_PSR_STATUS_VLV))
return 0;
/*
* On pipe B and C we don't support the PSR interrupt yet, on pipe
* A the same bit is for perf counters which we don't use either.
*/
- if (WARN_ON_ONCE(status_mask & PIPE_B_PSR_STATUS_VLV))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ status_mask & PIPE_B_PSR_STATUS_VLV))
return 0;
enable_mask &= ~(PIPE_FIFO_UNDERRUN_STATUS |
@@ -443,10 +454,11 @@ u32 i915_pipestat_enable_mask(struct drm_i915_private *dev_priv,
enable_mask |= SPRITE1_FLIP_DONE_INT_EN_VLV;
out:
- WARN_ONCE(enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
- status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
- pipe_name(pipe), enable_mask, status_mask);
+ drm_WARN_ONCE(&dev_priv->drm,
+ enable_mask & ~PIPESTAT_INT_ENABLE_MASK ||
+ status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: enable_mask=0x%x, status_mask=0x%x\n",
+ pipe_name(pipe), enable_mask, status_mask);
return enable_mask;
}
@@ -457,12 +469,12 @@ void i915_enable_pipestat(struct drm_i915_private *dev_priv,
i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: status_mask=0x%x\n",
- pipe_name(pipe), status_mask);
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == status_mask)
return;
@@ -480,12 +492,12 @@ void i915_disable_pipestat(struct drm_i915_private *dev_priv,
i915_reg_t reg = PIPESTAT(pipe);
u32 enable_mask;
- WARN_ONCE(status_mask & ~PIPESTAT_INT_STATUS_MASK,
- "pipe %c: status_mask=0x%x\n",
- pipe_name(pipe), status_mask);
+ drm_WARN_ONCE(&dev_priv->drm, status_mask & ~PIPESTAT_INT_STATUS_MASK,
+ "pipe %c: status_mask=0x%x\n",
+ pipe_name(pipe), status_mask);
lockdep_assert_held(&dev_priv->irq_lock);
- WARN_ON(!intel_irqs_enabled(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !intel_irqs_enabled(dev_priv));
if ((dev_priv->pipestat_irq_mask[pipe] & status_mask) == 0)
return;
@@ -624,9 +636,9 @@ u32 i915_get_vblank_counter(struct drm_crtc *crtc)
* register.
*/
do {
- high1 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
- low = I915_READ_FW(low_frame);
- high2 = I915_READ_FW(high_frame) & PIPE_FRAME_HIGH_MASK;
+ high1 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
+ low = intel_de_read_fw(dev_priv, low_frame);
+ high2 = intel_de_read_fw(dev_priv, high_frame) & PIPE_FRAME_HIGH_MASK;
} while (high1 != high2);
spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags);
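
The high1/low/high2 loop above re-reads the high half until it is stable, so the split frame counter cannot be torn by a rollover landing between the two reads. A self-contained sketch of the technique, with the hardware registers simulated by volatile globals:

#include <stdint.h>
#include <stdio.h>

static volatile uint32_t reg_high, reg_low;	/* stand-ins for MMIO */

static uint64_t read_counter(void)
{
	uint32_t high1, high2, low;

	do {
		high1 = reg_high;
		low = reg_low;
		high2 = reg_high;	/* retry if high changed under us */
	} while (high1 != high2);

	return ((uint64_t)high1 << 32) | low;
}

int main(void)
{
	reg_high = 1;
	reg_low = 42;
	printf("%llu\n", (unsigned long long)read_counter());
	return 0;
}
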
@@ -683,15 +695,17 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
* pipe frame time stamp. The time stamp value
* is sampled at every start of vertical blank.
*/
- scan_prev_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ scan_prev_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
/*
* The TIMESTAMP_CTR register has the current
* time stamp value.
*/
- scan_curr_time = I915_READ_FW(IVB_TIMESTAMP_CTR);
+ scan_curr_time = intel_de_read_fw(dev_priv, IVB_TIMESTAMP_CTR);
- scan_post_time = I915_READ_FW(PIPE_FRMTMSTMP(crtc->pipe));
+ scan_post_time = intel_de_read_fw(dev_priv,
+ PIPE_FRMTMSTMP(crtc->pipe));
} while (scan_post_time != scan_prev_time);
scanline = div_u64(mul_u32_u32(scan_curr_time - scan_prev_time,
@@ -702,7 +716,10 @@ static u32 __intel_get_crtc_scanline_from_timestamp(struct intel_crtc *crtc)
return scanline;
}
-/* I915_READ_FW, only for fast reads of display block, no need for forcewake etc. */
+/*
+ * intel_de_read_fw(), only for fast reads of display block, no need for
+ * forcewake etc.
+ */
static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_device *dev = crtc->base.dev;
@@ -726,9 +743,9 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
vtotal /= 2;
if (IS_GEN(dev_priv, 2))
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2;
else
- position = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+ position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
/*
* On HSW, the DSL reg (0x70000) appears to return 0 if we
@@ -747,7 +764,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
for (i = 0; i < 100; i++) {
udelay(1);
- temp = I915_READ_FW(PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
+ temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3;
if (temp != position) {
position = temp;
break;
@@ -762,13 +779,15 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc)
return (position + crtc->scanline_offset) % vtotal;
}
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool i915_get_crtc_scanoutpos(struct drm_crtc *_crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = _crtc->dev;
struct drm_i915_private *dev_priv = to_i915(dev);
- struct intel_crtc *crtc = to_intel_crtc(drm_crtc_from_index(dev, index));
+ struct intel_crtc *crtc = to_intel_crtc(_crtc);
enum pipe pipe = crtc->pipe;
int position;
int vbl_start, vbl_end, hsync_start, htotal, vtotal;
@@ -777,9 +796,10 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
IS_G4X(dev_priv) || IS_GEN(dev_priv, 2) ||
mode->private_flags & I915_MODE_FLAG_USE_SCANLINE_COUNTER;
- if (WARN_ON(!mode->crtc_clock)) {
- DRM_DEBUG_DRIVER("trying to get scanoutpos for disabled "
- "pipe %c\n", pipe_name(pipe));
+ if (drm_WARN_ON(&dev_priv->drm, !mode->crtc_clock)) {
+ drm_dbg(&dev_priv->drm,
+ "trying to get scanoutpos for disabled "
+ "pipe %c\n", pipe_name(pipe));
return false;
}
@@ -818,7 +838,7 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
* We can split this into vertical and horizontal
* scanout position.
*/
- position = (I915_READ_FW(PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
+ position = (intel_de_read_fw(dev_priv, PIPEFRAMEPIXEL(pipe)) & PIPE_PIXEL_MASK) >> PIPE_PIXEL_SHIFT;
/* convert to pixel counts */
vbl_start *= htotal;
@@ -879,6 +899,14 @@ bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int index,
return true;
}
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq)
+{
+ return drm_crtc_vblank_helper_get_vblank_timestamp_internal(
+ crtc, max_error, vblank_time, in_vblank_irq,
+ i915_get_crtc_scanoutpos);
+}
+
int intel_get_crtc_scanline(struct intel_crtc *crtc)
{
struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
@@ -918,7 +946,7 @@ static void ivb_parity_work(struct work_struct *work)
mutex_lock(&dev_priv->drm.struct_mutex);
/* If we've screwed up tracking, just let the interrupt fire again */
- if (WARN_ON(!dev_priv->l3_parity.which_slice))
+ if (drm_WARN_ON(&dev_priv->drm, !dev_priv->l3_parity.which_slice))
goto out;
misccpctl = I915_READ(GEN7_MISCCPCTL);
@@ -929,7 +957,8 @@ static void ivb_parity_work(struct work_struct *work)
i915_reg_t reg;
slice--;
- if (WARN_ON_ONCE(slice >= NUM_L3_SLICES(dev_priv)))
+ if (drm_WARN_ON_ONCE(&dev_priv->drm,
+ slice >= NUM_L3_SLICES(dev_priv)))
break;
dev_priv->l3_parity.which_slice &= ~(1<<slice);
@@ -966,7 +995,7 @@ static void ivb_parity_work(struct work_struct *work)
I915_WRITE(GEN7_MISCCPCTL, misccpctl);
out:
- WARN_ON(dev_priv->l3_parity.which_slice);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->l3_parity.which_slice);
spin_lock_irq(&gt->irq_lock);
gen5_gt_enable_irq(gt, GT_PARITY_ERROR(dev_priv));
spin_unlock_irq(&gt->irq_lock);
@@ -1165,8 +1194,9 @@ static void intel_get_hpd_pins(struct drm_i915_private *dev_priv,
*long_mask |= BIT(pin);
}
- DRM_DEBUG_DRIVER("hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
- hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
+ drm_dbg(&dev_priv->drm,
+ "hotplug event received, stat 0x%08x, dig 0x%08x, pins 0x%08x, long 0x%08x\n",
+ hotplug_trigger, dig_hotplug_reg, *pin_mask, *long_mask);
}
@@ -1187,8 +1217,8 @@ static void display_pipe_crc_irq_handler(struct drm_i915_private *dev_priv,
u32 crc2, u32 crc3,
u32 crc4)
{
- struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = intel_get_crtc_for_pipe(dev_priv, pipe);
+ struct intel_pipe_crc *pipe_crc = &crtc->pipe_crc;
u32 crcs[5] = { crc0, crc1, crc2, crc3, crc4 };
trace_intel_pipe_crc(crtc, crcs);
@@ -1351,7 +1381,7 @@ static void i8xx_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1369,7 +1399,7 @@ static void i915_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1393,7 +1423,7 @@ static void i965_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_LEGACY_BLC_EVENT_STATUS)
blc_event = true;
@@ -1419,7 +1449,7 @@ static void valleyview_pipestat_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (pipe_stats[pipe] & PIPE_START_VBLANK_INTERRUPT_STATUS)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (pipe_stats[pipe] & PIPE_CRC_DONE_INTERRUPT_STATUS)
i9xx_pipe_crc_irq_handler(dev_priv, pipe);
@@ -1463,9 +1493,9 @@ static u32 i9xx_hpd_irq_ack(struct drm_i915_private *dev_priv)
I915_WRITE(PORT_HOTPLUG_STAT, hotplug_status);
}
- WARN_ONCE(1,
- "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
- I915_READ(PORT_HOTPLUG_STAT));
+ drm_WARN_ONCE(&dev_priv->drm, 1,
+ "PORT_HOTPLUG_STAT did not clear (0x%08x)\n",
+ I915_READ(PORT_HOTPLUG_STAT));
return hotplug_status;
}
@@ -1603,7 +1633,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
u32 master_ctl, iir;
u32 pipe_stats[I915_MAX_PIPES] = {};
u32 hotplug_status = 0;
- u32 gt_iir[4];
u32 ier = 0;
master_ctl = I915_READ(GEN8_MASTER_IRQ) & ~GEN8_MASTER_IRQ_CONTROL;
@@ -1631,7 +1660,7 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
ier = I915_READ(VLV_IER);
I915_WRITE(VLV_IER, 0);
- gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
if (iir & I915_DISPLAY_PORT_INTERRUPT)
hotplug_status = i9xx_hpd_irq_ack(dev_priv);
@@ -1655,8 +1684,6 @@ static irqreturn_t cherryview_irq_handler(int irq, void *arg)
I915_WRITE(VLV_IER, ier);
I915_WRITE(GEN8_MASTER_IRQ, GEN8_MASTER_IRQ_CONTROL);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
if (hotplug_status)
i9xx_hpd_irq_handler(dev_priv, hotplug_status);
@@ -1710,8 +1737,8 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_POWER_MASK) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK) >>
SDE_AUDIO_POWER_SHIFT);
- DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
- port_name(port));
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %d\n",
+ port_name(port));
}
if (pch_iir & SDE_AUX_MASK)
@@ -1721,25 +1748,27 @@ static void ibx_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_HDCP_MASK)
- DRM_DEBUG_DRIVER("PCH HDCP audio interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH HDCP audio interrupt\n");
if (pch_iir & SDE_AUDIO_TRANS_MASK)
- DRM_DEBUG_DRIVER("PCH transcoder audio interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH transcoder audio interrupt\n");
if (pch_iir & SDE_POISON)
- DRM_ERROR("PCH poison interrupt\n");
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
- if (pch_iir & SDE_FDI_MASK)
+ if (pch_iir & SDE_FDI_MASK) {
for_each_pipe(dev_priv, pipe)
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
- pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & (SDE_TRANSB_CRC_DONE | SDE_TRANSA_CRC_DONE))
- DRM_DEBUG_DRIVER("PCH transcoder CRC done interrupt\n");
+ drm_dbg(&dev_priv->drm, "PCH transcoder CRC done interrupt\n");
if (pch_iir & (SDE_TRANSB_CRC_ERR | SDE_TRANSA_CRC_ERR))
- DRM_DEBUG_DRIVER("PCH transcoder CRC error interrupt\n");
+ drm_dbg(&dev_priv->drm,
+ "PCH transcoder CRC error interrupt\n");
if (pch_iir & SDE_TRANSA_FIFO_UNDER)
intel_pch_fifo_underrun_irq_handler(dev_priv, PIPE_A);
@@ -1754,7 +1783,7 @@ static void ivb_err_int_handler(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (err_int & ERR_INT_POISON)
- DRM_ERROR("Poison interrupt\n");
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (err_int & ERR_INT_FIFO_UNDERRUN(pipe))
@@ -1777,7 +1806,7 @@ static void cpt_serr_int_handler(struct drm_i915_private *dev_priv)
enum pipe pipe;
if (serr_int & SERR_INT_POISON)
- DRM_ERROR("PCH poison interrupt\n");
+ drm_err(&dev_priv->drm, "PCH poison interrupt\n");
for_each_pipe(dev_priv, pipe)
if (serr_int & SERR_INT_TRANS_FIFO_UNDERRUN(pipe))
@@ -1796,8 +1825,8 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
if (pch_iir & SDE_AUDIO_POWER_MASK_CPT) {
int port = ffs((pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
SDE_AUDIO_POWER_SHIFT_CPT);
- DRM_DEBUG_DRIVER("PCH audio power change on port %c\n",
- port_name(port));
+ drm_dbg(&dev_priv->drm, "PCH audio power change on port %c\n",
+ port_name(port));
}
if (pch_iir & SDE_AUX_MASK_CPT)
@@ -1807,16 +1836,17 @@ static void cpt_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
gmbus_irq_handler(dev_priv);
if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
- DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
+ drm_dbg(&dev_priv->drm, "Audio CP request interrupt\n");
if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
- DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
+ drm_dbg(&dev_priv->drm, "Audio CP change interrupt\n");
- if (pch_iir & SDE_FDI_MASK_CPT)
+ if (pch_iir & SDE_FDI_MASK_CPT) {
for_each_pipe(dev_priv, pipe)
- DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
- pipe_name(pipe),
- I915_READ(FDI_RX_IIR(pipe)));
+ drm_dbg(&dev_priv->drm, " pipe %c FDI IIR: 0x%08x\n",
+ pipe_name(pipe),
+ I915_READ(FDI_RX_IIR(pipe)));
+ }
if (pch_iir & SDE_ERROR_CPT)
cpt_serr_int_handler(dev_priv);
@@ -1844,8 +1874,9 @@ static void icp_irq_handler(struct drm_i915_private *dev_priv, u32 pch_iir)
tc_port_hotplug_long_detect = icp_tc_port_hotplug_long_detect;
pins = hpd_icp;
} else {
- WARN(!HAS_PCH_ICP(dev_priv),
- "Unrecognized PCH type 0x%x\n", INTEL_PCH_TYPE(dev_priv));
+ drm_WARN(&dev_priv->drm, !HAS_PCH_ICP(dev_priv),
+ "Unrecognized PCH type 0x%x\n",
+ INTEL_PCH_TYPE(dev_priv));
ddi_hotplug_trigger = pch_iir & SDE_DDI_MASK_ICP;
tc_hotplug_trigger = pch_iir & SDE_TC_MASK_ICP;
@@ -1952,11 +1983,11 @@ static void ilk_display_irq_handler(struct drm_i915_private *dev_priv,
intel_opregion_asle_intr(dev_priv);
if (de_iir & DE_POISON)
- DRM_ERROR("Poison interrupt\n");
+ drm_err(&dev_priv->drm, "Poison interrupt\n");
for_each_pipe(dev_priv, pipe) {
if (de_iir & DE_PIPE_VBLANK(pipe))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (de_iir & DE_PIPE_FIFO_UNDERRUN(pipe))
intel_cpu_fifo_underrun_irq_handler(dev_priv, pipe);
@@ -2009,7 +2040,7 @@ static void ivb_display_irq_handler(struct drm_i915_private *dev_priv,
for_each_pipe(dev_priv, pipe) {
if (de_iir & (DE_PIPE_VBLANK_IVB(pipe)))
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
}
/* check event from PCH */
@@ -2153,7 +2184,8 @@ static void gen11_hpd_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
if (pin_mask)
intel_hpd_irq_handler(dev_priv, pin_mask, long_mask);
else
- DRM_ERROR("Unexpected DE HPD interrupt 0x%08x\n", iir);
+ drm_err(&dev_priv->drm,
+ "Unexpected DE HPD interrupt 0x%08x\n", iir);
}
static u32 gen8_de_port_aux_mask(struct drm_i915_private *dev_priv)
@@ -2226,7 +2258,7 @@ gen8_de_misc_irq_handler(struct drm_i915_private *dev_priv, u32 iir)
}
if (!found)
- DRM_ERROR("Unexpected DE Misc interrupt\n");
+ drm_err(&dev_priv->drm, "Unexpected DE Misc interrupt\n");
}
static irqreturn_t
@@ -2243,7 +2275,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen8_de_misc_irq_handler(dev_priv, iir);
} else {
- DRM_ERROR("The master control interrupt lied (DE MISC)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE MISC)!\n");
}
}
@@ -2254,7 +2287,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
ret = IRQ_HANDLED;
gen11_hpd_irq_handler(dev_priv, iir);
} else {
- DRM_ERROR("The master control interrupt lied, (DE HPD)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied, (DE HPD)!\n");
}
}
@@ -2294,10 +2328,12 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
}
if (!found)
- DRM_ERROR("Unexpected DE Port interrupt\n");
+ drm_err(&dev_priv->drm,
+ "Unexpected DE Port interrupt\n");
}
else
- DRM_ERROR("The master control interrupt lied (DE PORT)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE PORT)!\n");
}
for_each_pipe(dev_priv, pipe) {
@@ -2308,7 +2344,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
iir = I915_READ(GEN8_DE_PIPE_IIR(pipe));
if (!iir) {
- DRM_ERROR("The master control interrupt lied (DE PIPE)!\n");
+ drm_err(&dev_priv->drm,
+ "The master control interrupt lied (DE PIPE)!\n");
continue;
}
@@ -2316,7 +2353,7 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
I915_WRITE(GEN8_DE_PIPE_IIR(pipe), iir);
if (iir & GEN8_PIPE_VBLANK)
- drm_handle_vblank(&dev_priv->drm, pipe);
+ intel_handle_vblank(dev_priv, pipe);
if (iir & GEN8_PIPE_CDCLK_CRC_DONE)
hsw_pipe_crc_irq_handler(dev_priv, pipe);
@@ -2326,9 +2363,10 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
fault_errors = iir & gen8_de_pipe_fault_mask(dev_priv);
if (fault_errors)
- DRM_ERROR("Fault errors on pipe %c: 0x%08x\n",
- pipe_name(pipe),
- fault_errors);
+ drm_err(&dev_priv->drm,
+ "Fault errors on pipe %c: 0x%08x\n",
+ pipe_name(pipe),
+ fault_errors);
}
if (HAS_PCH_SPLIT(dev_priv) && !HAS_PCH_NOP(dev_priv) &&
@@ -2354,7 +2392,8 @@ gen8_de_irq_handler(struct drm_i915_private *dev_priv, u32 master_ctl)
* Like on previous PCH there seems to be something
* fishy going on with forwarding PCH interrupts.
*/
- DRM_DEBUG_DRIVER("The master control interrupt lied (SDE)!\n");
+ drm_dbg(&dev_priv->drm,
+ "The master control interrupt lied (SDE)!\n");
}
}
@@ -2384,7 +2423,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
struct drm_i915_private *dev_priv = arg;
void __iomem * const regs = dev_priv->uncore.regs;
u32 master_ctl;
- u32 gt_iir[4];
if (!intel_irqs_enabled(dev_priv))
return IRQ_NONE;
@@ -2395,8 +2433,8 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
return IRQ_NONE;
}
- /* Find, clear, then process each source of interrupt */
- gen8_gt_irq_ack(&dev_priv->gt, master_ctl, gt_iir);
+ /* Find, queue (onto bottom-halves), then clear each source */
+ gen8_gt_irq_handler(&dev_priv->gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
if (master_ctl & ~GEN8_GT_IRQS) {
@@ -2407,8 +2445,6 @@ static irqreturn_t gen8_irq_handler(int irq, void *arg)
gen8_master_intr_enable(regs);
- gen8_gt_irq_handler(&dev_priv->gt, master_ctl, gt_iir);
-
return IRQ_HANDLED;
}
@@ -2491,7 +2527,7 @@ __gen11_irq_handler(struct drm_i915_private * const i915,
return IRQ_NONE;
}
- /* Find, clear, then process each source of interrupt. */
+ /* Find, queue (onto bottom-halves), then clear each source */
gen11_gt_irq_handler(gt, master_ctl);
/* IRQs are synced during runtime_suspend, we don't require a wakeref */
@@ -2686,7 +2722,7 @@ static void ibx_irq_pre_postinstall(struct drm_i915_private *dev_priv)
if (HAS_PCH_NOP(dev_priv))
return;
- WARN_ON(I915_READ(SDEIER) != 0);
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
}
@@ -2733,7 +2769,7 @@ static void vlv_display_irq_postinstall(struct drm_i915_private *dev_priv)
enable_mask |= I915_DISPLAY_PIPE_C_EVENT_INTERRUPT |
I915_LPE_PIPE_C_INTERRUPT;
- WARN_ON(dev_priv->irq_mask != ~0u);
+ drm_WARN_ON(&dev_priv->drm, dev_priv->irq_mask != ~0u);
dev_priv->irq_mask = ~enable_mask;
@@ -3163,8 +3199,9 @@ static void __bxt_hpd_detection_setup(struct drm_i915_private *dev_priv,
PORTB_HOTPLUG_ENABLE |
PORTC_HOTPLUG_ENABLE;
- DRM_DEBUG_KMS("Invert bit setting: hp_ctl:%x hp_port:%x\n",
- hotplug, enabled_irqs);
+ drm_dbg_kms(&dev_priv->drm,
+ "Invert bit setting: hp_ctl:%x hp_port:%x\n",
+ hotplug, enabled_irqs);
hotplug &= ~BXT_DDI_HPD_INVERT_MASK;
/*
@@ -3418,7 +3455,7 @@ static void icp_irq_postinstall(struct drm_i915_private *dev_priv)
{
u32 mask = SDE_GMBUS_ICP;
- WARN_ON(I915_READ(SDEIER) != 0);
+ drm_WARN_ON(&dev_priv->drm, I915_READ(SDEIER) != 0);
I915_WRITE(SDEIER, 0xffffffff);
POSTING_READ(SDEIER);
@@ -3547,7 +3584,8 @@ static void i8xx_error_irq_handler(struct drm_i915_private *dev_priv,
DRM_DEBUG("Master Error: EIR 0x%04x\n", eir);
if (eir_stuck)
- DRM_DEBUG_DRIVER("EIR stuck: 0x%04x, masked\n", eir_stuck);
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%04x, masked\n",
+ eir_stuck);
}
static void i9xx_error_irq_ack(struct drm_i915_private *dev_priv,
@@ -3584,7 +3622,8 @@ static void i9xx_error_irq_handler(struct drm_i915_private *dev_priv,
DRM_DEBUG("Master Error, EIR 0x%08x\n", eir);
if (eir_stuck)
- DRM_DEBUG_DRIVER("EIR stuck: 0x%08x, masked\n", eir_stuck);
+ drm_dbg(&dev_priv->drm, "EIR stuck: 0x%08x, masked\n",
+ eir_stuck);
}
static irqreturn_t i8xx_irq_handler(int irq, void *arg)
diff --git a/drivers/gpu/drm/i915/i915_irq.h b/drivers/gpu/drm/i915/i915_irq.h
index 812c47a9c2d6..25f25cd95818 100644
--- a/drivers/gpu/drm/i915/i915_irq.h
+++ b/drivers/gpu/drm/i915/i915_irq.h
@@ -101,10 +101,8 @@ void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv,
void gen8_irq_power_well_pre_disable(struct drm_i915_private *dev_priv,
u8 pipe_mask);
-bool i915_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
+bool intel_crtc_get_vblank_timestamp(struct drm_crtc *crtc, int *max_error,
+ ktime_t *vblank_time, bool in_vblank_irq);
u32 i915_get_vblank_counter(struct drm_crtc *crtc);
u32 g4x_get_vblank_counter(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 1dd1f3652795..add00ec1f787 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -35,7 +35,7 @@
MODULE_PARM_DESC(name, desc)
struct i915_params i915_modparams __read_mostly = {
-#define MEMBER(T, member, value) .member = (value),
+#define MEMBER(T, member, value, ...) .member = (value),
I915_PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};
@@ -92,9 +92,6 @@ i915_param_named_unsafe(force_probe, charp, 0400,
"Force probe the driver for specified devices. "
"See CONFIG_DRM_I915_FORCE_PROBE for details.");
-i915_param_named_unsafe(alpha_support, bool, 0400,
- "Deprecated. See i915.force_probe.");
-
i915_param_named_unsafe(disable_power_well, int, 0400,
"Disable display power wells when possible "
"(-1=auto [default], 0=power wells always on, 1=power wells disabled when possible)");
@@ -106,10 +103,6 @@ i915_param_named(fastboot, int, 0600,
"(0=disabled, 1=enabled) "
"Default: -1 (use per-chip default)");
-i915_param_named_unsafe(prefault_disable, bool, 0600,
- "Disable page prefaulting for pread/pwrite/reloc (default:false). "
- "For developers only.");
-
i915_param_named_unsafe(load_detect_test, bool, 0600,
"Force-enable the VGA load detect code for testing (default:false). "
"For developers only.");
@@ -172,7 +165,7 @@ i915_param_named_unsafe(inject_probe_failure, uint, 0400,
i915_param_named(enable_dpcd_backlight, int, 0600,
"Enable support for DPCD backlight control"
- "(-1=use per-VBT LFP backlight type setting, 0=disabled [default], 1=enabled)");
+ "(-1=use per-VBT LFP backlight type setting [default], 0=disabled, 1=enabled)");
#if IS_ENABLED(CONFIG_DRM_I915_GVT)
i915_param_named(enable_gvt, bool, 0400,
diff --git a/drivers/gpu/drm/i915/i915_params.h b/drivers/gpu/drm/i915/i915_params.h
index 31b88f297fbc..45323732f099 100644
--- a/drivers/gpu/drm/i915/i915_params.h
+++ b/drivers/gpu/drm/i915/i915_params.h
@@ -36,49 +36,49 @@ struct drm_printer;
/*
* Invoke param, a function-like macro, for each i915 param, with arguments:
*
- * param(type, name, value)
+ * param(type, name, value, mode)
*
- * type: parameter type, one of {bool, int, unsigned int, char *}
+ * type: parameter type, one of {bool, int, unsigned int, unsigned long, char *}
* name: name of the parameter
* value: initial/default value of the parameter
+ * mode: debugfs file permissions, one of {0400, 0600, 0}; use 0 to not create
+ * a debugfs file
*/
#define I915_PARAMS_FOR_EACH(param) \
- param(char *, vbt_firmware, NULL) \
- param(int, modeset, -1) \
- param(int, lvds_channel_mode, 0) \
- param(int, panel_use_ssc, -1) \
- param(int, vbt_sdvo_panel_type, -1) \
- param(int, enable_dc, -1) \
- param(int, enable_fbc, -1) \
- param(int, enable_psr, -1) \
- param(int, disable_power_well, -1) \
- param(int, enable_ips, 1) \
- param(int, invert_brightness, 0) \
- param(int, enable_guc, 0) \
- param(int, guc_log_level, -1) \
- param(char *, guc_firmware_path, NULL) \
- param(char *, huc_firmware_path, NULL) \
- param(char *, dmc_firmware_path, NULL) \
- param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO)) \
- param(int, edp_vswing, 0) \
- param(int, reset, 3) \
- param(unsigned int, inject_probe_failure, 0) \
- param(int, fastboot, -1) \
- param(int, enable_dpcd_backlight, 0) \
- param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE) \
- param(unsigned long, fake_lmem_start, 0) \
+ param(char *, vbt_firmware, NULL, 0400) \
+ param(int, modeset, -1, 0400) \
+ param(int, lvds_channel_mode, 0, 0400) \
+ param(int, panel_use_ssc, -1, 0600) \
+ param(int, vbt_sdvo_panel_type, -1, 0400) \
+ param(int, enable_dc, -1, 0400) \
+ param(int, enable_fbc, -1, 0600) \
+ param(int, enable_psr, -1, 0600) \
+ param(int, disable_power_well, -1, 0400) \
+ param(int, enable_ips, 1, 0600) \
+ param(int, invert_brightness, 0, 0600) \
+ param(int, enable_guc, 0, 0400) \
+ param(int, guc_log_level, -1, 0400) \
+ param(char *, guc_firmware_path, NULL, 0400) \
+ param(char *, huc_firmware_path, NULL, 0400) \
+ param(char *, dmc_firmware_path, NULL, 0400) \
+ param(int, mmio_debug, -IS_ENABLED(CONFIG_DRM_I915_DEBUG_MMIO), 0600) \
+ param(int, edp_vswing, 0, 0400) \
+ param(unsigned int, reset, 3, 0600) \
+ param(unsigned int, inject_probe_failure, 0, 0600) \
+ param(int, fastboot, -1, 0600) \
+ param(int, enable_dpcd_backlight, -1, 0600) \
+ param(char *, force_probe, CONFIG_DRM_I915_FORCE_PROBE, 0400) \
+ param(unsigned long, fake_lmem_start, 0, 0400) \
/* leave bools at the end to not create holes */ \
- param(bool, alpha_support, IS_ENABLED(CONFIG_DRM_I915_ALPHA_SUPPORT)) \
- param(bool, enable_hangcheck, true) \
- param(bool, prefault_disable, false) \
- param(bool, load_detect_test, false) \
- param(bool, force_reset_modeset_test, false) \
- param(bool, error_capture, true) \
- param(bool, disable_display, false) \
- param(bool, verbose_state_checks, true) \
- param(bool, nuclear_pageflip, false) \
- param(bool, enable_dp_mst, true) \
- param(bool, enable_gvt, false)
+ param(bool, enable_hangcheck, true, 0600) \
+ param(bool, load_detect_test, false, 0600) \
+ param(bool, force_reset_modeset_test, false, 0600) \
+ param(bool, error_capture, true, 0600) \
+ param(bool, disable_display, false, 0400) \
+ param(bool, verbose_state_checks, true, 0) \
+ param(bool, nuclear_pageflip, false, 0400) \
+ param(bool, enable_dp_mst, true, 0600) \
+ param(bool, enable_gvt, false, 0400)
#define MEMBER(T, member, ...) T member;
struct i915_params {
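
The parameter table is an X-macro: I915_PARAMS_FOR_EACH() is re-expanded once per consumer, and adding the trailing mode argument only works because consumers such as MEMBER() swallow extras via `...`. A compilable sketch of the pattern with a hypothetical two-entry list:

#include <stdbool.h>
#include <stdio.h>

/* each row is param(type, name, default, mode) */
#define PARAMS_FOR_EACH(param) \
	param(int, modeset, -1, 0400) \
	param(bool, enable_hangcheck, true, 0600)

struct params {
#define MEMBER(T, member, ...) T member;	/* trailing args ignored */
	PARAMS_FOR_EACH(MEMBER)
#undef MEMBER
};

static const struct params defaults = {
#define INIT(T, member, value, ...) .member = (value),	/* mode ignored */
	PARAMS_FOR_EACH(INIT)
#undef INIT
};

int main(void)
{
	printf("modeset=%d enable_hangcheck=%d\n",
	       defaults.modeset, defaults.enable_hangcheck);
	return 0;
}
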
diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c
index 83f01401b8b5..2c80a0194c80 100644
--- a/drivers/gpu/drm/i915/i915_pci.c
+++ b/drivers/gpu/drm/i915/i915_pci.c
@@ -26,6 +26,7 @@
#include <linux/vga_switcheroo.h>
#include <drm/drm_drv.h>
+#include <drm/i915_pciids.h>
#include "display/intel_fbdev.h"
@@ -437,7 +438,7 @@ static const struct intel_device_info snb_m_gt2_info = {
.has_rc6 = 1, \
.has_rc6p = 1, \
.has_rps = true, \
- .ppgtt_type = INTEL_PPGTT_FULL, \
+ .ppgtt_type = INTEL_PPGTT_ALIASING, \
.ppgtt_size = 31, \
IVB_PIPE_OFFSETS, \
IVB_CURSOR_OFFSETS, \
@@ -494,7 +495,7 @@ static const struct intel_device_info vlv_info = {
.has_rps = true,
.display.has_gmch = 1,
.display.has_hotplug = 1,
- .ppgtt_type = INTEL_PPGTT_FULL,
+ .ppgtt_type = INTEL_PPGTT_ALIASING,
.ppgtt_size = 31,
.has_snoop = true,
.has_coherent_ggtt = false,
@@ -615,7 +616,8 @@ static const struct intel_device_info chv_info = {
.has_gt_uc = 1, \
.display.has_hdcp = 1, \
.display.has_ipc = 1, \
- .ddb_size = 896
+ .ddb_size = 896, \
+ .num_supported_dbuf_slices = 1
#define SKL_PLATFORM \
GEN9_FEATURES, \
@@ -650,6 +652,7 @@ static const struct intel_device_info skl_gt4_info = {
#define GEN9_LP_FEATURES \
GEN(9), \
.is_lp = 1, \
+ .num_supported_dbuf_slices = 1, \
.display.has_hotplug = 1, \
.engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0) | BIT(VECS0), \
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \
@@ -774,6 +777,7 @@ static const struct intel_device_info cnl_info = {
}, \
GEN(11), \
.ddb_size = 2048, \
+ .num_supported_dbuf_slices = 2, \
.has_logical_ring_elsq = 1, \
.color = { .degamma_lut_size = 33, .gamma_lut_size = 262145 }
@@ -819,11 +823,9 @@ static const struct intel_device_info tgl_info = {
GEN12_FEATURES,
PLATFORM(INTEL_TIGERLAKE),
.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
- .require_force_probe = 1,
.display.has_modular_fia = 1,
.engine_mask =
BIT(RCS0) | BIT(BCS0) | BIT(VECS0) | BIT(VCS0) | BIT(VCS2),
- .has_rps = false, /* XXX disabled for debugging */
};
#define GEN12_DGFX_FEATURES \
@@ -928,13 +930,6 @@ static bool force_probe(u16 device_id, const char *devices)
char *s, *p, *tok;
bool ret;
- /* FIXME: transitional */
- if (i915_modparams.alpha_support) {
- DRM_INFO("i915.alpha_support is deprecated, use i915.force_probe=%04x instead\n",
- device_id);
- return true;
- }
-
if (!devices || !*devices)
return false;
@@ -968,7 +963,8 @@ static int i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
if (intel_info->require_force_probe &&
!force_probe(pdev->device, i915_modparams.force_probe)) {
- DRM_INFO("Your graphics device %04x is not properly supported by the driver in this\n"
+ dev_info(&pdev->dev,
+ "Your graphics device %04x is not properly supported by the driver in this\n"
"kernel version. To force driver probe anyway, use i915.force_probe=%04x\n"
"module parameter or CONFIG_DRM_I915_FORCE_PROBE=%04x configuration option,\n"
"or (recommended) check for kernel updates.\n",
diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c
index 0f556d80ba36..1b074bb4a7fe 100644
--- a/drivers/gpu/drm/i915/i915_perf.c
+++ b/drivers/gpu/drm/i915/i915_perf.c
@@ -555,8 +555,9 @@ static bool oa_buffer_check_unlocked(struct i915_perf_stream *stream)
aging_tail = hw_tail;
stream->oa_buffer.aging_timestamp = now;
} else {
- DRM_ERROR("Ignoring spurious out of range OA buffer tail pointer = %x\n",
- hw_tail);
+ drm_err(&stream->perf->i915->drm,
+ "Ignoring spurious out of range OA buffer tail pointer = %x\n",
+ hw_tail);
}
}
@@ -686,7 +687,7 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -718,10 +719,11 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
@@ -742,8 +744,10 @@ static int gen8_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -896,7 +900,7 @@ static int gen8_oa_read(struct i915_perf_stream *stream,
i915_reg_t oastatus_reg;
int ret;
- if (WARN_ON(!stream->oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
oastatus_reg = IS_GEN(stream->perf->i915, 12) ?
@@ -986,7 +990,7 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
u32 taken;
int ret = 0;
- if (WARN_ON(!stream->enabled))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->enabled))
return -EIO;
spin_lock_irqsave(&stream->oa_buffer.ptr_lock, flags);
@@ -1015,10 +1019,11 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* only be incremented by multiples of the report size (notably also
* all a power of two).
*/
- if (WARN_ONCE(head > OA_BUFFER_SIZE || head % report_size ||
- tail > OA_BUFFER_SIZE || tail % report_size,
- "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
- head, tail))
+ if (drm_WARN_ONCE(&uncore->i915->drm,
+ head > OA_BUFFER_SIZE || head % report_size ||
+ tail > OA_BUFFER_SIZE || tail % report_size,
+ "Inconsistent OA buffer pointers: head = %u, tail = %u\n",
+ head, tail))
return -EIO;
@@ -1036,8 +1041,10 @@ static int gen7_append_oa_reports(struct i915_perf_stream *stream,
* here would imply a driver bug that would result
* in an overrun.
*/
- if (WARN_ON((OA_BUFFER_SIZE - head) < report_size)) {
- DRM_ERROR("Spurious OA head ptr: non-integral report offset\n");
+ if (drm_WARN_ON(&uncore->i915->drm,
+ (OA_BUFFER_SIZE - head) < report_size)) {
+ drm_err(&uncore->i915->drm,
+ "Spurious OA head ptr: non-integral report offset\n");
break;
}
@@ -1110,7 +1117,7 @@ static int gen7_oa_read(struct i915_perf_stream *stream,
u32 oastatus1;
int ret;
- if (WARN_ON(!stream->oa_buffer.vaddr))
+ if (drm_WARN_ON(&uncore->i915->drm, !stream->oa_buffer.vaddr))
return -EIO;
oastatus1 = intel_uncore_read(uncore, GEN7_OASTATUS1);
@@ -1319,7 +1326,13 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
case 12: {
stream->specific_ctx_id_mask =
((1U << GEN11_SW_CTX_ID_WIDTH) - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
- stream->specific_ctx_id = stream->specific_ctx_id_mask;
+ /*
+ * Pick an unused context id:
+ * 0 - (NUM_CONTEXT_TAG - 1) are used by other contexts
+ * GEN12_MAX_CONTEXT_HW_ID (0x7ff) is used by the idle context
+ */
+ stream->specific_ctx_id = (GEN12_MAX_CONTEXT_HW_ID - 1) << (GEN11_SW_CTX_ID_SHIFT - 32);
+ BUILD_BUG_ON((GEN12_MAX_CONTEXT_HW_ID - 1) < NUM_CONTEXT_TAG);
break;
}
@@ -1327,11 +1340,12 @@ static int oa_get_render_ctx_id(struct i915_perf_stream *stream)
MISSING_CASE(INTEL_GEN(ce->engine->i915));
}
- ce->tag = stream->specific_ctx_id_mask;
+ ce->tag = stream->specific_ctx_id;
- DRM_DEBUG_DRIVER("filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
- stream->specific_ctx_id,
- stream->specific_ctx_id_mask);
+ drm_dbg(&stream->perf->i915->drm,
+ "filtering on ctx_id=0x%x ctx_id_mask=0x%x\n",
+ stream->specific_ctx_id,
+ stream->specific_ctx_id_mask);
return 0;
}
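
The BUILD_BUG_ON() above turns the id-selection comment into a compile-time invariant. As a minimal, hedged sketch of the same guard (the constants here are illustrative stand-ins, not the driver's values):

    #include <linux/build_bug.h>

    #define MAX_HW_ID  0x7ff  /* illustrative stand-in for GEN12_MAX_CONTEXT_HW_ID */
    #define NUM_TAGS   4      /* illustrative stand-in for NUM_CONTEXT_TAG */

    /* The build fails if the reserved id could fall inside the tag range. */
    static inline void assert_reserved_id_is_free(void)
    {
            BUILD_BUG_ON((MAX_HW_ID - 1) < NUM_TAGS);
    }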
@@ -1391,8 +1405,10 @@ static void i915_oa_stream_destroy(struct i915_perf_stream *stream)
/*
 * Unset exclusive_stream first; it will be checked while disabling
* the metric set on gen8+.
+ *
+ * See i915_oa_init_reg_state() and lrc_configure_all_contexts()
*/
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -1575,11 +1591,12 @@ static void gen12_init_oa_buffer(struct i915_perf_stream *stream)
static int alloc_oa_buffer(struct i915_perf_stream *stream)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct drm_i915_gem_object *bo;
struct i915_vma *vma;
int ret;
- if (WARN_ON(stream->oa_buffer.vma))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.vma))
return -ENODEV;
BUILD_BUG_ON_NOT_POWER_OF_2(OA_BUFFER_SIZE);
@@ -1587,7 +1604,7 @@ static int alloc_oa_buffer(struct i915_perf_stream *stream)
bo = i915_gem_object_create_shmem(stream->perf->i915, OA_BUFFER_SIZE);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate OA buffer\n");
+ drm_err(&i915->drm, "Failed to allocate OA buffer\n");
return PTR_ERR(bo);
}
@@ -1669,7 +1686,8 @@ static int alloc_noa_wait(struct i915_perf_stream *stream)
bo = i915_gem_object_create_internal(i915, 4096);
if (IS_ERR(bo)) {
- DRM_ERROR("Failed to allocate NOA wait batchbuffer\n");
+ drm_err(&i915->drm,
+ "Failed to allocate NOA wait batchbuffer\n");
return PTR_ERR(bo);
}
@@ -1954,9 +1972,10 @@ out:
return i915_vma_get(oa_bo->vma);
}
-static int emit_oa_config(struct i915_perf_stream *stream,
- struct i915_oa_config *oa_config,
- struct intel_context *ce)
+static struct i915_request *
+emit_oa_config(struct i915_perf_stream *stream,
+ struct i915_oa_config *oa_config,
+ struct intel_context *ce)
{
struct i915_request *rq;
struct i915_vma *vma;
@@ -1964,7 +1983,7 @@ static int emit_oa_config(struct i915_perf_stream *stream,
vma = get_oa_vma(stream, oa_config);
if (IS_ERR(vma))
- return PTR_ERR(vma);
+ return ERR_CAST(vma);
err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
if (err)
@@ -1989,13 +2008,17 @@ static int emit_oa_config(struct i915_perf_stream *stream,
err = rq->engine->emit_bb_start(rq,
vma->node.start, 0,
I915_DISPATCH_SECURE);
+ if (err)
+ goto err_add_request;
+
+ i915_request_get(rq);
err_add_request:
i915_request_add(rq);
err_vma_unpin:
i915_vma_unpin(vma);
err_vma_put:
i915_vma_put(vma);
- return err;
+ return err ? ERR_PTR(err) : rq;
}
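
emit_oa_config() now returns either the request or an encoded errno, the standard ERR_PTR()/ERR_CAST()/IS_ERR() idiom. A self-contained sketch of the pattern (make_thing()/use_thing()/alloc_thing() are hypothetical names):

    #include <linux/err.h>

    struct thing;
    extern struct thing *alloc_thing(void);  /* hypothetical; may fail with NULL */

    static struct thing *make_thing(void)
    {
            struct thing *t = alloc_thing();

            if (!t)
                    return ERR_PTR(-ENOMEM); /* encode the errno in the pointer */
            return t;
    }

    static int use_thing(void)
    {
            struct thing *t = make_thing();

            if (IS_ERR(t))
                    return PTR_ERR(t);       /* decode it back into an int */
            /* ... use t ... */
            return 0;
    }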
static struct intel_context *oa_context(struct i915_perf_stream *stream)
@@ -2003,7 +2026,8 @@ static struct intel_context *oa_context(struct i915_perf_stream *stream)
return stream->pinned_ctx ?: stream->engine->kernel_context;
}
-static int hsw_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+hsw_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
@@ -2178,7 +2202,9 @@ static int gen8_modify_self(struct intel_context *ce,
struct i915_request *rq;
int err;
+ intel_engine_pm_get(ce->engine);
rq = i915_request_create(ce);
+ intel_engine_pm_put(ce->engine);
if (IS_ERR(rq))
return PTR_ERR(rq);
@@ -2406,7 +2432,8 @@ static int lrc_configure_all_contexts(struct i915_perf_stream *stream,
return oa_configure_all_contexts(stream, regs, ARRAY_SIZE(regs));
}
-static int gen8_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen8_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2448,7 +2475,7 @@ static int gen8_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = lrc_configure_all_contexts(stream, oa_config);
if (ret)
- return ret;
+ return ERR_PTR(ret);
return emit_oa_config(stream, oa_config, oa_context(stream));
}
@@ -2460,7 +2487,8 @@ static u32 oag_report_ctx_switches(const struct i915_perf_stream *stream)
0 : GEN12_OAG_OA_DEBUG_DISABLE_CTX_SWITCH_REPORTS);
}
-static int gen12_enable_metric_set(struct i915_perf_stream *stream)
+static struct i915_request *
+gen12_enable_metric_set(struct i915_perf_stream *stream)
{
struct intel_uncore *uncore = stream->uncore;
struct i915_oa_config *oa_config = stream->oa_config;
@@ -2491,7 +2519,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
*/
ret = gen12_configure_all_contexts(stream, oa_config);
if (ret)
- return ret;
+ return ERR_PTR(ret);
/*
* For Gen12, performance counters are context
@@ -2501,7 +2529,7 @@ static int gen12_enable_metric_set(struct i915_perf_stream *stream)
if (stream->ctx) {
ret = gen12_configure_oar_context(stream, true);
if (ret)
- return ret;
+ return ERR_PTR(ret);
}
return emit_oa_config(stream, oa_config, oa_context(stream));
@@ -2645,7 +2673,8 @@ static void gen7_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN7_OACONTROL, GEN7_OACONTROL_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen8_oa_disable(struct i915_perf_stream *stream)
@@ -2656,7 +2685,8 @@ static void gen8_oa_disable(struct i915_perf_stream *stream)
if (intel_wait_for_register(uncore,
GEN8_OACONTROL, GEN8_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
static void gen12_oa_disable(struct i915_perf_stream *stream)
@@ -2668,7 +2698,8 @@ static void gen12_oa_disable(struct i915_perf_stream *stream)
GEN12_OAG_OACONTROL,
GEN12_OAG_OACONTROL_OA_COUNTER_ENABLE, 0,
50))
- DRM_ERROR("wait for OA to be disabled timed out\n");
+ drm_err(&stream->perf->i915->drm,
+ "wait for OA to be disabled timed out\n");
}
/**
@@ -2696,6 +2727,20 @@ static const struct i915_perf_stream_ops i915_oa_stream_ops = {
.read = i915_oa_read,
};
+static int i915_perf_stream_enable_sync(struct i915_perf_stream *stream)
+{
+ struct i915_request *rq;
+
+ rq = stream->perf->ops.enable_metric_set(stream);
+ if (IS_ERR(rq))
+ return PTR_ERR(rq);
+
+ i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);
+ i915_request_put(rq);
+
+ return 0;
+}
+
/**
* i915_oa_stream_init - validate combined props for OA stream and init
* @stream: An i915 perf stream
@@ -2718,6 +2763,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
struct drm_i915_perf_open_param *param,
struct perf_open_properties *props)
{
+ struct drm_i915_private *i915 = stream->perf->i915;
struct i915_perf *perf = stream->perf;
int format_size;
int ret;
@@ -2774,7 +2820,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
stream->sample_size += format_size;
stream->oa_buffer.format_size = format_size;
- if (WARN_ON(stream->oa_buffer.format_size == 0))
+ if (drm_WARN_ON(&i915->drm, stream->oa_buffer.format_size == 0))
return -EINVAL;
stream->hold_preemption = props->hold_preemption;
@@ -2827,9 +2873,9 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
goto err_oa_buf_alloc;
stream->ops = &i915_oa_stream_ops;
- perf->exclusive_stream = stream;
+ WRITE_ONCE(perf->exclusive_stream, stream);
- ret = perf->ops.enable_metric_set(stream);
+ ret = i915_perf_stream_enable_sync(stream);
if (ret) {
DRM_DEBUG("Unable to enable metric set\n");
goto err_enable;
@@ -2847,7 +2893,7 @@ static int i915_oa_stream_init(struct i915_perf_stream *stream,
return 0;
err_enable:
- perf->exclusive_stream = NULL;
+ WRITE_ONCE(perf->exclusive_stream, NULL);
perf->ops.disable_metric_set(stream);
free_oa_buffer(stream);
@@ -2873,12 +2919,11 @@ void i915_oa_init_reg_state(const struct intel_context *ce,
{
struct i915_perf_stream *stream;
- /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
-
if (engine->class != RENDER_CLASS)
return;
- stream = engine->i915->perf.exclusive_stream;
+ /* perf.exclusive_stream serialised by lrc_configure_all_contexts() */
+ stream = READ_ONCE(engine->i915->perf.exclusive_stream);
/*
* For gen12, only CTX_R_PWR_CLK_STATE needs update, but the caller
* is already doing that, so nothing to be done for gen12 here.
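
The WRITE_ONCE()/READ_ONCE() pairing around perf.exclusive_stream keeps the compiler from tearing, caching, or re-reading the pointer across the lockless handoff. A minimal sketch of the publish/consume pattern (names are hypothetical):

    #include <linux/compiler.h>

    struct stream;
    static struct stream *exclusive;        /* hypothetical shared slot */

    static void publish(struct stream *s)
    {
            WRITE_ONCE(exclusive, s);       /* one untorn store */
    }

    static struct stream *snapshot(void)
    {
            return READ_ONCE(exclusive);    /* one untorn load */
    }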
@@ -3147,7 +3192,7 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
return -EINVAL;
if (config != stream->oa_config) {
- int err;
+ struct i915_request *rq;
/*
* If OA is bound to a specific context, emit the
@@ -3158,11 +3203,13 @@ static long i915_perf_config_locked(struct i915_perf_stream *stream,
* When set globally, we use a low priority kernel context,
* so it will effectively take effect when idle.
*/
- err = emit_oa_config(stream, config, oa_context(stream));
- if (err == 0)
+ rq = emit_oa_config(stream, config, oa_context(stream));
+ if (!IS_ERR(rq)) {
config = xchg(&stream->oa_config, config);
- else
- ret = err;
+ i915_request_put(rq);
+ } else {
+ ret = PTR_ERR(rq);
+ }
}
i915_oa_config_put(config);
diff --git a/drivers/gpu/drm/i915/i915_perf_types.h b/drivers/gpu/drm/i915/i915_perf_types.h
index 45e581455f5d..a0e22f00f6cf 100644
--- a/drivers/gpu/drm/i915/i915_perf_types.h
+++ b/drivers/gpu/drm/i915/i915_perf_types.h
@@ -339,7 +339,8 @@ struct i915_oa_ops {
* counter reports being sampled. May apply system constraints such as
* disabling EU clock gating as required.
*/
- int (*enable_metric_set)(struct i915_perf_stream *stream);
+ struct i915_request *
+ (*enable_metric_set)(struct i915_perf_stream *stream);
/**
* @disable_metric_set: Remove system constraints associated with using
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index 28a82c849bac..2c062534eac1 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -448,7 +448,7 @@ static void engine_event_destroy(struct perf_event *event)
engine = intel_engine_lookup_user(i915,
engine_event_class(event),
engine_event_instance(event));
- if (WARN_ON_ONCE(!engine))
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine))
return;
if (engine_event_sample(event) == I915_SAMPLE_BUSY &&
@@ -584,7 +584,7 @@ static u64 __i915_pmu_event_read(struct perf_event *event)
engine_event_class(event),
engine_event_instance(event));
- if (WARN_ON_ONCE(!engine)) {
+ if (drm_WARN_ON_ONCE(&i915->drm, !engine)) {
/* Do nothing */
} else if (sample == I915_SAMPLE_BUSY &&
intel_engine_supports_stats(engine)) {
@@ -637,8 +637,10 @@ static void i915_pmu_enable(struct perf_event *event)
container_of(event->pmu, typeof(*i915), pmu.base);
unsigned int bit = event_enabled_bit(event);
struct i915_pmu *pmu = &i915->pmu;
+ intel_wakeref_t wakeref;
unsigned long flags;
+ wakeref = intel_runtime_pm_get(&i915->runtime_pm);
spin_lock_irqsave(&pmu->lock, flags);
/*
@@ -648,6 +650,14 @@ static void i915_pmu_enable(struct perf_event *event)
BUILD_BUG_ON(ARRAY_SIZE(pmu->enable_count) != I915_PMU_MASK_BITS);
GEM_BUG_ON(bit >= ARRAY_SIZE(pmu->enable_count));
GEM_BUG_ON(pmu->enable_count[bit] == ~0);
+
+ if (pmu->enable_count[bit] == 0 &&
+ config_enabled_mask(I915_PMU_RC6_RESIDENCY) & BIT_ULL(bit)) {
+ pmu->sample[__I915_SAMPLE_RC6_LAST_REPORTED].cur = 0;
+ pmu->sample[__I915_SAMPLE_RC6].cur = __get_rc6(&i915->gt);
+ pmu->sleep_last = ktime_get();
+ }
+
pmu->enable |= BIT_ULL(bit);
pmu->enable_count[bit]++;
@@ -688,6 +698,8 @@ static void i915_pmu_enable(struct perf_event *event)
* an existing non-zero value.
*/
local64_set(&event->hw.prev_count, __i915_pmu_event_read(event));
+
+ intel_runtime_pm_put(&i915->runtime_pm, wakeref);
}
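
The enable path now brackets its work with a runtime-pm wakeref, so the RC6 counters can be re-sampled while the device is guaranteed awake. The bracket, as a hedged sketch built from the calls used above:

    static void with_wakeref(struct drm_i915_private *i915)
    {
            intel_wakeref_t wakeref;

            wakeref = intel_runtime_pm_get(&i915->runtime_pm);
            /* hardware counters may be read or reset safely here */
            intel_runtime_pm_put(&i915->runtime_pm, wakeref);
    }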
static void i915_pmu_disable(struct perf_event *event)
@@ -810,11 +822,6 @@ static ssize_t i915_pmu_event_show(struct device *dev,
return sprintf(buf, "config=0x%lx\n", eattr->val);
}
-static struct attribute_group i915_pmu_events_attr_group = {
- .name = "events",
- /* Patch in attrs at runtime. */
-};
-
static ssize_t
i915_pmu_get_attr_cpumask(struct device *dev,
struct device_attribute *attr,
@@ -834,13 +841,6 @@ static const struct attribute_group i915_pmu_cpumask_attr_group = {
.attrs = i915_cpumask_attrs,
};
-static const struct attribute_group *i915_pmu_attr_groups[] = {
- &i915_pmu_format_attr_group,
- &i915_pmu_events_attr_group,
- &i915_pmu_cpumask_attr_group,
- NULL
-};
-
#define __event(__config, __name, __unit) \
{ \
.config = (__config), \
@@ -1014,23 +1014,23 @@ err_alloc:
static void free_event_attributes(struct i915_pmu *pmu)
{
- struct attribute **attr_iter = i915_pmu_events_attr_group.attrs;
+ struct attribute **attr_iter = pmu->events_attr_group.attrs;
for (; *attr_iter; attr_iter++)
kfree((*attr_iter)->name);
- kfree(i915_pmu_events_attr_group.attrs);
+ kfree(pmu->events_attr_group.attrs);
kfree(pmu->i915_attr);
kfree(pmu->pmu_attr);
- i915_pmu_events_attr_group.attrs = NULL;
+ pmu->events_attr_group.attrs = NULL;
pmu->i915_attr = NULL;
pmu->pmu_attr = NULL;
}
static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
GEM_BUG_ON(!pmu->base.event_init);
@@ -1043,7 +1043,7 @@ static int i915_pmu_cpu_online(unsigned int cpu, struct hlist_node *node)
static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
{
- struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), node);
+ struct i915_pmu *pmu = hlist_entry_safe(node, typeof(*pmu), cpuhp.node);
unsigned int target;
GEM_BUG_ON(!pmu->base.event_init);
@@ -1060,8 +1060,6 @@ static int i915_pmu_cpu_offline(unsigned int cpu, struct hlist_node *node)
return 0;
}
-static enum cpuhp_state cpuhp_slot = CPUHP_INVALID;
-
static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
{
enum cpuhp_state slot;
@@ -1075,21 +1073,22 @@ static int i915_pmu_register_cpuhp_state(struct i915_pmu *pmu)
return ret;
slot = ret;
- ret = cpuhp_state_add_instance(slot, &pmu->node);
+ ret = cpuhp_state_add_instance(slot, &pmu->cpuhp.node);
if (ret) {
cpuhp_remove_multi_state(slot);
return ret;
}
- cpuhp_slot = slot;
+ pmu->cpuhp.slot = slot;
return 0;
}
static void i915_pmu_unregister_cpuhp_state(struct i915_pmu *pmu)
{
- WARN_ON(cpuhp_slot == CPUHP_INVALID);
- WARN_ON(cpuhp_state_remove_instance(cpuhp_slot, &pmu->node));
- cpuhp_remove_multi_state(cpuhp_slot);
+ WARN_ON(pmu->cpuhp.slot == CPUHP_INVALID);
+ WARN_ON(cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node));
+ cpuhp_remove_multi_state(pmu->cpuhp.slot);
+ pmu->cpuhp.slot = CPUHP_INVALID;
}
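
With the slot stored in the PMU itself, each device tears down exactly the hotplug state it registered instead of sharing one global slot. The lifecycle, schematically (error handling elided):

    /* registration: a dynamic slot returned by cpuhp_setup_state_multi() */
    pmu->cpuhp.slot = slot;
    cpuhp_state_add_instance(slot, &pmu->cpuhp.node);

    /* unregistration: remove our instance, drop the slot, poison the field */
    cpuhp_state_remove_instance(pmu->cpuhp.slot, &pmu->cpuhp.node);
    cpuhp_remove_multi_state(pmu->cpuhp.slot);
    pmu->cpuhp.slot = CPUHP_INVALID;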
static bool is_igp(struct drm_i915_private *i915)
@@ -1106,6 +1105,13 @@ static bool is_igp(struct drm_i915_private *i915)
void i915_pmu_register(struct drm_i915_private *i915)
{
struct i915_pmu *pmu = &i915->pmu;
+ const struct attribute_group *attr_groups[] = {
+ &i915_pmu_format_attr_group,
+ &pmu->events_attr_group,
+ &i915_pmu_cpumask_attr_group,
+ NULL
+ };
+
int ret = -ENOMEM;
if (INTEL_GEN(i915) <= 2) {
@@ -1116,6 +1122,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
spin_lock_init(&pmu->lock);
hrtimer_init(&pmu->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
pmu->timer.function = i915_sample;
+ pmu->cpuhp.slot = CPUHP_INVALID;
if (!is_igp(i915)) {
pmu->name = kasprintf(GFP_KERNEL,
@@ -1131,11 +1138,16 @@ void i915_pmu_register(struct drm_i915_private *i915)
if (!pmu->name)
goto err;
- i915_pmu_events_attr_group.attrs = create_event_attributes(pmu);
- if (!i915_pmu_events_attr_group.attrs)
+ pmu->events_attr_group.name = "events";
+ pmu->events_attr_group.attrs = create_event_attributes(pmu);
+ if (!pmu->events_attr_group.attrs)
goto err_name;
- pmu->base.attr_groups = i915_pmu_attr_groups;
+ pmu->base.attr_groups = kmemdup(attr_groups, sizeof(attr_groups),
+ GFP_KERNEL);
+ if (!pmu->base.attr_groups)
+ goto err_attr;
+
pmu->base.task_ctx_nr = perf_invalid_context;
pmu->base.event_init = i915_pmu_event_init;
pmu->base.add = i915_pmu_event_add;
@@ -1147,7 +1159,7 @@ void i915_pmu_register(struct drm_i915_private *i915)
ret = perf_pmu_register(&pmu->base, pmu->name, -1);
if (ret)
- goto err_attr;
+ goto err_groups;
ret = i915_pmu_register_cpuhp_state(pmu);
if (ret)
@@ -1157,6 +1169,8 @@ void i915_pmu_register(struct drm_i915_private *i915)
err_unreg:
perf_pmu_unregister(&pmu->base);
+err_groups:
+ kfree(pmu->base.attr_groups);
err_attr:
pmu->base.event_init = NULL;
free_event_attributes(pmu);
@@ -1174,7 +1188,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
if (!pmu->base.event_init)
return;
- WARN_ON(pmu->enable);
+ drm_WARN_ON(&i915->drm, pmu->enable);
hrtimer_cancel(&pmu->timer);
@@ -1182,6 +1196,7 @@ void i915_pmu_unregister(struct drm_i915_private *i915)
perf_pmu_unregister(&pmu->base);
pmu->base.event_init = NULL;
+ kfree(pmu->base.attr_groups);
if (!is_igp(i915))
kfree(pmu->name);
free_event_attributes(pmu);
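
Since the events attribute group now lives inside struct i915_pmu, the attribute-group array handed to perf must also be per device; duplicating an on-stack template with kmemdup() is the usual shape. A hedged sketch (group names are illustrative):

    const struct attribute_group *groups[] = {
            &format_group,                  /* shared, static */
            &pmu->events_attr_group,        /* per device */
            NULL
    };

    pmu->base.attr_groups = kmemdup(groups, sizeof(groups), GFP_KERNEL);
    if (!pmu->base.attr_groups)
            return -ENOMEM;
    /* ... and kfree(pmu->base.attr_groups) when unregistering */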
diff --git a/drivers/gpu/drm/i915/i915_pmu.h b/drivers/gpu/drm/i915/i915_pmu.h
index 6c1647c5daf2..941f0c14037c 100644
--- a/drivers/gpu/drm/i915/i915_pmu.h
+++ b/drivers/gpu/drm/i915/i915_pmu.h
@@ -10,7 +10,7 @@
#include <linux/hrtimer.h>
#include <linux/perf_event.h>
#include <linux/spinlock_types.h>
-#include <drm/i915_drm.h>
+#include <uapi/drm/i915_drm.h>
struct drm_i915_private;
@@ -39,9 +39,12 @@ struct i915_pmu_sample {
struct i915_pmu {
/**
- * @node: List node for CPU hotplug handling.
+ * @cpuhp: Struct used for CPU hotplug handling.
*/
- struct hlist_node node;
+ struct {
+ struct hlist_node node;
+ enum cpuhp_state slot;
+ } cpuhp;
/**
* @base: PMU base.
*/
@@ -105,6 +108,10 @@ struct i915_pmu {
*/
ktime_t sleep_last;
/**
+ * @events_attr_group: Device events attribute group.
+ */
+ struct attribute_group events_attr_group;
+ /**
* @i915_attr: Memory block holding device attributes.
*/
void *i915_attr;
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 6cc55c103f67..309cb7d96b35 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2626,6 +2626,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define IPEIR_I965 _MMIO(0x2064)
#define IPEHR_I965 _MMIO(0x2068)
#define GEN7_SC_INSTDONE _MMIO(0x7100)
+#define GEN12_SC_INSTDONE_EXTRA _MMIO(0x7104)
+#define GEN12_SC_INSTDONE_EXTRA2 _MMIO(0x7108)
#define GEN7_SAMPLER_INSTDONE _MMIO(0xe160)
#define GEN7_ROW_INSTDONE _MMIO(0xe164)
#define GEN8_MCR_SELECTOR _MMIO(0xfdc)
@@ -2639,6 +2641,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7)
#define RING_IPEIR(base) _MMIO((base) + 0x64)
#define RING_IPEHR(base) _MMIO((base) + 0x68)
+#define RING_EIR(base) _MMIO((base) + 0xb0)
+#define RING_EMR(base) _MMIO((base) + 0xb4)
+#define RING_ESR(base) _MMIO((base) + 0xb8)
/*
* On GEN4, only the render ring INSTDONE exists and has a different
* layout than the GEN7+ version.
@@ -2860,6 +2865,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MI_ARB_STATE _MMIO(0x20e4) /* 915+ only */
#define MBUS_ABOX_CTL _MMIO(0x45038)
+#define MBUS_ABOX1_CTL _MMIO(0x45048)
+#define MBUS_ABOX2_CTL _MMIO(0x4504C)
#define MBUS_ABOX_BW_CREDIT_MASK (3 << 20)
#define MBUS_ABOX_BW_CREDIT(x) ((x) << 20)
#define MBUS_ABOX_B_CREDIT_MASK (0xF << 16)
@@ -3088,7 +3095,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GT_CONTEXT_SWITCH_INTERRUPT (1 << 8)
#define GT_RENDER_L3_PARITY_ERROR_INTERRUPT (1 << 5) /* !snb */
#define GT_RENDER_PIPECTL_NOTIFY_INTERRUPT (1 << 4)
-#define GT_RENDER_CS_MASTER_ERROR_INTERRUPT (1 << 3)
+#define GT_CS_MASTER_ERROR_INTERRUPT REG_BIT(3)
#define GT_RENDER_SYNC_STATUS_INTERRUPT (1 << 2)
#define GT_RENDER_DEBUG_INTERRUPT (1 << 1)
#define GT_RENDER_USER_INTERRUPT (1 << 0)
@@ -3160,6 +3167,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define GEN7_FF_THREAD_MODE _MMIO(0x20a0)
#define GEN7_FF_SCHED_MASK 0x0077070
#define GEN8_FF_DS_REF_CNT_FFME (1 << 19)
+#define GEN12_FF_TESSELATION_DOP_GATE_DISABLE BIT(19)
#define GEN7_FF_TS_SCHED_HS1 (0x5 << 16)
#define GEN7_FF_TS_SCHED_HS0 (0x3 << 16)
#define GEN7_FF_TS_SCHED_LOAD_BALANCE (0x1 << 16)
@@ -3277,6 +3285,7 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
/* Framebuffer compression for Ivybridge */
#define IVB_FBC_RT_BASE _MMIO(0x7020)
+#define IVB_FBC_RT_BASE_UPPER _MMIO(0x7024)
#define IPS_CTL _MMIO(0x43408)
#define IPS_ENABLE (1 << 31)
@@ -3743,8 +3752,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg)
#define MCH_SSKPD_WM0_MASK 0x3f
#define MCH_SSKPD_WM0_VAL 0xc
-#define MCH_SECP_NRG_STTS _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x592c)
-
/* Clocking configuration register */
#define CLKCFG _MMIO(MCHBAR_MIRROR_BASE + 0xc00)
#define CLKCFG_FSB_400 (5 << 0) /* hrawclk 100 */
@@ -4124,6 +4131,9 @@ enum {
#define PWM2_GATING_DIS (1 << 14)
#define PWM1_GATING_DIS (1 << 13)
+#define GEN9_CLKGATE_DIS_3 _MMIO(0x46538)
+#define TGL_VRH_GATING_DIS REG_BIT(31)
+
#define GEN9_CLKGATE_DIS_4 _MMIO(0x4653C)
#define BXT_GMBUS_GATING_DIS (1 << 14)
@@ -4851,16 +4861,6 @@ enum {
#define _PP_STATUS 0x61200
#define PP_STATUS(pps_idx) _MMIO_PPS(pps_idx, _PP_STATUS)
#define PP_ON REG_BIT(31)
-
-#define _PP_CONTROL_1 0xc7204
-#define _PP_CONTROL_2 0xc7304
-#define ICP_PP_CONTROL(x) _MMIO(((x) == 1) ? _PP_CONTROL_1 : \
- _PP_CONTROL_2)
-#define POWER_CYCLE_DELAY_MASK REG_GENMASK(8, 4)
-#define VDD_OVERRIDE_FORCE REG_BIT(3)
-#define BACKLIGHT_ENABLE REG_BIT(2)
-#define PWR_DOWN_ON_RESET REG_BIT(1)
-#define PWR_STATE_TARGET REG_BIT(0)
/*
* Indicates that all dependencies of the panel are on:
*
@@ -4921,6 +4921,7 @@ enum {
#define PFIT_ENABLE (1 << 31)
#define PFIT_PIPE_MASK (3 << 29)
#define PFIT_PIPE_SHIFT 29
+#define PFIT_PIPE(pipe) ((pipe) << 29)
#define VERT_INTERP_DISABLE (0 << 10)
#define VERT_INTERP_BILINEAR (1 << 10)
#define VERT_INTERP_MASK (3 << 10)
@@ -5870,7 +5871,6 @@ enum {
#define _PIPEAGCMAX 0x70010
#define _PIPEBGCMAX 0x71010
-#define PIPEGCMAX_RGB_MASK REG_GENMASK(15, 0)
#define PIPEGCMAX(pipe, i) _MMIO_PIPE2(pipe, _PIPEAGCMAX + (i) * 4)
#define _PIPE_MISC_A 0x70030
@@ -5879,6 +5879,7 @@ enum {
#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */
#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */
#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11)
+#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */
#define PIPEMISC_DITHER_BPC_MASK (7 << 5)
#define PIPEMISC_DITHER_8_BPC (0 << 5)
#define PIPEMISC_DITHER_10_BPC (1 << 5)
@@ -7745,9 +7746,9 @@ enum {
#define DISP_ARB_CTL2 _MMIO(0x45004)
#define DISP_DATA_PARTITION_5_6 (1 << 6)
#define DISP_IPC_ENABLE (1 << 3)
-#define DBUF_CTL _MMIO(0x45008)
-#define DBUF_CTL_S1 _MMIO(0x45008)
-#define DBUF_CTL_S2 _MMIO(0x44FE8)
+#define _DBUF_CTL_S1 0x45008
+#define _DBUF_CTL_S2 0x44FE8
+#define DBUF_CTL_S(slice) _MMIO(_PICK_EVEN(slice, _DBUF_CTL_S1, _DBUF_CTL_S2))
#define DBUF_POWER_REQUEST (1 << 31)
#define DBUF_POWER_STATE (1 << 30)
#define GEN7_MSG_CTL _MMIO(0x45010)
@@ -7757,6 +7758,7 @@ enum {
#define BW_BUDDY1_CTL _MMIO(0x45140)
#define BW_BUDDY2_CTL _MMIO(0x45150)
#define BW_BUDDY_DISABLE REG_BIT(31)
+#define BW_BUDDY_TLB_REQ_TIMER_MASK REG_GENMASK(21, 16)
#define BW_BUDDY1_PAGE_MASK _MMIO(0x45144)
#define BW_BUDDY2_PAGE_MASK _MMIO(0x45154)
@@ -7766,6 +7768,7 @@ enum {
#define GEN8_CHICKEN_DCPR_1 _MMIO(0x46430)
#define SKL_SELECT_ALTERNATE_DC_EXIT (1 << 30)
+#define CNL_DELAY_PMRSP (1 << 22)
#define MASK_WAKEMEM (1 << 13)
#define CNL_DDI_CLOCK_REG_ACCESS_ON (1 << 7)
@@ -8987,6 +8990,8 @@ enum {
#define GEN6_PCODE_UNIMPLEMENTED_CMD 0xFF
#define GEN7_PCODE_TIMEOUT 0x2
#define GEN7_PCODE_ILLEGAL_DATA 0x3
+#define GEN11_PCODE_ILLEGAL_SUBCOMMAND 0x4
+#define GEN11_PCODE_LOCKED 0x6
#define GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE 0x10
#define GEN6_PCODE_WRITE_RC6VIDS 0x4
#define GEN6_PCODE_READ_RC6VIDS 0x5
@@ -9135,12 +9140,19 @@ enum {
#define THROTTLE_12_5 (7 << 2)
#define DISABLE_EARLY_EOT (1 << 1)
-#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN7_ROW_CHICKEN2 _MMIO(0xe4f4)
+#define GEN12_DISABLE_EARLY_READ REG_BIT(14)
+#define GEN12_PUSH_CONST_DEREF_HOLD_DIS REG_BIT(8)
+
#define GEN7_ROW_CHICKEN2_GT2 _MMIO(0xf4f4)
#define DOP_CLOCK_GATING_DISABLE (1 << 0)
#define PUSH_CONSTANT_DEREF_DISABLE (1 << 8)
#define GEN11_TDL_CLOCK_GATING_FIX_DISABLE (1 << 1)
+#define GEN9_ROW_CHICKEN4 _MMIO(0xe48c)
+#define GEN12_DISABLE_TDL_PUSH REG_BIT(9)
+#define GEN11_DIS_PICK_2ND_EU REG_BIT(7)
+
#define HSW_ROW_CHICKEN3 _MMIO(0xe49c)
#define HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE (1 << 6)
@@ -9241,6 +9253,10 @@ enum {
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_74250 (7 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148352 (8 << 16)
#define AUD_CONFIG_PIXEL_CLOCK_HDMI_148500 (9 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_296703 (10 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_297000 (11 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_593407 (12 << 16)
+#define AUD_CONFIG_PIXEL_CLOCK_HDMI_594000 (13 << 16)
#define AUD_CONFIG_DISABLE_NCTS (1 << 3)
/* HSW Audio */
@@ -10532,13 +10548,13 @@ enum skl_power_gate {
#define D_COMP_COMP_DISABLE (1 << 0)
/* Pipe WM_LINETIME - watermark line time */
-#define _PIPE_WM_LINETIME_A 0x45270
-#define _PIPE_WM_LINETIME_B 0x45274
-#define PIPE_WM_LINETIME(pipe) _MMIO_PIPE(pipe, _PIPE_WM_LINETIME_A, _PIPE_WM_LINETIME_B)
-#define PIPE_WM_LINETIME_MASK (0x1ff)
-#define PIPE_WM_LINETIME_TIME(x) ((x))
-#define PIPE_WM_LINETIME_IPS_LINETIME_MASK (0x1ff << 16)
-#define PIPE_WM_LINETIME_IPS_LINETIME(x) ((x) << 16)
+#define _WM_LINETIME_A 0x45270
+#define _WM_LINETIME_B 0x45274
+#define WM_LINETIME(pipe) _MMIO_PIPE(pipe, _WM_LINETIME_A, _WM_LINETIME_B)
+#define HSW_LINETIME_MASK REG_GENMASK(8, 0)
+#define HSW_LINETIME(x) REG_FIELD_PREP(HSW_LINETIME_MASK, (x))
+#define HSW_IPS_LINETIME_MASK REG_GENMASK(24, 16)
+#define HSW_IPS_LINETIME(x) REG_FIELD_PREP(HSW_IPS_LINETIME_MASK, (x))
/* SFUSE_STRAP */
#define SFUSE_STRAP _MMIO(0xc2014)
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index be185886e4fc..c0df71d7d0ff 100644
--- a/drivers/gpu/drm/i915/i915_request.c
+++ b/drivers/gpu/drm/i915/i915_request.c
@@ -51,7 +51,6 @@ struct execute_cb {
static struct i915_global_request {
struct i915_global base;
struct kmem_cache *slab_requests;
- struct kmem_cache *slab_dependencies;
struct kmem_cache *slab_execute_cbs;
} global;
@@ -203,6 +202,19 @@ static void free_capture_list(struct i915_request *request)
}
}
+static void __i915_request_fill(struct i915_request *rq, u8 val)
+{
+ void *vaddr = rq->ring->vaddr;
+ u32 head;
+
+ head = rq->infix;
+ if (rq->postfix < head) {
+ memset(vaddr + head, val, rq->ring->size - head);
+ head = 0;
+ }
+ memset(vaddr + head, val, rq->postfix - head);
+}
+
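
__i915_request_fill() must handle the payload wrapping past the end of the ring, hence the two-memset split. The same pattern as a standalone, runnable sketch:

    #include <string.h>
    #include <stddef.h>

    /* Fill [head, tail) of a circular buffer, handling wrap-around. */
    static void fill_wrapped(char *buf, size_t size, size_t head, size_t tail,
                             int val)
    {
            if (tail < head) {              /* span wraps past the end */
                    memset(buf + head, val, size - head);
                    head = 0;
            }
            memset(buf + head, val, tail - head);
    }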
static void remove_from_engine(struct i915_request *rq)
{
struct intel_engine_cs *engine, *locked;
@@ -221,6 +233,8 @@ static void remove_from_engine(struct i915_request *rq)
locked = engine;
}
list_del_init(&rq->sched.link);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
spin_unlock_irq(&locked->active.lock);
}
@@ -245,6 +259,9 @@ bool i915_request_retire(struct i915_request *rq)
*/
GEM_BUG_ON(!list_is_first(&rq->link,
&i915_request_timeline(rq)->requests));
+ if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM))
+ /* Poison before we release our space in the ring */
+ __i915_request_fill(rq, POISON_FREE);
rq->ring->head = rq->postfix;
/*
@@ -273,7 +290,7 @@ bool i915_request_retire(struct i915_request *rq)
spin_unlock_irq(&rq->lock);
remove_from_client(rq);
- list_del(&rq->link);
+ __list_del_entry(&rq->link); /* poison neither prev/next (RCU walks) */
intel_context_exit(rq->context);
intel_context_unpin(rq->context);
@@ -346,6 +363,50 @@ __await_execution(struct i915_request *rq,
return 0;
}
+static bool fatal_error(int error)
+{
+ switch (error) {
+ case 0: /* not an error! */
+ case -EAGAIN: /* innocent victim of a GT reset (__i915_request_reset) */
+ case -ETIMEDOUT: /* waiting for Godot (timer_i915_sw_fence_wake) */
+ return false;
+ default:
+ return true;
+ }
+}
+
+void __i915_request_skip(struct i915_request *rq)
+{
+ GEM_BUG_ON(!fatal_error(rq->fence.error));
+
+ if (rq->infix == rq->postfix)
+ return;
+
+ /*
+ * As this request likely depends on state from the lost
+ * context, clear out all the user operations leaving the
+ * breadcrumb at the end (so we get the fence notifications).
+ */
+ __i915_request_fill(rq, 0);
+ rq->infix = rq->postfix;
+}
+
+void i915_request_set_error_once(struct i915_request *rq, int error)
+{
+ int old;
+
+ GEM_BUG_ON(!IS_ERR_VALUE((long)error));
+
+ if (i915_request_signaled(rq))
+ return;
+
+ old = READ_ONCE(rq->fence.error);
+ do {
+ if (fatal_error(old))
+ return;
+ } while (!try_cmpxchg(&rq->fence.error, &old, error));
+}
+
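
i915_request_set_error_once() lets the first fatal error stick while later racers back off. The same compare-exchange loop in portable C (the fatal predicate here is a stand-in, not the driver's):

    #include <stdatomic.h>
    #include <stdbool.h>

    static bool is_fatal(int err)
    {
            return err != 0;                /* stand-in for fatal_error() */
    }

    static void set_error_once(_Atomic int *slot, int error)
    {
            int old = atomic_load(slot);

            do {
                    if (is_fatal(old))      /* an earlier fatal error wins */
                            return;
            } while (!atomic_compare_exchange_weak(slot, &old, error));
    }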
bool __i915_request_submit(struct i915_request *request)
{
struct intel_engine_cs *engine = request->engine;
@@ -375,8 +436,10 @@ bool __i915_request_submit(struct i915_request *request)
if (i915_request_completed(request))
goto xfer;
- if (intel_context_is_banned(request->context))
- i915_request_skip(request, -EIO);
+ if (unlikely(intel_context_is_banned(request->context)))
+ i915_request_set_error_once(request, -EIO);
+ if (unlikely(fatal_error(request->fence.error)))
+ __i915_request_skip(request);
/*
* Are we using semaphores when the gpu is already saturated?
@@ -408,8 +471,10 @@ bool __i915_request_submit(struct i915_request *request)
xfer: /* We may be recursing from the signal callback of another i915 fence */
spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
- if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags))
+ if (!test_and_set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags)) {
list_move_tail(&request->sched.link, &engine->active.requests);
+ clear_bit(I915_FENCE_FLAG_PQUEUE, &request->fence.flags);
+ }
if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags) &&
@@ -500,7 +565,7 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
trace_i915_request_submit(request);
if (unlikely(fence->error))
- i915_request_skip(request, fence->error);
+ i915_request_set_error_once(request, fence->error);
/*
* We need to serialize use of the submit_request() callback
@@ -523,19 +588,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
return NOTIFY_DONE;
}
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+ struct i915_request *rq =
+ container_of(wrk, typeof(*rq), semaphore_work);
+
+ i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+ i915_request_put(rq);
+}
+
static int __i915_sw_fence_call
semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
- struct i915_request *request =
- container_of(fence, typeof(*request), semaphore);
+ struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
switch (state) {
case FENCE_COMPLETE:
- i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+ if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+ i915_request_get(rq);
+ init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+ irq_work_queue(&rq->semaphore_work);
+ }
break;
case FENCE_FREE:
- i915_request_put(request);
+ i915_request_put(rq);
break;
}
@@ -591,6 +668,8 @@ static void __i915_request_ctor(void *arg)
i915_sw_fence_init(&rq->submit, submit_notify);
i915_sw_fence_init(&rq->semaphore, semaphore_notify);
+ dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock, 0, 0);
+
rq->file_priv = NULL;
rq->capture_list = NULL;
@@ -649,25 +728,31 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
}
}
- ret = intel_timeline_get_seqno(tl, rq, &seqno);
- if (ret)
- goto err_free;
-
rq->i915 = ce->engine->i915;
rq->context = ce;
rq->engine = ce->engine;
rq->ring = ce->ring;
rq->execution_mask = ce->engine->mask;
+ kref_init(&rq->fence.refcount);
+ rq->fence.flags = 0;
+ rq->fence.error = 0;
+ INIT_LIST_HEAD(&rq->fence.cb_list);
+
+ ret = intel_timeline_get_seqno(tl, rq, &seqno);
+ if (ret)
+ goto err_free;
+
+ rq->fence.context = tl->fence_context;
+ rq->fence.seqno = seqno;
+
RCU_INIT_POINTER(rq->timeline, tl);
RCU_INIT_POINTER(rq->hwsp_cacheline, tl->hwsp_cacheline);
rq->hwsp_seqno = tl->hwsp_seqno;
+ GEM_BUG_ON(i915_request_completed(rq));
rq->rcustate = get_state_synchronize_rcu(); /* acts as smp_mb() */
- dma_fence_init(&rq->fence, &i915_fence_ops, &rq->lock,
- tl->fence_context, seqno);
-
/* We bump the ref for the fence chain */
i915_sw_fence_reinit(&i915_request_get(rq)->submit);
i915_sw_fence_reinit(&i915_request_get(rq)->semaphore);
@@ -710,6 +795,8 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp)
rq->infix = rq->ring->emit; /* end of header; start of user payload */
intel_context_mark_active(ce);
+ list_add_tail_rcu(&rq->link, &tl->requests);
+
return rq;
err_unwind:
@@ -763,16 +850,26 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
struct dma_fence *fence;
int err;
- GEM_BUG_ON(i915_request_timeline(rq) ==
- rcu_access_pointer(signal->timeline));
+ if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+ return 0;
+
+ if (i915_request_started(signal))
+ return 0;
fence = NULL;
rcu_read_lock();
spin_lock_irq(&signal->lock);
- if (!i915_request_started(signal) &&
- !list_is_first(&signal->link,
- &rcu_dereference(signal->timeline)->requests)) {
- struct i915_request *prev = list_prev_entry(signal, link);
+ do {
+ struct list_head *pos = READ_ONCE(signal->link.prev);
+ struct i915_request *prev;
+
+ /* Confirm signal has not been retired, the link is valid */
+ if (unlikely(i915_request_started(signal)))
+ break;
+
+ /* Is signal the earliest request on its timeline? */
+ if (pos == &rcu_dereference(signal->timeline)->requests)
+ break;
/*
* Peek at the request before us in the timeline. That
@@ -780,20 +877,25 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
* after acquiring a reference to it, confirm that it is
* still part of the signaler's timeline.
*/
- if (i915_request_get_rcu(prev)) {
- if (list_next_entry(prev, link) == signal)
- fence = &prev->fence;
- else
- i915_request_put(prev);
+ prev = list_entry(pos, typeof(*prev), link);
+ if (!i915_request_get_rcu(prev))
+ break;
+
+ /* After the strong barrier, confirm prev is still attached */
+ if (unlikely(READ_ONCE(prev->link.next) != &signal->link)) {
+ i915_request_put(prev);
+ break;
}
- }
+
+ fence = &prev->fence;
+ } while (0);
spin_unlock_irq(&signal->lock);
rcu_read_unlock();
if (!fence)
return 0;
err = 0;
- if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+ if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
err = i915_sw_fence_await_dma_fence(&rq->submit,
fence, 0,
I915_FENCE_GFP);
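
The rewritten loop is the usual lockless peek: read the neighbour under RCU, take a strong reference, then re-check the link before trusting it. Schematically, simplified from the code above (the real loop also holds signal->lock):

    rcu_read_lock();
    prev = list_entry(READ_ONCE(signal->link.prev), typeof(*prev), link);
    if (i915_request_get_rcu(prev)) {
            /* the barrier in the ref acquisition lets us re-validate */
            if (READ_ONCE(prev->link.next) != &signal->link) {
                    i915_request_put(prev); /* lost the race; bail out */
                    prev = NULL;
            }
    }
    rcu_read_unlock();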
@@ -817,7 +919,7 @@ already_busywaiting(struct i915_request *rq)
*
* See the are-we-too-late? check in __i915_request_submit().
*/
- return rq->sched.semaphores | rq->engine->saturated;
+ return rq->sched.semaphores | READ_ONCE(rq->engine->saturated);
}
static int
@@ -875,8 +977,16 @@ emit_semaphore_wait(struct i915_request *to,
struct i915_request *from,
gfp_t gfp)
{
+ const intel_engine_mask_t mask = READ_ONCE(from->engine)->mask;
+
+ if (!intel_context_use_semaphores(to->context))
+ goto await_fence;
+
+ if (!rcu_access_pointer(from->hwsp_cacheline))
+ goto await_fence;
+
/* Just emit the first semaphore we see as request space is limited. */
- if (already_busywaiting(to) & from->engine->mask)
+ if (already_busywaiting(to) & mask)
goto await_fence;
if (i915_request_await_start(to, from) < 0)
@@ -889,7 +999,7 @@ emit_semaphore_wait(struct i915_request *to,
if (__emit_semaphore_wait(to, from, from->fence.seqno))
goto await_fence;
- to->sched.semaphores |= from->engine->mask;
+ to->sched.semaphores |= mask;
to->sched.flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
return 0;
@@ -920,12 +1030,8 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
&from->submit,
I915_FENCE_GFP);
- else if (intel_context_use_semaphores(to->context))
- ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
else
- ret = i915_sw_fence_await_dma_fence(&to->submit,
- &from->fence, 0,
- I915_FENCE_GFP);
+ ret = emit_semaphore_wait(to, from, I915_FENCE_GFP);
if (ret < 0)
return ret;
@@ -1024,6 +1130,8 @@ __i915_request_await_execution(struct i915_request *to,
{
int err;
+ GEM_BUG_ON(intel_context_is_barrier(from->context));
+
/* Submit both requests at the same time */
err = __await_execution(to, from, hook, I915_FENCE_GFP);
if (err)
@@ -1034,14 +1142,45 @@ __i915_request_await_execution(struct i915_request *to,
&from->fence))
return 0;
- /* Ensure both start together [after all semaphores in signal] */
- if (intel_engine_has_semaphores(to->engine))
- err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
- else
- err = i915_request_await_start(to, from);
+ /*
+ * Wait until the start of this request.
+ *
+ * The execution cb fires when we submit the request to HW. But in
+ * many cases this may be long before the request itself is ready to
+ * run (consider that we submit 2 requests for the same context, where
+ * the request of interest is behind an indefinite spinner). So we hook
+ * up to both to reduce our queues and keep the execution lag minimised
+ * in the worst case, though we hope that the await_start is elided.
+ */
+ err = i915_request_await_start(to, from);
if (err < 0)
return err;
+ /*
+ * Ensure both start together [after all semaphores in signal]
+ *
+ * Now that we are queued to the HW at roughly the same time (thanks
+ * to the execute cb) and are ready to run at roughly the same time
+ * (thanks to the await start), our signaler may still be indefinitely
+ * delayed by waiting on a semaphore from a remote engine. If our
+ * signaler depends on a semaphore, so indirectly do we, and we do not
+ * want to start our payload until our signaler also starts theirs.
+ * So we wait.
+ *
+ * However, there is also a second condition for which we need to wait
+ * for the precise start of the signaler. Consider that the signaler
+ * was submitted in a chain of requests following another context
+ * (with just an ordinary intra-engine fence dependency between the
+ * two). In this case the signaler is queued to HW, but not for
+ * immediate execution, and so we must wait until it reaches the
+ * active slot.
+ */
+ if (intel_engine_has_semaphores(to->engine)) {
+ err = __emit_semaphore_wait(to, from, from->fence.seqno - 1);
+ if (err < 0)
+ return err;
+ }
+
/* Couple the dependency tree for PI on this exposed to->fence */
if (to->engine->schedule) {
err = i915_sched_node_add_dependency(&to->sched, &from->sched);
@@ -1162,31 +1301,6 @@ i915_request_await_object(struct i915_request *to,
return ret;
}
-void i915_request_skip(struct i915_request *rq, int error)
-{
- void *vaddr = rq->ring->vaddr;
- u32 head;
-
- GEM_BUG_ON(!IS_ERR_VALUE((long)error));
- dma_fence_set_error(&rq->fence, error);
-
- if (rq->infix == rq->postfix)
- return;
-
- /*
- * As this request likely depends on state from the lost
- * context, clear out all the user operations leaving the
- * breadcrumb at the end (so we get the fence notifications).
- */
- head = rq->infix;
- if (rq->postfix < head) {
- memset(vaddr + head, 0, rq->ring->size - head);
- head = 0;
- }
- memset(vaddr + head, 0, rq->postfix - head);
- rq->infix = rq->postfix;
-}
-
static struct i915_request *
__i915_request_add_to_timeline(struct i915_request *rq)
{
@@ -1216,7 +1330,17 @@ __i915_request_add_to_timeline(struct i915_request *rq)
prev = to_request(__i915_active_fence_set(&timeline->last_request,
&rq->fence));
if (prev && !i915_request_completed(prev)) {
- if (is_power_of_2(prev->engine->mask | rq->engine->mask))
+ /*
+ * The requests are supposed to be kept in order. However,
+ * we need to be wary in case the timeline->last_request
+ * is used as a barrier for external modification to this
+ * context.
+ */
+ GEM_BUG_ON(prev->context == rq->context &&
+ i915_seqno_passed(prev->fence.seqno,
+ rq->fence.seqno));
+
+ if (is_power_of_2(READ_ONCE(prev->engine)->mask | rq->engine->mask))
i915_sw_fence_await_sw_fence(&rq->submit,
&prev->submit,
&rq->submitq);
@@ -1231,8 +1355,6 @@ __i915_request_add_to_timeline(struct i915_request *rq)
0);
}
- list_add_tail(&rq->link, &timeline->requests);
-
/*
* Make sure that no request gazumped us - if it was allocated after
* our i915_request_alloc() and called __i915_request_add() before
@@ -1292,9 +1414,9 @@ void __i915_request_queue(struct i915_request *rq,
* decide whether to preempt the entire chain so that it is ready to
* run at the earliest possible convenience.
*/
- i915_sw_fence_commit(&rq->semaphore);
if (attr && rq->engine->schedule)
rq->engine->schedule(rq, attr);
+ i915_sw_fence_commit(&rq->semaphore);
i915_sw_fence_commit(&rq->submit);
}
@@ -1302,39 +1424,23 @@ void i915_request_add(struct i915_request *rq)
{
struct intel_timeline * const tl = i915_request_timeline(rq);
struct i915_sched_attr attr = {};
- struct i915_request *prev;
+ struct i915_gem_context *ctx;
lockdep_assert_held(&tl->mutex);
lockdep_unpin_lock(&tl->mutex, rq->cookie);
trace_i915_request_add(rq);
+ __i915_request_commit(rq);
- prev = __i915_request_commit(rq);
-
- if (rcu_access_pointer(rq->context->gem_context))
- attr = i915_request_gem_context(rq)->sched;
+ /* XXX placeholder for selftests */
+ rcu_read_lock();
+ ctx = rcu_dereference(rq->context->gem_context);
+ if (ctx)
+ attr = ctx->sched;
+ rcu_read_unlock();
- /*
- * Boost actual workloads past semaphores!
- *
- * With semaphores we spin on one engine waiting for another,
- * simply to reduce the latency of starting our work when
- * the signaler completes. However, if there is any other
- * work that we could be doing on this engine instead, that
- * is better utilisation and will reduce the overall duration
- * of the current work. To avoid PI boosting a semaphore
- * far in the distance past over useful work, we keep a history
- * of any semaphore use along our dependency chain.
- */
if (!(rq->sched.flags & I915_SCHED_HAS_SEMAPHORE_CHAIN))
attr.priority |= I915_PRIORITY_NOSEMAPHORE;
-
- /*
- * Boost priorities to new clients (new request flows).
- *
- * Allow interactive/synchronous clients to jump ahead of
- * the bulk clients. (FQ_CODEL)
- */
if (list_empty(&rq->sched.signalers_list))
attr.priority |= I915_PRIORITY_WAIT;
@@ -1342,32 +1448,10 @@ void i915_request_add(struct i915_request *rq)
__i915_request_queue(rq, &attr);
local_bh_enable(); /* Kick the execlists tasklet if just scheduled */
- /*
- * In typical scenarios, we do not expect the previous request on
- * the timeline to be still tracked by timeline->last_request if it
- * has been completed. If the completed request is still here, that
- * implies that request retirement is a long way behind submission,
- * suggesting that we haven't been retiring frequently enough from
- * the combination of retire-before-alloc, waiters and the background
- * retirement worker. So if the last request on this timeline was
- * already completed, do a catch up pass, flushing the retirement queue
- * up to this client. Since we have now moved the heaviest operations
- * during retirement onto secondary workers, such as freeing objects
- * or contexts, retiring a bunch of requests is mostly list management
- * (and cache misses), and so we should not be overly penalizing this
- * client by performing excess work, though we may still performing
- * work on behalf of others -- but instead we should benefit from
- * improved resource management. (Well, that's the theory at least.)
- */
- if (prev &&
- i915_request_completed(prev) &&
- rcu_access_pointer(prev->timeline) == tl)
- i915_request_retire_upto(prev);
-
mutex_unlock(&tl->mutex);
}
-static unsigned long local_clock_us(unsigned int *cpu)
+static unsigned long local_clock_ns(unsigned int *cpu)
{
unsigned long t;
@@ -1384,7 +1468,7 @@ static unsigned long local_clock_us(unsigned int *cpu)
* stop busywaiting, see busywait_stop().
*/
*cpu = get_cpu();
- t = local_clock() >> 10;
+ t = local_clock();
put_cpu();
return t;
@@ -1394,15 +1478,15 @@ static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
unsigned int this_cpu;
- if (time_after(local_clock_us(&this_cpu), timeout))
+ if (time_after(local_clock_ns(&this_cpu), timeout))
return true;
return this_cpu != cpu;
}
-static bool __i915_spin_request(const struct i915_request * const rq,
- int state, unsigned long timeout_us)
+static bool __i915_spin_request(const struct i915_request * const rq, int state)
{
+ unsigned long timeout_ns;
unsigned int cpu;
/*
@@ -1430,7 +1514,8 @@ static bool __i915_spin_request(const struct i915_request * const rq,
* takes to sleep on a request, on the order of a microsecond.
*/
- timeout_us += local_clock_us(&cpu);
+ timeout_ns = READ_ONCE(rq->engine->props.max_busywait_duration_ns);
+ timeout_ns += local_clock_ns(&cpu);
do {
if (i915_request_completed(rq))
return true;
@@ -1438,7 +1523,7 @@ static bool __i915_spin_request(const struct i915_request * const rq,
if (signal_pending_state(state, current))
break;
- if (busywait_stop(timeout_us, cpu))
+ if (busywait_stop(timeout_ns, cpu))
break;
cpu_relax();
@@ -1524,8 +1609,8 @@ long i915_request_wait(struct i915_request *rq,
* completion. That requires having a good predictor for the request
* duration, which we currently lack.
*/
- if (IS_ACTIVE(CONFIG_DRM_I915_SPIN_REQUEST) &&
- __i915_spin_request(rq, state, CONFIG_DRM_I915_SPIN_REQUEST)) {
+ if (IS_ACTIVE(CONFIG_DRM_I915_MAX_REQUEST_BUSYWAIT) &&
+ __i915_spin_request(rq, state)) {
dma_fence_signal(&rq->fence);
goto out;
}
@@ -1560,6 +1645,8 @@ long i915_request_wait(struct i915_request *rq,
break;
}
+ intel_engine_flush_submission(rq->engine);
+
if (signal_pending_state(state, current)) {
timeout = -ERESTARTSYS;
break;
@@ -1570,7 +1657,6 @@ long i915_request_wait(struct i915_request *rq,
break;
}
- intel_engine_flush_submission(rq->engine);
timeout = io_schedule_timeout(timeout);
}
__set_current_state(TASK_RUNNING);
@@ -1590,14 +1676,12 @@ out:
static void i915_global_request_shrink(void)
{
- kmem_cache_shrink(global.slab_dependencies);
kmem_cache_shrink(global.slab_execute_cbs);
kmem_cache_shrink(global.slab_requests);
}
static void i915_global_request_exit(void)
{
- kmem_cache_destroy(global.slab_dependencies);
kmem_cache_destroy(global.slab_execute_cbs);
kmem_cache_destroy(global.slab_requests);
}
@@ -1627,17 +1711,9 @@ int __init i915_global_request_init(void)
if (!global.slab_execute_cbs)
goto err_requests;
- global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN |
- SLAB_RECLAIM_ACCOUNT);
- if (!global.slab_dependencies)
- goto err_execute_cbs;
-
i915_global_register(&global.base);
return 0;
-err_execute_cbs:
- kmem_cache_destroy(global.slab_execute_cbs);
err_requests:
kmem_cache_destroy(global.slab_requests);
return -ENOMEM;
diff --git a/drivers/gpu/drm/i915/i915_request.h b/drivers/gpu/drm/i915/i915_request.h
index 031433691a06..3c552bfea67a 100644
--- a/drivers/gpu/drm/i915/i915_request.h
+++ b/drivers/gpu/drm/i915/i915_request.h
@@ -26,6 +26,7 @@
#define I915_REQUEST_H
#include <linux/dma-fence.h>
+#include <linux/irq_work.h>
#include <linux/lockdep.h>
#include "gem/i915_gem_context_types.h"
@@ -71,6 +72,18 @@ enum {
I915_FENCE_FLAG_ACTIVE = DMA_FENCE_FLAG_USER_BITS,
/*
+ * I915_FENCE_FLAG_PQUEUE - this request is ready for execution
+ *
+ * Using the scheduler, when a request is ready for execution it is put
+ * into the priority queue, and removed from that queue when transferred
+ * to the HW runlists. We want to track its membership within the
+ * priority queue so that we can easily check before rescheduling.
+ *
+ * See i915_request_in_priority_queue()
+ */
+ I915_FENCE_FLAG_PQUEUE,
+
+ /*
* I915_FENCE_FLAG_SIGNAL - this request is currently on signal_list
*
* Internal bookkeeping used by the breadcrumb code to track when
@@ -79,6 +92,13 @@ enum {
I915_FENCE_FLAG_SIGNAL,
/*
+ * I915_FENCE_FLAG_HOLD - this request is currently on hold
+ *
+ * This request has been suspended, pending an ongoing investigation.
+ */
+ I915_FENCE_FLAG_HOLD,
+
+ /*
* I915_FENCE_FLAG_NOPREEMPT - this request should not be preempted
*
* The execution of some requests should not be interrupted. This is
@@ -189,6 +209,7 @@ struct i915_request {
};
struct list_head execute_cb;
struct i915_sw_fence semaphore;
+ struct irq_work semaphore_work;
/*
* A list of everyone we wait upon, and everyone who waits upon us.
@@ -284,6 +305,9 @@ __i915_request_create(struct intel_context *ce, gfp_t gfp);
struct i915_request * __must_check
i915_request_create(struct intel_context *ce);
+void i915_request_set_error_once(struct i915_request *rq, int error);
+void __i915_request_skip(struct i915_request *rq);
+
struct i915_request *__i915_request_commit(struct i915_request *request);
void __i915_request_queue(struct i915_request *rq,
const struct i915_sched_attr *attr);
@@ -333,8 +357,6 @@ void i915_request_add(struct i915_request *rq);
bool __i915_request_submit(struct i915_request *request);
void i915_request_submit(struct i915_request *request);
-void i915_request_skip(struct i915_request *request, int error);
-
void __i915_request_unsubmit(struct i915_request *request);
void i915_request_unsubmit(struct i915_request *request);
@@ -361,6 +383,11 @@ static inline bool i915_request_is_active(const struct i915_request *rq)
return test_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags);
}
+static inline bool i915_request_in_priority_queue(const struct i915_request *rq)
+{
+ return test_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags);
+}
+
/**
* Returns true if seq1 is later than seq2.
*/
@@ -371,7 +398,9 @@ static inline bool i915_seqno_passed(u32 seq1, u32 seq2)
static inline u32 __hwsp_seqno(const struct i915_request *rq)
{
- return READ_ONCE(*rq->hwsp_seqno);
+ const u32 *hwsp = READ_ONCE(rq->hwsp_seqno);
+
+ return READ_ONCE(*hwsp);
}
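
Both the pointer and the value behind it can change underneath the reader (completion swaps rq->hwsp_seqno away from the HWSP), so each dereference gets its own READ_ONCE(). A sketch:

    static inline u32 sample_seqno(const u32 *const *slot)
    {
            const u32 *hwsp = READ_ONCE(*slot);     /* snapshot the pointer */

            return READ_ONCE(*hwsp);                /* then the value */
    }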
/**
@@ -454,6 +483,27 @@ static inline bool i915_request_is_running(const struct i915_request *rq)
return __i915_request_has_started(rq);
}
+/**
+ * i915_request_is_ready - check if the request is ready for execution
+ * @rq: the request
+ *
+ * Upon construction, the request is instructed to wait upon various
+ * signals before it is ready to be executed by the HW. That is, we do
+ * not want to start execution and read data before it is written. In practice,
+ * this is controlled with a mixture of interrupts and semaphores. Once
+ * the submit fence is completed, the backend scheduler will place the
+ * request into its queue and from there submit it for execution. So we
+ * can detect when a request is eligible for execution (and is under control
+ * of the scheduler) by querying where it is in any of the scheduler's lists.
+ *
+ * Returns true if the request is ready for execution (it may be inflight),
+ * false otherwise.
+ */
+static inline bool i915_request_is_ready(const struct i915_request *rq)
+{
+ return !list_empty(&rq->sched.link);
+}
+
static inline bool i915_request_completed(const struct i915_request *rq)
{
if (i915_request_signaled(rq))
@@ -464,7 +514,8 @@ static inline bool i915_request_completed(const struct i915_request *rq)
static inline void i915_request_mark_complete(struct i915_request *rq)
{
- rq->hwsp_seqno = (u32 *)&rq->fence.seqno; /* decouple from HWSP */
+ WRITE_ONCE(rq->hwsp_seqno, /* decouple from HWSP */
+ (u32 *)&rq->fence.seqno);
}
static inline bool i915_request_has_waitboost(const struct i915_request *rq)
@@ -483,6 +534,21 @@ static inline bool i915_request_has_sentinel(const struct i915_request *rq)
return unlikely(test_bit(I915_FENCE_FLAG_SENTINEL, &rq->fence.flags));
}
+static inline bool i915_request_on_hold(const struct i915_request *rq)
+{
+ return unlikely(test_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags));
+}
+
+static inline void i915_request_set_hold(struct i915_request *rq)
+{
+ set_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
+static inline void i915_request_clear_hold(struct i915_request *rq)
+{
+ clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags);
+}
+
static inline struct intel_timeline *
i915_request_timeline(struct i915_request *rq)
{
diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
index bf87c70bfdd9..68b06a7ba667 100644
--- a/drivers/gpu/drm/i915/i915_scheduler.c
+++ b/drivers/gpu/drm/i915/i915_scheduler.c
@@ -209,6 +209,8 @@ static void kick_submission(struct intel_engine_cs *engine,
if (!inflight)
goto unlock;
+ engine->execlists.queue_priority_hint = prio;
+
/*
* If we are already the currently executing context, don't
* bother evaluating if we should preempt ourselves.
@@ -216,7 +218,6 @@ static void kick_submission(struct intel_engine_cs *engine,
if (inflight->context == rq->context)
goto unlock;
- engine->execlists.queue_priority_hint = prio;
if (need_preempt(prio, rq_prio(inflight)))
tasklet_hi_schedule(&engine->execlists.tasklet);
@@ -227,10 +228,10 @@ unlock:
static void __i915_schedule(struct i915_sched_node *node,
const struct i915_sched_attr *attr)
{
+ const int prio = max(attr->priority, node->attr.priority);
struct intel_engine_cs *engine;
struct i915_dependency *dep, *p;
struct i915_dependency stack;
- const int prio = attr->priority;
struct sched_cache cache;
LIST_HEAD(dfs);
@@ -238,9 +239,6 @@ static void __i915_schedule(struct i915_sched_node *node,
lockdep_assert_held(&schedule_lock);
GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
- if (prio <= READ_ONCE(node->attr.priority))
- return;
-
if (node_signaled(node))
return;
@@ -324,22 +322,20 @@ static void __i915_schedule(struct i915_sched_node *node,
GEM_BUG_ON(node_to_request(node)->engine != engine);
- node->attr.priority = prio;
+ WRITE_ONCE(node->attr.priority, prio);
- if (list_empty(&node->link)) {
- /*
- * If the request is not in the priolist queue because
- * it is not yet runnable, then it doesn't contribute
- * to our preemption decisions. On the other hand,
- * if the request is on the HW, it too is not in the
- * queue; but in that case we may still need to reorder
- * the inflight requests.
- */
+ /*
+ * Once the request is ready, it will be placed into the
+ * priority lists and then onto the HW runlist. Before the
+ * request is ready, it does not contribute to our preemption
+	 * decisions and we can safely ignore it; both the request and
+	 * any preemption it requires will be dealt with upon submission.
+ * See engine->submit_request()
+ */
+ if (list_empty(&node->link))
continue;
- }
- if (!intel_engine_is_virtual(engine) &&
- !i915_request_is_active(node_to_request(node))) {
+ if (i915_request_in_priority_queue(node_to_request(node))) {
if (!cache.priolist)
cache.priolist =
i915_sched_lookup_priolist(engine,
@@ -365,6 +361,9 @@ static void __bump_priority(struct i915_sched_node *node, unsigned int bump)
{
struct i915_sched_attr attr = node->attr;
+ if (attr.priority & bump)
+ return;
+
attr.priority |= bump;
__i915_schedule(node, &attr);
}
@@ -425,8 +424,6 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
if (!node_signaled(signal)) {
INIT_LIST_HEAD(&dep->dfs_link);
- list_add(&dep->wait_link, &signal->waiters_list);
- list_add(&dep->signal_link, &node->signalers_list);
dep->signaler = signal;
dep->waiter = node;
dep->flags = flags;
@@ -436,6 +433,10 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
!node_started(signal))
node->flags |= I915_SCHED_HAS_SEMAPHORE_CHAIN;
+ /* All set, now publish. Beware the lockless walkers. */
+ list_add_rcu(&dep->signal_link, &node->signalers_list);
+ list_add_rcu(&dep->wait_link, &signal->waiters_list);
+
/*
* As we do not allow WAIT to preempt inflight requests,
* once we have executed a request, along with triggering
@@ -486,7 +487,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
list_for_each_entry_safe(dep, tmp, &node->signalers_list, signal_link) {
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->wait_link);
+ list_del_rcu(&dep->wait_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -497,7 +498,7 @@ void i915_sched_node_fini(struct i915_sched_node *node)
GEM_BUG_ON(dep->signaler != node);
GEM_BUG_ON(!list_empty(&dep->dfs_link));
- list_del(&dep->signal_link);
+ list_del_rcu(&dep->signal_link);
if (dep->flags & I915_DEPENDENCY_ALLOC)
i915_dependency_free(dep);
}
@@ -526,7 +527,8 @@ static struct i915_global_scheduler global = { {
int __init i915_global_scheduler_init(void)
{
global.slab_dependencies = KMEM_CACHE(i915_dependency,
- SLAB_HWCACHE_ALIGN);
+ SLAB_HWCACHE_ALIGN |
+ SLAB_TYPESAFE_BY_RCU);
if (!global.slab_dependencies)
return -ENOMEM;
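
SLAB_TYPESAFE_BY_RCU, together with list_add_rcu()/list_del_rcu(), is what makes the "lockless walkers" above safe: a freed i915_dependency may be reused within a grace period, but it never changes type, so a reader that re-validates after taking its reference never dereferences garbage. The publish side, schematically:

    /* initialise every field of dep first ... */
    dep->signaler = signal;
    dep->waiter = node;

    /* ... then publish; list_add_rcu() orders the stores for RCU readers */
    list_add_rcu(&dep->signal_link, &node->signalers_list);
    list_add_rcu(&dep->wait_link, &signal->waiters_list);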
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index 8812cdd9007f..ed2be3489f8e 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -24,8 +24,6 @@
* SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
-#include <drm/i915_drm.h>
-
#include "display/intel_fbc.h"
#include "display/intel_gmbus.h"
#include "display/intel_vga.h"
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.c b/drivers/gpu/drm/i915/i915_sw_fence.c
index 51ba97daf2a0..a3d38e089b6e 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/i915_sw_fence.c
@@ -211,10 +211,21 @@ void i915_sw_fence_complete(struct i915_sw_fence *fence)
__i915_sw_fence_complete(fence, NULL);
}
-void i915_sw_fence_await(struct i915_sw_fence *fence)
+bool i915_sw_fence_await(struct i915_sw_fence *fence)
{
- debug_fence_assert(fence);
- WARN_ON(atomic_inc_return(&fence->pending) <= 1);
+ int pending;
+
+ /*
+ * It is only safe to add a new await to the fence while it has
+ * not yet been signaled (i.e. there are still existing signalers).
+ */
+ pending = atomic_read(&fence->pending);
+ do {
+ if (pending < 1)
+ return false;
+ } while (!atomic_try_cmpxchg(&fence->pending, &pending, pending + 1));
+
+ return true;
}
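
The loop above is the classic increment-if-still-positive idiom: read the counter, bail out if it has already dropped below one (the fence signaled), otherwise try to publish pending + 1 and retry on interference. A minimal userspace sketch of the same idiom in C11 atomics, with illustrative names rather than the i915 API:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static bool fence_await_sketch(atomic_int *pending)
{
	int cur = atomic_load(pending);

	do {
		if (cur < 1)
			return false;	/* already signaled: too late */
		/* on failure, cur is reloaded with the current value */
	} while (!atomic_compare_exchange_weak(pending, &cur, cur + 1));

	return true;
}

int main(void)
{
	atomic_int pending = 1;

	printf("await while live: %d\n", (int)fence_await_sketch(&pending));
	atomic_store(&pending, 0);	/* fence completes */
	printf("await after done: %d\n", (int)fence_await_sketch(&pending));
	return 0;
}
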
void __i915_sw_fence_init(struct i915_sw_fence *fence,
diff --git a/drivers/gpu/drm/i915/i915_sw_fence.h b/drivers/gpu/drm/i915/i915_sw_fence.h
index 19e806ce43bc..30a863353ee6 100644
--- a/drivers/gpu/drm/i915/i915_sw_fence.h
+++ b/drivers/gpu/drm/i915/i915_sw_fence.h
@@ -91,7 +91,7 @@ int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
unsigned long timeout,
gfp_t gfp);
-void i915_sw_fence_await(struct i915_sw_fence *fence);
+bool i915_sw_fence_await(struct i915_sw_fence *fence);
void i915_sw_fence_complete(struct i915_sw_fence *fence);
static inline bool i915_sw_fence_signaled(const struct i915_sw_fence *fence)
diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c
index 39c79e1c5b52..ed69b5d4a375 100644
--- a/drivers/gpu/drm/i915/i915_switcheroo.c
+++ b/drivers/gpu/drm/i915/i915_switcheroo.c
@@ -43,7 +43,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return i915 && i915->drm.open_count == 0;
+ return i915 && atomic_read(&i915->drm.open_count) == 0;
}
static const struct vga_switcheroo_client_ops i915_switcheroo_ops = {
diff --git a/drivers/gpu/drm/i915/i915_sysfs.c b/drivers/gpu/drm/i915/i915_sysfs.c
index 0cef3130db05..45d32ef42787 100644
--- a/drivers/gpu/drm/i915/i915_sysfs.c
+++ b/drivers/gpu/drm/i915/i915_sysfs.c
@@ -32,6 +32,7 @@
#include "gt/intel_rc6.h"
#include "gt/intel_rps.h"
+#include "gt/sysfs_engines.h"
#include "i915_drv.h"
#include "i915_sysfs.h"
@@ -525,7 +526,7 @@ static ssize_t error_state_write(struct file *file, struct kobject *kobj,
struct device *kdev = kobj_to_dev(kobj);
struct drm_i915_private *dev_priv = kdev_minor_to_i915(kdev);
- DRM_DEBUG_DRIVER("Resetting error state\n");
+ drm_dbg(&dev_priv->drm, "Resetting error state\n");
i915_reset_error_state(dev_priv);
return count;
@@ -564,31 +565,36 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
ret = sysfs_merge_group(&kdev->kobj,
&rc6_attr_group);
if (ret)
- DRM_ERROR("RC6 residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "RC6 residency sysfs setup failed\n");
}
if (HAS_RC6p(dev_priv)) {
ret = sysfs_merge_group(&kdev->kobj,
&rc6p_attr_group);
if (ret)
- DRM_ERROR("RC6p residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "RC6p residency sysfs setup failed\n");
}
if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
ret = sysfs_merge_group(&kdev->kobj,
&media_rc6_attr_group);
if (ret)
- DRM_ERROR("Media RC6 residency sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "Media RC6 residency sysfs setup failed\n");
}
#endif
if (HAS_L3_DPF(dev_priv)) {
ret = device_create_bin_file(kdev, &dpf_attrs);
if (ret)
- DRM_ERROR("l3 parity sysfs setup failed\n");
+ drm_err(&dev_priv->drm,
+ "l3 parity sysfs setup failed\n");
if (NUM_L3_SLICES(dev_priv) > 1) {
ret = device_create_bin_file(kdev,
&dpf_attrs_1);
if (ret)
- DRM_ERROR("l3 parity slice 1 setup failed\n");
+ drm_err(&dev_priv->drm,
+ "l3 parity slice 1 setup failed\n");
}
}
@@ -598,9 +604,11 @@ void i915_setup_sysfs(struct drm_i915_private *dev_priv)
else if (INTEL_GEN(dev_priv) >= 6)
ret = sysfs_create_files(&kdev->kobj, gen6_attrs);
if (ret)
- DRM_ERROR("RPS sysfs setup failed\n");
+ drm_err(&dev_priv->drm, "RPS sysfs setup failed\n");
i915_setup_error_capture(kdev);
+
+ intel_engines_add_sysfs(dev_priv);
}
void i915_teardown_sysfs(struct drm_i915_private *dev_priv)
diff --git a/drivers/gpu/drm/i915/i915_trace.h b/drivers/gpu/drm/i915/i915_trace.h
index 233a97a2c276..bc854ad60954 100644
--- a/drivers/gpu/drm/i915/i915_trace.h
+++ b/drivers/gpu/drm/i915/i915_trace.h
@@ -339,6 +339,68 @@ TRACE_EVENT(intel_disable_plane,
__entry->frame, __entry->scanline)
);
+/* fbc */
+
+TRACE_EVENT(intel_fbc_activate,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_deactivate,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
+TRACE_EVENT(intel_fbc_nuke,
+ TP_PROTO(struct intel_crtc *crtc),
+ TP_ARGS(crtc),
+
+ TP_STRUCT__entry(
+ __field(enum pipe, pipe)
+ __field(u32, frame)
+ __field(u32, scanline)
+ ),
+
+ TP_fast_assign(
+ __entry->pipe = crtc->pipe;
+ __entry->frame = intel_crtc_get_vblank_counter(crtc);
+ __entry->scanline = intel_get_crtc_scanline(crtc);
+ ),
+
+ TP_printk("pipe %c, frame=%u, scanline=%u",
+ pipe_name(__entry->pipe), __entry->frame, __entry->scanline)
+);
+
/* pipe updates */
TRACE_EVENT(intel_pipe_update_start,
@@ -738,7 +800,7 @@ TRACE_EVENT(i915_request_in,
__field(u16, instance)
__field(u32, seqno)
__field(u32, port)
- __field(u32, prio)
+ __field(s32, prio)
),
TP_fast_assign(
@@ -751,7 +813,7 @@ TRACE_EVENT(i915_request_in,
__entry->port = port;
),
- TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%u, port=%u",
+ TP_printk("dev=%u, engine=%u:%u, ctx=%llu, seqno=%u, prio=%d, port=%u",
__entry->dev, __entry->class, __entry->instance,
__entry->ctx, __entry->seqno,
__entry->prio, __entry->port)
diff --git a/drivers/gpu/drm/i915/i915_utils.c b/drivers/gpu/drm/i915/i915_utils.c
index c47261ae86ea..029854ae65fc 100644
--- a/drivers/gpu/drm/i915/i915_utils.c
+++ b/drivers/gpu/drm/i915/i915_utils.c
@@ -8,9 +8,7 @@
#include "i915_drv.h"
#include "i915_utils.h"
-#define FDO_BUG_URL "https://bugs.freedesktop.org/enter_bug.cgi?product=DRI"
-#define FDO_BUG_MSG "Please file a bug at " FDO_BUG_URL " against DRM/Intel " \
- "providing the dmesg log by booting with drm.debug=0xf"
+#define FDO_BUG_MSG "Please file a bug on drm/i915; see " FDO_BUG_URL " for details."
void
__i915_printk(struct drm_i915_private *dev_priv, const char *level,
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index b0ade76bec90..03a73d2bd50d 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -34,6 +34,8 @@
struct drm_i915_private;
struct timer_list;
+#define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs"
+
#undef WARN_ON
/* Many gcc seem to not see through this and fall over :( */
#if 0
@@ -100,12 +102,24 @@ bool i915_error_injected(void);
typeof(max) max__ = (max); \
(void)(&start__ == &size__); \
(void)(&start__ == &max__); \
- start__ > max__ || size__ > max__ - start__; \
+ start__ >= max__ || size__ > max__ - start__; \
})
#define range_overflows_t(type, start, size, max) \
range_overflows((type)(start), (type)(size), (type)(max))
+#define range_overflows_end(start, size, max) ({ \
+ typeof(start) start__ = (start); \
+ typeof(size) size__ = (size); \
+ typeof(max) max__ = (max); \
+ (void)(&start__ == &size__); \
+ (void)(&start__ == &max__); \
+ start__ > max__ || size__ > max__ - start__; \
+})
+
+#define range_overflows_end_t(type, start, size, max) \
+ range_overflows_end((type)(start), (type)(size), (type)(max))
+
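
The only difference between the two macros above (and the one-character fix to range_overflows() itself) is whether max is treated as an exclusive bound: range_overflows() now rejects start == max, while range_overflows_end() permits a range ending exactly at max. A small worked comparison, re-expressed as a plain function for clarity:

#include <stdbool.h>
#include <stdio.h>

static bool overflows(unsigned start, unsigned size, unsigned max,
		      bool end_inclusive)
{
	/* mirrors range_overflows() vs range_overflows_end() above */
	return (end_inclusive ? start > max : start >= max) ||
	       size > max - start;
}

int main(void)
{
	/* a zero-length range sitting exactly at max */
	printf("range_overflows(4096, 0, 4096)     = %d\n",
	       overflows(4096, 0, 4096, false));	/* 1: rejected */
	printf("range_overflows_end(4096, 0, 4096) = %d\n",
	       overflows(4096, 0, 4096, true));		/* 0: allowed */
	return 0;
}
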
/* Note we don't consider signbits :| */
#define overflows_type(x, T) \
(sizeof(x) > sizeof(T) && (x) >> BITS_PER_TYPE(T))
@@ -234,6 +248,11 @@ static inline u64 ptr_to_u64(const void *ptr)
__idx; \
})
+static inline bool is_power_of_2_u64(u64 n)
+{
+ return (n != 0 && ((n & (n - 1)) == 0));
+}
+
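
The helper above relies on the standard bit trick: a power of two has exactly one bit set, so n & (n - 1) clears it to zero, while any other non-zero value keeps at least one bit. A quick check across a few values:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t vals[] = { 0, 1, 4096, 4097, UINT64_C(1) << 40 };

	for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++) {
		uint64_t n = vals[i];
		int pow2 = n != 0 && (n & (n - 1)) == 0;

		printf("%" PRIu64 " -> %d\n", n, pow2);
	}
	return 0;	/* prints 0, 1, 1, 0, 1 */
}
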
static inline void __list_del_many(struct list_head *head,
struct list_head *first)
{
@@ -241,6 +260,12 @@ static inline void __list_del_many(struct list_head *head,
WRITE_ONCE(head->next, first);
}
+static inline int list_is_last_rcu(const struct list_head *list,
+ const struct list_head *head)
+{
+ return READ_ONCE(list->next) == head;
+}
+
/*
* Wait until the work is finally complete, even if it tries to postpone
* by requeueing itself. Note, that if the worker never cancels itself,
diff --git a/drivers/gpu/drm/i915/i915_vgpu.c b/drivers/gpu/drm/i915/i915_vgpu.c
index 968be26735c5..70fca72f5162 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.c
+++ b/drivers/gpu/drm/i915/i915_vgpu.c
@@ -21,6 +21,8 @@
* SOFTWARE.
*/
+#include "i915_drv.h"
+#include "i915_pvinfo.h"
#include "i915_vgpu.h"
/**
@@ -51,13 +53,13 @@
*/
/**
- * i915_detect_vgpu - detect virtual GPU
+ * intel_vgpu_detect - detect virtual GPU
* @dev_priv: i915 device private
*
* This function is called at the initialization stage, to detect whether
* running on a vGPU.
*/
-void i915_detect_vgpu(struct drm_i915_private *dev_priv)
+void intel_vgpu_detect(struct drm_i915_private *dev_priv)
{
struct pci_dev *pdev = dev_priv->drm.pdev;
u64 magic;
@@ -77,7 +79,8 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
shared_area = pci_iomap_range(pdev, 0, VGT_PVINFO_PAGE, VGT_PVINFO_SIZE);
if (!shared_area) {
- DRM_ERROR("failed to map MMIO bar to check for VGT\n");
+ drm_err(&dev_priv->drm,
+ "failed to map MMIO bar to check for VGT\n");
return;
}
@@ -87,7 +90,7 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
version_major = readw(shared_area + vgtif_offset(version_major));
if (version_major < VGT_VERSION_MAJOR) {
- DRM_INFO("VGT interface version mismatch!\n");
+ drm_info(&dev_priv->drm, "VGT interface version mismatch!\n");
goto out;
}
@@ -95,17 +98,42 @@ void i915_detect_vgpu(struct drm_i915_private *dev_priv)
dev_priv->vgpu.active = true;
mutex_init(&dev_priv->vgpu.lock);
- DRM_INFO("Virtual GPU for Intel GVT-g detected.\n");
+ drm_info(&dev_priv->drm, "Virtual GPU for Intel GVT-g detected.\n");
out:
pci_iounmap(pdev, shared_area);
}
+void intel_vgpu_register(struct drm_i915_private *i915)
+{
+ /*
+ * Notify a valid surface after modesetting, when running inside a VM.
+ */
+ if (intel_vgpu_active(i915))
+ intel_uncore_write(&i915->uncore, vgtif_reg(display_ready),
+ VGT_DRV_DISPLAY_READY);
+}
+
+bool intel_vgpu_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.active;
+}
+
bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv)
{
return dev_priv->vgpu.caps & VGT_CAPS_FULL_PPGTT;
}
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
+}
+
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
+}
+
struct _balloon_info_ {
/*
* There are up to 2 regions per mappable/unmappable graphic
@@ -120,13 +148,15 @@ static struct _balloon_info_ bl_info;
static void vgt_deballoon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
if (!drm_mm_node_allocated(node))
return;
- DRM_DEBUG_DRIVER("deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
- node->start,
- node->start + node->size,
- node->size / 1024);
+ drm_dbg(&dev_priv->drm,
+ "deballoon space: range [0x%llx - 0x%llx] %llu KiB.\n",
+ node->start,
+ node->start + node->size,
+ node->size / 1024);
ggtt->vm.reserved -= node->size;
drm_mm_remove_node(node);
@@ -141,12 +171,13 @@ static void vgt_deballoon_space(struct i915_ggtt *ggtt,
*/
void intel_vgt_deballoon(struct i915_ggtt *ggtt)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
int i;
if (!intel_vgpu_active(ggtt->vm.i915))
return;
- DRM_DEBUG("VGT deballoon.\n");
+ drm_dbg(&dev_priv->drm, "VGT deballoon.\n");
for (i = 0; i < 4; i++)
vgt_deballoon_space(ggtt, &bl_info.space[i]);
@@ -156,13 +187,15 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
struct drm_mm_node *node,
unsigned long start, unsigned long end)
{
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
unsigned long size = end - start;
int ret;
if (start >= end)
return -EINVAL;
- DRM_INFO("balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
+ drm_info(&dev_priv->drm,
+ "balloon space: range [ 0x%lx - 0x%lx ] %lu KiB.\n",
start, end, size / 1024);
ret = i915_gem_gtt_reserve(&ggtt->vm, node,
size, start, I915_COLOR_UNEVICTABLE,
@@ -219,7 +252,8 @@ static int vgt_balloon_space(struct i915_ggtt *ggtt,
*/
int intel_vgt_balloon(struct i915_ggtt *ggtt)
{
- struct intel_uncore *uncore = &ggtt->vm.i915->uncore;
+ struct drm_i915_private *dev_priv = ggtt->vm.i915;
+ struct intel_uncore *uncore = &dev_priv->uncore;
unsigned long ggtt_end = ggtt->vm.total;
unsigned long mappable_base, mappable_size, mappable_end;
@@ -241,16 +275,18 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
mappable_end = mappable_base + mappable_size;
unmappable_end = unmappable_base + unmappable_size;
- DRM_INFO("VGT ballooning configuration:\n");
- DRM_INFO("Mappable graphic memory: base 0x%lx size %ldKiB\n",
+ drm_info(&dev_priv->drm, "VGT ballooning configuration:\n");
+ drm_info(&dev_priv->drm,
+ "Mappable graphic memory: base 0x%lx size %ldKiB\n",
mappable_base, mappable_size / 1024);
- DRM_INFO("Unmappable graphic memory: base 0x%lx size %ldKiB\n",
+ drm_info(&dev_priv->drm,
+ "Unmappable graphic memory: base 0x%lx size %ldKiB\n",
unmappable_base, unmappable_size / 1024);
if (mappable_end > ggtt->mappable_end ||
unmappable_base < ggtt->mappable_end ||
unmappable_end > ggtt_end) {
- DRM_ERROR("Invalid ballooning configuration!\n");
+ drm_err(&dev_priv->drm, "Invalid ballooning configuration!\n");
return -EINVAL;
}
@@ -287,7 +323,7 @@ int intel_vgt_balloon(struct i915_ggtt *ggtt)
goto err_below_mappable;
}
- DRM_INFO("VGT balloon successfully\n");
+ drm_info(&dev_priv->drm, "VGT balloon successfully\n");
return 0;
err_below_mappable:
@@ -297,6 +333,6 @@ err_upon_unmappable:
err_upon_mappable:
vgt_deballoon_space(ggtt, &bl_info.space[2]);
err:
- DRM_ERROR("VGT balloon fail\n");
+ drm_err(&dev_priv->drm, "VGT balloon fail\n");
return ret;
}
diff --git a/drivers/gpu/drm/i915/i915_vgpu.h b/drivers/gpu/drm/i915/i915_vgpu.h
index 8b3663dad193..ffbb77d08048 100644
--- a/drivers/gpu/drm/i915/i915_vgpu.h
+++ b/drivers/gpu/drm/i915/i915_vgpu.h
@@ -24,24 +24,17 @@
#ifndef _I915_VGPU_H_
#define _I915_VGPU_H_
-#include "i915_drv.h"
-#include "i915_pvinfo.h"
+#include <linux/types.h>
-void i915_detect_vgpu(struct drm_i915_private *dev_priv);
+struct drm_i915_private;
+struct i915_ggtt;
-bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *dev_priv);
-
-static inline bool
-intel_vgpu_has_hwsp_emulation(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HWSP_EMULATION;
-}
-
-static inline bool
-intel_vgpu_has_huge_gtt(struct drm_i915_private *dev_priv)
-{
- return dev_priv->vgpu.caps & VGT_CAPS_HUGE_GTT;
-}
+void intel_vgpu_detect(struct drm_i915_private *i915);
+bool intel_vgpu_active(struct drm_i915_private *i915);
+void intel_vgpu_register(struct drm_i915_private *i915);
+bool intel_vgpu_has_full_ppgtt(struct drm_i915_private *i915);
+bool intel_vgpu_has_hwsp_emulation(struct drm_i915_private *i915);
+bool intel_vgpu_has_huge_gtt(struct drm_i915_private *i915);
int intel_vgt_balloon(struct i915_ggtt *ggtt);
void intel_vgt_deballoon(struct i915_ggtt *ggtt);
diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c
index 17d7c525ea5c..5b3efb43a8ef 100644
--- a/drivers/gpu/drm/i915/i915_vma.c
+++ b/drivers/gpu/drm/i915/i915_vma.c
@@ -294,6 +294,7 @@ struct i915_vma_work {
struct dma_fence_work base;
struct i915_vma *vma;
struct drm_i915_gem_object *pinned;
+ struct i915_sw_dma_fence_cb cb;
enum i915_cache_level cache_level;
unsigned int flags;
};
@@ -339,6 +340,25 @@ struct i915_vma_work *i915_vma_work(void)
return vw;
}
+int i915_vma_wait_for_bind(struct i915_vma *vma)
+{
+ int err = 0;
+
+ if (rcu_access_pointer(vma->active.excl.fence)) {
+ struct dma_fence *fence;
+
+ rcu_read_lock();
+ fence = dma_fence_get_rcu_safe(&vma->active.excl.fence);
+ rcu_read_unlock();
+ if (fence) {
+ err = dma_fence_wait(fence, MAX_SCHEDULE_TIMEOUT);
+ dma_fence_put(fence);
+ }
+ }
+
+ return err;
+}
+
/**
* i915_vma_bind - Sets up PTEs for a VMA in its corresponding address space.
* @vma: VMA to map
@@ -386,6 +406,8 @@ int i915_vma_bind(struct i915_vma *vma,
trace_i915_vma_bind(vma, bind_flags);
if (work && (bind_flags & ~vma_flags) & vma->vm->bind_async_flags) {
+ struct dma_fence *prev;
+
work->vma = vma;
work->cache_level = cache_level;
work->flags = bind_flags | I915_VMA_ALLOC;
@@ -399,8 +421,14 @@ int i915_vma_bind(struct i915_vma *vma,
* part of the obj->resv->excl_fence as it only affects
* execution and not content or object's backing store lifetime.
*/
- GEM_BUG_ON(i915_active_has_exclusive(&vma->active));
- i915_active_set_exclusive(&vma->active, &work->base.dma);
+ prev = i915_active_set_exclusive(&vma->active, &work->base.dma);
+ if (prev) {
+ __i915_sw_fence_await_dma_fence(&work->base.chain,
+ prev,
+ &work->cb);
+ dma_fence_put(prev);
+ }
+
work->base.dma.error = 0; /* enable the queue_work() */
if (vma->obj) {
@@ -408,7 +436,6 @@ int i915_vma_bind(struct i915_vma *vma,
work->pinned = vma->obj;
}
} else {
- GEM_BUG_ON((bind_flags & ~vma_flags) & vma->vm->bind_async_flags);
ret = vma->ops->bind_vma(vma, cache_level, bind_flags);
if (ret)
return ret;
@@ -614,7 +641,6 @@ i915_vma_insert(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
u64 start, end;
int ret;
- GEM_BUG_ON(i915_vma_is_closed(vma));
GEM_BUG_ON(i915_vma_is_bound(vma, I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND));
GEM_BUG_ON(drm_mm_node_allocated(&vma->node));
@@ -892,6 +918,11 @@ int i915_vma_pin(struct i915_vma *vma, u64 size, u64 alignment, u64 flags)
if (err)
goto err_fence;
+ if (unlikely(i915_vma_is_closed(vma))) {
+ err = -ENOENT;
+ goto err_unlock;
+ }
+
bound = atomic_read(&vma->flags);
if (unlikely(bound & I915_VMA_ERROR)) {
err = -ENOMEM;
@@ -977,8 +1008,14 @@ int i915_ggtt_pin(struct i915_vma *vma, u32 align, unsigned int flags)
do {
err = i915_vma_pin(vma, 0, align, flags | PIN_GLOBAL);
- if (err != -ENOSPC)
+ if (err != -ENOSPC) {
+ if (!err) {
+ err = i915_vma_wait_for_bind(vma);
+ if (err)
+ i915_vma_unpin(vma);
+ }
return err;
+ }
/* Unlike i915_vma_pin, we don't take no for an answer! */
flush_idle_contexts(vm->gt);
@@ -1136,7 +1173,7 @@ int __i915_vma_move_to_active(struct i915_vma *vma, struct i915_request *rq)
GEM_BUG_ON(!i915_vma_is_pinned(vma));
/* Wait for the vma to be bound before we start! */
- err = i915_request_await_active(rq, &vma->active);
+ err = i915_request_await_active(rq, &vma->active, 0);
if (err)
return err;
@@ -1202,25 +1239,41 @@ int __i915_vma_unbind(struct i915_vma *vma)
if (ret)
return ret;
- GEM_BUG_ON(i915_vma_is_active(vma));
if (i915_vma_is_pinned(vma)) {
vma_print_allocator(vma, "is pinned");
return -EAGAIN;
}
- GEM_BUG_ON(i915_vma_is_active(vma));
+ /*
+ * After confirming that no one else is pinning this vma, wait for
+ * any laggards who may have crept in during the wait (through
+ * a residual pin skipping the vm->mutex) to complete.
+ */
+ ret = i915_vma_sync(vma);
+ if (ret)
+ return ret;
+
if (!drm_mm_node_allocated(&vma->node))
return 0;
+ GEM_BUG_ON(i915_vma_is_pinned(vma));
+ GEM_BUG_ON(i915_vma_is_active(vma));
+
if (i915_vma_is_map_and_fenceable(vma)) {
/*
* Check that we have flushed all writes through the GGTT
* before the unbind; otherwise, due to the non-strict nature
* of those indirect writes, they may end up referencing the
* GGTT PTE after the unbind.
+ *
+ * Note that we may be concurrently poking at the GGTT_WRITE
+ * bit from set-domain, as we mark all GGTT vma associated
+ * with an object. We know this is for another vma, as we
+ * are currently unbinding this one -- so if this vma will be
+ * reused, it will be refaulted and have its dirty bit set
+ * before the next write.
*/
i915_vma_flush_writes(vma);
- GEM_BUG_ON(i915_vma_has_ggtt_write(vma));
/* release the fence reg _after_ flushing */
ret = i915_vma_revoke_fence(vma);
@@ -1240,7 +1293,8 @@ int __i915_vma_unbind(struct i915_vma *vma)
trace_i915_vma_unbind(vma);
vma->ops->unbind_vma(vma);
}
- atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR), &vma->flags);
+ atomic_and(~(I915_VMA_BIND_MASK | I915_VMA_ERROR | I915_VMA_GGTT_WRITE),
+ &vma->flags);
i915_vma_detach(vma);
vma_unbind_pages(vma);
@@ -1262,16 +1316,21 @@ int i915_vma_unbind(struct i915_vma *vma)
/* XXX not always required: nop_clear_range */
wakeref = intel_runtime_pm_get(&vm->i915->runtime_pm);
+ /* Optimistic wait before taking the mutex */
+ err = i915_vma_sync(vma);
+ if (err)
+ goto out_rpm;
+
err = mutex_lock_interruptible(&vm->mutex);
if (err)
- return err;
+ goto out_rpm;
err = __i915_vma_unbind(vma);
mutex_unlock(&vm->mutex);
+out_rpm:
if (wakeref)
intel_runtime_pm_put(&vm->i915->runtime_pm, wakeref);
-
return err;
}
diff --git a/drivers/gpu/drm/i915/i915_vma.h b/drivers/gpu/drm/i915/i915_vma.h
index 02b31a62951e..e1ced1df13e1 100644
--- a/drivers/gpu/drm/i915/i915_vma.h
+++ b/drivers/gpu/drm/i915/i915_vma.h
@@ -375,6 +375,8 @@ struct i915_vma *i915_vma_make_unshrinkable(struct i915_vma *vma);
void i915_vma_make_shrinkable(struct i915_vma *vma);
void i915_vma_make_purgeable(struct i915_vma *vma);
+int i915_vma_wait_for_bind(struct i915_vma *vma);
+
static inline int i915_vma_sync(struct i915_vma *vma)
{
/* Wait for the asynchronous bindings and pending GPU reads */
diff --git a/drivers/gpu/drm/i915/i915_vma_types.h b/drivers/gpu/drm/i915/i915_vma_types.h
index e0942efd5236..63831cdb7402 100644
--- a/drivers/gpu/drm/i915/i915_vma_types.h
+++ b/drivers/gpu/drm/i915/i915_vma_types.h
@@ -273,21 +273,10 @@ struct i915_vma {
struct rb_node obj_node;
struct hlist_node obj_hash;
- /** This vma's place in the execbuf reservation list */
- struct list_head exec_link;
- struct list_head reloc_link;
-
/** This vma's place in the eviction list */
struct list_head evict_link;
struct list_head closed_link;
-
- /**
- * Used for performing relocations during execbuffer insertion.
- */
- unsigned int *exec_flags;
- struct hlist_node exec_node;
- u32 exec_handle;
};
#endif
diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c
index 6670a0763be2..d7fe12734db8 100644
--- a/drivers/gpu/drm/i915/intel_device_info.c
+++ b/drivers/gpu/drm/i915/intel_device_info.c
@@ -23,7 +23,9 @@
*/
#include <drm/drm_print.h>
+#include <drm/i915_pciids.h>
+#include "display/intel_cdclk.h"
#include "intel_device_info.h"
#include "i915_drv.h"
@@ -132,6 +134,7 @@ void intel_device_info_print_runtime(const struct intel_runtime_info *info,
{
sseu_dump(&info->sseu, p);
+ drm_printf(p, "rawclk rate: %u kHz\n", info->rawclk_freq);
drm_printf(p, "CS timestamp frequency: %u kHz\n",
info->cs_timestamp_frequency_khz);
}
@@ -743,7 +746,7 @@ static u32 read_timestamp_frequency(struct drm_i915_private *dev_priv)
* hclks." (through the “Clocking Configuration”
* (“CLKCFG”) MCHBAR register)
*/
- return dev_priv->rawclk_freq / 16;
+ return RUNTIME_INFO(dev_priv)->rawclk_freq / 16;
} else if (INTEL_GEN(dev_priv) <= 8) {
/* PRMs say:
*
@@ -974,10 +977,11 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
sfuse_strap & SFUSE_STRAP_DISPLAY_DISABLED ||
(HAS_PCH_CPT(dev_priv) &&
!(sfuse_strap & SFUSE_STRAP_FUSE_LOCK))) {
- DRM_INFO("Display fused off, disabling\n");
+ drm_info(&dev_priv->drm,
+ "Display fused off, disabling\n");
info->pipe_mask = 0;
} else if (fuse_strap & IVB_PIPE_C_DISABLE) {
- DRM_INFO("PipeC fused off\n");
+ drm_info(&dev_priv->drm, "PipeC fused off\n");
info->pipe_mask &= ~BIT(PIPE_C);
}
} else if (HAS_DISPLAY(dev_priv) && INTEL_GEN(dev_priv) >= 9) {
@@ -1000,8 +1004,9 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
* in the mask.
*/
if (enabled_mask == 0 || !is_power_of_2(enabled_mask + 1))
- DRM_ERROR("invalid pipe fuse configuration: enabled_mask=0x%x\n",
- enabled_mask);
+ drm_err(&dev_priv->drm,
+ "invalid pipe fuse configuration: enabled_mask=0x%x\n",
+ enabled_mask);
else
info->pipe_mask = enabled_mask;
@@ -1036,12 +1041,26 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv)
gen12_sseu_info_init(dev_priv);
if (IS_GEN(dev_priv, 6) && intel_vtd_active()) {
- DRM_INFO("Disabling ppGTT for VT-d support\n");
+ drm_info(&dev_priv->drm,
+ "Disabling ppGTT for VT-d support\n");
info->ppgtt_type = INTEL_PPGTT_NONE;
}
+ runtime->rawclk_freq = intel_read_rawclk(dev_priv);
+ drm_dbg(&dev_priv->drm, "rawclk rate: %d kHz\n", runtime->rawclk_freq);
+
/* Initialize command stream timestamp frequency */
- runtime->cs_timestamp_frequency_khz = read_timestamp_frequency(dev_priv);
+ runtime->cs_timestamp_frequency_khz =
+ read_timestamp_frequency(dev_priv);
+ if (runtime->cs_timestamp_frequency_khz) {
+ runtime->cs_timestamp_period_ns =
+ div_u64(1e6, runtime->cs_timestamp_frequency_khz);
+ drm_dbg(&dev_priv->drm,
+ "CS timestamp wraparound in %lldms\n",
+ div_u64(mul_u32_u32(runtime->cs_timestamp_period_ns,
+ S32_MAX),
+ USEC_PER_SEC));
+ }
}
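
The new debug message makes the wraparound horizon concrete: the CS timestamp period is 10^6 / freq_khz nanoseconds, and the 32-bit counter wraps after period * S32_MAX / 10^6 milliseconds. A worked example with an assumed 12,000 kHz frequency (a plausible value, not taken from this patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq_khz = 12000;			/* assumed */
	uint64_t period_ns = 1000000 / freq_khz;	/* 83 ns */
	uint64_t wrap_ms = period_ns * INT32_MAX / 1000000;

	printf("period %llu ns, wraps after %llu ms (~%llu s)\n",
	       (unsigned long long)period_ns,
	       (unsigned long long)wrap_ms,
	       (unsigned long long)(wrap_ms / 1000));	/* ~178 s */
	return 0;
}
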
void intel_driver_caps_print(const struct intel_driver_caps *caps,
@@ -1084,7 +1103,7 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (!(BIT(i) & vdbox_mask)) {
info->engine_mask &= ~BIT(_VCS(i));
- DRM_DEBUG_DRIVER("vcs%u fused off\n", i);
+ drm_dbg(&dev_priv->drm, "vcs%u fused off\n", i);
continue;
}
@@ -1096,8 +1115,8 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (INTEL_GEN(dev_priv) >= 12 || logical_vdbox++ % 2 == 0)
RUNTIME_INFO(dev_priv)->vdbox_sfc_access |= BIT(i);
}
- DRM_DEBUG_DRIVER("vdbox enable: %04x, instances: %04lx\n",
- vdbox_mask, VDBOX_MASK(dev_priv));
+ drm_dbg(&dev_priv->drm, "vdbox enable: %04x, instances: %04lx\n",
+ vdbox_mask, VDBOX_MASK(dev_priv));
GEM_BUG_ON(vdbox_mask != VDBOX_MASK(dev_priv));
for (i = 0; i < I915_MAX_VECS; i++) {
@@ -1108,10 +1127,10 @@ void intel_device_info_init_mmio(struct drm_i915_private *dev_priv)
if (!(BIT(i) & vebox_mask)) {
info->engine_mask &= ~BIT(_VECS(i));
- DRM_DEBUG_DRIVER("vecs%u fused off\n", i);
+ drm_dbg(&dev_priv->drm, "vecs%u fused off\n", i);
}
}
- DRM_DEBUG_DRIVER("vebox enable: %04x, instances: %04lx\n",
- vebox_mask, VEBOX_MASK(dev_priv));
+ drm_dbg(&dev_priv->drm, "vebox enable: %04x, instances: %04lx\n",
+ vebox_mask, VEBOX_MASK(dev_priv));
GEM_BUG_ON(vebox_mask != VEBOX_MASK(dev_priv));
}
diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h
index 2725cb7fc169..1ecb9df2de91 100644
--- a/drivers/gpu/drm/i915/intel_device_info.h
+++ b/drivers/gpu/drm/i915/intel_device_info.h
@@ -180,6 +180,7 @@ struct intel_device_info {
} display;
u16 ddb_size; /* in blocks */
+ u8 num_supported_dbuf_slices; /* number of DBuf slices */
/* Register offsets for the various display pipes and transcoders */
int pipe_offsets[I915_MAX_TRANSCODERS];
@@ -215,7 +216,10 @@ struct intel_runtime_info {
/* Slice/subslice/EU info */
struct sseu_dev_info sseu;
+ u32 rawclk_freq;
+
u32 cs_timestamp_frequency_khz;
+ u32 cs_timestamp_period_ns;
/* Media engine access to SFC per instance */
u8 vdbox_sfc_access;
diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c
new file mode 100644
index 000000000000..6b922efb1d7c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.c
@@ -0,0 +1,500 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include "i915_drv.h"
+#include "intel_dram.h"
+
+struct dram_dimm_info {
+ u8 size, width, ranks;
+};
+
+struct dram_channel_info {
+ struct dram_dimm_info dimm_l, dimm_s;
+ u8 ranks;
+ bool is_16gb_dimm;
+};
+
+#define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type
+
+static const char *intel_dram_type_str(enum intel_dram_type type)
+{
+ static const char * const str[] = {
+ DRAM_TYPE_STR(UNKNOWN),
+ DRAM_TYPE_STR(DDR3),
+ DRAM_TYPE_STR(DDR4),
+ DRAM_TYPE_STR(LPDDR3),
+ DRAM_TYPE_STR(LPDDR4),
+ };
+
+ if (type >= ARRAY_SIZE(str))
+ type = INTEL_DRAM_UNKNOWN;
+
+ return str[type];
+}
+
+#undef DRAM_TYPE_STR
+
+static int intel_dimm_num_devices(const struct dram_dimm_info *dimm)
+{
+ return dimm->ranks * 64 / (dimm->width ?: 1);
+}
+
+/* Returns total GB for the whole DIMM */
+static int skl_get_dimm_size(u16 val)
+{
+ return val & SKL_DRAM_SIZE_MASK;
+}
+
+static int skl_get_dimm_width(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & SKL_DRAM_WIDTH_MASK) {
+ case SKL_DRAM_WIDTH_X8:
+ case SKL_DRAM_WIDTH_X16:
+ case SKL_DRAM_WIDTH_X32:
+ val = (val & SKL_DRAM_WIDTH_MASK) >> SKL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int skl_get_dimm_ranks(u16 val)
+{
+ if (skl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & SKL_DRAM_RANK_MASK) >> SKL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+/* Returns total GB for the whole DIMM */
+static int cnl_get_dimm_size(u16 val)
+{
+ return (val & CNL_DRAM_SIZE_MASK) / 2;
+}
+
+static int cnl_get_dimm_width(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ switch (val & CNL_DRAM_WIDTH_MASK) {
+ case CNL_DRAM_WIDTH_X8:
+ case CNL_DRAM_WIDTH_X16:
+ case CNL_DRAM_WIDTH_X32:
+ val = (val & CNL_DRAM_WIDTH_MASK) >> CNL_DRAM_WIDTH_SHIFT;
+ return 8 << val;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int cnl_get_dimm_ranks(u16 val)
+{
+ if (cnl_get_dimm_size(val) == 0)
+ return 0;
+
+ val = (val & CNL_DRAM_RANK_MASK) >> CNL_DRAM_RANK_SHIFT;
+
+ return val + 1;
+}
+
+static bool
+skl_is_16gb_dimm(const struct dram_dimm_info *dimm)
+{
+ /* Convert total GB to Gb per DRAM device */
+ return 8 * dimm->size / (intel_dimm_num_devices(dimm) ?: 1) == 16;
+}
+
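
The arithmetic above works in two steps: a channel is 64 bits wide, so a DIMM of xW devices with R ranks carries R * 64 / W devices, and multiplying the total size in GB by 8 and dividing by the device count yields Gb per device. For example, a 16 GB single-rank DIMM of x8 devices has 8 devices at 16 Gb each:

#include <stdio.h>

int main(void)
{
	int size_gb = 16, width = 8, ranks = 1;	/* 16 GB, x8, single rank */
	int devices = ranks * 64 / width;	/* 8 devices fill the bus */
	int gbit = 8 * size_gb / devices;	/* 16 Gb per device */

	printf("%d devices of %d Gb -> %s\n", devices, gbit,
	       gbit == 16 ? "16Gb DIMM" : "not a 16Gb DIMM");
	return 0;
}
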
+static void
+skl_dram_get_dimm_info(struct drm_i915_private *i915,
+ struct dram_dimm_info *dimm,
+ int channel, char dimm_name, u16 val)
+{
+ if (INTEL_GEN(i915) >= 10) {
+ dimm->size = cnl_get_dimm_size(val);
+ dimm->width = cnl_get_dimm_width(val);
+ dimm->ranks = cnl_get_dimm_ranks(val);
+ } else {
+ dimm->size = skl_get_dimm_size(val);
+ dimm->width = skl_get_dimm_width(val);
+ dimm->ranks = skl_get_dimm_ranks(val);
+ }
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM %c size: %u GB, width: X%u, ranks: %u, 16Gb DIMMs: %s\n",
+ channel, dimm_name, dimm->size, dimm->width, dimm->ranks,
+ yesno(skl_is_16gb_dimm(dimm)));
+}
+
+static int
+skl_dram_get_channel_info(struct drm_i915_private *i915,
+ struct dram_channel_info *ch,
+ int channel, u32 val)
+{
+ skl_dram_get_dimm_info(i915, &ch->dimm_l,
+ channel, 'L', val & 0xffff);
+ skl_dram_get_dimm_info(i915, &ch->dimm_s,
+ channel, 'S', val >> 16);
+
+ if (ch->dimm_l.size == 0 && ch->dimm_s.size == 0) {
+ drm_dbg_kms(&i915->drm, "CH%u not populated\n", channel);
+ return -EINVAL;
+ }
+
+ if (ch->dimm_l.ranks == 2 || ch->dimm_s.ranks == 2)
+ ch->ranks = 2;
+ else if (ch->dimm_l.ranks == 1 && ch->dimm_s.ranks == 1)
+ ch->ranks = 2;
+ else
+ ch->ranks = 1;
+
+ ch->is_16gb_dimm = skl_is_16gb_dimm(&ch->dimm_l) ||
+ skl_is_16gb_dimm(&ch->dimm_s);
+
+ drm_dbg_kms(&i915->drm, "CH%u ranks: %u, 16Gb DIMMs: %s\n",
+ channel, ch->ranks, yesno(ch->is_16gb_dimm));
+
+ return 0;
+}
+
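
The rank rule above treats a channel as dual rank if it contains any dual-rank DIMM, or a pair of single-rank DIMMs (which together present two ranks); only a lone single-rank DIMM leaves the channel single rank. The same decision table as a standalone function:

#include <stdio.h>

static int channel_ranks(int ranks_l, int ranks_s)
{
	if (ranks_l == 2 || ranks_s == 2)
		return 2;			/* any dual-rank DIMM */
	if (ranks_l == 1 && ranks_s == 1)
		return 2;			/* two single-rank DIMMs */
	return 1;				/* lone single-rank DIMM */
}

int main(void)
{
	printf("%d %d %d\n",
	       channel_ranks(2, 0), channel_ranks(1, 1),
	       channel_ranks(1, 0));		/* 2 2 1 */
	return 0;
}
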
+static bool
+intel_is_dram_symmetric(const struct dram_channel_info *ch0,
+ const struct dram_channel_info *ch1)
+{
+ return !memcmp(ch0, ch1, sizeof(*ch0)) &&
+ (ch0->dimm_s.size == 0 ||
+ !memcmp(&ch0->dimm_l, &ch0->dimm_s, sizeof(ch0->dimm_l)));
+}
+
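
Comparing whole structs with memcmp(), as above, is only reliable when the padding bytes are known to match; the two dram_channel_info instances compared here are zero-initialized at declaration (see skl_dram_get_channels_info() just below), so padding compares equal. A small illustration of why the zeroing matters:

#include <stdio.h>
#include <string.h>

struct demo {
	char a;		/* typically followed by padding bytes */
	int b;
};

int main(void)
{
	struct demo x, y;

	memset(&x, 0, sizeof(x));	/* padding bytes zeroed too */
	memset(&y, 0, sizeof(y));
	x.a = 1; x.b = 2;
	y.a = 1; y.b = 2;
	printf("equal: %d\n", memcmp(&x, &y, sizeof(x)) == 0); /* 1 */
	return 0;
}
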
+static int
+skl_dram_get_channels_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ struct dram_channel_info ch0 = {}, ch1 = {};
+ u32 val;
+ int ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch0, 0, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_DIMM_CH1_0_0_0_MCHBAR_MCMAIN);
+ ret = skl_dram_get_channel_info(i915, &ch1, 1, val);
+ if (ret == 0)
+ dram_info->num_channels++;
+
+ if (dram_info->num_channels == 0) {
+ drm_info(&i915->drm, "Number of memory channels is zero\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If any of the channels is a single-rank channel, the worst-case
+ * output will be the same as for single-rank memory, so treat the
+ * whole configuration as single rank.
+ */
+ if (ch0.ranks == 1 || ch1.ranks == 1)
+ dram_info->ranks = 1;
+ else
+ dram_info->ranks = max(ch0.ranks, ch1.ranks);
+
+ if (dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory rank information\n");
+ return -EINVAL;
+ }
+
+ dram_info->is_16gb_dimm = ch0.is_16gb_dimm || ch1.is_16gb_dimm;
+
+ dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1);
+
+ drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? %s\n",
+ yesno(dram_info->symmetric_memory));
+
+ return 0;
+}
+
+static enum intel_dram_type
+skl_get_dram_type(struct drm_i915_private *i915)
+{
+ u32 val;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MAD_INTER_CHANNEL_0_0_0_MCHBAR_MCMAIN);
+
+ switch (val & SKL_DRAM_DDR_TYPE_MASK) {
+ case SKL_DRAM_DDR_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case SKL_DRAM_DDR_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case SKL_DRAM_DDR_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case SKL_DRAM_DDR_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static int
+skl_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 mem_freq_khz, val;
+ int ret;
+
+ dram_info->type = skl_get_dram_type(i915);
+ drm_dbg_kms(&i915->drm, "DRAM type: %s\n",
+ intel_dram_type_str(dram_info->type));
+
+ ret = skl_dram_get_channels_info(i915);
+ if (ret)
+ return ret;
+
+ val = intel_uncore_read(&i915->uncore,
+ SKL_MC_BIOS_DATA_0_0_0_MCHBAR_PCU);
+ mem_freq_khz = DIV_ROUND_UP((val & SKL_REQ_DATA_MASK) *
+ SKL_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_info->bandwidth_kbps = dram_info->num_channels *
+ mem_freq_khz * 8;
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+ return 0;
+}
+
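
The bandwidth figure above assumes each channel moves 8 bytes per memory clock, so bandwidth_kbps = num_channels * mem_freq_khz * 8. A worked example with assumed values (two channels at 1,600,000 kHz):

#include <stdio.h>

int main(void)
{
	unsigned int num_channels = 2;		/* assumed */
	unsigned int mem_freq_khz = 1600000;	/* assumed */

	/* 8 bytes per channel per clock */
	printf("%u kBps\n", num_channels * mem_freq_khz * 8);
	return 0;	/* prints 25600000 */
}
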
+/* Returns Gb per DRAM device */
+static int bxt_get_dimm_size(u32 val)
+{
+ switch (val & BXT_DRAM_SIZE_MASK) {
+ case BXT_DRAM_SIZE_4GBIT:
+ return 4;
+ case BXT_DRAM_SIZE_6GBIT:
+ return 6;
+ case BXT_DRAM_SIZE_8GBIT:
+ return 8;
+ case BXT_DRAM_SIZE_12GBIT:
+ return 12;
+ case BXT_DRAM_SIZE_16GBIT:
+ return 16;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static int bxt_get_dimm_width(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ val = (val & BXT_DRAM_WIDTH_MASK) >> BXT_DRAM_WIDTH_SHIFT;
+
+ return 8 << val;
+}
+
+static int bxt_get_dimm_ranks(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return 0;
+
+ switch (val & BXT_DRAM_RANK_MASK) {
+ case BXT_DRAM_RANK_SINGLE:
+ return 1;
+ case BXT_DRAM_RANK_DUAL:
+ return 2;
+ default:
+ MISSING_CASE(val);
+ return 0;
+ }
+}
+
+static enum intel_dram_type bxt_get_dimm_type(u32 val)
+{
+ if (!bxt_get_dimm_size(val))
+ return INTEL_DRAM_UNKNOWN;
+
+ switch (val & BXT_DRAM_TYPE_MASK) {
+ case BXT_DRAM_TYPE_DDR3:
+ return INTEL_DRAM_DDR3;
+ case BXT_DRAM_TYPE_LPDDR3:
+ return INTEL_DRAM_LPDDR3;
+ case BXT_DRAM_TYPE_DDR4:
+ return INTEL_DRAM_DDR4;
+ case BXT_DRAM_TYPE_LPDDR4:
+ return INTEL_DRAM_LPDDR4;
+ default:
+ MISSING_CASE(val);
+ return INTEL_DRAM_UNKNOWN;
+ }
+}
+
+static void bxt_get_dimm_info(struct dram_dimm_info *dimm, u32 val)
+{
+ dimm->width = bxt_get_dimm_width(val);
+ dimm->ranks = bxt_get_dimm_ranks(val);
+
+ /*
+ * Size in register is Gb per DRAM device. Convert to total
+ * GB to match the way we report this for non-LP platforms.
+ */
+ dimm->size = bxt_get_dimm_size(val) * intel_dimm_num_devices(dimm) / 8;
+}
+
+static int bxt_get_dram_info(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ u32 dram_channels;
+ u32 mem_freq_khz, val;
+ u8 num_active_channels;
+ int i;
+
+ val = intel_uncore_read(&i915->uncore, BXT_P_CR_MC_BIOS_REQ_0_0_0);
+ mem_freq_khz = DIV_ROUND_UP((val & BXT_REQ_DATA_MASK) *
+ BXT_MEMORY_FREQ_MULTIPLIER_HZ, 1000);
+
+ dram_channels = val & BXT_DRAM_CHANNEL_ACTIVE_MASK;
+ num_active_channels = hweight32(dram_channels);
+
+ /* Each active bit represents 4-byte channel */
+ dram_info->bandwidth_kbps = (mem_freq_khz * num_active_channels * 4);
+
+ if (dram_info->bandwidth_kbps == 0) {
+ drm_info(&i915->drm,
+ "Couldn't get system memory bandwidth\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Now read each DUNIT8/9/10/11 to check the rank of each DIMM.
+ */
+ for (i = BXT_D_CR_DRP0_DUNIT_START; i <= BXT_D_CR_DRP0_DUNIT_END; i++) {
+ struct dram_dimm_info dimm;
+ enum intel_dram_type type;
+
+ val = intel_uncore_read(&i915->uncore, BXT_D_CR_DRP0_DUNIT(i));
+ if (val == 0xFFFFFFFF)
+ continue;
+
+ dram_info->num_channels++;
+
+ bxt_get_dimm_info(&dimm, val);
+ type = bxt_get_dimm_type(val);
+
+ drm_WARN_ON(&i915->drm, type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != INTEL_DRAM_UNKNOWN &&
+ dram_info->type != type);
+
+ drm_dbg_kms(&i915->drm,
+ "CH%u DIMM size: %u GB, width: X%u, ranks: %u, type: %s\n",
+ i - BXT_D_CR_DRP0_DUNIT_START,
+ dimm.size, dimm.width, dimm.ranks,
+ intel_dram_type_str(type));
+
+ /*
+ * If any of the channels is a single-rank channel, the
+ * worst-case output will be the same as for single-rank
+ * memory, so treat the configuration as single rank.
+ */
+ if (dram_info->ranks == 0)
+ dram_info->ranks = dimm.ranks;
+ else if (dimm.ranks == 1)
+ dram_info->ranks = 1;
+
+ if (type != INTEL_DRAM_UNKNOWN)
+ dram_info->type = type;
+ }
+
+ if (dram_info->type == INTEL_DRAM_UNKNOWN || dram_info->ranks == 0) {
+ drm_info(&i915->drm, "couldn't get memory information\n");
+ return -EINVAL;
+ }
+
+ dram_info->valid = true;
+
+ return 0;
+}
+
+void intel_dram_detect(struct drm_i915_private *i915)
+{
+ struct dram_info *dram_info = &i915->dram_info;
+ int ret;
+
+ /*
+ * Assume 16Gb DIMMs are present until proven otherwise.
+ * This is only used for the level 0 watermark latency
+ * w/a which does not apply to bxt/glk.
+ */
+ dram_info->is_16gb_dimm = !IS_GEN9_LP(i915);
+
+ if (INTEL_GEN(i915) < 9 || !HAS_DISPLAY(i915))
+ return;
+
+ if (IS_GEN9_LP(i915))
+ ret = bxt_get_dram_info(i915);
+ else
+ ret = skl_get_dram_info(i915);
+ if (ret)
+ return;
+
+ drm_dbg_kms(&i915->drm, "DRAM bandwidth: %u kBps, channels: %u\n",
+ dram_info->bandwidth_kbps, dram_info->num_channels);
+
+ drm_dbg_kms(&i915->drm, "DRAM ranks: %u, 16Gb DIMMs: %s\n",
+ dram_info->ranks, yesno(dram_info->is_16gb_dimm));
+}
+
+static u32 gen9_edram_size_mb(struct drm_i915_private *i915, u32 cap)
+{
+ static const u8 ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
+ static const u8 sets[4] = { 1, 1, 2, 2 };
+
+ return EDRAM_NUM_BANKS(cap) *
+ ways[EDRAM_WAYS_IDX(cap)] *
+ sets[EDRAM_SETS_IDX(cap)];
+}
+
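
The eDRAM size falls straight out of the two lookup tables above: capacity in MB is banks * ways * sets. For instance, with 8 banks, a ways index of 1 (8 ways) and a sets index of 2 (2 sets), the capability decodes to 128 MB. The index values here are illustrative, not decoded from a real HSW_EDRAM_CAP:

#include <stdio.h>

int main(void)
{
	static const unsigned char ways[8] = { 4, 8, 12, 16, 16, 16, 16, 16 };
	static const unsigned char sets[4] = { 1, 1, 2, 2 };
	unsigned int banks = 8, ways_idx = 1, sets_idx = 2; /* illustrative */

	printf("%u MB\n", banks * ways[ways_idx] * sets[sets_idx]); /* 128 */
	return 0;
}
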
+void intel_dram_edram_detect(struct drm_i915_private *i915)
+{
+ u32 edram_cap = 0;
+
+ if (!(IS_HASWELL(i915) || IS_BROADWELL(i915) || INTEL_GEN(i915) >= 9))
+ return;
+
+ edram_cap = __raw_uncore_read32(&i915->uncore, HSW_EDRAM_CAP);
+
+ /* NB: We can't write IDICR yet because we don't have gt funcs set up */
+
+ if (!(edram_cap & EDRAM_ENABLED))
+ return;
+
+ /*
+ * The capability bits needed for the size calculation are not
+ * present before gen9, so always return 128MB there.
+ */
+ if (INTEL_GEN(i915) < 9)
+ i915->edram_size_mb = 128;
+ else
+ i915->edram_size_mb = gen9_edram_size_mb(i915, edram_cap);
+
+ dev_info(i915->drm.dev,
+ "Found %uMB of eDRAM\n", i915->edram_size_mb);
+}
diff --git a/drivers/gpu/drm/i915/intel_dram.h b/drivers/gpu/drm/i915/intel_dram.h
new file mode 100644
index 000000000000..4ba13c13162c
--- /dev/null
+++ b/drivers/gpu/drm/i915/intel_dram.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __INTEL_DRAM_H__
+#define __INTEL_DRAM_H__
+
+struct drm_i915_private;
+
+void intel_dram_edram_detect(struct drm_i915_private *i915);
+void intel_dram_detect(struct drm_i915_private *i915);
+
+#endif /* __INTEL_DRAM_H__ */
diff --git a/drivers/gpu/drm/i915/intel_gvt.c b/drivers/gpu/drm/i915/intel_gvt.c
index 2b6c016387c2..21b91313cc5d 100644
--- a/drivers/gpu/drm/i915/intel_gvt.c
+++ b/drivers/gpu/drm/i915/intel_gvt.c
@@ -22,6 +22,7 @@
*/
#include "i915_drv.h"
+#include "i915_vgpu.h"
#include "intel_gvt.h"
/**
@@ -67,12 +68,13 @@ void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
return;
if (intel_vgpu_active(dev_priv)) {
- DRM_INFO("GVT-g is disabled for guest\n");
+ drm_info(&dev_priv->drm, "GVT-g is disabled for guest\n");
goto bail;
}
if (!is_supported_device(dev_priv)) {
- DRM_INFO("Unsupported device. GVT-g is disabled\n");
+ drm_info(&dev_priv->drm,
+ "Unsupported device. GVT-g is disabled\n");
goto bail;
}
@@ -99,18 +101,20 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
return -ENODEV;
if (!i915_modparams.enable_gvt) {
- DRM_DEBUG_DRIVER("GVT-g is disabled by kernel params\n");
+ drm_dbg(&dev_priv->drm,
+ "GVT-g is disabled by kernel params\n");
return 0;
}
- if (USES_GUC_SUBMISSION(dev_priv)) {
- DRM_ERROR("i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
+ if (intel_uc_wants_guc_submission(&dev_priv->gt.uc)) {
+ drm_err(&dev_priv->drm,
+ "i915 GVT-g loading failed due to Graphics virtualization is not yet supported with GuC submission\n");
return -EIO;
}
ret = intel_gvt_init_device(dev_priv);
if (ret) {
- DRM_DEBUG_DRIVER("Fail to init GVT device\n");
+ drm_dbg(&dev_priv->drm, "Fail to init GVT device\n");
goto bail;
}
@@ -121,6 +125,11 @@ bail:
return 0;
}
+static inline bool intel_gvt_active(struct drm_i915_private *dev_priv)
+{
+ return dev_priv->gvt;
+}
+
/**
* intel_gvt_driver_remove - cleanup GVT components when i915 driver is
* unbinding
diff --git a/drivers/gpu/drm/i915/intel_memory_region.c b/drivers/gpu/drm/i915/intel_memory_region.c
index d0d038b3cd79..6b5e9d88646d 100644
--- a/drivers/gpu/drm/i915/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/intel_memory_region.c
@@ -265,7 +265,9 @@ int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
if (IS_ERR(mem)) {
err = PTR_ERR(mem);
- DRM_ERROR("Failed to setup region(%d) type=%d\n", err, type);
+ drm_err(&i915->drm,
+ "Failed to setup region(%d) type=%d\n",
+ err, type);
goto out_cleanup;
}
diff --git a/drivers/gpu/drm/i915/intel_pch.c b/drivers/gpu/drm/i915/intel_pch.c
index 4ed60e1f01db..20ab9a5023b5 100644
--- a/drivers/gpu/drm/i915/intel_pch.c
+++ b/drivers/gpu/drm/i915/intel_pch.c
@@ -13,91 +13,106 @@ intel_pch_type(const struct drm_i915_private *dev_priv, unsigned short id)
switch (id) {
case INTEL_PCH_IBX_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ibex Peak PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 5));
+ drm_WARN_ON(&dev_priv->drm, !IS_GEN(dev_priv, 5));
return PCH_IBX;
case INTEL_PCH_CPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found CougarPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
return PCH_CPT;
case INTEL_PCH_PPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found PantherPoint PCH\n");
- WARN_ON(!IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_GEN(dev_priv, 6) && !IS_IVYBRIDGE(dev_priv));
/* PantherPoint is CPT compatible */
return PCH_CPT;
case INTEL_PCH_LPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_LPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found LynxPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
return PCH_LPT;
case INTEL_PCH_WPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ IS_HSW_ULT(dev_priv) || IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_WPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found WildcatPoint LP PCH\n");
- WARN_ON(!IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
- WARN_ON(!IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HASWELL(dev_priv) && !IS_BROADWELL(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_HSW_ULT(dev_priv) && !IS_BDW_ULT(dev_priv));
/* WildcatPoint is LPT compatible */
return PCH_LPT;
case INTEL_PCH_SPT_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_SPT_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found SunrisePoint LP PCH\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_SPT;
case INTEL_PCH_KBP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Kaby Lake PCH (KBP)\n");
- WARN_ON(!IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
- !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm,
+ !IS_SKYLAKE(dev_priv) && !IS_KABYLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
/* KBP is SPT compatible */
return PCH_SPT;
case INTEL_PCH_CNP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Cannon Lake PCH (CNP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CNP_LP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm,
"Found Cannon Lake LP PCH (CNP-LP)\n");
- WARN_ON(!IS_CANNONLAKE(dev_priv) && !IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_CANNONLAKE(dev_priv) &&
+ !IS_COFFEELAKE(dev_priv));
return PCH_CNP;
case INTEL_PCH_CMP_DEVICE_ID_TYPE:
case INTEL_PCH_CMP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake PCH (CMP)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
/* CometPoint is CNP Compatible */
return PCH_CNP;
case INTEL_PCH_CMP_V_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Comet Lake V PCH (CMP-V)\n");
- WARN_ON(!IS_COFFEELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_COFFEELAKE(dev_priv));
/* Comet Lake V PCH is based on KBP, which is SPT compatible */
return PCH_SPT;
case INTEL_PCH_ICP_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Ice Lake PCH\n");
- WARN_ON(!IS_ICELAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ICELAKE(dev_priv));
return PCH_ICP;
case INTEL_PCH_MCC_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Mule Creek Canyon PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
return PCH_MCC;
case INTEL_PCH_TGP_DEVICE_ID_TYPE:
case INTEL_PCH_TGP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Tiger Lake LP PCH\n");
- WARN_ON(!IS_TIGERLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_TIGERLAKE(dev_priv));
return PCH_TGP;
case INTEL_PCH_JSP_DEVICE_ID_TYPE:
case INTEL_PCH_JSP2_DEVICE_ID_TYPE:
drm_dbg_kms(&dev_priv->drm, "Found Jasper Lake PCH\n");
- WARN_ON(!IS_ELKHARTLAKE(dev_priv));
+ drm_WARN_ON(&dev_priv->drm, !IS_ELKHARTLAKE(dev_priv));
return PCH_JSP;
default:
return PCH_NONE;
@@ -188,7 +203,8 @@ void intel_detect_pch(struct drm_i915_private *dev_priv)
pch_type = intel_pch_type(dev_priv, id);
/* Sanity check virtual PCH id */
- if (WARN_ON(id && pch_type == PCH_NONE))
+ if (drm_WARN_ON(&dev_priv->drm,
+ id && pch_type == PCH_NONE))
id = 0;
dev_priv->pch_type = pch_type;
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index bd2d30ecc030..8375054ba27d 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -40,12 +40,36 @@
#include "gt/intel_llc.h"
#include "i915_drv.h"
+#include "i915_fixed.h"
#include "i915_irq.h"
#include "i915_trace.h"
#include "intel_pm.h"
#include "intel_sideband.h"
#include "../../../platform/x86/intel_ips.h"
+/* Stores plane specific WM parameters */
+struct skl_wm_params {
+ bool x_tiled, y_tiled;
+ bool rc_surface;
+ bool is_planar;
+ u32 width;
+ u8 cpp;
+ u32 plane_pixel_rate;
+ u32 y_min_scanlines;
+ u32 plane_bytes_per_line;
+ uint_fixed_16_16_t plane_blocks_per_line;
+ uint_fixed_16_16_t y_tile_minimum;
+ u32 linetime_us;
+ u32 dbuf_block_size;
+};
+
+/* used in computing the new watermarks state */
+struct intel_wm_config {
+ unsigned int num_pipes_active;
+ bool sprites_enabled;
+ bool sprites_scaled;
+};
+
static void gen9_init_clock_gating(struct drm_i915_private *dev_priv)
{
if (HAS_LLC(dev_priv)) {
@@ -128,16 +152,6 @@ static void glk_init_clock_gating(struct drm_i915_private *dev_priv)
*/
I915_WRITE(GEN9_CLKGATE_DIS_0, I915_READ(GEN9_CLKGATE_DIS_0) |
PWM1_GATING_DIS | PWM2_GATING_DIS);
-
- /* WaDDIIOTimeout:glk */
- if (IS_GLK_REVID(dev_priv, 0, GLK_REVID_A1)) {
- u32 val = I915_READ(CHICKEN_MISC_2);
- val &= ~(GLK_CL0_PWR_DOWN |
- GLK_CL1_PWR_DOWN |
- GLK_CL2_PWR_DOWN);
- I915_WRITE(CHICKEN_MISC_2, val);
- }
-
}
static void pnv_get_mem_freq(struct drm_i915_private *dev_priv)
@@ -469,9 +483,9 @@ static void vlv_get_fifo_size(struct intel_crtc_state *crtc_state)
struct vlv_fifo_state *fifo_state = &crtc_state->wm.vlv.fifo_state;
enum pipe pipe = crtc->pipe;
int sprite0_start, sprite1_start;
+ u32 dsparb, dsparb2, dsparb3;
switch (pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = I915_READ(DSPARB);
dsparb2 = I915_READ(DSPARB2);
@@ -1969,6 +1983,7 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
const struct vlv_fifo_state *fifo_state =
&crtc_state->wm.vlv.fifo_state;
int sprite0_start, sprite1_start, fifo_size;
+ u32 dsparb, dsparb2, dsparb3;
if (!crtc_state->fifo_changed)
return;
@@ -1977,8 +1992,8 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
sprite1_start = fifo_state->plane[PLANE_SPRITE0] + sprite0_start;
fifo_size = fifo_state->plane[PLANE_SPRITE1] + sprite1_start;
- WARN_ON(fifo_state->plane[PLANE_CURSOR] != 63);
- WARN_ON(fifo_size != 511);
+ drm_WARN_ON(&dev_priv->drm, fifo_state->plane[PLANE_CURSOR] != 63);
+ drm_WARN_ON(&dev_priv->drm, fifo_size != 511);
trace_vlv_fifo_size(crtc, sprite0_start, sprite1_start, fifo_size);
@@ -1994,7 +2009,6 @@ static void vlv_atomic_update_fifo(struct intel_atomic_state *state,
spin_lock(&uncore->lock);
switch (crtc->pipe) {
- u32 dsparb, dsparb2, dsparb3;
case PIPE_A:
dsparb = intel_uncore_read_fw(uncore, DSPARB);
dsparb2 = intel_uncore_read_fw(uncore, DSPARB2);
@@ -2776,7 +2790,7 @@ static bool ilk_validate_wm_level(int level,
}
static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
- const struct intel_crtc *intel_crtc,
+ const struct intel_crtc *crtc,
int level,
struct intel_crtc_state *crtc_state,
const struct intel_plane_state *pristate,
@@ -2810,34 +2824,6 @@ static void ilk_compute_wm_level(const struct drm_i915_private *dev_priv,
result->enable = true;
}
-static u32
-hsw_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
- const struct intel_atomic_state *intel_state =
- to_intel_atomic_state(crtc_state->uapi.state);
- const struct drm_display_mode *adjusted_mode =
- &crtc_state->hw.adjusted_mode;
- u32 linetime, ips_linetime;
-
- if (!crtc_state->hw.active)
- return 0;
- if (WARN_ON(adjusted_mode->crtc_clock == 0))
- return 0;
- if (WARN_ON(intel_state->cdclk.logical.cdclk == 0))
- return 0;
-
- /* The WM are computed with base on how long it takes to fill a single
- * row at the given clock rate, multiplied by 8.
- * */
- linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- adjusted_mode->crtc_clock);
- ips_linetime = DIV_ROUND_CLOSEST(adjusted_mode->crtc_htotal * 1000 * 8,
- intel_state->cdclk.logical.cdclk);
-
- return PIPE_WM_LINETIME_IPS_LINETIME(ips_linetime) |
- PIPE_WM_LINETIME_TIME(linetime);
-}
-
static void intel_read_wm_latency(struct drm_i915_private *dev_priv,
u16 wm[8])
{
@@ -3135,7 +3121,7 @@ static bool ilk_validate_pipe_wm(const struct drm_i915_private *dev_priv,
static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
{
struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
struct intel_pipe_wm *pipe_wm;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
@@ -3175,12 +3161,9 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
usable_level = 0;
memset(&pipe_wm->wm, 0, sizeof(pipe_wm->wm));
- ilk_compute_wm_level(dev_priv, intel_crtc, 0, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, 0, crtc_state,
pristate, sprstate, curstate, &pipe_wm->wm[0]);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- pipe_wm->linetime = hsw_compute_linetime_wm(crtc_state);
-
if (!ilk_validate_pipe_wm(dev_priv, pipe_wm))
return -EINVAL;
@@ -3189,7 +3172,7 @@ static int ilk_compute_pipe_wm(struct intel_crtc_state *crtc_state)
for (level = 1; level <= usable_level; level++) {
struct intel_wm_level *wm = &pipe_wm->wm[level];
- ilk_compute_wm_level(dev_priv, intel_crtc, level, crtc_state,
+ ilk_compute_wm_level(dev_priv, crtc, level, crtc_state,
pristate, sprstate, curstate, wm);
/*
@@ -3417,7 +3400,7 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
* level is disabled. Doing otherwise could cause underruns.
*/
if (INTEL_GEN(dev_priv) <= 6 && r->spr_val) {
- WARN_ON(wm_lp != 1);
+ drm_WARN_ON(&dev_priv->drm, wm_lp != 1);
results->wm_lp_spr[wm_lp - 1] = WM1S_LP_EN | r->spr_val;
} else
results->wm_lp_spr[wm_lp - 1] = r->spr_val;
@@ -3426,14 +3409,12 @@ static void ilk_compute_wm_results(struct drm_i915_private *dev_priv,
/* LP0 register values */
for_each_intel_crtc(&dev_priv->drm, intel_crtc) {
enum pipe pipe = intel_crtc->pipe;
- const struct intel_wm_level *r =
- &intel_crtc->wm.active.ilk.wm[0];
+ const struct intel_pipe_wm *pipe_wm = &intel_crtc->wm.active.ilk;
+ const struct intel_wm_level *r = &pipe_wm->wm[0];
- if (WARN_ON(!r->enable))
+ if (drm_WARN_ON(&dev_priv->drm, !r->enable))
continue;
- results->wm_linetime[pipe] = intel_crtc->wm.active.ilk.linetime;
-
results->wm_pipe[pipe] =
(r->pri_val << WM0_PIPE_PLANE_SHIFT) |
(r->spr_val << WM0_PIPE_SPRITE_SHIFT) |
@@ -3472,7 +3453,6 @@ ilk_find_best_result(struct drm_i915_private *dev_priv,
/* dirty bits used to track which watermarks need changes */
#define WM_DIRTY_PIPE(pipe) (1 << (pipe))
-#define WM_DIRTY_LINETIME(pipe) (1 << (8 + (pipe)))
#define WM_DIRTY_LP(wm_lp) (1 << (15 + (wm_lp)))
#define WM_DIRTY_LP_ALL (WM_DIRTY_LP(1) | WM_DIRTY_LP(2) | WM_DIRTY_LP(3))
#define WM_DIRTY_FBC (1 << 24)
@@ -3487,12 +3467,6 @@ static unsigned int ilk_compute_wm_dirty(struct drm_i915_private *dev_priv,
int wm_lp;
for_each_pipe(dev_priv, pipe) {
- if (old->wm_linetime[pipe] != new->wm_linetime[pipe]) {
- dirty |= WM_DIRTY_LINETIME(pipe);
- /* Must disable LP1+ watermarks too */
- dirty |= WM_DIRTY_LP_ALL;
- }
-
if (old->wm_pipe[pipe] != new->wm_pipe[pipe]) {
dirty |= WM_DIRTY_PIPE(pipe);
/* Must disable LP1+ watermarks too */
@@ -3584,13 +3558,6 @@ static void ilk_write_wm_values(struct drm_i915_private *dev_priv,
if (dirty & WM_DIRTY_PIPE(PIPE_C))
I915_WRITE(WM0_PIPEC_IVB, results->wm_pipe[2]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_A))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_A), results->wm_linetime[0]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_B))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_B), results->wm_linetime[1]);
- if (dirty & WM_DIRTY_LINETIME(PIPE_C))
- I915_WRITE(PIPE_WM_LINETIME(PIPE_C), results->wm_linetime[2]);
-
if (dirty & WM_DIRTY_DDB) {
if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) {
val = I915_READ(WM_MISC);
@@ -3644,26 +3611,18 @@ bool ilk_disable_lp_wm(struct drm_i915_private *dev_priv)
return _ilk_disable_lp_wm(dev_priv, WM_DIRTY_LP_ALL);
}
-static u8 intel_enabled_dbuf_slices_num(struct drm_i915_private *dev_priv)
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv)
{
- u8 enabled_slices;
-
- /* Slice 1 will always be enabled */
- enabled_slices = 1;
-
- /* Gen prior to GEN11 have only one DBuf slice */
- if (INTEL_GEN(dev_priv) < 11)
- return enabled_slices;
+ int i;
+ int max_slices = INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
+ u8 enabled_slices_mask = 0;
- /*
- * FIXME: for now we'll only ever use 1 slice; pretend that we have
- * only that 1 slice enabled until we have a proper way for on-demand
- * toggling of the second slice.
- */
- if (0 && I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE)
- enabled_slices++;
+ for (i = 0; i < max_slices; i++) {
+ if (I915_READ(DBUF_CTL_S(i)) & DBUF_POWER_STATE)
+ enabled_slices_mask |= BIT(i);
+ }
- return enabled_slices;
+ return enabled_slices_mask;
}
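
The return value is now a bitmask rather than a count. A minimal sketch of what a caller sees, assuming a hypothetical two-slice part with both slices powered up:

    /* Both DBUF_CTL_S() registers report DBUF_POWER_STATE, so the mask
     * is BIT(0) | BIT(1) == 0x3; hweight8() recovers the old count. */
    u8 mask = intel_enabled_dbuf_slices_mask(dev_priv);  /* 0x3 */
    int num_slices = hweight8(mask);                     /* 2 */
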
/*
@@ -3864,47 +3823,46 @@ bool intel_can_enable_sagv(struct intel_atomic_state *state)
return true;
}
-static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv,
- const struct intel_crtc_state *crtc_state,
- const u64 total_data_rate,
- const int num_active,
- struct skl_ddb_allocation *ddb)
+/*
+ * Calculate the initial DBuf slice offset, based on slice size
+ * and mask (i.e. if the slice size is 1024 and only the second
+ * slice is enabled, the offset is 1024)
+ */
+static unsigned int
+icl_get_first_dbuf_slice_offset(u32 dbuf_slice_mask,
+ u32 slice_size,
+ u32 ddb_size)
+{
+ unsigned int offset = 0;
+
+ if (!dbuf_slice_mask)
+ return 0;
+
+ offset = (ffs(dbuf_slice_mask) - 1) * slice_size;
+
+ WARN_ON(offset >= ddb_size);
+ return offset;
+}
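
A quick check of the offset math, with assumed numbers rather than values from the patch: selecting only the second slice with a 1024-block slice size starts the allocation at block 1024.

    /* dbuf_slice_mask = BIT(1) (DBUF_S2 only): ffs(0x2) - 1 == 1,
     * so the returned offset is 1 * 1024 == 1024 (< ddb_size 2048). */
    offset = icl_get_first_dbuf_slice_offset(BIT(1), 1024, 2048);
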
+
+static u16 intel_get_ddb_size(struct drm_i915_private *dev_priv)
{
- const struct drm_display_mode *adjusted_mode;
- u64 total_data_bw;
u16 ddb_size = INTEL_INFO(dev_priv)->ddb_size;
- WARN_ON(ddb_size == 0);
+ drm_WARN_ON(&dev_priv->drm, ddb_size == 0);
if (INTEL_GEN(dev_priv) < 11)
return ddb_size - 4; /* 4 blocks for bypass path allocation */
- adjusted_mode = &crtc_state->hw.adjusted_mode;
- total_data_bw = total_data_rate * drm_mode_vrefresh(adjusted_mode);
-
- /*
- * 12GB/s is maximum BW supported by single DBuf slice.
- *
- * FIXME dbuf slice code is broken:
- * - must wait for planes to stop using the slice before powering it off
- * - plane straddling both slices is illegal in multi-pipe scenarios
- * - should validate we stay within the hw bandwidth limits
- */
- if (0 && (num_active > 1 || total_data_bw >= GBps(12))) {
- ddb->enabled_slices = 2;
- } else {
- ddb->enabled_slices = 1;
- ddb_size /= 2;
- }
-
return ddb_size;
}
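
As a worked example with assumed numbers: on a pre-Gen11 part with an 896-block DDB, this returns 896 - 4 = 892 blocks, the 4-block reservation covering the bypass path; on Gen11+ the full ddb_size is returned and slicing is handled by the callers below.
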
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+ u8 active_pipes);
+
static void
skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
const struct intel_crtc_state *crtc_state,
const u64 total_data_rate,
- struct skl_ddb_allocation *ddb,
struct skl_ddb_entry *alloc, /* out */
int *num_active /* out */)
{
@@ -3912,12 +3870,19 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
struct intel_atomic_state *intel_state = to_intel_atomic_state(state);
struct drm_crtc *for_crtc = crtc_state->uapi.crtc;
const struct intel_crtc *crtc;
- u32 pipe_width = 0, total_width = 0, width_before_pipe = 0;
+ u32 pipe_width = 0, total_width_in_range = 0, width_before_pipe_in_range = 0;
enum pipe for_pipe = to_intel_crtc(for_crtc)->pipe;
u16 ddb_size;
+ u32 ddb_range_size;
u32 i;
-
- if (WARN_ON(!state) || !crtc_state->hw.active) {
+ u32 dbuf_slice_mask;
+ u32 active_pipes;
+ u32 offset;
+ u32 slice_size;
+ u32 total_slice_mask;
+ u32 start, end;
+
+ if (drm_WARN_ON(&dev_priv->drm, !state) || !crtc_state->hw.active) {
alloc->start = 0;
alloc->end = 0;
*num_active = hweight8(dev_priv->active_pipes);
@@ -3925,12 +3890,15 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
}
if (intel_state->active_pipe_changes)
- *num_active = hweight8(intel_state->active_pipes);
+ active_pipes = intel_state->active_pipes;
else
- *num_active = hweight8(dev_priv->active_pipes);
+ active_pipes = dev_priv->active_pipes;
+
+ *num_active = hweight8(active_pipes);
- ddb_size = intel_get_ddb_size(dev_priv, crtc_state, total_data_rate,
- *num_active, ddb);
+ ddb_size = intel_get_ddb_size(dev_priv);
+
+ slice_size = ddb_size / INTEL_INFO(dev_priv)->num_supported_dbuf_slices;
/*
* If the state doesn't change the active CRTC's or there is no
@@ -3950,30 +3918,95 @@ skl_ddb_get_pipe_allocation_limits(struct drm_i915_private *dev_priv,
}
/*
+ * Get the allowed DBuf slices for the corresponding pipe and platform.
+ */
+ dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state, active_pipes);
+
+ DRM_DEBUG_KMS("DBuf slice mask %x pipe %c active pipes %x\n",
+ dbuf_slice_mask,
+ pipe_name(for_pipe), active_pipes);
+
+ /*
+ * Figure out at which DBuf slice we start, i.e. if we start at DBuf S2
+ * and the slice size is 1024, the offset would be 1024
+ */
+ offset = icl_get_first_dbuf_slice_offset(dbuf_slice_mask,
+ slice_size, ddb_size);
+
+ /*
+ * Figure out the total size of the allowed DBuf slices, which is the
+ * number of slices allowed for that pipe multiplied by the slice size.
+ * Inside this range, ddb entries are still allocated in proportion
+ * to display width.
+ */
+ ddb_range_size = hweight8(dbuf_slice_mask) * slice_size;
+
+ /*
* Watermark/ddb requirement highly depends upon width of the
* framebuffer, so instead of allocating DDB equally among pipes,
* distribute DDB based on resolution/width of the display.
*/
+ total_slice_mask = dbuf_slice_mask;
for_each_new_intel_crtc_in_state(intel_state, crtc, crtc_state, i) {
const struct drm_display_mode *adjusted_mode =
&crtc_state->hw.adjusted_mode;
enum pipe pipe = crtc->pipe;
int hdisplay, vdisplay;
+ u32 pipe_dbuf_slice_mask;
+
+ if (!crtc_state->hw.active)
+ continue;
+
+ pipe_dbuf_slice_mask = skl_compute_dbuf_slices(crtc_state,
+ active_pipes);
+
+ /*
+ * According to BSpec, a pipe can share one dbuf slice with other
+ * pipes, or use multiple dbufs; in both cases we account for
+ * other pipes only if they have exactly the same mask.
+ * However, we still need to track how many slices should be
+ * enabled in total.
+ */
+ total_slice_mask |= pipe_dbuf_slice_mask;
- if (!crtc_state->hw.enable)
+ /*
+ * Do not account for pipes using other slice sets.
+ * Luckily, as of the current BSpec, slice sets do not partially
+ * intersect (pipes share either the same single slice or the same
+ * slice set, never a partial overlap), so checking for equality
+ * is enough for now.
+ */
+ if (dbuf_slice_mask != pipe_dbuf_slice_mask)
continue;
drm_mode_get_hv_timing(adjusted_mode, &hdisplay, &vdisplay);
- total_width += hdisplay;
+
+ total_width_in_range += hdisplay;
if (pipe < for_pipe)
- width_before_pipe += hdisplay;
+ width_before_pipe_in_range += hdisplay;
else if (pipe == for_pipe)
pipe_width = hdisplay;
}
- alloc->start = ddb_size * width_before_pipe / total_width;
- alloc->end = ddb_size * (width_before_pipe + pipe_width) / total_width;
+ /*
+ * FIXME: For now we always enable slice S1 as per
+ * the Bspec display initialization sequence.
+ */
+ intel_state->enabled_dbuf_slices_mask = total_slice_mask | BIT(DBUF_S1);
+
+ start = ddb_range_size * width_before_pipe_in_range / total_width_in_range;
+ end = ddb_range_size *
+ (width_before_pipe_in_range + pipe_width) / total_width_in_range;
+
+ alloc->start = offset + start;
+ alloc->end = offset + end;
+
+ DRM_DEBUG_KMS("Pipe %d ddb %d-%d\n", for_pipe,
+ alloc->start, alloc->end);
+ DRM_DEBUG_KMS("Enabled ddb slices mask %x num supported %d\n",
+ intel_state->enabled_dbuf_slices_mask,
+ INTEL_INFO(dev_priv)->num_supported_dbuf_slices);
}
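
A worked example of the proportional split, with assumed numbers: pipes A (hdisplay 1920) and B (hdisplay 1280) sharing one 1024-block slice at offset 0. For pipe A, start = 1024 * 0 / 3200 = 0 and end = 1024 * 1920 / 3200 = 614, so pipe B gets blocks 614..1024 - wider pipes receive proportionally more DDB.
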
static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state,
@@ -4002,7 +4035,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state,
DRM_FORMAT_MOD_LINEAR,
DRM_MODE_ROTATE_0,
crtc_state->pixel_rate, &wp, 0);
- WARN_ON(ret);
+ drm_WARN_ON(&dev_priv->drm, ret);
for (level = 0; level <= max_level; level++) {
skl_compute_plane_wm(crtc_state, level, &wp, &wm, &wm);
@@ -4091,10 +4124,10 @@ void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
intel_display_power_put(dev_priv, power_domain, wakeref);
}
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */)
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv)
{
- ddb->enabled_slices = intel_enabled_dbuf_slices_num(dev_priv);
+ dev_priv->enabled_dbuf_slices_mask =
+ intel_enabled_dbuf_slices_mask(dev_priv);
}
/*
@@ -4144,6 +4177,254 @@ skl_plane_downscale_amount(const struct intel_crtc_state *crtc_state,
return mul_fixed16(downscale_w, downscale_h);
}
+struct dbuf_slice_conf_entry {
+ u8 active_pipes;
+ u8 dbuf_mask[I915_MAX_PIPES];
+};
+
+/*
+ * Table taken from BSpec 12716.
+ * Pipes have some preferred DBuf slice affinity,
+ * plus there are some hardcoded requirements on how
+ * those should be distributed in multi-pipe scenarios.
+ * With more DBuf slices the algorithm would only get messier
+ * and less readable, so we use a table almost as-is from
+ * BSpec itself - that way it is at least easier to compare,
+ * change and check.
+ */
+static const struct dbuf_slice_conf_entry icl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+/*
+ * Table taken from BSpec 49255.
+ * Pipes have some preferred DBuf slice affinity,
+ * plus there are some hardcoded requirements on how
+ * those should be distributed in multi-pipe scenarios.
+ * With more DBuf slices the algorithm would only get messier
+ * and less readable, so we use a table almost as-is from
+ * BSpec itself - that way it is at least easier to compare,
+ * change and check.
+ */
+static const struct dbuf_slice_conf_entry tgl_allowed_dbufs[] =
+/* Autogenerated with igt/tools/intel_dbuf_map tool: */
+{
+ {
+ .active_pipes = BIT(PIPE_A),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1) | BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S2),
+ [PIPE_B] = BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_D] = BIT(DBUF_S2) | BIT(DBUF_S1),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_C] = BIT(DBUF_S1),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {
+ .active_pipes = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C) | BIT(PIPE_D),
+ .dbuf_mask = {
+ [PIPE_A] = BIT(DBUF_S1),
+ [PIPE_B] = BIT(DBUF_S1),
+ [PIPE_C] = BIT(DBUF_S2),
+ [PIPE_D] = BIT(DBUF_S2),
+ },
+ },
+ {}
+};
+
+static u8 compute_dbuf_slices(enum pipe pipe, u8 active_pipes,
+ const struct dbuf_slice_conf_entry *dbuf_slices)
+{
+ int i;
+
+ for (i = 0; dbuf_slices[i].active_pipes != 0; i++) {
+ if (dbuf_slices[i].active_pipes == active_pipes)
+ return dbuf_slices[i].dbuf_mask[pipe];
+ }
+ return 0;
+}
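
The loop above terminates on the empty sentinel entry ({}, whose active_pipes is 0) at the end of each table. A minimal usage sketch; the values follow directly from the icl_allowed_dbufs table above:

    /* Active pipes A+B match the BIT(PIPE_A) | BIT(PIPE_B) entry, which
     * assigns slice 1 to pipe A and slice 2 to pipe B. */
    u8 mask = compute_dbuf_slices(PIPE_B, BIT(PIPE_A) | BIT(PIPE_B),
                                  icl_allowed_dbufs);
    /* mask == BIT(DBUF_S2) */
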
+
+/*
+ * This function finds the entry with the same enabled pipe configuration
+ * and returns the corresponding DBuf slice mask as stated in the BSpec
+ * for the particular platform.
+ */
+static u8 icl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ /*
+ * FIXME: For ICL this is still a bit unclear, as a previous BSpec
+ * revision required calculating a "pipe ratio" to determine whether
+ * one or two slices can be used for single-pipe configurations, as
+ * an additional constraint on the existing table.
+ * However, based on recent info it should not be a "pipe ratio"
+ * but rather a ratio between pixel_rate and cdclk with additional
+ * constants, so for now we use only the table until this is
+ * clarified. This is also why skl_compute_dbuf_slices() still takes
+ * crtc_state - we will need it once those additional constraints
+ * pop up.
+ */
+ return compute_dbuf_slices(pipe, active_pipes, icl_allowed_dbufs);
+}
+
+static u8 tgl_compute_dbuf_slices(enum pipe pipe, u8 active_pipes)
+{
+ return compute_dbuf_slices(pipe, active_pipes, tgl_allowed_dbufs);
+}
+
+static u8 skl_compute_dbuf_slices(const struct intel_crtc_state *crtc_state,
+ u8 active_pipes)
+{
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
+ enum pipe pipe = crtc->pipe;
+
+ if (IS_GEN(dev_priv, 12))
+ return tgl_compute_dbuf_slices(pipe, active_pipes);
+ else if (IS_GEN(dev_priv, 11))
+ return icl_compute_dbuf_slices(pipe, active_pipes);
+ /*
+ * For anything else just return one slice for now.
+ * Should be extended for other platforms.
+ */
+ return BIT(DBUF_S1);
+}
+
static u64
skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state,
@@ -4195,14 +4476,10 @@ skl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
u64 *plane_data_rate,
u64 *uv_plane_data_rate)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
struct intel_plane *plane;
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4230,9 +4507,6 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
const struct intel_plane_state *plane_state;
u64 total_data_rate = 0;
- if (WARN_ON(!crtc_state->uapi.state))
- return 0;
-
/* Calculate and cache data rate for each plane */
intel_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
enum plane_id plane_id = plane->id;
@@ -4271,13 +4545,10 @@ icl_get_total_relative_data_rate(struct intel_crtc_state *crtc_state,
}
static int
-skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
- struct skl_ddb_allocation *ddb /* out */)
+skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state)
{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_crtc *crtc = crtc_state->uapi.crtc;
- struct drm_i915_private *dev_priv = to_i915(crtc->dev);
- struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
+ struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
+ struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
struct skl_ddb_entry *alloc = &crtc_state->wm.skl.ddb;
u16 alloc_size, start = 0;
u16 total[I915_MAX_PLANES] = {};
@@ -4294,9 +4565,6 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
memset(crtc_state->wm.skl.plane_ddb_y, 0, sizeof(crtc_state->wm.skl.plane_ddb_y));
memset(crtc_state->wm.skl.plane_ddb_uv, 0, sizeof(crtc_state->wm.skl.plane_ddb_uv));
- if (WARN_ON(!state))
- return 0;
-
if (!crtc_state->hw.active) {
alloc->start = alloc->end = 0;
return 0;
@@ -4314,7 +4582,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
skl_ddb_get_pipe_allocation_limits(dev_priv, crtc_state, total_data_rate,
- ddb, alloc, &num_active);
+ alloc, &num_active);
alloc_size = skl_ddb_entry_size(alloc);
if (alloc_size == 0)
return 0;
@@ -4335,13 +4603,14 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
*/
for (level = ilk_wm_max_level(dev_priv); level >= 0; level--) {
blocks = 0;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
if (plane_id == PLANE_CURSOR) {
if (wm->wm[level].min_ddb_alloc > total[PLANE_CURSOR]) {
- WARN_ON(wm->wm[level].min_ddb_alloc != U16_MAX);
+ drm_WARN_ON(&dev_priv->drm,
+ wm->wm[level].min_ddb_alloc != U16_MAX);
blocks = U32_MAX;
break;
}
@@ -4371,7 +4640,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* watermark level, plus an extra share of the leftover blocks
* proportional to its relative data rate.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
const struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
u64 rate;
@@ -4406,11 +4675,11 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
alloc_size -= extra;
total_data_rate -= rate;
}
- WARN_ON(alloc_size != 0 || total_data_rate != 0);
+ drm_WARN_ON(&dev_priv->drm, alloc_size != 0 || total_data_rate != 0);
/* Set the actual DDB start/end points for each plane */
start = alloc->start;
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_ddb_entry *plane_alloc =
&crtc_state->wm.skl.plane_ddb_y[plane_id];
struct skl_ddb_entry *uv_plane_alloc =
@@ -4420,7 +4689,8 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
continue;
/* Gen11+ uses a separate plane for UV watermarks */
- WARN_ON(INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
+ drm_WARN_ON(&dev_priv->drm,
+ INTEL_GEN(dev_priv) >= 11 && uv_total[plane_id]);
/* Leave disabled planes at (0,0) */
if (total[plane_id]) {
@@ -4443,7 +4713,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* that aren't actually possible.
*/
for (level++; level <= ilk_wm_max_level(dev_priv); level++) {
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4480,7 +4750,7 @@ skl_allocate_pipe_ddb(struct intel_crtc_state *crtc_state,
* Go back and disable the transition watermark if it turns out we
* don't have enough DDB blocks for it.
*/
- for_each_plane_id_on_crtc(intel_crtc, plane_id) {
+ for_each_plane_id_on_crtc(crtc, plane_id) {
struct skl_plane_wm *wm =
&crtc_state->wm.skl.optimal.planes[plane_id];
@@ -4844,45 +5114,36 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state,
}
}
-static u32
-skl_compute_linetime_wm(const struct intel_crtc_state *crtc_state)
-{
- struct drm_atomic_state *state = crtc_state->uapi.state;
- struct drm_i915_private *dev_priv = to_i915(state->dev);
- uint_fixed_16_16_t linetime_us;
- u32 linetime_wm;
-
- linetime_us = intel_get_linetime_us(crtc_state);
- linetime_wm = fixed16_to_u32_round_up(mul_u32_fixed16(8, linetime_us));
-
- /* Display WA #1135: BXT:ALL GLK:ALL */
- if (IS_GEN9_LP(dev_priv) && dev_priv->ipc_enabled)
- linetime_wm /= 2;
-
- return linetime_wm;
-}
-
static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
const struct skl_wm_params *wp,
struct skl_plane_wm *wm)
{
struct drm_device *dev = crtc_state->uapi.crtc->dev;
const struct drm_i915_private *dev_priv = to_i915(dev);
- u16 trans_min, trans_y_tile_min;
- const u16 trans_amount = 10; /* This is configurable amount */
+ u16 trans_min, trans_amount, trans_y_tile_min;
u16 wm0_sel_res_b, trans_offset_b, res_blocks;
- /* Transition WM are not recommended by HW team for GEN9 */
- if (INTEL_GEN(dev_priv) <= 9)
- return;
-
/* Transition WM don't make any sense if ipc is disabled */
if (!dev_priv->ipc_enabled)
return;
- trans_min = 14;
+ /*
+ * WaDisableTWM:skl,kbl,cfl,bxt
+ * Transition WM are not recommended by HW team for GEN9
+ */
+ if (IS_GEN9_BC(dev_priv) || IS_BROXTON(dev_priv))
+ return;
+
if (INTEL_GEN(dev_priv) >= 11)
trans_min = 4;
+ else
+ trans_min = 14;
+
+ /* Display WA #1140: glk,cnl */
+ if (IS_CANNONLAKE(dev_priv) || IS_GEMINILAKE(dev_priv))
+ trans_amount = 0;
+ else
+ trans_amount = 10; /* This is a configurable amount */
trans_offset_b = trans_min + trans_amount;
@@ -4909,7 +5170,6 @@ static void skl_compute_transition_wm(const struct intel_crtc_state *crtc_state,
/* WA BUG:1938466 add one block for non y-tile planes */
if (IS_CNL_REVID(dev_priv, CNL_REVID_A0, CNL_REVID_A0))
res_blocks += 1;
-
}
/*
@@ -5049,8 +5309,6 @@ static int skl_build_pipe_wm(struct intel_crtc_state *crtc_state)
return ret;
}
- pipe_wm->linetime = skl_compute_linetime_wm(crtc_state);
-
return 0;
}
@@ -5059,9 +5317,10 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv,
const struct skl_ddb_entry *entry)
{
if (entry->end)
- I915_WRITE_FW(reg, (entry->end - 1) << 16 | entry->start);
+ intel_de_write_fw(dev_priv, reg,
+ (entry->end - 1) << 16 | entry->start);
else
- I915_WRITE_FW(reg, 0);
+ intel_de_write_fw(dev_priv, reg, 0);
}
static void skl_write_wm_level(struct drm_i915_private *dev_priv,
@@ -5077,7 +5336,7 @@ static void skl_write_wm_level(struct drm_i915_private *dev_priv,
val |= level->plane_res_b;
val |= level->plane_res_l << PLANE_WM_LINES_SHIFT;
- I915_WRITE_FW(reg, val);
+ intel_de_write_fw(dev_priv, reg, val);
}
void skl_write_plane_wm(struct intel_plane *plane,
@@ -5153,31 +5412,18 @@ static bool skl_plane_wm_equals(struct drm_i915_private *dev_priv,
int level, max_level = ilk_wm_max_level(dev_priv);
for (level = 0; level <= max_level; level++) {
- if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]) ||
- !skl_wm_level_equals(&wm1->uv_wm[level], &wm2->uv_wm[level]))
+ /*
+ * We don't check uv_wm as the hardware doesn't actually
+ * use it. It only gets used for calculating the required
+ * ddb allocation.
+ */
+ if (!skl_wm_level_equals(&wm1->wm[level], &wm2->wm[level]))
return false;
}
return skl_wm_level_equals(&wm1->trans_wm, &wm2->trans_wm);
}
-static bool skl_pipe_wm_equals(struct intel_crtc *crtc,
- const struct skl_pipe_wm *wm1,
- const struct skl_pipe_wm *wm2)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- enum plane_id plane_id;
-
- for_each_plane_id_on_crtc(crtc, plane_id) {
- if (!skl_plane_wm_equals(dev_priv,
- &wm1->planes[plane_id],
- &wm2->planes[plane_id]))
- return false;
- }
-
- return wm1->linetime == wm2->linetime;
-}
-
static inline bool skl_ddb_entries_overlap(const struct skl_ddb_entry *a,
const struct skl_ddb_entry *b)
{
@@ -5231,18 +5477,17 @@ skl_ddb_add_affected_planes(const struct intel_crtc_state *old_crtc_state,
static int
skl_compute_ddb(struct intel_atomic_state *state)
{
- const struct drm_i915_private *dev_priv = to_i915(state->base.dev);
- struct skl_ddb_allocation *ddb = &state->wm_results.ddb;
+ struct drm_i915_private *dev_priv = to_i915(state->base.dev);
struct intel_crtc_state *old_crtc_state;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc *crtc;
int ret, i;
- memcpy(ddb, &dev_priv->wm.skl_hw.ddb, sizeof(*ddb));
+ state->enabled_dbuf_slices_mask = dev_priv->enabled_dbuf_slices_mask;
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
- ret = skl_allocate_pipe_ddb(new_crtc_state, ddb);
+ ret = skl_allocate_pipe_ddb(new_crtc_state);
if (ret)
return ret;
@@ -5439,8 +5684,6 @@ skl_ddb_add_affected_pipes(struct intel_atomic_state *state)
* to grab the lock on *all* CRTC's.
*/
if (state->active_pipe_changes || state->modeset) {
- state->wm_results.dirty_pipes = INTEL_INFO(dev_priv)->pipe_mask;
-
ret = intel_add_all_pipes(state);
if (ret)
return ret;
@@ -5515,12 +5758,8 @@ skl_compute_wm(struct intel_atomic_state *state)
struct intel_crtc *crtc;
struct intel_crtc_state *new_crtc_state;
struct intel_crtc_state *old_crtc_state;
- struct skl_ddb_values *results = &state->wm_results;
int ret, i;
- /* Clear all dirty flags */
- results->dirty_pipes = 0;
-
ret = skl_ddb_add_affected_pipes(state);
if (ret)
return ret;
@@ -5528,68 +5767,36 @@ skl_compute_wm(struct intel_atomic_state *state)
/*
* Calculate WM's for all pipes that are part of this transaction.
* Note that skl_ddb_add_affected_pipes may have added more CRTC's that
- * weren't otherwise being modified (and set bits in dirty_pipes) if
- * pipe allocations had to change.
+ * weren't otherwise being modified if pipe allocations had to change.
*/
for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
new_crtc_state, i) {
ret = skl_build_pipe_wm(new_crtc_state);
if (ret)
return ret;
-
- ret = skl_wm_add_affected_planes(state, crtc);
- if (ret)
- return ret;
-
- if (!skl_pipe_wm_equals(crtc,
- &old_crtc_state->wm.skl.optimal,
- &new_crtc_state->wm.skl.optimal))
- results->dirty_pipes |= BIT(crtc->pipe);
}
ret = skl_compute_ddb(state);
if (ret)
return ret;
+ /*
+ * skl_compute_ddb() will have adjusted the final watermarks
+ * based on how much ddb is available. Now we can actually
+ * check if the final watermarks changed.
+ */
+ for_each_oldnew_intel_crtc_in_state(state, crtc, old_crtc_state,
+ new_crtc_state, i) {
+ ret = skl_wm_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+
skl_print_wm_changes(state);
return 0;
}
-static void skl_atomic_update_crtc_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- const struct skl_pipe_wm *pipe_wm = &crtc_state->wm.skl.optimal;
- enum pipe pipe = crtc->pipe;
-
- if ((state->wm_results.dirty_pipes & BIT(crtc->pipe)) == 0)
- return;
-
- I915_WRITE(PIPE_WM_LINETIME(pipe), pipe_wm->linetime);
-}
-
-static void skl_initial_wm(struct intel_atomic_state *state,
- struct intel_crtc *crtc)
-{
- struct drm_i915_private *dev_priv = to_i915(crtc->base.dev);
- const struct intel_crtc_state *crtc_state =
- intel_atomic_get_new_crtc_state(state, crtc);
- struct skl_ddb_values *results = &state->wm_results;
-
- if ((results->dirty_pipes & BIT(crtc->pipe)) == 0)
- return;
-
- mutex_lock(&dev_priv->wm.wm_mutex);
-
- if (crtc_state->uapi.active_changed)
- skl_atomic_update_crtc_wm(state, crtc);
-
- mutex_unlock(&dev_priv->wm.wm_mutex);
-}
-
static void ilk_compute_wm_config(struct drm_i915_private *dev_priv,
struct intel_wm_config *config)
{
@@ -5712,25 +5919,18 @@ void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
if (!crtc->active)
return;
-
- out->linetime = I915_READ(PIPE_WM_LINETIME(pipe));
}
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv)
{
- struct skl_ddb_values *hw = &dev_priv->wm.skl_hw;
- struct skl_ddb_allocation *ddb = &dev_priv->wm.skl_hw.ddb;
struct intel_crtc *crtc;
struct intel_crtc_state *crtc_state;
- skl_ddb_get_hw_state(dev_priv, ddb);
+ skl_ddb_get_hw_state(dev_priv);
for_each_intel_crtc(&dev_priv->drm, crtc) {
crtc_state = to_intel_crtc_state(crtc->base.state);
skl_pipe_wm_get_hw_state(crtc, &crtc_state->wm.skl.optimal);
-
- if (crtc->active)
- hw->dirty_pipes |= BIT(crtc->pipe);
}
if (dev_priv->active_pipes) {
@@ -5754,8 +5954,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
};
hw->wm_pipe[pipe] = I915_READ(wm0_pipe_reg[pipe]);
- if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
- hw->wm_linetime[pipe] = I915_READ(PIPE_WM_LINETIME(pipe));
memset(active, 0, sizeof(*active));
@@ -5774,7 +5972,6 @@ static void ilk_pipe_wm_get_hw_state(struct intel_crtc *crtc)
active->wm[0].pri_val = (tmp & WM0_PIPE_PLANE_MASK) >> WM0_PIPE_PLANE_SHIFT;
active->wm[0].spr_val = (tmp & WM0_PIPE_SPRITE_MASK) >> WM0_PIPE_SPRITE_SHIFT;
active->wm[0].cur_val = tmp & WM0_PIPE_CURSOR_MASK;
- active->linetime = hw->wm_linetime[pipe];
} else {
int level, max_level = ilk_wm_max_level(dev_priv);
@@ -6629,20 +6826,9 @@ static void icl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(GEN10_DFR_RATIO_EN_AND_CHICKEN,
I915_READ(GEN10_DFR_RATIO_EN_AND_CHICKEN) & ~DFR_DISABLE);
- /* WaEnable32PlaneMode:icl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS,
- _MASKED_BIT_ENABLE(GEN11_ENABLE_32_PLANE_MODE));
-
- /*
- * Wa_1408615072:icl,ehl (vsunit)
- * Wa_1407596294:icl,ehl (hsunit)
- */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE,
- 0, VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
-
- /* Wa_1407352427:icl,ehl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, PSDUNIT_CLKGATE_DIS);
+ /* Wa_14010594013:icl,ehl */
+ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1,
+ 0, CNL_DELAY_PMRSP);
}
static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -6650,10 +6836,6 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
u32 vd_pg_enable = 0;
unsigned int i;
- /* Wa_1408615072:tgl */
- intel_uncore_rmw(&dev_priv->uncore, UNSLICE_UNIT_LEVEL_CLKGATE2,
- 0, VSUNIT_CLKGATE_DIS_TGL);
-
/* This is not a WA. Enable VD HCP & MFX_ENC powergate */
for (i = 0; i < I915_MAX_VCS; i++) {
if (HAS_ENGINE(dev_priv, _VCS(i)))
@@ -6663,6 +6845,11 @@ static void tgl_init_clock_gating(struct drm_i915_private *dev_priv)
I915_WRITE(POWERGATE_ENABLE,
I915_READ(POWERGATE_ENABLE) | vd_pg_enable);
+
+ /* Wa_1409825376:tgl (pre-prod) */
+ if (IS_TGL_REVID(dev_priv, TGL_REVID_A0, TGL_REVID_A0))
+ I915_WRITE(GEN9_CLKGATE_DIS_3, I915_READ(GEN9_CLKGATE_DIS_3) |
+ TGL_VRH_GATING_DIS);
}
static void cnp_init_clock_gating(struct drm_i915_private *dev_priv)
@@ -7248,8 +7435,6 @@ void intel_init_pm(struct drm_i915_private *dev_priv)
/* For FIFO watermark updates */
if (INTEL_GEN(dev_priv) >= 9) {
skl_setup_wm_latency(dev_priv);
- dev_priv->display.initial_watermarks = skl_initial_wm;
- dev_priv->display.atomic_update_watermarks = skl_atomic_update_crtc_wm;
dev_priv->display.compute_global_watermarks = skl_compute_wm;
} else if (HAS_PCH_SPLIT(dev_priv)) {
ilk_setup_wm_latency(dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_pm.h b/drivers/gpu/drm/i915/intel_pm.h
index c06c6a846d9a..d60a85421c5a 100644
--- a/drivers/gpu/drm/i915/intel_pm.h
+++ b/drivers/gpu/drm/i915/intel_pm.h
@@ -17,7 +17,6 @@ struct intel_atomic_state;
struct intel_crtc;
struct intel_crtc_state;
struct intel_plane;
-struct skl_ddb_allocation;
struct skl_ddb_entry;
struct skl_pipe_wm;
struct skl_wm_level;
@@ -33,11 +32,11 @@ void g4x_wm_get_hw_state(struct drm_i915_private *dev_priv);
void vlv_wm_get_hw_state(struct drm_i915_private *dev_priv);
void ilk_wm_get_hw_state(struct drm_i915_private *dev_priv);
void skl_wm_get_hw_state(struct drm_i915_private *dev_priv);
+u8 intel_enabled_dbuf_slices_mask(struct drm_i915_private *dev_priv);
void skl_pipe_ddb_get_hw_state(struct intel_crtc *crtc,
struct skl_ddb_entry *ddb_y,
struct skl_ddb_entry *ddb_uv);
-void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv,
- struct skl_ddb_allocation *ddb /* out */);
+void skl_ddb_get_hw_state(struct drm_i915_private *dev_priv);
void skl_pipe_wm_get_hw_state(struct intel_crtc *crtc,
struct skl_pipe_wm *out);
void g4x_wm_sanitize(struct drm_i915_private *dev_priv);
diff --git a/drivers/gpu/drm/i915/intel_sideband.c b/drivers/gpu/drm/i915/intel_sideband.c
index cbfb7171d62d..1447e7516cb7 100644
--- a/drivers/gpu/drm/i915/intel_sideband.c
+++ b/drivers/gpu/drm/i915/intel_sideband.c
@@ -241,8 +241,9 @@ u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
* FIXME: There might be some registers where all 1's is a valid value,
* so ideally we should check the register offset instead...
*/
- WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
- pipe_name(pipe), reg, val);
+ drm_WARN(&i915->drm, val == 0xffffffff,
+ "DPIO read pipe %c reg 0x%x == 0x%x\n",
+ pipe_name(pipe), reg, val);
return val;
}
@@ -365,6 +366,10 @@ static inline int gen7_check_mailbox_status(u32 mbox)
return -ETIMEDOUT;
case GEN7_PCODE_ILLEGAL_DATA:
return -EINVAL;
+ case GEN11_PCODE_ILLEGAL_SUBCOMMAND:
+ return -ENXIO;
+ case GEN11_PCODE_LOCKED:
+ return -EBUSY;
case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
return -EOVERFLOW;
default:
@@ -525,7 +530,7 @@ int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
*/
drm_dbg_kms(&i915->drm,
"PCODE timeout, retrying with preemption disabled\n");
- WARN_ON_ONCE(timeout_base_ms > 3);
+ drm_WARN_ON_ONCE(&i915->drm, timeout_base_ms > 3);
preempt_disable();
ret = wait_for_atomic(COND, 50);
preempt_enable();
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 5f2cf6f43b8b..abb18b90d7c3 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -324,8 +324,9 @@ static void __gen6_gt_wait_for_thread_c0(struct intel_uncore *uncore)
* w/a for a sporadic read returning 0 by waiting for the GT
* thread to wake up.
*/
- WARN_ONCE(wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
- "GT thread status wait timed out\n");
+ drm_WARN_ONCE(&uncore->i915->drm,
+ wait_for_atomic_us(gt_thread_status(uncore) == 0, 5000),
+ "GT thread status wait timed out\n");
}
static void fw_domains_get_with_thread_status(struct intel_uncore *uncore,
@@ -441,7 +442,7 @@ intel_uncore_forcewake_reset(struct intel_uncore *uncore)
cond_resched();
}
- WARN_ON(active_domains);
+ drm_WARN_ON(&uncore->i915->drm, active_domains);
fw = uncore->fw_domains_active;
if (fw)
@@ -757,9 +758,9 @@ void assert_forcewakes_inactive(struct intel_uncore *uncore)
if (!uncore->funcs.force_wake_get)
return;
- WARN(uncore->fw_domains_active,
- "Expected all fw_domains to be inactive, but %08x are still on\n",
- uncore->fw_domains_active);
+ drm_WARN(&uncore->i915->drm, uncore->fw_domains_active,
+ "Expected all fw_domains to be inactive, but %08x are still on\n",
+ uncore->fw_domains_active);
}
void assert_forcewakes_active(struct intel_uncore *uncore,
@@ -779,9 +780,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
assert_rpm_wakelock_held(uncore->rpm);
fw_domains &= uncore->fw_domains;
- WARN(fw_domains & ~uncore->fw_domains_active,
- "Expected %08x fw_domains to be active, but %08x are off\n",
- fw_domains, fw_domains & ~uncore->fw_domains_active);
+ drm_WARN(&uncore->i915->drm, fw_domains & ~uncore->fw_domains_active,
+ "Expected %08x fw_domains to be active, but %08x are off\n",
+ fw_domains, fw_domains & ~uncore->fw_domains_active);
/*
* Check that the caller has an explicit wakeref and we don't mistake
@@ -794,9 +795,9 @@ void assert_forcewakes_active(struct intel_uncore *uncore,
if (uncore->fw_domains_timer & domain->mask)
expect++; /* pending automatic release */
- if (WARN(actual < expect,
- "Expected domain %d to be held awake by caller, count=%d\n",
- domain->id, actual))
+ if (drm_WARN(&uncore->i915->drm, actual < expect,
+ "Expected domain %d to be held awake by caller, count=%d\n",
+ domain->id, actual))
break;
}
@@ -866,9 +867,9 @@ find_fw_domain(struct intel_uncore *uncore, u32 offset)
if (entry->domains == FORCEWAKE_ALL)
return uncore->fw_domains;
- WARN(entry->domains & ~uncore->fw_domains,
- "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
- entry->domains & ~uncore->fw_domains, offset);
+ drm_WARN(&uncore->i915->drm, entry->domains & ~uncore->fw_domains,
+ "Uninitialized forcewake domain(s) 0x%x accessed at 0x%x\n",
+ entry->domains & ~uncore->fw_domains, offset);
return entry->domains;
}
@@ -1158,10 +1159,11 @@ __unclaimed_reg_debug(struct intel_uncore *uncore,
const bool read,
const bool before)
{
- if (WARN(check_for_unclaimed_mmio(uncore) && !before,
- "Unclaimed %s register 0x%x\n",
- read ? "read from" : "write to",
- i915_mmio_reg_offset(reg)))
+ if (drm_WARN(&uncore->i915->drm,
+ check_for_unclaimed_mmio(uncore) && !before,
+ "Unclaimed %s register 0x%x\n",
+ read ? "read from" : "write to",
+ i915_mmio_reg_offset(reg)))
/* Only report the first N failures */
i915_modparams.mmio_debug--;
}
@@ -1436,8 +1438,8 @@ static int __fw_domain_init(struct intel_uncore *uncore,
if (!d)
return -ENOMEM;
- WARN_ON(!i915_mmio_reg_valid(reg_set));
- WARN_ON(!i915_mmio_reg_valid(reg_ack));
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_set));
+ drm_WARN_ON(&uncore->i915->drm, !i915_mmio_reg_valid(reg_ack));
d->uncore = uncore;
d->wake_count = 0;
@@ -1482,8 +1484,8 @@ static void fw_domain_fini(struct intel_uncore *uncore,
return;
uncore->fw_domains &= ~BIT(domain_id);
- WARN_ON(d->wake_count);
- WARN_ON(hrtimer_cancel(&d->timer));
+ drm_WARN_ON(&uncore->i915->drm, d->wake_count);
+ drm_WARN_ON(&uncore->i915->drm, hrtimer_cancel(&d->timer));
kfree(d);
}
@@ -1613,7 +1615,7 @@ static int intel_uncore_fw_domains_init(struct intel_uncore *uncore)
#undef fw_domain_init
/* All future platforms are expected to require complex power gating */
- WARN_ON(!ret && uncore->fw_domains == 0);
+ drm_WARN_ON(&i915->drm, !ret && uncore->fw_domains == 0);
out:
if (ret)
@@ -2108,7 +2110,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
{
enum forcewake_domains fw_domains = 0;
- WARN_ON(!op);
+ drm_WARN_ON(&uncore->i915->drm, !op);
if (!intel_uncore_has_forcewake(uncore))
return 0;
@@ -2119,7 +2121,7 @@ intel_uncore_forcewake_for_reg(struct intel_uncore *uncore,
if (op & FW_REG_WRITE)
fw_domains |= uncore->funcs.write_fw_domains(uncore, reg);
- WARN_ON(fw_domains & ~uncore->fw_domains);
+ drm_WARN_ON(&uncore->i915->drm, fw_domains & ~uncore->fw_domains);
return fw_domains;
}
diff --git a/drivers/gpu/drm/i915/selftests/i915_active.c b/drivers/gpu/drm/i915/selftests/i915_active.c
index ef572a0c2566..68bbb1580162 100644
--- a/drivers/gpu/drm/i915/selftests/i915_active.c
+++ b/drivers/gpu/drm/i915/selftests/i915_active.c
@@ -201,11 +201,57 @@ static int live_active_retire(void *arg)
return err;
}
+static int live_active_barrier(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ struct intel_engine_cs *engine;
+ struct live_active *active;
+ int err = 0;
+
+ /* Check that we get a callback when requests retire upon waiting */
+
+ active = __live_alloc(i915);
+ if (!active)
+ return -ENOMEM;
+
+ err = i915_active_acquire(&active->base);
+ if (err)
+ goto out;
+
+ for_each_uabi_engine(engine, i915) {
+ err = i915_active_acquire_preallocate_barrier(&active->base,
+ engine);
+ if (err)
+ break;
+
+ i915_active_acquire_barrier(&active->base);
+ }
+
+ i915_active_release(&active->base);
+
+ if (err == 0)
+ err = i915_active_wait(&active->base);
+
+ if (err == 0 && !READ_ONCE(active->retired)) {
+ pr_err("i915_active not retired after flushing barriers!\n");
+ err = -EINVAL;
+ }
+
+out:
+ __live_put(active);
+
+ if (igt_flush_test(i915))
+ err = -EIO;
+
+ return err;
+}
+
int i915_active_live_selftests(struct drm_i915_private *i915)
{
static const struct i915_subtest tests[] = {
SUBTEST(live_active_wait),
SUBTEST(live_active_retire),
+ SUBTEST(live_active_barrier),
};
if (intel_gt_is_wedged(&i915->gt))
@@ -265,28 +311,40 @@ static void spin_unlock_wait(spinlock_t *lock)
spin_unlock_irq(lock);
}
+static void active_flush(struct i915_active *ref,
+ struct i915_active_fence *active)
+{
+ struct dma_fence *fence;
+
+ fence = xchg(__active_fence_slot(active), NULL);
+ if (!fence)
+ return;
+
+ spin_lock_irq(fence->lock);
+ __list_del_entry(&active->cb.node);
+ spin_unlock_irq(fence->lock); /* serialise with fence->cb_list */
+ atomic_dec(&ref->count);
+
+ GEM_BUG_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));
+}
+
void i915_active_unlock_wait(struct i915_active *ref)
{
if (i915_active_acquire_if_busy(ref)) {
struct active_node *it, *n;
+ /* Wait for all active callbacks */
rcu_read_lock();
- rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
- struct dma_fence *f;
-
- /* Wait for all active callbacks */
- f = rcu_dereference(it->base.fence);
- if (f)
- spin_unlock_wait(f->lock);
- }
+ active_flush(ref, &ref->excl);
+ rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node)
+ active_flush(ref, &it->base);
rcu_read_unlock();
i915_active_release(ref);
}
/* And wait for the retire callback */
- spin_lock_irq(&ref->tree_lock);
- spin_unlock_irq(&ref->tree_lock);
+ spin_unlock_wait(&ref->tree_lock);
/* ... which may have been on a thread instead */
flush_work(&ref->work);
diff --git a/drivers/gpu/drm/i915/selftests/i915_buddy.c b/drivers/gpu/drm/i915/selftests/i915_buddy.c
index 1b856bae67b5..939a6caebb03 100644
--- a/drivers/gpu/drm/i915/selftests/i915_buddy.c
+++ b/drivers/gpu/drm/i915/selftests/i915_buddy.c
@@ -298,10 +298,12 @@ static void igt_mm_config(u64 *size, u64 *chunk_size)
static int igt_buddy_alloc_smoke(void *arg)
{
struct i915_buddy_mm mm;
- int max_order;
+ IGT_TIMEOUT(end_time);
+ I915_RND_STATE(prng);
u64 chunk_size;
u64 mm_size;
- int err;
+ int *order;
+ int err, i;
igt_mm_config(&mm_size, &chunk_size);
@@ -313,10 +315,16 @@ static int igt_buddy_alloc_smoke(void *arg)
return err;
}
- for (max_order = mm.max_order; max_order >= 0; max_order--) {
+ order = i915_random_order(mm.max_order + 1, &prng);
+ if (!order)
+ goto out_fini;
+
+ for (i = 0; i <= mm.max_order; ++i) {
struct i915_buddy_block *block;
- int order;
+ int max_order = order[i];
+ bool timeout = false;
LIST_HEAD(blocks);
+ int order;
u64 total;
err = igt_check_mm(&mm);
@@ -360,6 +368,11 @@ retry:
}
total += i915_buddy_block_size(&mm, block);
+
+ if (__igt_timeout(end_time, NULL)) {
+ timeout = true;
+ break;
+ }
} while (total < mm.size);
if (!err)
@@ -373,7 +386,7 @@ retry:
pr_err("post-mm check failed\n");
}
- if (err)
+ if (err || timeout)
break;
cond_resched();
@@ -382,6 +395,8 @@ retry:
if (err == -ENOMEM)
err = 0;
+ kfree(order);
+out_fini:
i915_buddy_fini(&mm);
return err;
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem.c b/drivers/gpu/drm/i915/selftests/i915_gem.c
index 78f36faf2bbe..623759b73bb4 100644
--- a/drivers/gpu/drm/i915/selftests/i915_gem.c
+++ b/drivers/gpu/drm/i915/selftests/i915_gem.c
@@ -98,7 +98,7 @@ static void pm_suspend(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_suspend_gtt_mappings(i915);
+ i915_ggtt_suspend(&i915->ggtt);
i915_gem_suspend_late(i915);
}
}
@@ -108,7 +108,7 @@ static void pm_hibernate(struct drm_i915_private *i915)
intel_wakeref_t wakeref;
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_suspend_gtt_mappings(i915);
+ i915_ggtt_suspend(&i915->ggtt);
i915_gem_freeze(i915);
i915_gem_freeze_late(i915);
@@ -124,7 +124,7 @@ static void pm_resume(struct drm_i915_private *i915)
* that runtime-pm just works.
*/
with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
- i915_gem_restore_gtt_mappings(i915);
+ i915_ggtt_resume(&i915->ggtt);
i915_gem_restore_fences(&i915->ggtt);
i915_gem_resume(i915);
diff --git a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
index 34138c7bdd15..0a953bfc0585 100644
--- a/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_live_selftests.h
@@ -43,6 +43,7 @@ selftest(reset, intel_reset_live_selftests)
selftest(memory_region, intel_memory_region_live_selftests)
selftest(hangcheck, intel_hangcheck_live_selftests)
selftest(execlists, intel_execlists_live_selftests)
+selftest(ring_submission, intel_ring_submission_live_selftests)
selftest(perf, i915_perf_live_selftests)
/* Here be dragons: keep last to run last! */
selftest(late_gt_pm, intel_gt_pm_late_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
index 5a577a1332f5..3bf7f53e9924 100644
--- a/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
+++ b/drivers/gpu/drm/i915/selftests/i915_perf_selftests.h
@@ -17,3 +17,4 @@
*/
selftest(engine_cs, intel_engine_cs_perf_selftests)
selftest(blt, i915_gem_object_blt_perf_selftests)
+selftest(region, intel_memory_region_perf_selftests)
diff --git a/drivers/gpu/drm/i915/selftests/igt_spinner.c b/drivers/gpu/drm/i915/selftests/igt_spinner.c
index e8a58fe49c39..9ad4ab088466 100644
--- a/drivers/gpu/drm/i915/selftests/igt_spinner.c
+++ b/drivers/gpu/drm/i915/selftests/igt_spinner.c
@@ -183,7 +183,7 @@ igt_spinner_create_request(struct igt_spinner *spin,
cancel_rq:
if (err) {
- i915_request_skip(rq, err);
+ i915_request_set_error_once(rq, err);
i915_request_add(rq);
}
unpin_hws:
diff --git a/drivers/gpu/drm/i915/selftests/intel_memory_region.c b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
index 3ef3620e0da5..2a1d4ba1f9f3 100644
--- a/drivers/gpu/drm/i915/selftests/intel_memory_region.c
+++ b/drivers/gpu/drm/i915/selftests/intel_memory_region.c
@@ -4,6 +4,7 @@
*/
#include <linux/prime_numbers.h>
+#include <linux/sort.h>
#include "../i915_selftest.h"
@@ -19,6 +20,7 @@
#include "gem/selftests/mock_context.h"
#include "gt/intel_engine_user.h"
#include "gt/intel_gt.h"
+#include "i915_memcpy.h"
#include "selftests/igt_flush_test.h"
#include "selftests/i915_random.h"
@@ -572,6 +574,195 @@ out_put:
return err;
}
+static const char *repr_type(u32 type)
+{
+ switch (type) {
+ case I915_MAP_WB:
+ return "WB";
+ case I915_MAP_WC:
+ return "WC";
+ }
+
+ return "";
+}
+
+static struct drm_i915_gem_object *
+create_region_for_mapping(struct intel_memory_region *mr, u64 size, u32 type,
+ void **out_addr)
+{
+ struct drm_i915_gem_object *obj;
+ void *addr;
+
+ obj = i915_gem_object_create_region(mr, size, 0);
+ if (IS_ERR(obj))
+ return obj;
+
+ addr = i915_gem_object_pin_map(obj, type);
+ if (IS_ERR(addr)) {
+ i915_gem_object_put(obj);
+ if (PTR_ERR(addr) == -ENXIO)
+ return ERR_PTR(-ENODEV);
+ return addr;
+ }
+
+ *out_addr = addr;
+ return obj;
+}
+
+static int wrap_ktime_compare(const void *A, const void *B)
+{
+ const ktime_t *a = A, *b = B;
+
+ return ktime_compare(*a, *b);
+}
+
+static void igt_memcpy_long(void *dst, const void *src, size_t size)
+{
+ unsigned long *tmp = dst;
+ const unsigned long *s = src;
+
+ size = size / sizeof(unsigned long);
+ while (size--)
+ *tmp++ = *s++;
+}
+
+static inline void igt_memcpy(void *dst, const void *src, size_t size)
+{
+ memcpy(dst, src, size);
+}
+
+static inline void igt_memcpy_from_wc(void *dst, const void *src, size_t size)
+{
+ i915_memcpy_from_wc(dst, src, size);
+}
+
+static int _perf_memcpy(struct intel_memory_region *src_mr,
+ struct intel_memory_region *dst_mr,
+ u64 size, u32 src_type, u32 dst_type)
+{
+ struct drm_i915_private *i915 = src_mr->i915;
+ const struct {
+ const char *name;
+ void (*copy)(void *dst, const void *src, size_t size);
+ bool skip;
+ } tests[] = {
+ {
+ "memcpy",
+ igt_memcpy,
+ },
+ {
+ "memcpy_long",
+ igt_memcpy_long,
+ },
+ {
+ "memcpy_from_wc",
+ igt_memcpy_from_wc,
+ !i915_has_memcpy_from_wc(),
+ },
+ };
+ struct drm_i915_gem_object *src, *dst;
+ void *src_addr, *dst_addr;
+ int ret = 0;
+ int i;
+
+ src = create_region_for_mapping(src_mr, size, src_type, &src_addr);
+ if (IS_ERR(src)) {
+ ret = PTR_ERR(src);
+ goto out;
+ }
+
+ dst = create_region_for_mapping(dst_mr, size, dst_type, &dst_addr);
+ if (IS_ERR(dst)) {
+ ret = PTR_ERR(dst);
+ goto out_unpin_src;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(tests); ++i) {
+ ktime_t t[5];
+ int pass;
+
+ if (tests[i].skip)
+ continue;
+
+ for (pass = 0; pass < ARRAY_SIZE(t); pass++) {
+ ktime_t t0, t1;
+
+ t0 = ktime_get();
+
+ tests[i].copy(dst_addr, src_addr, size);
+
+ t1 = ktime_get();
+ t[pass] = ktime_sub(t1, t0);
+ }
+
+ sort(t, ARRAY_SIZE(t), sizeof(*t), wrap_ktime_compare, NULL);
+ pr_info("%s src(%s, %s) -> dst(%s, %s) %14s %4llu KiB copy: %5lld MiB/s\n",
+ __func__,
+ src_mr->name,
+ repr_type(src_type),
+ dst_mr->name,
+ repr_type(dst_type),
+ tests[i].name,
+ size >> 10,
+ div64_u64(mul_u32_u32(4 * size,
+ 1000 * 1000 * 1000),
+ t[1] + 2 * t[2] + t[3]) >> 20);
+
+ cond_resched();
+ }
+
+ i915_gem_object_unpin_map(dst);
+ i915_gem_object_put(dst);
+out_unpin_src:
+ i915_gem_object_unpin_map(src);
+ i915_gem_object_put(src);
+
+ i915_gem_drain_freed_objects(i915);
+out:
+ if (ret == -ENODEV)
+ ret = 0;
+
+ return ret;
+}
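
The reported bandwidth is a trimmed, weighted mean of the five samples: after sorting, the fastest and slowest are dropped and the middle three are weighted 1:2:1, so 4 * size bytes are divided by four samples' worth of nanoseconds before shifting into MiB. With assumed numbers: a 4 MiB copy whose middle three samples are each 2 ms gives 4 * 4 MiB / 8 ms = 2000 MiB/s.
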
+
+static int perf_memcpy(void *arg)
+{
+ struct drm_i915_private *i915 = arg;
+ static const u32 types[] = {
+ I915_MAP_WB,
+ I915_MAP_WC,
+ };
+ static const u32 sizes[] = {
+ SZ_4K,
+ SZ_64K,
+ SZ_4M,
+ };
+ struct intel_memory_region *src_mr, *dst_mr;
+ int src_id, dst_id;
+ int i, j, k;
+ int ret;
+
+ for_each_memory_region(src_mr, i915, src_id) {
+ for_each_memory_region(dst_mr, i915, dst_id) {
+ for (i = 0; i < ARRAY_SIZE(sizes); ++i) {
+ for (j = 0; j < ARRAY_SIZE(types); ++j) {
+ for (k = 0; k < ARRAY_SIZE(types); ++k) {
+ ret = _perf_memcpy(src_mr,
+ dst_mr,
+ sizes[i],
+ types[j],
+ types[k]);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ }
+ }
+
+ return 0;
+}
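
As a sizing note with an assumed configuration: on a system exposing two memory regions, the nested loops run 2 * 2 region pairs * 3 sizes * 2 * 2 mapping-type pairs = 48 _perf_memcpy() passes, each printing one line per (non-skipped) memcpy variant.
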
+
int intel_memory_region_mock_selftests(void)
{
static const struct i915_subtest tests[] = {
@@ -619,3 +810,15 @@ int intel_memory_region_live_selftests(struct drm_i915_private *i915)
return i915_live_subtests(tests, i915);
}
+
+int intel_memory_region_perf_selftests(struct drm_i915_private *i915)
+{
+ static const struct i915_subtest tests[] = {
+ SUBTEST(perf_memcpy),
+ };
+
+ if (intel_gt_is_wedged(&i915->gt))
+ return 0;
+
+ return i915_live_subtests(tests, i915);
+}
diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
index 3b8986983afc..754d0eb6beaa 100644
--- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c
+++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c
@@ -144,7 +144,6 @@ struct drm_i915_private *mock_gem_device(void)
goto put_device;
}
i915->drm.pdev = pdev;
- i915->drm.dev_private = i915;
intel_runtime_pm_init_early(&i915->runtime_pm);
diff --git a/drivers/gpu/drm/i915/vlv_suspend.c b/drivers/gpu/drm/i915/vlv_suspend.c
new file mode 100644
index 000000000000..23adb64d640a
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.c
@@ -0,0 +1,489 @@
+// SPDX-License-Identifier: MIT
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#include <linux/kernel.h>
+
+#include <drm/drm_print.h>
+
+#include "i915_drv.h"
+#include "i915_reg.h"
+#include "i915_trace.h"
+#include "i915_utils.h"
+#include "intel_pm.h"
+#include "vlv_suspend.h"
+
+struct vlv_s0ix_state {
+ /* GAM */
+ u32 wr_watermark;
+ u32 gfx_prio_ctrl;
+ u32 arb_mode;
+ u32 gfx_pend_tlb0;
+ u32 gfx_pend_tlb1;
+ u32 lra_limits[GEN7_LRA_LIMITS_REG_NUM];
+ u32 media_max_req_count;
+ u32 gfx_max_req_count;
+ u32 render_hwsp;
+ u32 ecochk;
+ u32 bsd_hwsp;
+ u32 blt_hwsp;
+ u32 tlb_rd_addr;
+
+ /* MBC */
+ u32 g3dctl;
+ u32 gsckgctl;
+ u32 mbctl;
+
+ /* GCP */
+ u32 ucgctl1;
+ u32 ucgctl3;
+ u32 rcgctl1;
+ u32 rcgctl2;
+ u32 rstctl;
+ u32 misccpctl;
+
+ /* GPM */
+ u32 gfxpause;
+ u32 rpdeuhwtc;
+ u32 rpdeuc;
+ u32 ecobus;
+ u32 pwrdwnupctl;
+ u32 rp_down_timeout;
+ u32 rp_deucsw;
+ u32 rcubmabdtmr;
+ u32 rcedata;
+ u32 spare2gh;
+
+ /* Display 1 CZ domain */
+ u32 gt_imr;
+ u32 gt_ier;
+ u32 pm_imr;
+ u32 pm_ier;
+ u32 gt_scratch[GEN7_GT_SCRATCH_REG_NUM];
+
+ /* GT SA CZ domain */
+ u32 tilectl;
+ u32 gt_fifoctl;
+ u32 gtlc_wake_ctrl;
+ u32 gtlc_survive;
+ u32 pmwgicz;
+
+ /* Display 2 CZ domain */
+ u32 gu_ctl0;
+ u32 gu_ctl1;
+ u32 pcbr;
+ u32 clock_gate_dis2;
+};
+
+/*
+ * Save all Gunit registers that may be lost after a D3 and a subsequent
+ * S0i[R123] transition. The list of registers needing a save/restore is
+ * defined in the VLV2_S0IXRegs document. This document marks all Gunit
+ * registers in the following way:
+ * - Driver: saved/restored by the driver
+ * - Punit : saved/restored by the Punit firmware
+ * - No, w/o marking: no need to save/restore, since the register is R/O or
+ * used internally by the HW in a way that doesn't depend
+ * on keeping the content across a suspend/resume.
+ * - Debug : used for debugging
+ *
+ * We save/restore all registers marked with 'Driver', with the following
+ * exceptions:
+ * - Registers out of use, including also registers marked with 'Debug'.
+ * These have no effect on the driver's operation, so we don't save/restore
+ * them to reduce the overhead.
+ * - Registers that are fully setup by an initialization function called from
+ * the resume path. For example many clock gating and RPS/RC6 registers.
+ * - Registers that provide the right functionality with their reset defaults.
+ *
+ * TODO: Except for registers that based on the above 3 criteria can be safely
+ * ignored, we save/restore all others, practically treating the HW context as
+ * a black-box for the driver. Further investigation is needed to reduce the
+ * saved/restored registers even further, by following the same 3 criteria.
+ */
+static void vlv_save_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+ struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+ struct intel_uncore *uncore = &i915->uncore;
+ int i;
+
+ if (!s)
+ return;
+
+ /* GAM 0x4000-0x4770 */
+ s->wr_watermark = intel_uncore_read(uncore, GEN7_WR_WATERMARK);
+ s->gfx_prio_ctrl = intel_uncore_read(uncore, GEN7_GFX_PRIO_CTRL);
+ s->arb_mode = intel_uncore_read(uncore, ARB_MODE);
+ s->gfx_pend_tlb0 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB0);
+ s->gfx_pend_tlb1 = intel_uncore_read(uncore, GEN7_GFX_PEND_TLB1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ s->lra_limits[i] = intel_uncore_read(uncore, GEN7_LRA_LIMITS(i));
+
+ s->media_max_req_count = intel_uncore_read(uncore, GEN7_MEDIA_MAX_REQ_COUNT);
+ s->gfx_max_req_count = intel_uncore_read(uncore, GEN7_GFX_MAX_REQ_COUNT);
+
+ s->render_hwsp = intel_uncore_read(uncore, RENDER_HWS_PGA_GEN7);
+ s->ecochk = intel_uncore_read(uncore, GAM_ECOCHK);
+ s->bsd_hwsp = intel_uncore_read(uncore, BSD_HWS_PGA_GEN7);
+ s->blt_hwsp = intel_uncore_read(uncore, BLT_HWS_PGA_GEN7);
+
+ s->tlb_rd_addr = intel_uncore_read(uncore, GEN7_TLB_RD_ADDR);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ s->g3dctl = intel_uncore_read(uncore, VLV_G3DCTL);
+ s->gsckgctl = intel_uncore_read(uncore, VLV_GSCKGCTL);
+ s->mbctl = intel_uncore_read(uncore, GEN6_MBCTL);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ s->ucgctl1 = intel_uncore_read(uncore, GEN6_UCGCTL1);
+ s->ucgctl3 = intel_uncore_read(uncore, GEN6_UCGCTL3);
+ s->rcgctl1 = intel_uncore_read(uncore, GEN6_RCGCTL1);
+ s->rcgctl2 = intel_uncore_read(uncore, GEN6_RCGCTL2);
+ s->rstctl = intel_uncore_read(uncore, GEN6_RSTCTL);
+ s->misccpctl = intel_uncore_read(uncore, GEN7_MISCCPCTL);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ s->gfxpause = intel_uncore_read(uncore, GEN6_GFXPAUSE);
+ s->rpdeuhwtc = intel_uncore_read(uncore, GEN6_RPDEUHWTC);
+ s->rpdeuc = intel_uncore_read(uncore, GEN6_RPDEUC);
+ s->ecobus = intel_uncore_read(uncore, ECOBUS);
+ s->pwrdwnupctl = intel_uncore_read(uncore, VLV_PWRDWNUPCTL);
+ s->rp_down_timeout = intel_uncore_read(uncore, GEN6_RP_DOWN_TIMEOUT);
+ s->rp_deucsw = intel_uncore_read(uncore, GEN6_RPDEUCSW);
+ s->rcubmabdtmr = intel_uncore_read(uncore, GEN6_RCUBMABDTMR);
+ s->rcedata = intel_uncore_read(uncore, VLV_RCEDATA);
+ s->spare2gh = intel_uncore_read(uncore, VLV_SPAREG2H);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ s->gt_imr = intel_uncore_read(uncore, GTIMR);
+ s->gt_ier = intel_uncore_read(uncore, GTIER);
+ s->pm_imr = intel_uncore_read(uncore, GEN6_PMIMR);
+ s->pm_ier = intel_uncore_read(uncore, GEN6_PMIER);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ s->gt_scratch[i] = intel_uncore_read(uncore, GEN7_GT_SCRATCH(i));
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ s->tilectl = intel_uncore_read(uncore, TILECTL);
+ s->gt_fifoctl = intel_uncore_read(uncore, GTFIFOCTL);
+ s->gtlc_wake_ctrl = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ s->gtlc_survive = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ s->pmwgicz = intel_uncore_read(uncore, VLV_PMWGICZ);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ s->gu_ctl0 = intel_uncore_read(uncore, VLV_GU_CTL0);
+ s->gu_ctl1 = intel_uncore_read(uncore, VLV_GU_CTL1);
+ s->pcbr = intel_uncore_read(uncore, VLV_PCBR);
+ s->clock_gate_dis2 = intel_uncore_read(uncore, VLV_GUNIT_CLOCK_GATE2);
+
+ /*
+ * Not saving any of:
+ * DFT, 0x9800-0x9EC0
+ * SARB, 0xB000-0xB1FC
+ * GAC, 0x5208-0x524C, 0x14000-0x14C000
+ * PCI CFG
+ */
+}
+
+static void vlv_restore_gunit_s0ix_state(struct drm_i915_private *i915)
+{
+ struct vlv_s0ix_state *s = i915->vlv_s0ix_state;
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+ int i;
+
+ if (!s)
+ return;
+
+ /* GAM 0x4000-0x4770 */
+ intel_uncore_write(uncore, GEN7_WR_WATERMARK, s->wr_watermark);
+ intel_uncore_write(uncore, GEN7_GFX_PRIO_CTRL, s->gfx_prio_ctrl);
+ intel_uncore_write(uncore, ARB_MODE, s->arb_mode | (0xffff << 16));
+ intel_uncore_write(uncore, GEN7_GFX_PEND_TLB0, s->gfx_pend_tlb0);
+ intel_uncore_write(uncore, GEN7_GFX_PEND_TLB1, s->gfx_pend_tlb1);
+
+ for (i = 0; i < ARRAY_SIZE(s->lra_limits); i++)
+ intel_uncore_write(uncore, GEN7_LRA_LIMITS(i), s->lra_limits[i]);
+
+ intel_uncore_write(uncore, GEN7_MEDIA_MAX_REQ_COUNT, s->media_max_req_count);
+ intel_uncore_write(uncore, GEN7_GFX_MAX_REQ_COUNT, s->gfx_max_req_count);
+
+ intel_uncore_write(uncore, RENDER_HWS_PGA_GEN7, s->render_hwsp);
+ intel_uncore_write(uncore, GAM_ECOCHK, s->ecochk);
+ intel_uncore_write(uncore, BSD_HWS_PGA_GEN7, s->bsd_hwsp);
+ intel_uncore_write(uncore, BLT_HWS_PGA_GEN7, s->blt_hwsp);
+
+ intel_uncore_write(uncore, GEN7_TLB_RD_ADDR, s->tlb_rd_addr);
+
+ /* MBC 0x9024-0x91D0, 0x8500 */
+ intel_uncore_write(uncore, VLV_G3DCTL, s->g3dctl);
+ intel_uncore_write(uncore, VLV_GSCKGCTL, s->gsckgctl);
+ intel_uncore_write(uncore, GEN6_MBCTL, s->mbctl);
+
+ /* GCP 0x9400-0x9424, 0x8100-0x810C */
+ intel_uncore_write(uncore, GEN6_UCGCTL1, s->ucgctl1);
+ intel_uncore_write(uncore, GEN6_UCGCTL3, s->ucgctl3);
+ intel_uncore_write(uncore, GEN6_RCGCTL1, s->rcgctl1);
+ intel_uncore_write(uncore, GEN6_RCGCTL2, s->rcgctl2);
+ intel_uncore_write(uncore, GEN6_RSTCTL, s->rstctl);
+ intel_uncore_write(uncore, GEN7_MISCCPCTL, s->misccpctl);
+
+ /* GPM 0xA000-0xAA84, 0x8000-0x80FC */
+ intel_uncore_write(uncore, GEN6_GFXPAUSE, s->gfxpause);
+ intel_uncore_write(uncore, GEN6_RPDEUHWTC, s->rpdeuhwtc);
+ intel_uncore_write(uncore, GEN6_RPDEUC, s->rpdeuc);
+ intel_uncore_write(uncore, ECOBUS, s->ecobus);
+ intel_uncore_write(uncore, VLV_PWRDWNUPCTL, s->pwrdwnupctl);
+ intel_uncore_write(uncore, GEN6_RP_DOWN_TIMEOUT, s->rp_down_timeout);
+ intel_uncore_write(uncore, GEN6_RPDEUCSW, s->rp_deucsw);
+ intel_uncore_write(uncore, GEN6_RCUBMABDTMR, s->rcubmabdtmr);
+ intel_uncore_write(uncore, VLV_RCEDATA, s->rcedata);
+ intel_uncore_write(uncore, VLV_SPAREG2H, s->spare2gh);
+
+ /* Display CZ domain, 0x4400C-0x4402C, 0x4F000-0x4F11F */
+ intel_uncore_write(uncore, GTIMR, s->gt_imr);
+ intel_uncore_write(uncore, GTIER, s->gt_ier);
+ intel_uncore_write(uncore, GEN6_PMIMR, s->pm_imr);
+ intel_uncore_write(uncore, GEN6_PMIER, s->pm_ier);
+
+ for (i = 0; i < ARRAY_SIZE(s->gt_scratch); i++)
+ intel_uncore_write(uncore, GEN7_GT_SCRATCH(i), s->gt_scratch[i]);
+
+ /* GT SA CZ domain, 0x100000-0x138124 */
+ intel_uncore_write(uncore, TILECTL, s->tilectl);
+ intel_uncore_write(uncore, GTFIFOCTL, s->gt_fifoctl);
+ /*
+ * Preserve the GT allow wake and GFX force clock bits; they are not
+ * restored here, as they are used to control the s0ix suspend/resume
+ * sequence by the caller.
+ */
+ val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ val &= VLV_GTLC_ALLOWWAKEREQ;
+ val |= s->gtlc_wake_ctrl & ~VLV_GTLC_ALLOWWAKEREQ;
+ intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+
+ val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ val &= VLV_GFX_CLK_FORCE_ON_BIT;
+ val |= s->gtlc_survive & ~VLV_GFX_CLK_FORCE_ON_BIT;
+ intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+ intel_uncore_write(uncore, VLV_PMWGICZ, s->pmwgicz);
+
+ /* Gunit-Display CZ domain, 0x182028-0x1821CF */
+ intel_uncore_write(uncore, VLV_GU_CTL0, s->gu_ctl0);
+ intel_uncore_write(uncore, VLV_GU_CTL1, s->gu_ctl1);
+ intel_uncore_write(uncore, VLV_PCBR, s->pcbr);
+ intel_uncore_write(uncore, VLV_GUNIT_CLOCK_GATE2, s->clock_gate_dis2);
+}
+
+static int vlv_wait_for_pw_status(struct drm_i915_private *i915,
+ u32 mask, u32 val)
+{
+ i915_reg_t reg = VLV_GTLC_PW_STATUS;
+ u32 reg_value;
+ int ret;
+
+ /*
+ * The HW does not like us polling for PW_STATUS frequently, so
+ * use the sleeping loop rather than risk the busy spin within
+ * intel_wait_for_register().
+ *
+ * Transitioning between RC6 states should be at most 2ms (see
+ * valleyview_enable_rps) so use a 3ms timeout.
+ */
+ ret = wait_for(((reg_value =
+ intel_uncore_read_notrace(&i915->uncore, reg)) & mask)
+ == val, 3);
+
+ /* just trace the final value */
+ trace_i915_reg_rw(false, reg, reg_value, sizeof(reg_value), true);
+
+ return ret;
+}
+
+static int vlv_force_gfx_clock(struct drm_i915_private *i915, bool force_on)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 val;
+ int err;
+
+ val = intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG);
+ val &= ~VLV_GFX_CLK_FORCE_ON_BIT;
+ if (force_on)
+ val |= VLV_GFX_CLK_FORCE_ON_BIT;
+ intel_uncore_write(uncore, VLV_GTLC_SURVIVABILITY_REG, val);
+
+ if (!force_on)
+ return 0;
+
+ err = intel_wait_for_register(uncore,
+ VLV_GTLC_SURVIVABILITY_REG,
+ VLV_GFX_CLK_STATUS_BIT,
+ VLV_GFX_CLK_STATUS_BIT,
+ 20);
+ if (err)
+ drm_err(&i915->drm,
+ "timeout waiting for GFX clock force-on (%08x)\n",
+ intel_uncore_read(uncore, VLV_GTLC_SURVIVABILITY_REG));
+
+ return err;
+}
+
+static int vlv_allow_gt_wake(struct drm_i915_private *i915, bool allow)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+ u32 mask;
+ u32 val;
+ int err;
+
+ val = intel_uncore_read(uncore, VLV_GTLC_WAKE_CTRL);
+ val &= ~VLV_GTLC_ALLOWWAKEREQ;
+ if (allow)
+ val |= VLV_GTLC_ALLOWWAKEREQ;
+ intel_uncore_write(uncore, VLV_GTLC_WAKE_CTRL, val);
+ intel_uncore_posting_read(uncore, VLV_GTLC_WAKE_CTRL);
+
+ mask = VLV_GTLC_ALLOWWAKEACK;
+ val = allow ? mask : 0;
+
+ err = vlv_wait_for_pw_status(i915, mask, val);
+ if (err)
+ drm_err(&i915->drm, "timeout disabling GT waking\n");
+
+ return err;
+}
+
+static void vlv_wait_for_gt_wells(struct drm_i915_private *dev_priv,
+ bool wait_for_on)
+{
+ u32 mask;
+ u32 val;
+
+ mask = VLV_GTLC_PW_MEDIA_STATUS_MASK | VLV_GTLC_PW_RENDER_STATUS_MASK;
+ val = wait_for_on ? mask : 0;
+
+ /*
+ * RC6 transitioning can be delayed up to 2 msec (see
+ * valleyview_enable_rps), use 3 msec for safety.
+ *
+ * This can fail to turn off RC6 if the GPU is stuck after a failed
+ * reset and we are trying to force the machine to sleep.
+ */
+ if (vlv_wait_for_pw_status(dev_priv, mask, val))
+ drm_dbg(&dev_priv->drm,
+ "timeout waiting for GT wells to go %s\n",
+ onoff(wait_for_on));
+}
+
+static void vlv_check_no_gt_access(struct drm_i915_private *i915)
+{
+ struct intel_uncore *uncore = &i915->uncore;
+
+ if (!(intel_uncore_read(uncore, VLV_GTLC_PW_STATUS) & VLV_GTLC_ALLOWWAKEERR))
+ return;
+
+ drm_dbg(&i915->drm, "GT register access while GT waking disabled\n");
+ intel_uncore_write(uncore, VLV_GTLC_PW_STATUS, VLV_GTLC_ALLOWWAKEERR);
+}
+
+int vlv_suspend_complete(struct drm_i915_private *dev_priv)
+{
+ u32 mask;
+ int err;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ /*
+ * Bspec defines the following GT well-on flags as debug only, so
+ * don't treat them as hard failures.
+ */
+ vlv_wait_for_gt_wells(dev_priv, false);
+
+ mask = VLV_GTLC_RENDER_CTX_EXISTS | VLV_GTLC_MEDIA_CTX_EXISTS;
+ drm_WARN_ON(&dev_priv->drm,
+ (intel_uncore_read(&dev_priv->uncore, VLV_GTLC_WAKE_CTRL) & mask) != mask);
+
+ vlv_check_no_gt_access(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, true);
+ if (err)
+ goto err1;
+
+ err = vlv_allow_gt_wake(dev_priv, false);
+ if (err)
+ goto err2;
+
+ vlv_save_gunit_s0ix_state(dev_priv);
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (err)
+ goto err2;
+
+ return 0;
+
+err2:
+ /* For safety always re-enable waking and disable gfx clock forcing */
+ vlv_allow_gt_wake(dev_priv, true);
+err1:
+ vlv_force_gfx_clock(dev_priv, false);
+
+ return err;
+}
+
+int vlv_resume_prepare(struct drm_i915_private *dev_priv, bool rpm_resume)
+{
+ int err;
+ int ret;
+
+ if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ return 0;
+
+ /*
+ * If any of the steps fail, just try to continue; that's the best we
+ * can do at this point. Return the first error code (which will also
+ * leave RPM permanently disabled).
+ */
+ ret = vlv_force_gfx_clock(dev_priv, true);
+
+ vlv_restore_gunit_s0ix_state(dev_priv);
+
+ err = vlv_allow_gt_wake(dev_priv, true);
+ if (!ret)
+ ret = err;
+
+ err = vlv_force_gfx_clock(dev_priv, false);
+ if (!ret)
+ ret = err;
+
+ vlv_check_no_gt_access(dev_priv);
+
+ if (rpm_resume)
+ intel_init_clock_gating(dev_priv);
+
+ return ret;
+}
+
+int vlv_suspend_init(struct drm_i915_private *i915)
+{
+ if (!IS_VALLEYVIEW(i915))
+ return 0;
+
+ /* we write all the values in the struct, so no need to zero it out */
+ i915->vlv_s0ix_state = kmalloc(sizeof(*i915->vlv_s0ix_state),
+ GFP_KERNEL);
+ if (!i915->vlv_s0ix_state)
+ return -ENOMEM;
+
+ return 0;
+}
+
+void vlv_suspend_cleanup(struct drm_i915_private *i915)
+{
+ if (!i915->vlv_s0ix_state)
+ return;
+
+ kfree(i915->vlv_s0ix_state);
+ i915->vlv_s0ix_state = NULL;
+}
diff --git a/drivers/gpu/drm/i915/vlv_suspend.h b/drivers/gpu/drm/i915/vlv_suspend.h
new file mode 100644
index 000000000000..895091cb1f62
--- /dev/null
+++ b/drivers/gpu/drm/i915/vlv_suspend.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * Copyright © 2020 Intel Corporation
+ */
+
+#ifndef __VLV_SUSPEND_H__
+#define __VLV_SUSPEND_H__
+
+#include <linux/types.h>
+
+struct drm_i915_private;
+
+int vlv_suspend_init(struct drm_i915_private *i915);
+void vlv_suspend_cleanup(struct drm_i915_private *i915);
+int vlv_suspend_complete(struct drm_i915_private *i915);
+int vlv_resume_prepare(struct drm_i915_private *i915, bool rpm_resume);
+
+#endif /* __VLV_SUSPEND_H__ */
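
The ordering in vlv_suspend_complete() is deliberate: the GFX clock is forced on so the Gunit registers stay accessible, GT wake is then disallowed, the state is saved, and the forced clock is finally released, with the error labels unwinding in reverse order. A minimal sketch of a runtime-suspend caller of this API follows; the function name is hypothetical and the actual D3 transition is elided.

static int example_runtime_suspend(struct drm_i915_private *i915)
{
	int ret;

	/* Force-on clock, disallow wake, save Gunit state, release clock */
	ret = vlv_suspend_complete(i915);
	if (ret)
		return ret;	/* device is left awake on failure */

	/* ... enter D3 here; vlv_resume_prepare() undoes this on wakeup ... */
	return 0;
}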
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 8cb2665b2c74..4da22a94790c 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -446,7 +446,7 @@ static int imx_ldb_register(struct drm_device *drm,
if (imx_ldb_ch->bridge) {
ret = drm_bridge_attach(&imx_ldb_ch->encoder,
- imx_ldb_ch->bridge, NULL);
+ imx_ldb_ch->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to initialize bridge with drm\n");
return ret;
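
This hunk, like several below (ingenic, mtk_dpi, mtk_dsi, mcde_dsi), adapts to the extra flags argument drm_bridge_attach() now takes. A hedged sketch of the call, with a hypothetical helper name: passing 0 keeps the legacy behaviour where the bridge chain creates its own connector, while DRM_BRIDGE_ATTACH_NO_CONNECTOR asks bridges to skip connector creation so the display driver can provide one.

static int example_attach_chain(struct drm_encoder *encoder,
				struct drm_bridge *bridge)
{
	/*
	 * 0 keeps the historical behaviour: the bridge chain creates the
	 * connector. Drivers that construct their own connector would pass
	 * DRM_BRIDGE_ATTACH_NO_CONNECTOR instead.
	 */
	return drm_bridge_attach(encoder, bridge, NULL, 0);
}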
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 28826c0aa24a..6776ebb3246d 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -359,7 +359,7 @@ static int ipu_plane_atomic_check(struct drm_plane *plane,
if (!fb)
return 0;
- if (!state->crtc)
+ if (WARN_ON(!state->crtc))
return -EINVAL;
crtc_state =
diff --git a/drivers/gpu/drm/imx/parallel-display.c b/drivers/gpu/drm/imx/parallel-display.c
index 3dca424059f7..08fafa4bf8c2 100644
--- a/drivers/gpu/drm/imx/parallel-display.c
+++ b/drivers/gpu/drm/imx/parallel-display.c
@@ -24,6 +24,7 @@
struct imx_parallel_display {
struct drm_connector connector;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct device *dev;
void *edid;
int edid_len;
@@ -31,7 +32,7 @@ struct imx_parallel_display {
u32 bus_flags;
struct drm_display_mode mode;
struct drm_panel *panel;
- struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
};
static inline struct imx_parallel_display *con_to_imxpd(struct drm_connector *c)
@@ -44,6 +45,11 @@ static inline struct imx_parallel_display *enc_to_imxpd(struct drm_encoder *e)
return container_of(e, struct imx_parallel_display, encoder);
}
+static inline struct imx_parallel_display *bridge_to_imxpd(struct drm_bridge *b)
+{
+ return container_of(b, struct imx_parallel_display, bridge);
+}
+
static int imx_pd_connector_get_modes(struct drm_connector *connector)
{
struct imx_parallel_display *imxpd = con_to_imxpd(connector);
@@ -89,37 +95,148 @@ static struct drm_encoder *imx_pd_connector_best_encoder(
return &imxpd->encoder;
}
-static void imx_pd_encoder_enable(struct drm_encoder *encoder)
+static void imx_pd_bridge_enable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_prepare(imxpd->panel);
drm_panel_enable(imxpd->panel);
}
-static void imx_pd_encoder_disable(struct drm_encoder *encoder)
+static void imx_pd_bridge_disable(struct drm_bridge *bridge)
{
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
drm_panel_disable(imxpd->panel);
drm_panel_unprepare(imxpd->panel);
}
-static int imx_pd_encoder_atomic_check(struct drm_encoder *encoder,
- struct drm_crtc_state *crtc_state,
- struct drm_connector_state *conn_state)
+static const u32 imx_pd_bus_fmts[] = {
+ MEDIA_BUS_FMT_RGB888_1X24,
+ MEDIA_BUS_FMT_BGR888_1X24,
+ MEDIA_BUS_FMT_GBR888_1X24,
+ MEDIA_BUS_FMT_RGB666_1X18,
+ MEDIA_BUS_FMT_RGB666_1X24_CPADHI,
+ MEDIA_BUS_FMT_RGB565_1X16,
+};
+
+static u32 *
+imx_pd_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts)
{
- struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
struct drm_display_info *di = &conn_state->connector->display_info;
- struct imx_parallel_display *imxpd = enc_to_imxpd(encoder);
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *output_fmts;
- if (!imxpd->bus_format && di->num_bus_formats) {
- imx_crtc_state->bus_flags = di->bus_flags;
- imx_crtc_state->bus_format = di->bus_formats[0];
- } else {
- imx_crtc_state->bus_flags = imxpd->bus_flags;
- imx_crtc_state->bus_format = imxpd->bus_format;
+ if (!imxpd->bus_format && !di->num_bus_formats) {
+ *num_output_fmts = ARRAY_SIZE(imx_pd_bus_fmts);
+ return kmemdup(imx_pd_bus_fmts, sizeof(imx_pd_bus_fmts),
+ GFP_KERNEL);
+ }
+
+ *num_output_fmts = 1;
+ output_fmts = kmalloc(sizeof(*output_fmts), GFP_KERNEL);
+ if (!output_fmts)
+ return NULL;
+
+ if (!imxpd->bus_format && di->num_bus_formats)
+ output_fmts[0] = di->bus_formats[0];
+ else
+ output_fmts[0] = imxpd->bus_format;
+
+ return output_fmts;
+}
+
+static bool imx_pd_format_supported(u32 output_fmt)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(imx_pd_bus_fmts); i++) {
+ if (imx_pd_bus_fmts[i] == output_fmt)
+ return true;
+ }
+
+ return false;
+}
+
+static u32 *
+imx_pd_bridge_atomic_get_input_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ u32 *input_fmts;
+
+ /*
+ * If the next bridge does not support bus format negotiation, let's
+ * use the static bus format definition (imxpd->bus_format) if it's
+ * specified, RGB888 when it's not.
+ */
+ if (output_fmt == MEDIA_BUS_FMT_FIXED)
+ output_fmt = imxpd->bus_format ? : MEDIA_BUS_FMT_RGB888_1X24;
+
+ /* Now make sure the requested output format is supported. */
+ if ((imxpd->bus_format && imxpd->bus_format != output_fmt) ||
+ !imx_pd_format_supported(output_fmt)) {
+ *num_input_fmts = 0;
+ return NULL;
+ }
+
+ *num_input_fmts = 1;
+ input_fmts = kmalloc(sizeof(*input_fmts), GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+ return input_fmts;
+}
+
+static int imx_pd_bridge_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct imx_crtc_state *imx_crtc_state = to_imx_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct imx_parallel_display *imxpd = bridge_to_imxpd(bridge);
+ struct drm_bridge_state *next_bridge_state = NULL;
+ struct drm_bridge *next_bridge;
+ u32 bus_flags, bus_fmt;
+
+ next_bridge = drm_bridge_get_next_bridge(bridge);
+ if (next_bridge)
+ next_bridge_state = drm_atomic_get_new_bridge_state(crtc_state->state,
+ next_bridge);
+
+ if (next_bridge_state)
+ bus_flags = next_bridge_state->input_bus_cfg.flags;
+ else if (!imxpd->bus_format && di->num_bus_formats)
+ bus_flags = di->bus_flags;
+ else
+ bus_flags = imxpd->bus_flags;
+
+ bus_fmt = bridge_state->input_bus_cfg.format;
+ if (!imx_pd_format_supported(bus_fmt))
+ return -EINVAL;
+
+ if (bus_flags &
+ ~(DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE)) {
+ dev_warn(imxpd->dev, "invalid bus_flags (%x)\n", bus_flags);
+ return -EINVAL;
}
+
+ bridge_state->output_bus_cfg.flags = bus_flags;
+ bridge_state->input_bus_cfg.flags = bus_flags;
+ imx_crtc_state->bus_flags = bus_flags;
+ imx_crtc_state->bus_format = bridge_state->input_bus_cfg.format;
imx_crtc_state->di_hsync_pin = 2;
imx_crtc_state->di_vsync_pin = 3;
@@ -143,10 +260,15 @@ static const struct drm_encoder_funcs imx_pd_encoder_funcs = {
.destroy = imx_drm_encoder_destroy,
};
-static const struct drm_encoder_helper_funcs imx_pd_encoder_helper_funcs = {
- .enable = imx_pd_encoder_enable,
- .disable = imx_pd_encoder_disable,
- .atomic_check = imx_pd_encoder_atomic_check,
+static const struct drm_bridge_funcs imx_pd_bridge_funcs = {
+ .enable = imx_pd_bridge_enable,
+ .disable = imx_pd_bridge_disable,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_check = imx_pd_bridge_atomic_check,
+ .atomic_get_input_bus_fmts = imx_pd_bridge_atomic_get_input_bus_fmts,
+ .atomic_get_output_bus_fmts = imx_pd_bridge_atomic_get_output_bus_fmts,
};
static int imx_pd_register(struct drm_device *drm,
@@ -166,11 +288,13 @@ static int imx_pd_register(struct drm_device *drm,
*/
imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
- drm_encoder_helper_add(encoder, &imx_pd_encoder_helper_funcs);
drm_encoder_init(drm, encoder, &imx_pd_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
- if (!imxpd->bridge) {
+ imxpd->bridge.funcs = &imx_pd_bridge_funcs;
+ drm_bridge_attach(encoder, &imxpd->bridge, NULL, 0);
+
+ if (!imxpd->next_bridge) {
drm_connector_helper_add(&imxpd->connector,
&imx_pd_connector_helper_funcs);
drm_connector_init(drm, &imxpd->connector,
@@ -181,8 +305,9 @@ static int imx_pd_register(struct drm_device *drm,
if (imxpd->panel)
drm_panel_attach(imxpd->panel, &imxpd->connector);
- if (imxpd->bridge) {
- ret = drm_bridge_attach(encoder, imxpd->bridge, NULL);
+ if (imxpd->next_bridge) {
+ ret = drm_bridge_attach(encoder, imxpd->next_bridge,
+ &imxpd->bridge, 0);
if (ret < 0) {
dev_err(imxpd->dev, "failed to attach bridge: %d\n",
ret);
@@ -227,7 +352,8 @@ static int imx_pd_bind(struct device *dev, struct device *master, void *data)
imxpd->bus_format = bus_format;
/* port@1 is the output port */
- ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel, &imxpd->bridge);
+ ret = drm_of_find_panel_or_bridge(np, 1, 0, &imxpd->panel,
+ &imxpd->next_bridge);
if (ret && ret != -ENODEV)
return ret;
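
The conversion above exercises the bridge bus-format negotiation API: .atomic_get_output_bus_fmts() lists what the bridge can emit, .atomic_get_input_bus_fmts() maps a requested output format back to the format(s) accepted on input, and MEDIA_BUS_FMT_FIXED marks a downstream element that expressed no preference. A hedged sketch of the pattern for a hypothetical pass-through bridge that accepts a single format:

static u32 *
example_get_input_bus_fmts(struct drm_bridge *bridge,
			   struct drm_bridge_state *bridge_state,
			   struct drm_crtc_state *crtc_state,
			   struct drm_connector_state *conn_state,
			   u32 output_fmt,
			   unsigned int *num_input_fmts)
{
	u32 *fmts;

	/* The next element did not negotiate; pick a sane default */
	if (output_fmt == MEDIA_BUS_FMT_FIXED)
		output_fmt = MEDIA_BUS_FMT_RGB888_1X24;

	fmts = kmalloc(sizeof(*fmts), GFP_KERNEL);
	if (!fmts) {
		*num_input_fmts = 0;
		return NULL;
	}

	fmts[0] = output_fmt;	/* pass-through: input mirrors output */
	*num_input_fmts = 1;
	return fmts;
}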
diff --git a/drivers/gpu/drm/ingenic/ingenic-drm.c b/drivers/gpu/drm/ingenic/ingenic-drm.c
index 6d47ef7b148c..9dfe7cb530e1 100644
--- a/drivers/gpu/drm/ingenic/ingenic-drm.c
+++ b/drivers/gpu/drm/ingenic/ingenic-drm.c
@@ -737,7 +737,7 @@ static int ingenic_drm_probe(struct platform_device *pdev)
return ret;
}
- ret = drm_bridge_attach(&priv->encoder, bridge, NULL);
+ ret = drm_bridge_attach(&priv->encoder, bridge, NULL, 0);
if (ret) {
dev_err(dev, "Unable to attach bridge");
return ret;
diff --git a/drivers/gpu/drm/lima/lima_drv.c b/drivers/gpu/drm/lima/lima_drv.c
index 124efe4fa97b..2daac64d8955 100644
--- a/drivers/gpu/drm/lima/lima_drv.c
+++ b/drivers/gpu/drm/lima/lima_drv.c
@@ -15,10 +15,14 @@
#include "lima_vm.h"
int lima_sched_timeout_ms;
+uint lima_heap_init_nr_pages = 8;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
+MODULE_PARM_DESC(heap_init_nr_pages, "initial number of pages in a heap buffer");
+module_param_named(heap_init_nr_pages, lima_heap_init_nr_pages, uint, 0444);
+
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
@@ -68,7 +72,7 @@ static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_
if (args->pad)
return -EINVAL;
- if (args->flags)
+ if (args->flags & ~(LIMA_BO_FLAG_HEAP))
return -EINVAL;
if (args->size == 0)
@@ -241,6 +245,12 @@ static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DEFINE_DRM_GEM_FOPS(lima_drm_driver_fops);
+/*
+ * Changelog:
+ *
+ * - 1.1.0 - add heap buffer support
+ */
+
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
@@ -250,9 +260,9 @@ static struct drm_driver lima_drm_driver = {
.fops = &lima_drm_driver_fops,
.name = "lima",
.desc = "lima DRM",
- .date = "20190217",
+ .date = "20191231",
.major = 1,
- .minor = 0,
+ .minor = 1,
.patchlevel = 0,
.gem_create_object = lima_gem_create_object,
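
The 1.1.0 bump advertises heap-buffer support to userspace, which opts in per BO with LIMA_BO_FLAG_HEAP at creation time. A hedged userspace sketch, assuming the uapi header from a kernel carrying this series and an already-open lima render node; the helper name, size, and error handling are illustrative only:

#include <sys/ioctl.h>
#include <drm/lima_drm.h>

static int create_heap_bo(int fd, __u32 max_size, __u32 *handle)
{
	struct drm_lima_gem_create req = {
		.size = max_size,		/* upper bound; pages are backed on demand */
		.flags = LIMA_BO_FLAG_HEAP,	/* growable PLBU heap buffer */
	};
	int ret = ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &req);

	if (ret == 0)
		*handle = req.handle;
	return ret;
}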
diff --git a/drivers/gpu/drm/lima/lima_drv.h b/drivers/gpu/drm/lima/lima_drv.h
index 69c7344715c9..f492ecc6a5d9 100644
--- a/drivers/gpu/drm/lima/lima_drv.h
+++ b/drivers/gpu/drm/lima/lima_drv.h
@@ -9,6 +9,7 @@
#include "lima_ctx.h"
extern int lima_sched_timeout_ms;
+extern uint lima_heap_init_nr_pages;
struct lima_vm;
struct lima_bo;
diff --git a/drivers/gpu/drm/lima/lima_gem.c b/drivers/gpu/drm/lima/lima_gem.c
index d0059d8c97d8..5404e0d668db 100644
--- a/drivers/gpu/drm/lima/lima_gem.c
+++ b/drivers/gpu/drm/lima/lima_gem.c
@@ -4,6 +4,8 @@
#include <linux/mm.h>
#include <linux/sync_file.h>
#include <linux/pagemap.h>
+#include <linux/shmem_fs.h>
+#include <linux/dma-mapping.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
@@ -15,6 +17,83 @@
#include "lima_gem.h"
#include "lima_vm.h"
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm)
+{
+ struct page **pages;
+ struct address_space *mapping = bo->base.base.filp->f_mapping;
+ struct device *dev = bo->base.base.dev->dev;
+ size_t old_size = bo->heap_size;
+ size_t new_size = bo->heap_size ? bo->heap_size * 2 :
+ (lima_heap_init_nr_pages << PAGE_SHIFT);
+ struct sg_table sgt;
+ int i, ret;
+
+ if (bo->heap_size >= bo->base.base.size)
+ return -ENOSPC;
+
+ new_size = min(new_size, bo->base.base.size);
+
+ mutex_lock(&bo->base.pages_lock);
+
+ if (bo->base.pages) {
+ pages = bo->base.pages;
+ } else {
+ pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
+ sizeof(*pages), GFP_KERNEL | __GFP_ZERO);
+ if (!pages) {
+ mutex_unlock(&bo->base.pages_lock);
+ return -ENOMEM;
+ }
+
+ bo->base.pages = pages;
+ bo->base.pages_use_count = 1;
+
+ mapping_set_unevictable(mapping);
+ }
+
+ for (i = old_size >> PAGE_SHIFT; i < new_size >> PAGE_SHIFT; i++) {
+ struct page *page = shmem_read_mapping_page(mapping, i);
+
+ if (IS_ERR(page)) {
+ mutex_unlock(&bo->base.pages_lock);
+ return PTR_ERR(page);
+ }
+ pages[i] = page;
+ }
+
+ mutex_unlock(&bo->base.pages_lock);
+
+ ret = sg_alloc_table_from_pages(&sgt, pages, i, 0,
+ new_size, GFP_KERNEL);
+ if (ret)
+ return ret;
+
+ if (bo->base.sgt) {
+ dma_unmap_sg(dev, bo->base.sgt->sgl,
+ bo->base.sgt->nents, DMA_BIDIRECTIONAL);
+ sg_free_table(bo->base.sgt);
+ } else {
+ bo->base.sgt = kmalloc(sizeof(*bo->base.sgt), GFP_KERNEL);
+ if (!bo->base.sgt) {
+ sg_free_table(&sgt);
+ return -ENOMEM;
+ }
+ }
+
+ dma_map_sg(dev, sgt.sgl, sgt.nents, DMA_BIDIRECTIONAL);
+
+ *bo->base.sgt = sgt;
+
+ if (vm) {
+ ret = lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
+ if (ret)
+ return ret;
+ }
+
+ bo->heap_size = new_size;
+ return 0;
+}
+
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
@@ -22,7 +101,8 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
gfp_t mask;
struct drm_gem_shmem_object *shmem;
struct drm_gem_object *obj;
- struct sg_table *sgt;
+ struct lima_bo *bo;
+ bool is_heap = flags & LIMA_BO_FLAG_HEAP;
shmem = drm_gem_shmem_create(dev, size);
if (IS_ERR(shmem))
@@ -36,10 +116,18 @@ int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
mask |= __GFP_DMA32;
mapping_set_gfp_mask(obj->filp->f_mapping, mask);
- sgt = drm_gem_shmem_get_pages_sgt(obj);
- if (IS_ERR(sgt)) {
- err = PTR_ERR(sgt);
- goto out;
+ if (is_heap) {
+ bo = to_lima_bo(obj);
+ err = lima_heap_alloc(bo, NULL);
+ if (err)
+ goto out;
+ } else {
+ struct sg_table *sgt = drm_gem_shmem_get_pages_sgt(obj);
+
+ if (IS_ERR(sgt)) {
+ err = PTR_ERR(sgt);
+ goto out;
+ }
}
err = drm_gem_handle_create(file, obj, handle);
@@ -79,17 +167,47 @@ static void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *f
lima_vm_bo_del(vm, bo);
}
+static int lima_gem_pin(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_pin(obj);
+}
+
+static void *lima_gem_vmap(struct drm_gem_object *obj)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return ERR_PTR(-EINVAL);
+
+ return drm_gem_shmem_vmap(obj);
+}
+
+static int lima_gem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
+{
+ struct lima_bo *bo = to_lima_bo(obj);
+
+ if (bo->heap_size)
+ return -EINVAL;
+
+ return drm_gem_shmem_mmap(obj, vma);
+}
+
static const struct drm_gem_object_funcs lima_gem_funcs = {
.free = lima_gem_free_object,
.open = lima_gem_object_open,
.close = lima_gem_object_close,
.print_info = drm_gem_shmem_print_info,
- .pin = drm_gem_shmem_pin,
+ .pin = lima_gem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
- .vmap = drm_gem_shmem_vmap,
+ .vmap = lima_gem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = drm_gem_shmem_mmap,
+ .mmap = lima_gem_mmap,
};
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size)
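
lima_heap_alloc() grows the backing store geometrically: the first grow allocates lima_heap_init_nr_pages pages (8 by default, i.e. 32KiB with 4KiB pages), each subsequent grow doubles the current size, growth is clamped to the BO size, and -ENOSPC is returned once the heap is fully populated. A sketch of just that size computation, under the same assumptions:

static size_t next_heap_size(size_t cur, size_t bo_size, uint init_nr_pages)
{
	size_t next = cur ? cur * 2 : (size_t)init_nr_pages << PAGE_SHIFT;

	/* 32K -> 64K -> 128K -> ... capped at the BO's full size */
	return min(next, bo_size);
}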
diff --git a/drivers/gpu/drm/lima/lima_gem.h b/drivers/gpu/drm/lima/lima_gem.h
index 1800feb3e47f..ccea06142f4b 100644
--- a/drivers/gpu/drm/lima/lima_gem.h
+++ b/drivers/gpu/drm/lima/lima_gem.h
@@ -7,12 +7,15 @@
#include <drm/drm_gem_shmem_helper.h>
struct lima_submit;
+struct lima_vm;
struct lima_bo {
struct drm_gem_shmem_object base;
struct mutex lock;
struct list_head va;
+
+ size_t heap_size;
};
static inline struct lima_bo *
@@ -31,6 +34,7 @@ static inline struct dma_resv *lima_bo_resv(struct lima_bo *bo)
return bo->base.base.resv;
}
+int lima_heap_alloc(struct lima_bo *bo, struct lima_vm *vm);
struct drm_gem_object *lima_gem_create_object(struct drm_device *dev, size_t size);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
diff --git a/drivers/gpu/drm/lima/lima_gp.c b/drivers/gpu/drm/lima/lima_gp.c
index ccf49faedebf..d8841c870d90 100644
--- a/drivers/gpu/drm/lima/lima_gp.c
+++ b/drivers/gpu/drm/lima/lima_gp.c
@@ -11,6 +11,8 @@
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
+#include "lima_gem.h"
+#include "lima_vm.h"
#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)
@@ -20,6 +22,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
struct lima_ip *ip = data;
struct lima_device *dev = ip->dev;
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
+ struct lima_sched_task *task = pipe->current_task;
u32 state = gp_read(LIMA_GP_INT_STAT);
u32 status = gp_read(LIMA_GP_STATUS);
bool done = false;
@@ -29,8 +32,16 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
return IRQ_NONE;
if (state & LIMA_GP_IRQ_MASK_ERROR) {
- dev_err(dev->dev, "gp error irq state=%x status=%x\n",
- state, status);
+ if ((state & LIMA_GP_IRQ_MASK_ERROR) ==
+ LIMA_GP_IRQ_PLBU_OUT_OF_MEM) {
+ dev_dbg(dev->dev, "gp out of heap irq status=%x\n",
+ status);
+ } else {
+ dev_err(dev->dev, "gp error irq state=%x status=%x\n",
+ state, status);
+ if (task)
+ task->recoverable = false;
+ }
/* mask all interrupts before hard reset */
gp_write(LIMA_GP_INT_MASK, 0);
@@ -43,6 +54,7 @@ static irqreturn_t lima_gp_irq_handler(int irq, void *data)
bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
LIMA_GP_STATUS_PLBU_ACTIVE);
done = valid && !active;
+ pipe->error = false;
}
gp_write(LIMA_GP_INT_CLEAR, state);
@@ -121,6 +133,22 @@ static void lima_gp_task_run(struct lima_sched_pipe *pipe,
u32 cmd = 0;
int i;
+ /* update real heap buffer size for GP */
+ for (i = 0; i < task->num_bos; i++) {
+ struct lima_bo *bo = task->bos[i];
+
+ if (bo->heap_size &&
+ lima_vm_get_va(task->vm, bo) ==
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2]) {
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] +
+ bo->heap_size;
+ task->recoverable = true;
+ task->heap = bo;
+ break;
+ }
+ }
+
if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
f[LIMA_GP_VSCL_END_ADDR >> 2])
cmd |= LIMA_GP_CMD_START_VS;
@@ -184,6 +212,36 @@ static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
lima_sched_pipe_task_done(pipe);
}
+static int lima_gp_task_recover(struct lima_sched_pipe *pipe)
+{
+ struct lima_ip *ip = pipe->processor[0];
+ struct lima_sched_task *task = pipe->current_task;
+ struct drm_lima_gp_frame *frame = task->frame;
+ u32 *f = frame->frame;
+ size_t fail_size =
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] -
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2];
+
+ if (fail_size == task->heap->heap_size) {
+ int ret;
+
+ ret = lima_heap_alloc(task->heap, task->vm);
+ if (ret < 0)
+ return ret;
+ }
+
+ gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
+ /* Resume from where we stopped, i.e. new start is old end */
+ gp_write(LIMA_GP_PLBU_ALLOC_START_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2] =
+ f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] + task->heap->heap_size;
+ gp_write(LIMA_GP_PLBU_ALLOC_END_ADDR,
+ f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2]);
+ gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
+ return 0;
+}
+
static void lima_gp_print_version(struct lima_ip *ip)
{
u32 version, major, minor;
@@ -270,6 +328,7 @@ int lima_gp_pipe_init(struct lima_device *dev)
pipe->task_fini = lima_gp_task_fini;
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
+ pipe->task_recover = lima_gp_task_recover;
return 0;
}
diff --git a/drivers/gpu/drm/lima/lima_mmu.c b/drivers/gpu/drm/lima/lima_mmu.c
index 97ec09dee572..f79d2af427e7 100644
--- a/drivers/gpu/drm/lima/lima_mmu.c
+++ b/drivers/gpu/drm/lima/lima_mmu.c
@@ -99,6 +99,11 @@ void lima_mmu_fini(struct lima_ip *ip)
}
+void lima_mmu_flush_tlb(struct lima_ip *ip)
+{
+ mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
+}
+
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
struct lima_device *dev = ip->dev;
diff --git a/drivers/gpu/drm/lima/lima_mmu.h b/drivers/gpu/drm/lima/lima_mmu.h
index 8c78319bcc8e..4f8ccbebcba1 100644
--- a/drivers/gpu/drm/lima/lima_mmu.h
+++ b/drivers/gpu/drm/lima/lima_mmu.h
@@ -10,6 +10,7 @@ struct lima_vm;
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
+void lima_mmu_flush_tlb(struct lima_ip *ip);
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
void lima_mmu_page_fault_resume(struct lima_ip *ip);
diff --git a/drivers/gpu/drm/lima/lima_regs.h b/drivers/gpu/drm/lima/lima_regs.h
index ace8ecefbe90..0124c90e0153 100644
--- a/drivers/gpu/drm/lima/lima_regs.h
+++ b/drivers/gpu/drm/lima/lima_regs.h
@@ -239,6 +239,7 @@
#define LIMA_MMU_STATUS_REPLAY_BUFFER_EMPTY BIT(4)
#define LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE BIT(5)
#define LIMA_MMU_STATUS_BUS_ID(x) ((x >> 6) & 0x1F)
+#define LIMA_MMU_STATUS_STALL_NOT_ACTIVE BIT(31)
#define LIMA_MMU_COMMAND 0x0008
#define LIMA_MMU_COMMAND_ENABLE_PAGING 0x00
#define LIMA_MMU_COMMAND_DISABLE_PAGING 0x01
diff --git a/drivers/gpu/drm/lima/lima_sched.c b/drivers/gpu/drm/lima/lima_sched.c
index b561dd05bd62..3886999b4533 100644
--- a/drivers/gpu/drm/lima/lima_sched.c
+++ b/drivers/gpu/drm/lima/lima_sched.c
@@ -313,6 +313,26 @@ static const struct drm_sched_backend_ops lima_sched_ops = {
.free_job = lima_sched_free_job,
};
+static void lima_sched_recover_work(struct work_struct *work)
+{
+ struct lima_sched_pipe *pipe =
+ container_of(work, struct lima_sched_pipe, recover_work);
+ int i;
+
+ for (i = 0; i < pipe->num_l2_cache; i++)
+ lima_l2_cache_flush(pipe->l2_cache[i]);
+
+ if (pipe->bcast_mmu) {
+ lima_mmu_flush_tlb(pipe->bcast_mmu);
+ } else {
+ for (i = 0; i < pipe->num_mmu; i++)
+ lima_mmu_flush_tlb(pipe->mmu[i]);
+ }
+
+ if (pipe->task_recover(pipe))
+ drm_sched_fault(&pipe->base);
+}
+
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
unsigned int timeout = lima_sched_timeout_ms > 0 ?
@@ -321,6 +341,8 @@ int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
+ INIT_WORK(&pipe->recover_work, lima_sched_recover_work);
+
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0,
msecs_to_jiffies(timeout), name);
}
@@ -332,11 +354,14 @@ void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
- if (pipe->error)
- drm_sched_fault(&pipe->base);
- else {
- struct lima_sched_task *task = pipe->current_task;
-
+ struct lima_sched_task *task = pipe->current_task;
+
+ if (pipe->error) {
+ if (task && task->recoverable)
+ schedule_work(&pipe->recover_work);
+ else
+ drm_sched_fault(&pipe->base);
+ } else {
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
}
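
Pulling the scheduler and GP hunks together, the intended recovery flow, as read from this series, is summarized below:

/*
 * Recovery flow sketch (GP tasks are flagged recoverable by
 * lima_gp_task_run() when a heap BO backs the PLBU allocation):
 *
 *   PLBU out-of-mem IRQ          -> pipe->error set, task completion raised
 *   lima_sched_pipe_task_done()  -> schedule_work(&pipe->recover_work)
 *   lima_sched_recover_work()    -> flush L2 caches and MMU TLBs,
 *                                   then pipe->task_recover()
 *   lima_gp_task_recover()       -> grow the heap if exhausted, resume
 *                                   the PLBU from the old end address
 *   any failure along the way    -> drm_sched_fault(), as before
 */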
diff --git a/drivers/gpu/drm/lima/lima_sched.h b/drivers/gpu/drm/lima/lima_sched.h
index 1d814fecbcc0..d64393fb50a9 100644
--- a/drivers/gpu/drm/lima/lima_sched.h
+++ b/drivers/gpu/drm/lima/lima_sched.h
@@ -20,6 +20,9 @@ struct lima_sched_task {
struct lima_bo **bos;
int num_bos;
+ bool recoverable;
+ struct lima_bo *heap;
+
/* pipe fence */
struct dma_fence *fence;
};
@@ -68,6 +71,9 @@ struct lima_sched_pipe {
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
+ int (*task_recover)(struct lima_sched_pipe *pipe);
+
+ struct work_struct recover_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
diff --git a/drivers/gpu/drm/lima/lima_vm.c b/drivers/gpu/drm/lima/lima_vm.c
index 840e2350d872..5b92fb82674a 100644
--- a/drivers/gpu/drm/lima/lima_vm.c
+++ b/drivers/gpu/drm/lima/lima_vm.c
@@ -155,6 +155,7 @@ err_out0:
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va;
+ u32 size;
mutex_lock(&bo->lock);
@@ -166,8 +167,9 @@ void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
mutex_lock(&vm->lock);
+ size = bo->heap_size ? bo->heap_size : bo_va->node.size;
lima_vm_unmap_range(vm, bo_va->node.start,
- bo_va->node.start + bo_va->node.size - 1);
+ bo_va->node.start + size - 1);
drm_mm_remove_node(&bo_va->node);
@@ -277,3 +279,45 @@ void lima_vm_print(struct lima_vm *vm)
}
}
}
+
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff)
+{
+ struct lima_bo_va *bo_va;
+ struct sg_dma_page_iter sg_iter;
+ int offset = 0, err;
+ u32 base;
+
+ mutex_lock(&bo->lock);
+
+ bo_va = lima_vm_bo_find(vm, bo);
+ if (!bo_va) {
+ err = -ENOENT;
+ goto err_out0;
+ }
+
+ mutex_lock(&vm->lock);
+
+ base = bo_va->node.start + (pageoff << PAGE_SHIFT);
+ for_each_sg_dma_page(bo->base.sgt->sgl, &sg_iter,
+ bo->base.sgt->nents, pageoff) {
+ err = lima_vm_map_page(vm, sg_page_iter_dma_address(&sg_iter),
+ base + offset);
+ if (err)
+ goto err_out1;
+
+ offset += PAGE_SIZE;
+ }
+
+ mutex_unlock(&vm->lock);
+
+ mutex_unlock(&bo->lock);
+ return 0;
+
+err_out1:
+ if (offset)
+ lima_vm_unmap_range(vm, base, base + offset - 1);
+ mutex_unlock(&vm->lock);
+err_out0:
+ mutex_unlock(&bo->lock);
+ return err;
+}
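
lima_vm_map_bo() deliberately starts at pageoff rather than at zero, so the heap grow path maps only the freshly allocated tail, and partial mappings are unwound on error. A minimal usage sketch matching the call site in lima_heap_alloc(); the helper name is hypothetical:

static int example_map_grown_tail(struct lima_vm *vm, struct lima_bo *bo,
				  size_t old_size)
{
	/* old_size is page aligned; map only the pages added by this grow */
	return lima_vm_map_bo(vm, bo, old_size >> PAGE_SHIFT);
}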
diff --git a/drivers/gpu/drm/lima/lima_vm.h b/drivers/gpu/drm/lima/lima_vm.h
index e0bdedcf14dd..22aeec77d84d 100644
--- a/drivers/gpu/drm/lima/lima_vm.h
+++ b/drivers/gpu/drm/lima/lima_vm.h
@@ -58,5 +58,6 @@ static inline void lima_vm_put(struct lima_vm *vm)
}
void lima_vm_print(struct lima_vm *vm);
+int lima_vm_map_bo(struct lima_vm *vm, struct lima_bo *bo, int pageoff);
#endif
diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c
index 9008ddcfc528..f28cb7a576ba 100644
--- a/drivers/gpu/drm/mcde/mcde_drv.c
+++ b/drivers/gpu/drm/mcde/mcde_drv.c
@@ -20,11 +20,11 @@
* input formats including most variants of RGB and YUV.
*
* The hardware has four display pipes, and the layout is a little
- * bit like this:
+ * bit like this::
*
- * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
- * External 0..5 0..3 A,B, 3 x DSI bridge
- * source 0..9 C0,C1 2 x DPI
+ * Memory -> Overlay -> Channel -> FIFO -> 5 formatters -> DSI/DPI
+ * External 0..5 0..3 A,B, 3 x DSI bridge
+ * source 0..9 C0,C1 2 x DPI
*
* FIFOs A and B are for LCD and HDMI while FIFO CO/C1 are for
* panels with embedded buffer.
@@ -43,6 +43,7 @@
* to change as we exploit more of the hardware capabilities.
*
* TODO:
+ *
* - Enabled damaged rectangles using drm_plane_enable_fb_damage_clips()
* so we can selectively just transmit the damaged area to a
* command-only display.
diff --git a/drivers/gpu/drm/mcde/mcde_dsi.c b/drivers/gpu/drm/mcde/mcde_dsi.c
index bb6528b01cd0..7af5ebb0c436 100644
--- a/drivers/gpu/drm/mcde/mcde_dsi.c
+++ b/drivers/gpu/drm/mcde/mcde_dsi.c
@@ -986,7 +986,8 @@ static void mcde_dsi_bridge_disable(struct drm_bridge *bridge)
clk_disable_unprepare(d->lp_clk);
}
-static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
+static int mcde_dsi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mcde_dsi *d = bridge_to_mcde_dsi(bridge);
struct drm_device *drm = bridge->dev;
@@ -998,7 +999,7 @@ static int mcde_dsi_bridge_attach(struct drm_bridge *bridge)
}
/* Attach the DSI bridge to the output (panel etc) bridge */
- ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge);
+ ret = drm_bridge_attach(bridge->encoder, d->bridge_out, bridge, flags);
if (ret) {
dev_err(d->dev, "failed to attach the DSI bridge\n");
return ret;
diff --git a/drivers/gpu/drm/mediatek/mtk_dpi.c b/drivers/gpu/drm/mediatek/mtk_dpi.c
index 087f5ce732e1..4f0ce4cd5b8c 100644
--- a/drivers/gpu/drm/mediatek/mtk_dpi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dpi.c
@@ -607,7 +607,7 @@ static int mtk_dpi_bind(struct device *dev, struct device *master, void *data)
/* Currently DPI0 is fixed to be driven by OVL1 */
dpi->encoder.possible_crtcs = BIT(1);
- ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL);
+ ret = drm_bridge_attach(&dpi->encoder, dpi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "Failed to attach bridge: %d\n", ret);
goto err_cleanup;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 0dfcd1787e65..fe85e487e477 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -486,6 +486,7 @@ static void mtk_drm_crtc_hw_config(struct mtk_drm_crtc *mtk_crtc)
}
#if IS_REACHABLE(CONFIG_MTK_CMDQ)
if (mtk_crtc->cmdq_client) {
+ mbox_flush(mtk_crtc->cmdq_client->chan, 2000);
cmdq_handle = cmdq_pkt_create(mtk_crtc->cmdq_client, PAGE_SIZE);
cmdq_pkt_clear_event(cmdq_handle, mtk_crtc->cmdq_event);
cmdq_pkt_wfe(cmdq_handle, mtk_crtc->cmdq_event);
@@ -636,10 +637,18 @@ static const struct drm_crtc_helper_funcs mtk_crtc_helper_funcs = {
static int mtk_drm_crtc_init(struct drm_device *drm,
struct mtk_drm_crtc *mtk_crtc,
- struct drm_plane *primary,
- struct drm_plane *cursor, unsigned int pipe)
+ unsigned int pipe)
{
- int ret;
+ struct drm_plane *primary = NULL;
+ struct drm_plane *cursor = NULL;
+ int i, ret;
+
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
+ if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_PRIMARY)
+ primary = &mtk_crtc->planes[i];
+ else if (mtk_crtc->planes[i].type == DRM_PLANE_TYPE_CURSOR)
+ cursor = &mtk_crtc->planes[i];
+ }
ret = drm_crtc_init_with_planes(drm, &mtk_crtc->base, primary, cursor,
&mtk_crtc_funcs, NULL);
@@ -689,11 +698,12 @@ static int mtk_drm_crtc_num_comp_planes(struct mtk_drm_crtc *mtk_crtc,
}
static inline
-enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx)
+enum drm_plane_type mtk_drm_crtc_plane_type(unsigned int plane_idx,
+ unsigned int num_planes)
{
if (plane_idx == 0)
return DRM_PLANE_TYPE_PRIMARY;
- else if (plane_idx == 1)
+ else if (plane_idx == (num_planes - 1))
return DRM_PLANE_TYPE_CURSOR;
else
return DRM_PLANE_TYPE_OVERLAY;
@@ -712,7 +722,8 @@ static int mtk_drm_crtc_init_comp_planes(struct drm_device *drm_dev,
ret = mtk_plane_init(drm_dev,
&mtk_crtc->planes[mtk_crtc->layer_nr],
BIT(pipe),
- mtk_drm_crtc_plane_type(mtk_crtc->layer_nr),
+ mtk_drm_crtc_plane_type(mtk_crtc->layer_nr,
+ num_planes),
mtk_ddp_comp_supported_rotations(comp));
if (ret)
return ret;
@@ -807,9 +818,7 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
return ret;
}
- ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
- NULL, pipe);
+ ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, pipe);
if (ret < 0)
return ret;
@@ -828,7 +837,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
drm_crtc_index(&mtk_crtc->base));
mtk_crtc->cmdq_client = NULL;
}
- ret = of_property_read_u32_index(dev->of_node, "mediatek,gce-events",
+ ret = of_property_read_u32_index(priv->mutex_node,
+ "mediatek,gce-events",
drm_crtc_index(&mtk_crtc->base),
&mtk_crtc->cmdq_event);
if (ret)
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
index 1f5a112bb034..57c88de9a329 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.c
@@ -471,6 +471,7 @@ int mtk_ddp_comp_init(struct device *dev, struct device_node *node,
/* Only DMA capable components need the LARB property */
comp->larb_dev = NULL;
if (type != MTK_DISP_OVL &&
+ type != MTK_DISP_OVL_2L &&
type != MTK_DISP_RDMA &&
type != MTK_DISP_WDMA)
return 0;
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_plane.c b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
index 914cc7619cd7..c2bd683a87c8 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_plane.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_plane.c
@@ -80,6 +80,7 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
struct drm_crtc_state *crtc_state;
+ int ret;
if (plane != state->crtc->cursor)
return -EINVAL;
@@ -90,6 +91,11 @@ static int mtk_plane_atomic_async_check(struct drm_plane *plane,
if (!plane->state->fb)
return -EINVAL;
+ ret = mtk_drm_crtc_plane_check(state->crtc, plane,
+ to_mtk_plane_state(state));
+ if (ret)
+ return ret;
+
if (state->state)
crtc_state = drm_atomic_get_existing_crtc_state(state->state,
state->crtc);
@@ -115,6 +121,7 @@ static void mtk_plane_atomic_async_update(struct drm_plane *plane,
plane->state->src_y = new_state->src_y;
plane->state->src_h = new_state->src_h;
plane->state->src_w = new_state->src_w;
+ swap(plane->state->fb, new_state->fb);
state->pending.async_dirty = true;
mtk_drm_crtc_async_update(new_state->crtc, plane, new_state);
diff --git a/drivers/gpu/drm/mediatek/mtk_dsi.c b/drivers/gpu/drm/mediatek/mtk_dsi.c
index 5fa1073cf26b..0ede69830a9d 100644
--- a/drivers/gpu/drm/mediatek/mtk_dsi.c
+++ b/drivers/gpu/drm/mediatek/mtk_dsi.c
@@ -904,7 +904,7 @@ static int mtk_dsi_create_conn_enc(struct drm_device *drm, struct mtk_dsi *dsi)
/* If there's a bridge, attach to it and let it create the connector */
if (dsi->bridge) {
- ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(&dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
DRM_ERROR("Failed to attach bridge to drm\n");
goto err_encoder_cleanup;
diff --git a/drivers/gpu/drm/mediatek/mtk_hdmi.c b/drivers/gpu/drm/mediatek/mtk_hdmi.c
index 5e4a4dbda443..a8b20557539b 100644
--- a/drivers/gpu/drm/mediatek/mtk_hdmi.c
+++ b/drivers/gpu/drm/mediatek/mtk_hdmi.c
@@ -1297,11 +1297,17 @@ static void mtk_hdmi_hpd_event(bool hpd, struct device *dev)
* Bridge callbacks
*/
-static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
+static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct mtk_hdmi *hdmi = hdmi_ctx_from_bridge(bridge);
int ret;
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
+
ret = drm_connector_init_with_ddc(bridge->encoder->dev, &hdmi->conn,
&mtk_hdmi_connector_funcs,
DRM_MODE_CONNECTOR_HDMIA,
@@ -1326,7 +1332,7 @@ static int mtk_hdmi_bridge_attach(struct drm_bridge *bridge)
if (hdmi->next_bridge) {
ret = drm_bridge_attach(bridge->encoder, hdmi->next_bridge,
- bridge);
+ bridge, flags);
if (ret) {
dev_err(hdmi->dev,
"Failed to attach external bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/meson/meson_dw_hdmi.c b/drivers/gpu/drm/meson/meson_dw_hdmi.c
index 3bb7ffe5fc39..e8c94915a4fc 100644
--- a/drivers/gpu/drm/meson/meson_dw_hdmi.c
+++ b/drivers/gpu/drm/meson/meson_dw_hdmi.c
@@ -16,6 +16,7 @@
#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
#include <drm/drm_device.h>
#include <drm/drm_edid.h>
#include <drm/drm_probe_helper.h>
@@ -135,6 +136,7 @@ struct meson_dw_hdmi_data {
struct meson_dw_hdmi {
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct dw_hdmi_plat_data dw_plat_data;
struct meson_drm *priv;
struct device *dev;
@@ -148,9 +150,12 @@ struct meson_dw_hdmi {
struct regulator *hdmi_supply;
u32 irq_stat;
struct dw_hdmi *hdmi;
+ unsigned long output_bus_fmt;
};
#define encoder_to_meson_dw_hdmi(x) \
container_of(x, struct meson_dw_hdmi, encoder)
+#define bridge_to_meson_dw_hdmi(x) \
+ container_of(x, struct meson_dw_hdmi, bridge)
static inline int dw_hdmi_is_compatible(struct meson_dw_hdmi *dw_hdmi,
const char *compat)
@@ -297,6 +302,10 @@ static void meson_hdmi_phy_setup_mode(struct meson_dw_hdmi *dw_hdmi,
struct meson_drm *priv = dw_hdmi->priv;
unsigned int pixel_clock = mode->clock;
+ /* For YUV420, the pixel clock is half the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ pixel_clock /= 2;
+
if (dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(dw_hdmi, "amlogic,meson-gxm-dw-hdmi")) {
if (pixel_clock >= 371250) {
@@ -368,29 +377,40 @@ static inline void meson_dw_hdmi_phy_reset(struct meson_dw_hdmi *dw_hdmi)
}
static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
- struct drm_display_mode *mode)
+ const struct drm_display_mode *mode)
{
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half the venc clock */
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
if (!vic) {
- meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, vclk_freq,
- vclk_freq, vclk_freq, false);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_DMT, phy_freq,
+ vclk_freq, vclk_freq, vclk_freq, false);
return;
}
+ /* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC double pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -398,11 +418,11 @@ static void dw_hdmi_set_vclk(struct meson_dw_hdmi *dw_hdmi,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- DRM_DEBUG_DRIVER("vclk:%d venc=%d hdmi=%d enci=%d\n",
- vclk_freq, venc_freq, hdmi_freq,
+ DRM_DEBUG_DRIVER("vclk:%d phy=%d venc=%d hdmi=%d enci=%d\n",
+ phy_freq, vclk_freq, venc_freq, hdmi_freq,
priv->venc.hdmi_use_enci);
- meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, vclk_freq,
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
venc_freq, hdmi_freq, priv->venc.hdmi_use_enci);
}
@@ -437,8 +457,9 @@ static int dw_hdmi_phy_init(struct dw_hdmi *hdmi, void *data,
/* Enable normal output to PHY */
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_BIST_CNTL, BIT(12));
- /* TMDS pattern setup (TOFIX Handle the YUV420 case) */
- if (mode->clock > 340000) {
+ /* TMDS pattern setup */
+ if (mode->clock > 340000 &&
+ dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_YUV8_1X24) {
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_01,
0);
dw_hdmi->data->top_write(dw_hdmi, HDMITX_TOP_TMDS_CLK_PTTN_23,
@@ -613,6 +634,8 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
const struct drm_display_mode *mode)
{
struct meson_drm *priv = connector->dev->dev_private;
+ bool is_hdmi2_sink = connector->display_info.hdmi.scdc.supported;
+ unsigned int phy_freq;
unsigned int vclk_freq;
unsigned int venc_freq;
unsigned int hdmi_freq;
@@ -621,9 +644,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
DRM_DEBUG_DRIVER("Modeline " DRM_MODE_FMT "\n", DRM_MODE_ARG(mode));
- /* If sink max TMDS clock, we reject the mode */
+ /* If sink does not support 540MHz, reject the non-420 HDMI2 modes */
if (connector->display_info.max_tmds_clock &&
- mode->clock > connector->display_info.max_tmds_clock)
+ mode->clock > connector->display_info.max_tmds_clock &&
+ !drm_mode_is_420_only(&connector->display_info, mode) &&
+ !drm_mode_is_420_also(&connector->display_info, mode))
return MODE_BAD;
/* Check against non-VIC supported modes */
@@ -639,6 +664,15 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
vclk_freq = mode->clock;
+ /* For YUV420, the pixel clock is half the venc clock */
+ if (drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
+ vclk_freq /= 2;
+
+ /* TMDS clock is pixel_clock * 10 */
+ phy_freq = vclk_freq * 10;
+
/* 480i/576i needs global pixel doubling */
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
vclk_freq *= 2;
@@ -646,8 +680,11 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
venc_freq = vclk_freq;
hdmi_freq = vclk_freq;
- /* VENC double pixels for 1080i and 720p modes */
- if (meson_venc_hdmi_venc_repeat(vic))
+ /* VENC double pixels for 1080i, 720p and YUV420 modes */
+ if (meson_venc_hdmi_venc_repeat(vic) ||
+ drm_mode_is_420_only(&connector->display_info, mode) ||
+ (!is_hdmi2_sink &&
+ drm_mode_is_420_also(&connector->display_info, mode)))
venc_freq *= 2;
vclk_freq = max(venc_freq, hdmi_freq);
@@ -655,14 +692,19 @@ dw_hdmi_mode_valid(struct drm_connector *connector,
if (mode->flags & DRM_MODE_FLAG_DBLCLK)
venc_freq /= 2;
- dev_dbg(connector->dev->dev, "%s: vclk:%d venc=%d hdmi=%d\n", __func__,
- vclk_freq, venc_freq, hdmi_freq);
+ dev_dbg(connector->dev->dev, "%s: vclk:%d phy=%d venc=%d hdmi=%d\n",
+ __func__, phy_freq, vclk_freq, venc_freq, hdmi_freq);
- return meson_vclk_vic_supported_freq(vclk_freq);
+ return meson_vclk_vic_supported_freq(phy_freq, vclk_freq);
}
/* Encoder */
+static const u32 meson_dw_hdmi_out_bus_fmts[] = {
+ MEDIA_BUS_FMT_YUV8_1X24,
+ MEDIA_BUS_FMT_UYYVYY8_0_5X24,
+};
+
static void meson_venc_hdmi_encoder_destroy(struct drm_encoder *encoder)
{
drm_encoder_cleanup(encoder);
@@ -672,16 +714,54 @@ static const struct drm_encoder_funcs meson_venc_hdmi_encoder_funcs = {
.destroy = meson_venc_hdmi_encoder_destroy,
};
-static int meson_venc_hdmi_encoder_atomic_check(struct drm_encoder *encoder,
+static u32 *
+meson_venc_hdmi_encoder_get_inp_bus_fmts(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts)
+{
+ u32 *input_fmts = NULL;
+ int i;
+
+ *num_input_fmts = 0;
+
+ for (i = 0; i < ARRAY_SIZE(meson_dw_hdmi_out_bus_fmts); ++i) {
+ if (output_fmt == meson_dw_hdmi_out_bus_fmts[i]) {
+ *num_input_fmts = 1;
+ input_fmts = kcalloc(*num_input_fmts,
+ sizeof(*input_fmts),
+ GFP_KERNEL);
+ if (!input_fmts)
+ return NULL;
+
+ input_fmts[0] = output_fmt;
+
+ break;
+ }
+ }
+
+ return input_fmts;
+}
+
+static int meson_venc_hdmi_encoder_atomic_check(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
+
+ dw_hdmi->output_bus_fmt = bridge_state->output_bus_cfg.format;
+
+ DRM_DEBUG_DRIVER("output_bus_fmt %lx\n", dw_hdmi->output_bus_fmt);
+
return 0;
}
-static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_disable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("\n");
@@ -693,9 +773,9 @@ static void meson_venc_hdmi_encoder_disable(struct drm_encoder *encoder)
writel_relaxed(0, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
+static void meson_venc_hdmi_encoder_enable(struct drm_bridge *bridge)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
DRM_DEBUG_DRIVER("%s\n", priv->venc.hdmi_use_enci ? "VENCI" : "VENCP");
@@ -706,32 +786,47 @@ static void meson_venc_hdmi_encoder_enable(struct drm_encoder *encoder)
writel_relaxed(1, priv->io_base + _REG(ENCP_VIDEO_EN));
}
-static void meson_venc_hdmi_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void meson_venc_hdmi_encoder_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct meson_dw_hdmi *dw_hdmi = encoder_to_meson_dw_hdmi(encoder);
+ struct meson_dw_hdmi *dw_hdmi = bridge_to_meson_dw_hdmi(bridge);
struct meson_drm *priv = dw_hdmi->priv;
int vic = drm_match_cea_mode(mode);
+ unsigned int ycrcb_map = VPU_HDMI_OUTPUT_CBYCR;
+ bool yuv420_mode = false;
DRM_DEBUG_DRIVER("\"%s\" vic %d\n", mode->name, vic);
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24) {
+ ycrcb_map = VPU_HDMI_OUTPUT_CRYCB;
+ yuv420_mode = true;
+ }
+
/* VENC + VENC-DVI Mode setup */
- meson_venc_hdmi_mode_set(priv, vic, mode);
+ meson_venc_hdmi_mode_set(priv, vic, ycrcb_map, yuv420_mode, mode);
/* VCLK Set clock */
dw_hdmi_set_vclk(dw_hdmi, mode);
- /* Setup YUV444 to HDMI-TX, no 10bit diphering */
- writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ if (dw_hdmi->output_bus_fmt == MEDIA_BUS_FMT_UYYVYY8_0_5X24)
+ /* Set up YUV420 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(2 | (2 << 2),
+ priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
+ else
+ /* Set up YUV444 to HDMI-TX, no 10bit dithering */
+ writel_relaxed(0, priv->io_base + _REG(VPU_HDMI_FMT_CTRL));
}
-static const struct drm_encoder_helper_funcs
- meson_venc_hdmi_encoder_helper_funcs = {
- .atomic_check = meson_venc_hdmi_encoder_atomic_check,
- .disable = meson_venc_hdmi_encoder_disable,
- .enable = meson_venc_hdmi_encoder_enable,
- .mode_set = meson_venc_hdmi_encoder_mode_set,
+static const struct drm_bridge_funcs meson_venc_hdmi_encoder_bridge_funcs = {
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_get_input_bus_fmts = meson_venc_hdmi_encoder_get_inp_bus_fmts,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_check = meson_venc_hdmi_encoder_atomic_check,
+ .enable = meson_venc_hdmi_encoder_enable,
+ .disable = meson_venc_hdmi_encoder_disable,
+ .mode_set = meson_venc_hdmi_encoder_mode_set,
};
/* DW HDMI Regmap */
@@ -852,6 +947,7 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
struct drm_device *drm = data;
struct meson_drm *priv = drm->dev_private;
struct dw_hdmi_plat_data *dw_plat_data;
+ struct drm_bridge *next_bridge;
struct drm_encoder *encoder;
struct resource *res;
int irq;
@@ -953,8 +1049,6 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
/* Encoder */
- drm_encoder_helper_add(encoder, &meson_venc_hdmi_encoder_helper_funcs);
-
ret = drm_encoder_init(drm, encoder, &meson_venc_hdmi_encoder_funcs,
DRM_MODE_ENCODER_TMDS, "meson_hdmi");
if (ret) {
@@ -962,6 +1056,9 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
return ret;
}
+ meson_dw_hdmi->bridge.funcs = &meson_venc_hdmi_encoder_bridge_funcs;
+ drm_bridge_attach(encoder, &meson_dw_hdmi->bridge, NULL, 0);
+
encoder->possible_crtcs = BIT(0);
DRM_DEBUG_DRIVER("encoder initialized\n");
@@ -974,8 +1071,8 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
dw_plat_data->phy_ops = &meson_dw_hdmi_phy_ops;
dw_plat_data->phy_name = "meson_dw_hdmi_phy";
dw_plat_data->phy_data = meson_dw_hdmi;
- dw_plat_data->input_bus_format = MEDIA_BUS_FMT_YUV8_1X24;
dw_plat_data->input_bus_encoding = V4L2_YCBCR_ENC_709;
+ dw_plat_data->ycbcr_420_allowed = true;
if (dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxl-dw-hdmi") ||
dw_hdmi_is_compatible(meson_dw_hdmi, "amlogic,meson-gxm-dw-hdmi") ||
@@ -984,11 +1081,16 @@ static int meson_dw_hdmi_bind(struct device *dev, struct device *master,
platform_set_drvdata(pdev, meson_dw_hdmi);
- meson_dw_hdmi->hdmi = dw_hdmi_bind(pdev, encoder,
- &meson_dw_hdmi->dw_plat_data);
+ meson_dw_hdmi->hdmi = dw_hdmi_probe(pdev,
+ &meson_dw_hdmi->dw_plat_data);
if (IS_ERR(meson_dw_hdmi->hdmi))
return PTR_ERR(meson_dw_hdmi->hdmi);
+ next_bridge = of_drm_find_bridge(pdev->dev.of_node);
+ if (next_bridge)
+ drm_bridge_attach(encoder, next_bridge,
+ &meson_dw_hdmi->bridge, 0);
+
DRM_DEBUG_DRIVER("HDMI controller initialized\n");
return 0;
diff --git a/drivers/gpu/drm/meson/meson_vclk.c b/drivers/gpu/drm/meson/meson_vclk.c
index f690793ae2d5..fdf26dac9fa8 100644
--- a/drivers/gpu/drm/meson/meson_vclk.c
+++ b/drivers/gpu/drm/meson/meson_vclk.c
@@ -354,12 +354,17 @@ enum {
/* 2970 /1 /1 /1 /5 /2 => /1 /1 */
MESON_VCLK_HDMI_297000,
/* 5940 /1 /1 /2 /5 /1 => /1 /1 */
- MESON_VCLK_HDMI_594000
+ MESON_VCLK_HDMI_594000,
+/* 2970 /1 /1 /1 /5 /1 => /1 /2 */
+ MESON_VCLK_HDMI_594000_YUV420,
};
struct meson_vclk_params {
+ unsigned int pll_freq;
+ unsigned int phy_freq;
+ unsigned int vclk_freq;
+ unsigned int venc_freq;
unsigned int pixel_freq;
- unsigned int pll_base_freq;
unsigned int pll_od1;
unsigned int pll_od2;
unsigned int pll_od3;
@@ -367,8 +372,11 @@ struct meson_vclk_params {
unsigned int vclk_div;
} params[] = {
[MESON_VCLK_HDMI_ENCI_54000] = {
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
.pixel_freq = 54000,
- .pll_base_freq = 4320000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -376,8 +384,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_54000] = {
- .pixel_freq = 54000,
- .pll_base_freq = 4320000,
+ .pll_freq = 4320000,
+ .phy_freq = 270000,
+ .vclk_freq = 54000,
+ .venc_freq = 54000,
+ .pixel_freq = 27000,
.pll_od1 = 4,
.pll_od2 = 4,
.pll_od3 = 1,
@@ -385,8 +396,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_DDR_148500] = {
- .pixel_freq = 148500,
- .pll_base_freq = 2970000,
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
+ .pixel_freq = 74250,
.pll_od1 = 4,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -394,8 +408,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_74250] = {
+ .pll_freq = 2970000,
+ .phy_freq = 742500,
+ .vclk_freq = 74250,
+ .venc_freq = 74250,
.pixel_freq = 74250,
- .pll_base_freq = 2970000,
.pll_od1 = 2,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -403,8 +420,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_148500] = {
+ .pll_freq = 2970000,
+ .phy_freq = 1485000,
+ .vclk_freq = 148500,
+ .venc_freq = 148500,
.pixel_freq = 148500,
- .pll_base_freq = 2970000,
.pll_od1 = 1,
.pll_od2 = 2,
.pll_od3 = 2,
@@ -412,8 +432,11 @@ struct meson_vclk_params {
.vclk_div = 1,
},
[MESON_VCLK_HDMI_297000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .venc_freq = 297000,
+ .vclk_freq = 297000,
.pixel_freq = 297000,
- .pll_base_freq = 5940000,
.pll_od1 = 2,
.pll_od2 = 1,
.pll_od3 = 1,
@@ -421,14 +444,29 @@ struct meson_vclk_params {
.vclk_div = 2,
},
[MESON_VCLK_HDMI_594000] = {
+ .pll_freq = 5940000,
+ .phy_freq = 5940000,
+ .venc_freq = 594000,
+ .vclk_freq = 594000,
.pixel_freq = 594000,
- .pll_base_freq = 5940000,
.pll_od1 = 1,
.pll_od2 = 1,
.pll_od3 = 2,
.vid_pll_div = VID_PLL_DIV_5,
.vclk_div = 1,
},
+ [MESON_VCLK_HDMI_594000_YUV420] = {
+ .pll_freq = 5940000,
+ .phy_freq = 2970000,
+ .venc_freq = 594000,
+ .vclk_freq = 594000,
+ .pixel_freq = 297000,
+ .pll_od1 = 2,
+ .pll_od2 = 1,
+ .pll_od3 = 1,
+ .vid_pll_div = VID_PLL_DIV_5,
+ .vclk_div = 1,
+ },
{ /* sentinel */ },
};
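
A worked cross-check of the new 4:2:0 entry (values in kHz), assuming the PHY taps the PLL after od1 and od2 (consistent with the OD2 note in the next hunk) while the video path continues through od3, vid_pll_div and vclk_div:

	5940000 (pll_freq) / 2 (od1) / 1 (od2)                    = 2970000 = phy_freq
	2970000 (phy) / 1 (od3) / 5 (vid_pll_div) / 1 (vclk_div)  =  594000 = vclk_freq = venc_freq
	 594000 (vclk) / 2 (4:2:0 halves the pixel rate)          =  297000 = pixel_freq

The halved PHY and pixel rates are what let 3840x2160@60 fit the TMDS bit-rate budget in YUV420.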
@@ -701,6 +739,7 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
unsigned int od, m, frac, od1, od2, od3;
if (meson_hdmi_pll_find_params(priv, pll_freq, &m, &frac, &od)) {
+ /* OD2 goes to the PHY, and needs to be *10, so keep OD3=1 */
od3 = 1;
if (od < 4) {
od1 = 2;
@@ -723,21 +762,28 @@ static void meson_hdmi_pll_generic_set(struct meson_drm *priv,
}
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq)
+meson_vclk_vic_supported_freq(unsigned int phy_freq,
+ unsigned int vclk_freq)
{
int i;
- DRM_DEBUG_DRIVER("freq = %d\n", freq);
+ DRM_DEBUG_DRIVER("phy_freq = %d vclk_freq = %d\n",
+ phy_freq, vclk_freq);
for (i = 0 ; params[i].pixel_freq ; ++i) {
DRM_DEBUG_DRIVER("i = %d pixel_freq = %d alt = %d\n",
i, params[i].pixel_freq,
FREQ_1000_1001(params[i].pixel_freq));
+ DRM_DEBUG_DRIVER("i = %d phy_freq = %d alt = %d\n",
+ i, params[i].phy_freq,
+ FREQ_1000_1001(params[i].phy_freq / 10) * 10);
/* Match strict frequency */
- if (freq == params[i].pixel_freq)
+ if (phy_freq == params[i].phy_freq &&
+ vclk_freq == params[i].vclk_freq)
return MODE_OK;
/* Match 1000/1001 variant */
- if (freq == FREQ_1000_1001(params[i].pixel_freq))
+ if (phy_freq == (FREQ_1000_1001(params[i].phy_freq / 10) * 10) &&
+ vclk_freq == FREQ_1000_1001(params[i].vclk_freq))
return MODE_OK;
}
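
A worked example of the fractional match for the 594 MHz entry, assuming FREQ_1000_1001(f) computes f * 1000 / 1001 with rounding (the /10 ... *10 step keeps the scaling on the same 10 kHz grid the PHY table uses):

	vclk: FREQ_1000_1001(594000)            ~= 593407 kHz   (the 59.94 Hz variant)
	phy:  FREQ_1000_1001(5940000 / 10) * 10 ~= 5934070 kHz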
@@ -965,8 +1011,9 @@ static void meson_vclk_set(struct meson_drm *priv, unsigned int pll_base_freq,
}
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci)
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci)
{
bool vic_alternate_clock = false;
unsigned int freq;
@@ -986,7 +1033,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
* - venc_div = 1
* - encp encoder
*/
- meson_vclk_set(priv, vclk_freq * 10, 0, 0, 0,
+ meson_vclk_set(priv, phy_freq, 0, 0, 0,
VID_PLL_DIV_5, 2, 1, 1, false, false);
return;
}
@@ -1008,9 +1055,11 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
}
for (freq = 0 ; params[freq].pixel_freq ; ++freq) {
- if (vclk_freq == params[freq].pixel_freq ||
- vclk_freq == FREQ_1000_1001(params[freq].pixel_freq)) {
- if (vclk_freq != params[freq].pixel_freq)
+ if ((phy_freq == params[freq].phy_freq ||
+ phy_freq == FREQ_1000_1001(params[freq].phy_freq / 10) * 10) &&
+ (vclk_freq == params[freq].vclk_freq ||
+ vclk_freq == FREQ_1000_1001(params[freq].vclk_freq))) {
+ if (vclk_freq != params[freq].vclk_freq)
vic_alternate_clock = true;
else
vic_alternate_clock = false;
@@ -1039,7 +1088,7 @@ void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
return;
}
- meson_vclk_set(priv, params[freq].pll_base_freq,
+ meson_vclk_set(priv, params[freq].pll_freq,
params[freq].pll_od1, params[freq].pll_od2,
params[freq].pll_od3, params[freq].vid_pll_div,
params[freq].vclk_div, hdmi_tx_div, venc_div,
diff --git a/drivers/gpu/drm/meson/meson_vclk.h b/drivers/gpu/drm/meson/meson_vclk.h
index b62125540aef..aed0ab2efa71 100644
--- a/drivers/gpu/drm/meson/meson_vclk.h
+++ b/drivers/gpu/drm/meson/meson_vclk.h
@@ -25,10 +25,11 @@ enum {
enum drm_mode_status
meson_vclk_dmt_supported_freq(struct meson_drm *priv, unsigned int freq);
enum drm_mode_status
-meson_vclk_vic_supported_freq(unsigned int freq);
+meson_vclk_vic_supported_freq(unsigned int phy_freq, unsigned int vclk_freq);
void meson_vclk_setup(struct meson_drm *priv, unsigned int target,
- unsigned int vclk_freq, unsigned int venc_freq,
- unsigned int dac_freq, bool hdmi_use_enci);
+ unsigned int phy_freq, unsigned int vclk_freq,
+ unsigned int venc_freq, unsigned int dac_freq,
+ bool hdmi_use_enci);
#endif /* __MESON_VCLK_H */
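
With the widened prototype, an HDMI-path caller passes the TMDS PHY rate explicitly. A hedged sketch of such a call (the x10 TMDS ratio for 8-bit output and the 4:2:0 halving are assumptions about the caller, not part of this hunk; MESON_VCLK_TARGET_HDMI is taken from the existing target enum, yuv420_output is a hypothetical flag):

	unsigned int vclk_freq = mode->clock;		/* kHz */
	unsigned int venc_freq = mode->clock;
	unsigned int phy_freq = vclk_freq * 10;		/* 8-bit TMDS: 10x pixel clock */

	if (yuv420_output)
		phy_freq /= 2;				/* 4:2:0 halves the TMDS rate */

	meson_vclk_setup(priv, MESON_VCLK_TARGET_HDMI, phy_freq, vclk_freq,
			 venc_freq, venc_freq, priv->venc.hdmi_use_enci);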
diff --git a/drivers/gpu/drm/meson/meson_venc.c b/drivers/gpu/drm/meson/meson_venc.c
index 4efd7864d5bf..f93c725b6f02 100644
--- a/drivers/gpu/drm/meson/meson_venc.c
+++ b/drivers/gpu/drm/meson/meson_venc.c
@@ -946,7 +946,9 @@ bool meson_venc_hdmi_venc_repeat(int vic)
EXPORT_SYMBOL_GPL(meson_venc_hdmi_venc_repeat);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode)
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode)
{
union meson_hdmi_venc_mode *vmode = NULL;
union meson_hdmi_venc_mode vmode_dmt;
@@ -1528,14 +1530,14 @@ void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
reg |= VPU_HDMI_INV_VSYNC;
- /* Output data format: CbYCr */
- reg |= VPU_HDMI_OUTPUT_CBYCR;
+ /* Output data format */
+ reg |= ycrcb_map;
/*
* Write rate to the async FIFO between VENC and HDMI.
* One write every 2 wr_clk.
*/
- if (venc_repeat)
+ if (venc_repeat || yuv420_mode)
reg |= VPU_HDMI_WR_RATE(2);
/*
diff --git a/drivers/gpu/drm/meson/meson_venc.h b/drivers/gpu/drm/meson/meson_venc.h
index 576768bdd08d..9138255ffc9e 100644
--- a/drivers/gpu/drm/meson/meson_venc.h
+++ b/drivers/gpu/drm/meson/meson_venc.h
@@ -60,7 +60,9 @@ extern struct meson_cvbs_enci_mode meson_cvbs_enci_ntsc;
void meson_venci_cvbs_mode_set(struct meson_drm *priv,
struct meson_cvbs_enci_mode *mode);
void meson_venc_hdmi_mode_set(struct meson_drm *priv, int vic,
- struct drm_display_mode *mode);
+ unsigned int ycrcb_map,
+ bool yuv420_mode,
+ const struct drm_display_mode *mode);
unsigned int meson_venci_get_field(struct meson_drm *priv);
void meson_venc_enable_vsync(struct meson_drm *priv);
diff --git a/drivers/gpu/drm/meson/meson_venc_cvbs.c b/drivers/gpu/drm/meson/meson_venc_cvbs.c
index 1bd6b6d15ffb..541f9eb2a135 100644
--- a/drivers/gpu/drm/meson/meson_venc_cvbs.c
+++ b/drivers/gpu/drm/meson/meson_venc_cvbs.c
@@ -213,8 +213,10 @@ static void meson_venc_cvbs_encoder_mode_set(struct drm_encoder *encoder,
meson_venci_cvbs_mode_set(priv, meson_mode->enci);
/* Setup 27MHz vclk2 for ENCI and VDAC */
- meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS, MESON_VCLK_CVBS,
- MESON_VCLK_CVBS, MESON_VCLK_CVBS, true);
+ meson_vclk_setup(priv, MESON_VCLK_TARGET_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ MESON_VCLK_CVBS, MESON_VCLK_CVBS,
+ true);
}
}
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.h b/drivers/gpu/drm/mgag200/mgag200_drv.h
index aa32aad222c2..9691252d6233 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.h
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.h
@@ -95,7 +95,6 @@
#define MATROX_DPMS_CLEARED (-1)
#define to_mga_crtc(x) container_of(x, struct mga_crtc, base)
-#define to_mga_encoder(x) container_of(x, struct mga_encoder, base)
#define to_mga_connector(x) container_of(x, struct mga_connector, base)
struct mga_crtc {
@@ -110,12 +109,6 @@ struct mga_mode_info {
struct mga_crtc *crtc;
};
-struct mga_encoder {
- struct drm_encoder base;
- int last_dpms;
-};
-
-
struct mga_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
@@ -185,6 +178,8 @@ struct mga_device {
/* SE model number stored in reg 0x1e24 */
u32 unique_rev_id;
+
+ struct drm_encoder encoder;
};
static inline enum mga_type
diff --git a/drivers/gpu/drm/mgag200/mgag200_mode.c b/drivers/gpu/drm/mgag200/mgag200_mode.c
index 62a8e9ccb16d..d90e83959fca 100644
--- a/drivers/gpu/drm/mgag200/mgag200_mode.c
+++ b/drivers/gpu/drm/mgag200/mgag200_mode.c
@@ -15,6 +15,7 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
+#include <drm/drm_simple_kms_helper.h>
#include "mgag200_drv.h"
@@ -1449,76 +1450,6 @@ static void mga_crtc_init(struct mga_device *mdev)
drm_crtc_helper_add(&mga_crtc->base, &mga_helper_funcs);
}
-/*
- * The encoder comes after the CRTC in the output pipeline, but before
- * the connector. It's responsible for ensuring that the digital
- * stream is appropriately converted into the output format. Setup is
- * very simple in this case - all we have to do is inform qemu of the
- * colour depth in order to ensure that it displays appropriately
- */
-
-/*
- * These functions are analagous to those in the CRTC code, but are intended
- * to handle any encoder-specific limitations
- */
-static void mga_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
-{
-
-}
-
-static void mga_encoder_dpms(struct drm_encoder *encoder, int state)
-{
- return;
-}
-
-static void mga_encoder_prepare(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_commit(struct drm_encoder *encoder)
-{
-}
-
-static void mga_encoder_destroy(struct drm_encoder *encoder)
-{
- struct mga_encoder *mga_encoder = to_mga_encoder(encoder);
- drm_encoder_cleanup(encoder);
- kfree(mga_encoder);
-}
-
-static const struct drm_encoder_helper_funcs mga_encoder_helper_funcs = {
- .dpms = mga_encoder_dpms,
- .mode_set = mga_encoder_mode_set,
- .prepare = mga_encoder_prepare,
- .commit = mga_encoder_commit,
-};
-
-static const struct drm_encoder_funcs mga_encoder_encoder_funcs = {
- .destroy = mga_encoder_destroy,
-};
-
-static struct drm_encoder *mga_encoder_init(struct drm_device *dev)
-{
- struct drm_encoder *encoder;
- struct mga_encoder *mga_encoder;
-
- mga_encoder = kzalloc(sizeof(struct mga_encoder), GFP_KERNEL);
- if (!mga_encoder)
- return NULL;
-
- encoder = &mga_encoder->base;
- encoder->possible_crtcs = 0x1;
-
- drm_encoder_init(dev, encoder, &mga_encoder_encoder_funcs,
- DRM_MODE_ENCODER_DAC, NULL);
- drm_encoder_helper_add(encoder, &mga_encoder_helper_funcs);
-
- return encoder;
-}
-
-
static int mga_vga_get_modes(struct drm_connector *connector)
{
struct mga_connector *mga_connector = to_mga_connector(connector);
@@ -1686,8 +1617,9 @@ static struct drm_connector *mga_vga_init(struct drm_device *dev)
int mgag200_modeset_init(struct mga_device *mdev)
{
- struct drm_encoder *encoder;
+ struct drm_encoder *encoder = &mdev->encoder;
struct drm_connector *connector;
+ int ret;
mdev->mode_info.mode_config_initialized = true;
@@ -1698,11 +1630,15 @@ int mgag200_modeset_init(struct mga_device *mdev)
mga_crtc_init(mdev);
- encoder = mga_encoder_init(mdev->dev);
- if (!encoder) {
- DRM_ERROR("mga_encoder_init failed\n");
- return -1;
+ ret = drm_simple_encoder_init(mdev->dev, encoder,
+ DRM_MODE_ENCODER_DAC);
+ if (ret) {
+ drm_err(mdev->dev,
+ "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ return ret;
}
+ encoder->possible_crtcs = 0x1;
connector = mga_vga_init(mdev->dev);
if (!connector) {
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 983afeaee737..748cd379065f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -796,12 +796,41 @@ bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
return true;
}
+#define GBIF_CLIENT_HALT_MASK BIT(0)
+#define GBIF_ARB_HALT_MASK BIT(1)
+
+static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ if (!a6xx_has_gbif(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
+ 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return;
+ }
+
+ /* Halt new client requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
+
+ /* Halt all AXI requests on GBIF */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
+ spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
+ (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
+
+ /* The GBIF halt needs to be explicitly cleared */
+ gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
+}
+
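Both GBIF halts above follow the same write-then-poll-ack shape; factored out as a hedged sketch (halt_poll() is a hypothetical helper, spin_until() is the driver's existing busy-wait macro):

	static void halt_poll(struct msm_gpu *gpu, u32 mask)
	{
		/* Request the halt, then wait until hardware acks that mask */
		gpu_write(gpu, REG_A6XX_GBIF_HALT, mask);
		spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) & mask) == mask);
	}

The sequence is then halt_poll(gpu, GBIF_CLIENT_HALT_MASK), halt_poll(gpu, GBIF_ARB_HALT_MASK), followed by the explicit clearing write.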
/* Gracefully try to shut down the GMU and by extension the GPU */
static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- struct msm_gpu *gpu = &adreno_gpu->base;
u32 val;
/*
@@ -819,11 +848,7 @@ static void a6xx_gmu_shutdown(struct a6xx_gmu *gmu)
return;
}
- /* Clear the VBIF pipe before shutting down */
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf)
- == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+ a6xx_bus_clear_pending_transactions(adreno_gpu);
/* tell the GMU we want to slumber */
a6xx_gmu_notify_slumber(gmu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index daf07800cde0..68af24150de5 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -378,18 +378,6 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
int ret;
- /*
- * During a previous slumber, GBIF halt is asserted to ensure
- * no further transaction can go through GPU before GPU
- * headswitch is turned off.
- *
- * This halt is deasserted once headswitch goes off but
- * incase headswitch doesn't goes off clear GBIF halt
- * here to ensure GPU wake-up doesn't fail because of
- * halted GPU transactions.
- */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-
/* Make sure the GMU keeps the GPU on while we set it up */
a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
@@ -470,10 +458,12 @@ static int a6xx_hw_init(struct msm_gpu *gpu)
/* Select CP0 to always count cycles */
gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
- gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
- gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+ if (adreno_is_a630(adreno_gpu)) {
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+ }
/* Enable fault detection */
gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
@@ -748,39 +738,6 @@ static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
};
-#define GBIF_CLIENT_HALT_MASK BIT(0)
-#define GBIF_ARB_HALT_MASK BIT(1)
-
-static void a6xx_bus_clear_pending_transactions(struct adreno_gpu *adreno_gpu)
-{
- struct msm_gpu *gpu = &adreno_gpu->base;
-
- if(!a6xx_has_gbif(adreno_gpu)){
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
- spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) &
- 0xf) == 0xf);
- gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
-
- return;
- }
-
- /* Halt new client requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_CLIENT_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_CLIENT_HALT_MASK)) == GBIF_CLIENT_HALT_MASK);
-
- /* Halt all AXI requests on GBIF */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, GBIF_ARB_HALT_MASK);
- spin_until((gpu_read(gpu, REG_A6XX_GBIF_HALT_ACK) &
- (GBIF_ARB_HALT_MASK)) == GBIF_ARB_HALT_MASK);
-
- /*
- * GMU needs DDR access in slumber path. Deassert GBIF halt now
- * to allow for GMU to access system memory.
- */
- gpu_write(gpu, REG_A6XX_GBIF_HALT, 0x0);
-}
-
static int a6xx_pm_resume(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
@@ -805,16 +762,6 @@ static int a6xx_pm_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
- /*
- * Make sure the GMU is idle before continuing (because some transitions
- * may use VBIF
- */
- a6xx_gmu_wait_for_idle(&a6xx_gpu->gmu);
-
- /* Clear the VBIF pipe before shutting down */
- /* FIXME: This accesses the GPU - do we need to make sure it is on? */
- a6xx_bus_clear_pending_transactions(adreno_gpu);
-
return a6xx_gmu_stop(a6xx_gpu);
}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index eda11abc5f01..e450e0b97211 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -7,6 +7,7 @@
#include "a6xx_gmu.h"
#include "a6xx_gmu.xml.h"
+#include "a6xx_gpu.h"
#define HFI_MSG_ID(val) [val] = #val
@@ -216,48 +217,82 @@ static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
NULL, 0);
}
-static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+static void a618_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
{
- struct a6xx_hfi_msg_bw_table msg = { 0 };
+ /* Send a single "off" entry since the 618 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
+
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x01;
+
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5003c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
+
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
/*
- * The sdm845 GMU doesn't do bus frequency scaling on its own but it
- * does need at least one entry in the list because it might be accessed
- * when the GMU is shutting down. Send a single "off" entry.
+ * These are the CX (CNOC) votes - these are used by the GMU but the
+ * votes are known and fixed for the target
*/
+ msg->cnoc_cmds_num = 1;
+ msg->cnoc_wait_bitmask = 0x01;
+
+ msg->cnoc_cmds_addrs[0] = 0x5007c;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+}
- msg.bw_level_num = 1;
+static void a6xx_build_bw_table(struct a6xx_hfi_msg_bw_table *msg)
+{
+ /* Send a single "off" entry since the 630 GMU doesn't do bus scaling */
+ msg->bw_level_num = 1;
- msg.ddr_cmds_num = 3;
- msg.ddr_wait_bitmask = 0x07;
+ msg->ddr_cmds_num = 3;
+ msg->ddr_wait_bitmask = 0x07;
- msg.ddr_cmds_addrs[0] = 0x50000;
- msg.ddr_cmds_addrs[1] = 0x5005c;
- msg.ddr_cmds_addrs[2] = 0x5000c;
+ msg->ddr_cmds_addrs[0] = 0x50000;
+ msg->ddr_cmds_addrs[1] = 0x5005c;
+ msg->ddr_cmds_addrs[2] = 0x5000c;
- msg.ddr_cmds_data[0][0] = 0x40000000;
- msg.ddr_cmds_data[0][1] = 0x40000000;
- msg.ddr_cmds_data[0][2] = 0x40000000;
+ msg->ddr_cmds_data[0][0] = 0x40000000;
+ msg->ddr_cmds_data[0][1] = 0x40000000;
+ msg->ddr_cmds_data[0][2] = 0x40000000;
/*
* These are the CX (CNOC) votes. This is used but the values for the
* sdm845 GMU are known and fixed so we can hard code them.
*/
- msg.cnoc_cmds_num = 3;
- msg.cnoc_wait_bitmask = 0x05;
+ msg->cnoc_cmds_num = 3;
+ msg->cnoc_wait_bitmask = 0x05;
- msg.cnoc_cmds_addrs[0] = 0x50034;
- msg.cnoc_cmds_addrs[1] = 0x5007c;
- msg.cnoc_cmds_addrs[2] = 0x5004c;
+ msg->cnoc_cmds_addrs[0] = 0x50034;
+ msg->cnoc_cmds_addrs[1] = 0x5007c;
+ msg->cnoc_cmds_addrs[2] = 0x5004c;
- msg.cnoc_cmds_data[0][0] = 0x40000000;
- msg.cnoc_cmds_data[0][1] = 0x00000000;
- msg.cnoc_cmds_data[0][2] = 0x40000000;
+ msg->cnoc_cmds_data[0][0] = 0x40000000;
+ msg->cnoc_cmds_data[0][1] = 0x00000000;
+ msg->cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg->cnoc_cmds_data[1][0] = 0x60000001;
+ msg->cnoc_cmds_data[1][1] = 0x20000001;
+ msg->cnoc_cmds_data[1][2] = 0x60000001;
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
- msg.cnoc_cmds_data[1][0] = 0x60000001;
- msg.cnoc_cmds_data[1][1] = 0x20000001;
- msg.cnoc_cmds_data[1][2] = 0x60000001;
+ if (adreno_is_a618(adreno_gpu))
+ a618_build_bw_table(&msg);
+ else
+ a6xx_build_bw_table(&msg);
return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
NULL, 0);
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index bf513411b243..17448505a9b5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -1272,6 +1272,8 @@ static const struct drm_crtc_funcs dpu_crtc_funcs = {
.atomic_destroy_state = dpu_crtc_destroy_state,
.late_register = dpu_crtc_late_register,
.early_unregister = dpu_crtc_early_unregister,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
index f8ac3bf60fd6..58d3400668f5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -512,7 +512,6 @@ static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
if (cur_mode->vdisplay == adj_mode->vdisplay &&
cur_mode->hdisplay == adj_mode->hdisplay &&
drm_mode_vrefresh(cur_mode) == drm_mode_vrefresh(adj_mode)) {
- adj_mode->private = cur_mode->private;
adj_mode->private_flags |= cur_mode->private_flags;
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
index 528632690f1e..a05282dede91 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -255,13 +255,13 @@ static const struct dpu_format dpu_format_map[] = {
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
DPU_FETCH_LINEAR, 1),
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 29705e773a4b..80d3cfc14007 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -12,6 +12,7 @@
#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+#define HW_REV 0x0
#define HW_INTR_STATUS 0x0010
/* Max BW defined in KBps */
@@ -22,6 +23,17 @@ struct dpu_irq_controller {
struct irq_domain *domain;
};
+struct dpu_hw_cfg {
+ u32 val;
+ u32 offset;
+};
+
+struct dpu_mdss_hw_init_handler {
+ u32 hw_rev;
+ u32 hw_reg_count;
+ struct dpu_hw_cfg *hw_cfg;
+};
+
struct dpu_mdss {
struct msm_mdss base;
void __iomem *mmio;
@@ -32,6 +44,44 @@ struct dpu_mdss {
u32 num_paths;
};
+static struct dpu_hw_cfg hw_cfg[] = {
+ {
+ /* UBWC global settings */
+ .val = 0x1E,
+ .offset = 0x144,
+ }
+};
+
+static struct dpu_mdss_hw_init_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_620,
+ .hw_reg_count = ARRAY_SIZE(hw_cfg),
+ .hw_cfg = hw_cfg
+ },
+};
+
+static void dpu_mdss_hw_init(struct dpu_mdss *dpu_mdss, u32 hw_rev)
+{
+ int i;
+ u32 count = 0;
+ struct dpu_hw_cfg *hw_cfg = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ hw_cfg = cfg_handler[i].hw_cfg;
+ count = cfg_handler[i].hw_reg_count;
+ break;
+ }
+ }
+
+ for (i = 0; i < count; i++) {
+ writel_relaxed(hw_cfg->val,
+ dpu_mdss->mmio + hw_cfg->offset);
+ hw_cfg++;
+ }
+}
+
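The rev-keyed table keeps per-SoC init additive: supporting another revision only needs a new cfg_handler[] entry. A hedged sketch (the second revision constant and register value here are hypothetical):

	static struct dpu_hw_cfg other_hw_cfg[] = {
		{ .val = 0x11, .offset = 0x144 },	/* hypothetical UBWC setting */
	};

	static struct dpu_mdss_hw_init_handler cfg_handler[] = {
		{ .hw_rev = DPU_HW_VER_620,
		  .hw_reg_count = ARRAY_SIZE(hw_cfg),
		  .hw_cfg = hw_cfg },
		{ .hw_rev = DPU_HW_VER_500,		/* hypothetical revision */
		  .hw_reg_count = ARRAY_SIZE(other_hw_cfg),
		  .hw_cfg = other_hw_cfg },
	};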
static int dpu_mdss_parse_data_bus_icc_path(struct drm_device *dev,
struct dpu_mdss *dpu_mdss)
{
@@ -174,12 +224,18 @@ static int dpu_mdss_enable(struct msm_mdss *mdss)
struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
struct dss_module_power *mp = &dpu_mdss->mp;
int ret;
+ u32 mdss_rev;
dpu_mdss_icc_request_bw(mdss);
ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
- if (ret)
+ if (ret) {
DPU_ERROR("clock enable failed, ret:%d\n", ret);
+ return ret;
+ }
+
+ mdss_rev = readl_relaxed(dpu_mdss->mmio + HW_REV);
+ dpu_mdss_hw_init(dpu_mdss, mdss_rev);
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
index f34dca5d4532..c9239b07fe4f 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_crtc.c
@@ -481,6 +481,8 @@ static const struct drm_crtc_funcs mdp4_crtc_funcs = {
.reset = drm_atomic_helper_crtc_reset,
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
};
static const struct drm_crtc_helper_funcs mdp4_crtc_helper_funcs = {
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 05cc04f729d6..998bef1190a3 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -405,6 +405,83 @@ static void mdp5_crtc_mode_set_nofb(struct drm_crtc *crtc)
spin_unlock_irqrestore(&mdp5_crtc->lm_lock, flags);
}
+static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_encoder *encoder;
+
+ drm_for_each_encoder(encoder, dev)
+ if (encoder->crtc == crtc)
+ return encoder;
+
+ return NULL;
+}
+
+static bool mdp5_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ unsigned int pipe = crtc->index;
+ struct drm_encoder *encoder;
+ int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder) {
+ DRM_ERROR("no encoder found for crtc %d\n", pipe);
+ return false;
+ }
+
+ vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
+ vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
+
+ /*
+ * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
+ * the end of VFP. Translate the porch values relative to the line
+ * counter positions.
+ */
+
+ vactive_start = vsw + vbp + 1;
+
+ vactive_end = vactive_start + mode->crtc_vdisplay;
+
+ /* last scan line before VSYNC */
+ vfp_end = mode->crtc_vtotal;
+
+ if (stime)
+ *stime = ktime_get();
+
+ line = mdp5_encoder_get_linecount(encoder);
+
+ if (line < vactive_start)
+ line -= vactive_start;
+ else if (line > vactive_end)
+ line = line - vfp_end - vactive_start;
+ else
+ line -= vactive_start;
+
+ *vpos = line;
+ *hpos = 0;
+
+ if (etime)
+ *etime = ktime_get();
+
+ return true;
+}
+
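Plugging a standard 1920x1080@60 CEA mode into the math above (crtc_vsync_start 1084, crtc_vsync_end 1089, crtc_vtotal 1125, crtc_vdisplay 1080; standard timings quoted for illustration):

	vsw = 1089 - 1084 = 5
	vbp = 1125 - 1089 = 36
	vactive_start = 5 + 36 + 1 = 42
	vactive_end   = 42 + 1080  = 1122

	line   42 -> *vpos = 42 - 42          = 0     (first active line)
	line 1121 -> *vpos = 1121 - 42        = 1079  (last full active line)
	line 1123 -> *vpos = 1123 - 1125 - 42 = -44   (front porch: 44 lines to go)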
+static u32 mdp5_crtc_get_vblank_counter(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ encoder = get_encoder_from_crtc(crtc);
+ if (!encoder)
+ return 0;
+
+ return mdp5_encoder_get_framecount(encoder);
+}
+
static void mdp5_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
@@ -1054,6 +1131,10 @@ static const struct drm_crtc_funcs mdp5_crtc_funcs = {
.cursor_set = mdp5_crtc_cursor_set,
.cursor_move = mdp5_crtc_cursor_move,
.atomic_print_state = mdp5_crtc_atomic_print_state,
+ .get_vblank_counter = mdp5_crtc_get_vblank_counter,
+ .enable_vblank = msm_crtc_enable_vblank,
+ .disable_vblank = msm_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
@@ -1063,6 +1144,7 @@ static const struct drm_crtc_helper_funcs mdp5_crtc_helper_funcs = {
.atomic_flush = mdp5_crtc_atomic_flush,
.atomic_enable = mdp5_crtc_atomic_enable,
.atomic_disable = mdp5_crtc_atomic_disable,
+ .get_scanout_position = mdp5_crtc_get_scanout_position,
};
static void mdp5_crtc_vblank_irq(struct mdp_irq *irq, uint32_t irqstatus)
@@ -1109,8 +1191,8 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
msecs_to_jiffies(50));
if (ret == 0)
- dev_warn(dev->dev, "pp done time out, lm=%d\n",
- mdp5_cstate->pipeline.mixer->lm);
+ dev_warn_ratelimited(dev->dev, "pp done time out, lm=%d\n",
+ mdp5_cstate->pipeline.mixer->lm);
}
static void mdp5_crtc_wait_for_flush_done(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index e43ecd4be10a..6650f478b226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -583,98 +583,6 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
return 0;
}
-static struct drm_encoder *get_encoder_from_crtc(struct drm_crtc *crtc)
-{
- struct drm_device *dev = crtc->dev;
- struct drm_encoder *encoder;
-
- drm_for_each_encoder(encoder, dev)
- if (encoder->crtc == crtc)
- return encoder;
-
- return NULL;
-}
-
-static bool mdp5_get_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
- int line, vsw, vbp, vactive_start, vactive_end, vfp_end;
-
- crtc = priv->crtcs[pipe];
- if (!crtc) {
- DRM_ERROR("Invalid crtc %d\n", pipe);
- return false;
- }
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder) {
- DRM_ERROR("no encoder found for crtc %d\n", pipe);
- return false;
- }
-
- vsw = mode->crtc_vsync_end - mode->crtc_vsync_start;
- vbp = mode->crtc_vtotal - mode->crtc_vsync_end;
-
- /*
- * the line counter is 1 at the start of the VSYNC pulse and VTOTAL at
- * the end of VFP. Translate the porch values relative to the line
- * counter positions.
- */
-
- vactive_start = vsw + vbp + 1;
-
- vactive_end = vactive_start + mode->crtc_vdisplay;
-
- /* last scan line before VSYNC */
- vfp_end = mode->crtc_vtotal;
-
- if (stime)
- *stime = ktime_get();
-
- line = mdp5_encoder_get_linecount(encoder);
-
- if (line < vactive_start) {
- line -= vactive_start;
- } else if (line > vactive_end) {
- line = line - vfp_end - vactive_start;
- } else {
- line -= vactive_start;
- }
-
- *vpos = line;
- *hpos = 0;
-
- if (etime)
- *etime = ktime_get();
-
- return true;
-}
-
-static u32 mdp5_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
-{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_crtc *crtc;
- struct drm_encoder *encoder;
-
- if (pipe >= priv->num_crtcs)
- return 0;
-
- crtc = priv->crtcs[pipe];
- if (!crtc)
- return 0;
-
- encoder = get_encoder_from_crtc(crtc);
- if (!encoder)
- return 0;
-
- return mdp5_encoder_get_framecount(encoder);
-}
-
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -762,9 +670,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
dev->mode_config.max_width = 0xffff;
dev->mode_config.max_height = 0xffff;
- dev->driver->get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos;
- dev->driver->get_scanout_position = mdp5_get_scanoutpos;
- dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
dev->max_vblank_count = 0; /* max_vblank_count is set on each CRTC */
dev->vblank_disable_immediate = true;
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 104115d112eb..4b363bd7ddff 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -336,7 +336,7 @@ static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
return num;
}
-static int dsi_mgr_connector_mode_valid(struct drm_connector *connector,
+static enum drm_mode_status dsi_mgr_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
int id = dsi_mgr_connector_get_id(connector);
@@ -506,6 +506,7 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
struct msm_dsi *msm_dsi1 = dsi_mgr_get_dsi(DSI_1);
struct mipi_dsi_host *host = msm_dsi->host;
struct drm_panel *panel = msm_dsi->panel;
+ struct msm_dsi_pll *src_pll;
bool is_dual_dsi = IS_DUAL_DSI();
int ret;
@@ -539,6 +540,10 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
id, ret);
}
+ /* Save PLL status if it is a clock source */
+ src_pll = msm_dsi_phy_get_pll(msm_dsi->phy);
+ msm_dsi_pll_save_state(src_pll);
+
ret = msm_dsi_host_power_off(host);
if (ret)
pr_err("%s: host %d power off failed,%d\n", __func__, id, ret);
@@ -684,7 +689,7 @@ struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
bridge = &dsi_bridge->base;
bridge->funcs = &dsi_mgr_bridge_funcs;
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret)
goto fail;
@@ -713,7 +718,7 @@ struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
encoder = msm_dsi->encoder;
/* link the internal dsi bridge to the external bridge */
- drm_bridge_attach(encoder, ext_bridge, int_bridge);
+ drm_bridge_attach(encoder, ext_bridge, int_bridge, 0);
/*
* we need the drm_connector created by the external bridge
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index b0cfa67d2a57..f509ebd77500 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -724,10 +724,6 @@ void msm_dsi_phy_disable(struct msm_dsi_phy *phy)
if (!phy || !phy->cfg->ops.disable)
return;
- /* Save PLL status if it is a clock source */
- if (phy->usecase != MSM_DSI_PHY_SLAVE)
- msm_dsi_pll_save_state(phy->pll);
-
phy->cfg->ops.disable(phy);
dsi_phy_regulator_disable(phy);
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index 1c894548dd72..6ac04fc303f5 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -411,6 +411,12 @@ static int dsi_pll_10nm_vco_prepare(struct clk_hw *hw)
if (pll_10nm->slave)
dsi_pll_enable_pll_bias(pll_10nm->slave);
+ rc = dsi_pll_10nm_vco_set_rate(hw, pll_10nm->vco_current_rate, 0);
+ if (rc) {
+ pr_err("vco_set_rate failed, rc=%d\n", rc);
+ return rc;
+ }
+
/* Start PLL */
pll_write(pll_10nm->phy_cmn_mmio + REG_DSI_10nm_PHY_CMN_PLL_CNTRL,
0x01);
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index ad4e963ccd9b..a78d6077802b 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -178,7 +178,7 @@ int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
goto fail;
}
- ret = drm_bridge_attach(encoder, edp->bridge, NULL);
+ ret = drm_bridge_attach(encoder, edp->bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
index b65b5cc2dba2..c69a37e0c708 100644
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ b/drivers/gpu/drm/msm/edp/edp_bridge.c
@@ -97,7 +97,7 @@ struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
bridge = &edp_bridge->base;
bridge->funcs = &edp_bridge_funcs;
- ret = drm_bridge_attach(edp->encoder, bridge, NULL);
+ ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1a9b6289637d..3a8646535c14 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -327,7 +327,7 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi,
goto fail;
}
- ret = drm_bridge_attach(encoder, hdmi->bridge, NULL);
+ ret = drm_bridge_attach(encoder, hdmi->bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index ba81338a9bf8..6e380db9287b 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -287,7 +287,7 @@ struct drm_bridge *msm_hdmi_bridge_init(struct hdmi *hdmi)
bridge = &hdmi_bridge->base;
bridge->funcs = &msm_hdmi_bridge_funcs;
- ret = drm_bridge_attach(hdmi->encoder, bridge, NULL);
+ ret = drm_bridge_attach(hdmi->encoder, bridge, NULL, 0);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index c26219c7a49f..2a82c23a6e4d 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -441,6 +441,14 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
if (ret)
goto err_msm_uninit;
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+ if (!dev->dma_parms)
+ return -ENOMEM;
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
@@ -660,8 +668,10 @@ static void msm_irq_uninstall(struct drm_device *dev)
kms->funcs->irq_uninstall(kms);
}
-static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int msm_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -670,8 +680,10 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
return vblank_ctrl_queue_work(priv, pipe, true);
}
-static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void msm_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
if (!kms)
@@ -996,8 +1008,6 @@ static struct drm_driver msm_driver = {
.irq_preinstall = msm_irq_preinstall,
.irq_postinstall = msm_irq_postinstall,
.irq_uninstall = msm_irq_uninstall,
- .enable_vblank = msm_enable_vblank,
- .disable_vblank = msm_disable_vblank,
.gem_free_object_unlocked = msm_gem_free_object,
.gem_vm_ops = &vm_ops,
.dumb_create = msm_gem_dumb_create,
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 740bf7c70d8f..194d900a460e 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -232,6 +232,9 @@ struct drm_atomic_state *msm_atomic_state_alloc(struct drm_device *dev);
void msm_atomic_state_clear(struct drm_atomic_state *state);
void msm_atomic_state_free(struct drm_atomic_state *state);
+int msm_crtc_enable_vblank(struct drm_crtc *crtc);
+void msm_crtc_disable_vblank(struct drm_crtc *crtc);
+
int msm_gem_init_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, int npages);
void msm_gem_purge_vma(struct msm_gem_address_space *aspace,
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index db48867df47d..47235f8c5922 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -160,16 +160,12 @@ struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &msm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper);
if (ret) {
DRM_DEV_ERROR(dev->dev, "could not init fbdev: ret=%d\n", ret);
goto fail;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
/* the fw fb could be anywhere in memory */
drm_fb_helper_remove_conflicting_framebuffers(NULL, "msm", false);
diff --git a/drivers/gpu/drm/nouveau/dispnv04/crtc.c b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
index 37c50ea8f847..1f08de4241e0 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/crtc.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/crtc.c
@@ -1248,6 +1248,9 @@ static const struct drm_crtc_funcs nv04_crtc_funcs = {
.set_config = drm_crtc_helper_set_config,
.page_flip = nv04_crtc_page_flip,
.destroy = nv_crtc_destroy,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
@@ -1258,6 +1261,7 @@ static const struct drm_crtc_helper_funcs nv04_crtc_helper_funcs = {
.mode_set_base = nv04_crtc_mode_set_base,
.mode_set_base_atomic = nv04_crtc_mode_set_base_atomic,
.disable = nv_crtc_disable,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static const uint32_t modeset_formats[] = {
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index a3dc2ba19fb2..4d1c58468dbc 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1256,30 +1256,6 @@ nv50_mstm_prepare(struct nv50_mstm *mstm)
}
}
-static void
-nv50_mstm_destroy_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
- struct nv50_mstc *mstc = nv50_mstc(connector);
-
- drm_connector_unregister(&mstc->connector);
-
- drm_fb_helper_remove_one_connector(&drm->fbcon->helper, &mstc->connector);
-
- drm_connector_put(&mstc->connector);
-}
-
-static void
-nv50_mstm_register_connector(struct drm_connector *connector)
-{
- struct nouveau_drm *drm = nouveau_drm(connector->dev);
-
- drm_fb_helper_add_one_connector(&drm->fbcon->helper, connector);
-
- drm_connector_register(connector);
-}
-
static struct drm_connector *
nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, const char *path)
@@ -1298,8 +1274,6 @@ nv50_mstm_add_connector(struct drm_dp_mst_topology_mgr *mgr,
static const struct drm_dp_mst_topology_cbs
nv50_mstm = {
.add_connector = nv50_mstm_add_connector,
- .register_connector = nv50_mstm_register_connector,
- .destroy_connector = nv50_mstm_destroy_connector,
};
void
diff --git a/drivers/gpu/drm/nouveau/dispnv50/head.c b/drivers/gpu/drm/nouveau/dispnv50/head.c
index d9d64602947d..8f6455697ba7 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/head.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/head.c
@@ -29,6 +29,7 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
+#include <drm/drm_vblank.h>
#include "nouveau_connector.h"
void
nv50_head_flush_clr(struct nv50_head *head,
@@ -413,6 +414,7 @@ nv50_head_atomic_check(struct drm_crtc *crtc, struct drm_crtc_state *state)
static const struct drm_crtc_helper_funcs
nv50_head_help = {
.atomic_check = nv50_head_atomic_check,
+ .get_scanout_position = nouveau_display_scanoutpos,
};
static void
@@ -481,6 +483,9 @@ nv50_head_func = {
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = nv50_head_atomic_duplicate_state,
.atomic_destroy_state = nv50_head_atomic_destroy_state,
+ .enable_vblank = nouveau_display_vblank_enable,
+ .disable_vblank = nouveau_display_vblank_disable,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
struct nv50_head *
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 890315291b01..bb737f9281e6 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -458,6 +458,8 @@ nv50_wndw_atomic_check(struct drm_plane *plane, struct drm_plane_state *state)
asyw->clr.ntfy = armw->ntfy.handle != 0;
asyw->clr.sema = armw->sema.handle != 0;
asyw->clr.xlut = armw->xlut.handle != 0;
+ if (asyw->clr.xlut && asyw->visible)
+ asyw->set.xlut = asyw->xlut.handle != 0;
asyw->clr.csc = armw->csc.valid;
if (wndw->func->image_clr)
asyw->clr.image = armw->image.handle[0] != 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 1b62ccc57aef..2b4b21b02e40 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -647,13 +647,6 @@ nouveau_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags)
}
static int
-nouveau_bo_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- /* We'll do this from user space. */
- return 0;
-}
-
-static int
nouveau_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -1697,7 +1690,6 @@ struct ttm_bo_driver nouveau_bo_driver = {
.ttm_tt_create = &nouveau_ttm_tt_create,
.ttm_tt_populate = &nouveau_ttm_tt_populate,
.ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate,
- .invalidate_caches = nouveau_bo_invalidate_caches,
.init_mem_type = nouveau_bo_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = nouveau_bo_evict_flags,
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 53f9bceaf17a..700817dc4fa0 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -54,15 +54,10 @@ nouveau_display_vblank_handler(struct nvif_notify *notify)
}
int
-nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_enable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return -EINVAL;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_get(&nv_crtc->vblank);
@@ -70,15 +65,10 @@ nouveau_display_vblank_enable(struct drm_device *dev, unsigned int pipe)
}
void
-nouveau_display_vblank_disable(struct drm_device *dev, unsigned int pipe)
+nouveau_display_vblank_disable(struct drm_crtc *crtc)
{
- struct drm_crtc *crtc;
struct nouveau_crtc *nv_crtc;
- crtc = drm_crtc_from_index(dev, pipe);
- if (!crtc)
- return;
-
nv_crtc = nouveau_crtc(crtc);
nvif_notify_put(&nv_crtc->vblank);
}
@@ -136,21 +126,13 @@ nouveau_display_scanoutpos_head(struct drm_crtc *crtc, int *vpos, int *hpos,
}
bool
-nouveau_display_scanoutpos(struct drm_device *dev, unsigned int pipe,
+nouveau_display_scanoutpos(struct drm_crtc *crtc,
bool in_vblank_irq, int *vpos, int *hpos,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode)
{
- struct drm_crtc *crtc;
-
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- if (nouveau_crtc(crtc)->index == pipe) {
- return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
- stime, etime);
- }
- }
-
- return false;
+ return nouveau_display_scanoutpos_head(crtc, vpos, hpos,
+ stime, etime);
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.h b/drivers/gpu/drm/nouveau/nouveau_display.h
index 6e8e66882e45..de004018ab5c 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.h
+++ b/drivers/gpu/drm/nouveau/nouveau_display.h
@@ -61,11 +61,12 @@ int nouveau_display_init(struct drm_device *dev, bool resume, bool runtime);
void nouveau_display_fini(struct drm_device *dev, bool suspend, bool runtime);
int nouveau_display_suspend(struct drm_device *dev, bool runtime);
void nouveau_display_resume(struct drm_device *dev, bool runtime);
-int nouveau_display_vblank_enable(struct drm_device *, unsigned int);
-void nouveau_display_vblank_disable(struct drm_device *, unsigned int);
-bool nouveau_display_scanoutpos(struct drm_device *, unsigned int,
- bool, int *, int *, ktime_t *,
- ktime_t *, const struct drm_display_mode *);
+int nouveau_display_vblank_enable(struct drm_crtc *crtc);
+void nouveau_display_vblank_disable(struct drm_crtc *crtc);
+bool nouveau_display_scanoutpos(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
int nouveau_display_dumb_create(struct drm_file *, struct drm_device *,
struct drm_mode_create_dumb *args);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index b65ae817eabf..6b1629c14dd7 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -1120,11 +1120,6 @@ driver_stub = {
.debugfs_init = nouveau_drm_debugfs_init,
#endif
- .enable_vblank = nouveau_display_vblank_enable,
- .disable_vblank = nouveau_display_vblank_disable,
- .get_scanout_position = nouveau_display_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
.ioctls = nouveau_ioctls,
.num_ioctls = ARRAY_SIZE(nouveau_ioctls),
.fops = &nouveau_driver_fops,
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 0c5cdda3c336..24d543a01f43 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -558,14 +558,10 @@ nouveau_fbcon_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
- ret = drm_fb_helper_init(dev, &fbcon->helper, 4);
+ ret = drm_fb_helper_init(dev, &fbcon->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&fbcon->helper);
- if (ret)
- goto fini;
-
if (preferred_bpp != 8 && preferred_bpp != 16 && preferred_bpp != 32) {
if (drm->client.device.info.ram_size <= 32 * 1024 * 1024)
preferred_bpp = 8;
diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
index d865d8aeac3c..c85dd8afa3c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
@@ -72,7 +72,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
index c7d700916eae..8ebbe1656008 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/base.c
@@ -2579,6 +2579,7 @@ nv166_chipset = {
static const struct nvkm_device_chip
nv167_chipset = {
.name = "TU117",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2607,6 +2608,7 @@ nv167_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
+ .gr = tu102_gr_new,
.nvdec[0] = gm107_nvdec_new,
.nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
@@ -2615,6 +2617,7 @@ nv167_chipset = {
static const struct nvkm_device_chip
nv168_chipset = {
.name = "TU116",
+ .acr = tu102_acr_new,
.bar = tu102_bar_new,
.bios = nvkm_bios_new,
.bus = gf100_bus_new,
@@ -2643,6 +2646,7 @@ nv168_chipset = {
.disp = tu102_disp_new,
.dma = gv100_dma_new,
.fifo = tu102_fifo_new,
+ .gr = tu102_gr_new,
.nvdec[0] = gm107_nvdec_new,
.nvenc[0] = gm107_nvenc_new,
.sec2 = tu102_sec2_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
index 454668b1cf54..a9efa4d78be9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/tu102.c
@@ -164,6 +164,32 @@ MODULE_FIRMWARE("nvidia/tu106/gr/sw_nonctx.bin");
MODULE_FIRMWARE("nvidia/tu106/gr/sw_bundle_init.bin");
MODULE_FIRMWARE("nvidia/tu106/gr/sw_method_init.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu117/gr/sw_method_init.bin");
+
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_data.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/fecs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_inst.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_data.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/gpccs_sig.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_ctx.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_nonctx.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_bundle_init.bin");
+MODULE_FIRMWARE("nvidia/tu116/gr/sw_method_init.bin");
+
static const struct gf100_gr_fwif
tu102_gr_fwif[] = {
{ 0, gm200_gr_load, &tu102_gr, &gp108_gr_fecs_acr, &gp108_gr_gpccs_acr },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
index 7f4b89d82d32..d28d8f36ae24 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/acr/tu102.c
@@ -107,6 +107,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_unload.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/unload_bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_unload.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_unload.bin");
+
+MODULE_FIRMWARE("nvidia/tu117/acr/unload_bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_unload.bin");
+
static const struct nvkm_acr_hsf_fwif
tu102_acr_unload_fwif[] = {
{ 0, nvkm_acr_hsfw_load, &gp108_acr_unload_0 },
@@ -130,6 +136,8 @@ tu102_acr_asb_0 = {
MODULE_FIRMWARE("nvidia/tu102/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu104/acr/ucode_asb.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_asb.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_asb.bin");
static const struct nvkm_acr_hsf_fwif
tu102_acr_asb_fwif[] = {
@@ -154,6 +162,12 @@ MODULE_FIRMWARE("nvidia/tu104/acr/ucode_ahesasc.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/bl.bin");
MODULE_FIRMWARE("nvidia/tu106/acr/ucode_ahesasc.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu116/acr/ucode_ahesasc.bin");
+
+MODULE_FIRMWARE("nvidia/tu117/acr/bl.bin");
+MODULE_FIRMWARE("nvidia/tu117/acr/ucode_ahesasc.bin");
+
static const struct nvkm_acr_hsf_fwif
tu102_acr_ahesasc_fwif[] = {
{ 0, nvkm_acr_hsfw_load, &tu102_acr_ahesasc_0 },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
index 389bad312bf2..10ff5d053f7e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gv100.c
@@ -51,3 +51,5 @@ MODULE_FIRMWARE("nvidia/gv100/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu102/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu104/nvdec/scrubber.bin");
MODULE_FIRMWARE("nvidia/tu106/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu116/nvdec/scrubber.bin");
+MODULE_FIRMWARE("nvidia/tu117/nvdec/scrubber.bin");
diff --git a/drivers/gpu/drm/omapdrm/displays/Kconfig b/drivers/gpu/drm/omapdrm/displays/Kconfig
index b562a8cd61bf..f2be594c7eff 100644
--- a/drivers/gpu/drm/omapdrm/displays/Kconfig
+++ b/drivers/gpu/drm/omapdrm/displays/Kconfig
@@ -1,28 +1,6 @@
# SPDX-License-Identifier: GPL-2.0-only
menu "OMAPDRM External Display Device Drivers"
-config DRM_OMAP_ENCODER_OPA362
- tristate "OPA362 external analog amplifier"
- help
- Driver for OPA362 external analog TV amplifier controlled
- through a GPIO.
-
-config DRM_OMAP_ENCODER_TPD12S015
- tristate "TPD12S015 HDMI ESD protection and level shifter"
- help
- Driver for TPD12S015, which offers HDMI ESD protection and level
- shifting.
-
-config DRM_OMAP_CONNECTOR_HDMI
- tristate "HDMI Connector"
- help
- Driver for a generic HDMI connector.
-
-config DRM_OMAP_CONNECTOR_ANALOG_TV
- tristate "Analog TV Connector"
- help
- Driver for a generic analog TV connector.
-
config DRM_OMAP_PANEL_DSI_CM
tristate "Generic DSI Command Mode Panel"
depends on BACKLIGHT_CLASS_DEVICE
diff --git a/drivers/gpu/drm/omapdrm/displays/Makefile b/drivers/gpu/drm/omapdrm/displays/Makefile
index cb76859dc574..488ddf153613 100644
--- a/drivers/gpu/drm/omapdrm/displays/Makefile
+++ b/drivers/gpu/drm/omapdrm/displays/Makefile
@@ -1,6 +1,2 @@
# SPDX-License-Identifier: GPL-2.0
-obj-$(CONFIG_DRM_OMAP_ENCODER_OPA362) += encoder-opa362.o
-obj-$(CONFIG_DRM_OMAP_ENCODER_TPD12S015) += encoder-tpd12s015.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_HDMI) += connector-hdmi.o
-obj-$(CONFIG_DRM_OMAP_CONNECTOR_ANALOG_TV) += connector-analog-tv.o
obj-$(CONFIG_DRM_OMAP_PANEL_DSI_CM) += panel-dsi-cm.o
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
deleted file mode 100644
index 0d20fab605d7..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ /dev/null
@@ -1,97 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Analog TV Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/slab.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct device *dev;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tvc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void tvc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static const struct omap_dss_device_ops tvc_ops = {
- .connect = tvc_connect,
- .disconnect = tvc_disconnect,
-};
-
-static int tvc_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tvc_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tvc_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tvc_of_match[] = {
- { .compatible = "omapdss,svideo-connector", },
- { .compatible = "omapdss,composite-video-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tvc_of_match);
-
-static struct platform_driver tvc_connector_driver = {
- .probe = tvc_probe,
- .remove = __exit_p(tvc_remove),
- .driver = {
- .name = "connector-analog-tv",
- .of_match_table = tvc_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tvc_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("Analog TV Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
deleted file mode 100644
index f5d69d810bb8..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ /dev/null
@@ -1,183 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HDMI Connector driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct device *dev;
-
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int hdmic_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return 0;
-}
-
-static void hdmic_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
-}
-
-static bool hdmic_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops hdmic_ops = {
- .connect = hdmic_connect,
- .disconnect = hdmic_disconnect,
-
- .detect = hdmic_detect,
- .register_hpd_cb = hdmic_register_hpd_cb,
- .unregister_hpd_cb = hdmic_unregister_hpd_cb,
-};
-
-static irqreturn_t hdmic_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (hdmic_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int hdmic_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
- int r;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
- ddata->dev = &pdev->dev;
-
- mutex_init(&ddata->hpd_lock);
-
- /* HPD GPIO */
- gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(&pdev->dev, "failed to parse HPD gpio\n");
- return PTR_ERR(gpio);
- }
-
- ddata->hpd_gpio = gpio;
-
- if (ddata->hpd_gpio) {
- r = devm_request_threaded_irq(&pdev->dev,
- gpiod_to_irq(ddata->hpd_gpio),
- NULL, hdmic_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
- IRQF_ONESHOT,
- "hdmic hpd", ddata);
- if (r)
- return r;
- }
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &hdmic_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->display = true;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
- dssdev->ops_flags = ddata->hpd_gpio
- ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
- : 0;
-
- omapdss_display_init(dssdev);
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit hdmic_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
-
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id hdmic_of_match[] = {
- { .compatible = "omapdss,hdmi-connector", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, hdmic_of_match);
-
-static struct platform_driver hdmi_connector_driver = {
- .probe = hdmic_probe,
- .remove = __exit_p(hdmic_remove),
- .driver = {
- .name = "connector-hdmi",
- .of_match_table = hdmic_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(hdmi_connector_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("HDMI Connector driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
deleted file mode 100644
index b992387ed674..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ /dev/null
@@ -1,137 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * OPA362 analog video amplifier with output/power control
- *
- * Copyright (C) 2014 Golden Delicious Computers
- * Author: H. Nikolaus Schaller <[email protected]>
- *
- * based on encoder-tfp410
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/gpio/consumer.h>
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
-
- struct gpio_desc *enable_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int opa362_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void opa362_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static void opa362_enable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 1);
-}
-
-static void opa362_disable(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (ddata->enable_gpio)
- gpiod_set_value_cansleep(ddata->enable_gpio, 0);
-}
-
-static const struct omap_dss_device_ops opa362_ops = {
- .connect = opa362_connect,
- .disconnect = opa362_disconnect,
- .enable = opa362_enable,
- .disable = opa362_disable,
-};
-
-static int opa362_probe(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata;
- struct omap_dss_device *dssdev;
- struct gpio_desc *gpio;
-
- dev_dbg(&pdev->dev, "probe\n");
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_optional(&pdev->dev, "enable", GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->enable_gpio = gpio;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &opa362_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_VENC;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit opa362_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- opa362_disable(dssdev);
-
- return 0;
-}
-
-static const struct of_device_id opa362_of_match[] = {
- { .compatible = "omapdss,ti,opa362", },
- {},
-};
-MODULE_DEVICE_TABLE(of, opa362_of_match);
-
-static struct platform_driver opa362_driver = {
- .probe = opa362_probe,
- .remove = __exit_p(opa362_remove),
- .driver = {
- .name = "amplifier-opa362",
- .of_match_table = opa362_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(opa362_driver);
-
-MODULE_AUTHOR("H. Nikolaus Schaller <[email protected]>");
-MODULE_DESCRIPTION("OPA362 analog video amplifier with output/power control");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
deleted file mode 100644
index 089105c5aa0a..000000000000
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ /dev/null
@@ -1,217 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * TPD12S015 HDMI ESD protection & level shifter chip driver
- *
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/completion.h>
-#include <linux/delay.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
-#include <linux/gpio/consumer.h>
-#include <linux/mutex.h>
-
-#include "../dss/omapdss.h"
-
-struct panel_drv_data {
- struct omap_dss_device dssdev;
- void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
- void *hpd_cb_data;
- struct mutex hpd_lock;
-
- struct gpio_desc *ct_cp_hpd_gpio;
- struct gpio_desc *ls_oe_gpio;
- struct gpio_desc *hpd_gpio;
-};
-
-#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-
-static int tpd_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
- int r;
-
- r = omapdss_device_connect(dst->dss, dst, dst->next);
- if (r)
- return r;
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
-
- /* DC-DC converter needs at max 300us to get to 90% of 5V */
- udelay(300);
-
- return 0;
-}
-
-static void tpd_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct panel_drv_data *ddata = to_panel_data(dst);
-
- gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
- gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
-
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static bool tpd_detect(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- return gpiod_get_value_cansleep(ddata->hpd_gpio);
-}
-
-static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static const struct omap_dss_device_ops tpd_ops = {
- .connect = tpd_connect,
- .disconnect = tpd_disconnect,
- .detect = tpd_detect,
- .register_hpd_cb = tpd_register_hpd_cb,
- .unregister_hpd_cb = tpd_unregister_hpd_cb,
-};
-
-static irqreturn_t tpd_hpd_isr(int irq, void *data)
-{
- struct panel_drv_data *ddata = data;
-
- mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_cb) {
- enum drm_connector_status status;
-
- if (tpd_detect(&ddata->dssdev))
- status = connector_status_connected;
- else
- status = connector_status_disconnected;
-
- ddata->hpd_cb(ddata->hpd_cb_data, status);
- }
- mutex_unlock(&ddata->hpd_lock);
-
- return IRQ_HANDLED;
-}
-
-static int tpd_probe(struct platform_device *pdev)
-{
- struct omap_dss_device *dssdev;
- struct panel_drv_data *ddata;
- int r;
- struct gpio_desc *gpio;
-
- ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
- if (!ddata)
- return -ENOMEM;
-
- platform_set_drvdata(pdev, ddata);
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 0,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ct_cp_hpd_gpio = gpio;
-
- gpio = devm_gpiod_get_index_optional(&pdev->dev, NULL, 1,
- GPIOD_OUT_LOW);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->ls_oe_gpio = gpio;
-
- gpio = devm_gpiod_get_index(&pdev->dev, NULL, 2,
- GPIOD_IN);
- if (IS_ERR(gpio))
- return PTR_ERR(gpio);
-
- ddata->hpd_gpio = gpio;
-
- mutex_init(&ddata->hpd_lock);
-
- r = devm_request_threaded_irq(&pdev->dev, gpiod_to_irq(ddata->hpd_gpio),
- NULL, tpd_hpd_isr,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
- "tpd12s015 hpd", ddata);
- if (r)
- return r;
-
- dssdev = &ddata->dssdev;
- dssdev->ops = &tpd_ops;
- dssdev->dev = &pdev->dev;
- dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
- dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(1) | BIT(0);
- dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
- | OMAP_DSS_DEVICE_OP_HPD;
-
- dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
- if (IS_ERR(dssdev->next)) {
- if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to find video sink\n");
- return PTR_ERR(dssdev->next);
- }
-
- omapdss_device_register(dssdev);
-
- return 0;
-}
-
-static int __exit tpd_remove(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *dssdev = &ddata->dssdev;
-
- if (dssdev->next)
- omapdss_device_put(dssdev->next);
- omapdss_device_unregister(&ddata->dssdev);
-
- return 0;
-}
-
-static const struct of_device_id tpd_of_match[] = {
- { .compatible = "omapdss,ti,tpd12s015", },
- {},
-};
-
-MODULE_DEVICE_TABLE(of, tpd_of_match);
-
-static struct platform_driver tpd_driver = {
- .probe = tpd_probe,
- .remove = __exit_p(tpd_remove),
- .driver = {
- .name = "tpd12s015",
- .of_match_table = tpd_of_match,
- .suppress_bind_attrs = true,
- },
-};
-
-module_platform_driver(tpd_driver);
-
-MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
-MODULE_DESCRIPTION("TPD12S015 driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 564e3e1a1891..3484b5d4a91c 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -678,7 +678,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- ddata->enabled = 1;
+ ddata->enabled = true;
if (!ddata->intro_printed) {
dev_info(&ddata->pdev->dev, "panel revision %02x.%02x.%02x\n",
@@ -729,7 +729,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
if (ddata->vpnl)
regulator_disable(ddata->vpnl);
- ddata->enabled = 0;
+ ddata->enabled = false;
}
static int dsicm_panel_reset(struct panel_drv_data *ddata)
@@ -1265,7 +1265,7 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->display = true;
dssdev->owner = THIS_MODULE;
- dssdev->of_ports = BIT(0);
+ dssdev->of_port = 0;
dssdev->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
diff --git a/drivers/gpu/drm/omapdrm/dss/Makefile b/drivers/gpu/drm/omapdrm/dss/Makefile
index 5950c3f52c2e..f967e6948f2e 100644
--- a/drivers/gpu/drm/omapdrm/dss/Makefile
+++ b/drivers/gpu/drm/omapdrm/dss/Makefile
@@ -2,7 +2,7 @@
obj-$(CONFIG_OMAP2_DSS_INIT) += omapdss-boot-init.o
obj-$(CONFIG_OMAP_DSS_BASE) += omapdss-base.o
-omapdss-base-y := base.o display.o dss-of.o output.o
+omapdss-base-y := base.o display.o output.o
obj-$(CONFIG_OMAP2_DSS) += omapdss.o
# Core DSS files
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index a1970b9db6ab..c7650a7c155d 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -149,8 +149,7 @@ struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from)
goto done;
}
- if (dssdev->id &&
- (dssdev->next || dssdev->bridge || dssdev->panel))
+ if (dssdev->id && (dssdev->next || dssdev->bridge))
goto done;
}
@@ -185,11 +184,10 @@ int omapdss_device_connect(struct dss_device *dss,
if (!dst) {
/*
* The destination is NULL when the source is connected to a
- * bridge or panel instead of a DSS device. Stop here, we will
- * attach the bridge or panel later when we will have a DRM
- * encoder.
+ * bridge instead of a DSS device. Stop here, we will attach
+ * the bridge later when we will have a DRM encoder.
*/
- return src && (src->bridge || src->panel) ? 0 : -EINVAL;
+ return src && src->bridge ? 0 : -EINVAL;
}
if (omapdss_device_is_connected(dst))
@@ -197,10 +195,12 @@ int omapdss_device_connect(struct dss_device *dss,
dst->dss = dss;
- ret = dst->ops->connect(src, dst);
- if (ret < 0) {
- dst->dss = NULL;
- return ret;
+ if (dst->ops && dst->ops->connect) {
+ ret = dst->ops->connect(src, dst);
+ if (ret < 0) {
+ dst->dss = NULL;
+ return ret;
+ }
}
return 0;
@@ -217,7 +217,7 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
dst ? dev_name(dst->dev) : "NULL");
if (!dst) {
- WARN_ON(!src->bridge && !src->panel);
+ WARN_ON(!src->bridge);
return;
}
@@ -228,29 +228,18 @@ void omapdss_device_disconnect(struct omap_dss_device *src,
WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
- dst->ops->disconnect(src, dst);
+ if (dst->ops && dst->ops->disconnect)
+ dst->ops->disconnect(src, dst);
dst->dss = NULL;
}
EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- omapdss_device_pre_enable(dssdev->next);
-
- if (dssdev->ops->pre_enable)
- dssdev->ops->pre_enable(dssdev);
-}
-EXPORT_SYMBOL_GPL(omapdss_device_pre_enable);
-
void omapdss_device_enable(struct omap_dss_device *dssdev)
{
if (!dssdev)
return;
- if (dssdev->ops->enable)
+ if (dssdev->ops && dssdev->ops->enable)
dssdev->ops->enable(dssdev);
omapdss_device_enable(dssdev->next);
@@ -266,25 +255,11 @@ void omapdss_device_disable(struct omap_dss_device *dssdev)
omapdss_device_disable(dssdev->next);
- if (dssdev->ops->disable)
+ if (dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
EXPORT_SYMBOL_GPL(omapdss_device_disable);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev)
-{
- if (!dssdev)
- return;
-
- if (dssdev->ops->post_disable)
- dssdev->ops->post_disable(dssdev);
-
- omapdss_device_post_disable(dssdev->next);
-
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
-}
-EXPORT_SYMBOL_GPL(omapdss_device_post_disable);
-
/* -----------------------------------------------------------------------------
* Components Handling
*/
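
After this change an omap_dss_device may carry no ops table at all (bridge-based outputs pass NULL), so base.c guards every dispatch with a double NULL check, and the pre_enable/post_disable recursion is dropped. The ordering that survives is worth spelling out; a sketch under those assumptions, with hypothetical chain_* names:

/* Sketch of the ordering base.c keeps: enable walks from the DSS
 * output towards the sink, disable walks back, and every hook is
 * optional now that bridge-based outputs have no ops table. */
static void chain_enable(struct omap_dss_device *dssdev)
{
	if (!dssdev)
		return;
	if (dssdev->ops && dssdev->ops->enable)
		dssdev->ops->enable(dssdev);	/* this device first... */
	chain_enable(dssdev->next);		/* ...then the sink side */
}

static void chain_disable(struct omap_dss_device *dssdev)
{
	if (!dssdev)
		return;
	chain_disable(dssdev->next);		/* sink side first... */
	if (dssdev->ops && dssdev->ops->disable)
		dssdev->ops->disable(dssdev);	/* ...then this device */
}
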
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 8a3f61f5825f..3b82158b1bfd 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -40,15 +40,6 @@ void omapdss_display_init(struct omap_dss_device *dssdev)
}
EXPORT_SYMBOL_GPL(omapdss_display_init);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
-{
- while (output->next)
- output = output->next;
-
- return omapdss_device_get(output);
-}
-EXPORT_SYMBOL_GPL(omapdss_display_get);
-
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm)
{
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 95147437b990..5110acb0c6c1 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -9,20 +9,22 @@
#define DSS_SUBSYS_NAME "DPI"
-#include <linux/kernel.h>
+#include <linux/clk.h>
#include <linux/delay.h>
-#include <linux/export.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include <linux/clk.h>
#include <linux/sys_soc.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct dpi_data {
struct platform_device *pdev;
@@ -34,19 +36,19 @@ struct dpi_data {
enum dss_clk_source clk_src;
struct dss_pll *pll;
- struct mutex lock;
-
struct dss_lcd_mgr_config mgr_config;
unsigned long pixelclock;
int data_lines;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-static struct dpi_data *dpi_get_data_from_dssdev(struct omap_dss_device *dssdev)
-{
- return container_of(dssdev, struct dpi_data, output);
-}
+#define drm_bridge_to_dpi(bridge) container_of(bridge, struct dpi_data, bridge)
+
+/* -----------------------------------------------------------------------------
+ * Clock Handling and PLL
+ */
static enum dss_clk_source dpi_get_clk_src_dra7xx(struct dpi_data *dpi,
enum omap_channel channel)
@@ -283,9 +285,7 @@ static bool dpi_dss_clk_calc(struct dpi_data *dpi, unsigned long pck,
-static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
- unsigned long pck_req, unsigned long *fck, int *lck_div,
- int *pck_div)
+static int dpi_set_pll_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -299,19 +299,15 @@ static int dpi_set_pll_clk(struct dpi_data *dpi, enum omap_channel channel,
if (r)
return r;
- dss_select_lcd_clk_source(dpi->dss, channel, dpi->clk_src);
+ dss_select_lcd_clk_source(dpi->dss, dpi->output.dispc_channel,
+ dpi->clk_src);
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
-static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
- unsigned long *fck, int *lck_div, int *pck_div)
+static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req)
{
struct dpi_clk_calc_ctx ctx;
int r;
@@ -327,29 +323,19 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
dpi->mgr_config.clock_info = ctx.dispc_cinfo;
- *fck = ctx.fck;
- *lck_div = ctx.dispc_cinfo.lck_div;
- *pck_div = ctx.dispc_cinfo.pck_div;
-
return 0;
}
static int dpi_set_mode(struct dpi_data *dpi)
{
- int lck_div = 0, pck_div = 0;
- unsigned long fck = 0;
- int r = 0;
+ int r;
if (dpi->pll)
- r = dpi_set_pll_clk(dpi, dpi->output.dispc_channel,
- dpi->pixelclock, &fck, &lck_div, &pck_div);
+ r = dpi_set_pll_clk(dpi, dpi->pixelclock);
else
- r = dpi_set_dispc_clk(dpi, dpi->pixelclock, &fck,
- &lck_div, &pck_div);
- if (r)
- return r;
+ r = dpi_set_dispc_clk(dpi, dpi->pixelclock);
- return 0;
+ return r;
}
static void dpi_config_lcd_manager(struct dpi_data *dpi)
@@ -366,25 +352,149 @@ static void dpi_config_lcd_manager(struct dpi_data *dpi)
dss_mgr_set_lcd_config(&dpi->output, &dpi->mgr_config);
}
-static void dpi_display_enable(struct omap_dss_device *dssdev)
+static int dpi_clock_update(struct dpi_data *dpi, unsigned long *clock)
+{
+ int lck_div, pck_div;
+ unsigned long fck;
+ struct dpi_clk_calc_ctx ctx;
+
+ if (dpi->pll) {
+ if (!dpi_pll_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
+ } else {
+ if (!dpi_dss_clk_calc(dpi, *clock, &ctx))
+ return -EINVAL;
+
+ fck = ctx.fck;
+ }
+
+ lck_div = ctx.dispc_cinfo.lck_div;
+ pck_div = ctx.dispc_cinfo.pck_div;
+
+ *clock = fck / lck_div / pck_div;
+
+ return 0;
+}
+
+static int dpi_verify_pll(struct dss_pll *pll)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- struct omap_dss_device *out = &dpi->output;
int r;
- mutex_lock(&dpi->lock);
+ /* do initial setup with the PLL to see if it is operational */
+
+ r = dss_pll_enable(pll);
+ if (r)
+ return r;
+
+ dss_pll_disable(pll);
+
+ return 0;
+}
+
+static void dpi_init_pll(struct dpi_data *dpi)
+{
+ struct dss_pll *pll;
+
+ if (dpi->pll)
+ return;
+
+ dpi->clk_src = dpi_get_clk_src(dpi);
+
+ pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
+ if (!pll)
+ return;
+
+ if (dpi_verify_pll(pll)) {
+ DSSWARN("PLL not operational\n");
+ return;
+ }
+
+ dpi->pll = pll;
+}
+
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int dpi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ dpi_init_pll(dpi);
+
+ return drm_bridge_attach(bridge->encoder, dpi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+dpi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ if (mode->hdisplay % 8 != 0)
+ return MODE_BAD_WIDTH;
+
+ if (mode->clock == 0)
+ return MODE_NOCLOCK;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool dpi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ unsigned long clock = mode->clock * 1000;
+ int ret;
+
+ ret = dpi_clock_update(dpi, &clock);
+ if (ret < 0)
+ return false;
+
+ adjusted_mode->clock = clock / 1000;
+
+ return true;
+}
+
+static void dpi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+
+ dpi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void dpi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
+ int r;
if (dpi->vdds_dsi_reg) {
r = regulator_enable(dpi->vdds_dsi_reg);
if (r)
- goto err_reg_enable;
+ return;
}
r = dispc_runtime_get(dpi->dss->dispc);
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
+ r = dss_dpi_select_source(dpi->dss, dpi->id, dpi->output.dispc_channel);
if (r)
goto err_src_sel;
@@ -406,8 +516,6 @@ static void dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_mgr_enable;
- mutex_unlock(&dpi->lock);
-
return;
err_mgr_enable:
@@ -420,15 +528,11 @@ err_src_sel:
err_get_dispc:
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-err_reg_enable:
- mutex_unlock(&dpi->lock);
}
-static void dpi_display_disable(struct omap_dss_device *dssdev)
+static void dpi_bridge_disable(struct drm_bridge *bridge)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
+ struct dpi_data *dpi = drm_bridge_to_dpi(bridge);
dss_mgr_disable(&dpi->output);
@@ -442,99 +546,34 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
if (dpi->vdds_dsi_reg)
regulator_disable(dpi->vdds_dsi_reg);
-
- mutex_unlock(&dpi->lock);
}
-static void dpi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- DSSDBG("dpi_set_timings\n");
-
- mutex_lock(&dpi->lock);
-
- dpi->pixelclock = mode->clock * 1000;
-
- mutex_unlock(&dpi->lock);
-}
+static const struct drm_bridge_funcs dpi_bridge_funcs = {
+ .attach = dpi_bridge_attach,
+ .mode_valid = dpi_bridge_mode_valid,
+ .mode_fixup = dpi_bridge_mode_fixup,
+ .mode_set = dpi_bridge_mode_set,
+ .enable = dpi_bridge_enable,
+ .disable = dpi_bridge_disable,
+};
-static int dpi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void dpi_bridge_init(struct dpi_data *dpi)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- int lck_div, pck_div;
- unsigned long fck;
- unsigned long pck;
- struct dpi_clk_calc_ctx ctx;
- bool ok;
+ dpi->bridge.funcs = &dpi_bridge_funcs;
+ dpi->bridge.of_node = dpi->pdev->dev.of_node;
+ dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;
- if (mode->hdisplay % 8 != 0)
- return -EINVAL;
-
- if (mode->clock == 0)
- return -EINVAL;
-
- if (dpi->pll) {
- ok = dpi_pll_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.pll_cinfo.clkout[ctx.clkout_idx];
- } else {
- ok = dpi_dss_clk_calc(dpi, mode->clock * 1000, &ctx);
- if (!ok)
- return -EINVAL;
-
- fck = ctx.fck;
- }
-
- lck_div = ctx.dispc_cinfo.lck_div;
- pck_div = ctx.dispc_cinfo.pck_div;
-
- pck = fck / lck_div / pck_div;
-
- mode->clock = pck / 1000;
-
- return 0;
+ drm_bridge_add(&dpi->bridge);
}
-static int dpi_verify_pll(struct dss_pll *pll)
+static void dpi_bridge_cleanup(struct dpi_data *dpi)
{
- int r;
-
- /* do initial setup with the PLL to see if it is operational */
-
- r = dss_pll_enable(pll);
- if (r)
- return r;
-
- dss_pll_disable(pll);
-
- return 0;
+ drm_bridge_remove(&dpi->bridge);
}
-static void dpi_init_pll(struct dpi_data *dpi)
-{
- struct dss_pll *pll;
-
- if (dpi->pll)
- return;
-
- dpi->clk_src = dpi_get_clk_src(dpi);
-
- pll = dss_pll_find_by_src(dpi->dss, dpi->clk_src);
- if (!pll)
- return;
-
- if (dpi_verify_pll(pll)) {
- DSSWARN("PLL not operational\n");
- return;
- }
-
- dpi->pll = pll;
-}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
/*
* Return a hardcoded channel for the DPI output. This should work for
@@ -572,39 +611,14 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
}
}
-static int dpi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
-
- dpi_init_pll(dpi);
-
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
-
-static void dpi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops dpi_ops = {
- .connect = dpi_connect,
- .disconnect = dpi_disconnect,
-
- .enable = dpi_display_enable,
- .disable = dpi_display_disable,
-
- .check_timings = dpi_check_timings,
- .set_timings = dpi_set_timings,
-};
-
static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
u32 port_num = 0;
int r;
+ dpi_bridge_init(dpi);
+
of_property_read_u32(port, "reg", &port_num);
dpi->id = port_num <= 2 ? port_num : 0;
@@ -625,13 +639,14 @@ static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->id = OMAP_DSS_OUTPUT_DPI;
out->type = OMAP_DISPLAY_TYPE_DPI;
out->dispc_channel = dpi_get_channel(dpi);
- out->of_ports = BIT(port_num);
- out->ops = &dpi_ops;
+ out->of_port = port_num;
out->owner = THIS_MODULE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &dpi->bridge);
+ if (r < 0) {
+ dpi_bridge_cleanup(dpi);
return r;
+ }
omapdss_device_register(out);
@@ -645,8 +660,14 @@ static void dpi_uninit_output_port(struct device_node *port)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ dpi_bridge_cleanup(dpi);
}
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
+
static const struct soc_device_attribute dpi_soc_devices[] = {
{ .machine = "OMAP3[456]*" },
{ .machine = "[AD]M37*" },
@@ -706,8 +727,6 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
dpi->dss = dss;
port->data = dpi;
- mutex_init(&dpi->lock);
-
r = dpi_init_regulator(dpi);
if (r)
return r;
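
The dpi.c rewrite swaps the omap_dss_device ops for a plain drm_bridge: the old check_timings clock rounding resurfaces as mode_valid/mode_fixup via dpi_clock_update(), mode_set caches the adjusted pixel clock, enable/disable absorb the former display paths, the per-device mutex disappears (presumably because the DRM core already serialises modeset entry points), and the of_ports bitmask shrinks to a single of_port index. A condensed sketch of the registration half of that pattern, with names as in the diff and everything else elided:

#include <drm/drm_bridge.h>

/* Sketch: embed a drm_bridge in the driver data, register it when the
 * output port is initialised, and hand it to the DSS output instead of
 * an ops table, as dpi_init_output_port() does above. */
static const struct drm_bridge_funcs example_bridge_funcs = {
	.attach     = dpi_bridge_attach,	/* chains to output.next_bridge */
	.mode_valid = dpi_bridge_mode_valid,
	.mode_fixup = dpi_bridge_mode_fixup,
	.mode_set   = dpi_bridge_mode_set,
	.enable     = dpi_bridge_enable,
	.disable    = dpi_bridge_disable,
};

static void example_bridge_init(struct dpi_data *dpi)
{
	dpi->bridge.funcs = &example_bridge_funcs;
	dpi->bridge.of_node = dpi->pdev->dev.of_node;	/* for OF lookup */
	dpi->bridge.type = DRM_MODE_CONNECTOR_DPI;

	drm_bridge_add(&dpi->bridge);
}
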
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index da16ea095f13..79ddfbfd1b58 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -5116,12 +5116,12 @@ static int dsi_init_output(struct dsi_data *dsi)
out->dispc_channel = dsi_get_channel(dsi);
out->ops = &dsi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE
| DRM_BUS_FLAG_DE_HIGH
| DRM_BUS_FLAG_SYNC_DRIVE_NEGEDGE;
- r = omapdss_device_init_output(out);
+ r = omapdss_device_init_output(out, NULL);
if (r < 0)
return r;
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
deleted file mode 100644
index b7981f3b80ad..000000000000
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ /dev/null
@@ -1,28 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com/
- * Author: Tomi Valkeinen <[email protected]>
- */
-
-#include <linux/err.h>
-#include <linux/of.h>
-#include <linux/of_graph.h>
-
-#include "omapdss.h"
-
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
-{
- struct device_node *remote_node;
- struct omap_dss_device *dssdev;
-
- remote_node = of_graph_get_remote_node(node, port, 0);
- if (!remote_node)
- return NULL;
-
- dssdev = omapdss_find_device_by_node(remote_node);
- of_node_put(remote_node);
-
- return dssdev ? dssdev : ERR_PTR(-EPROBE_DEFER);
-}
-EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index 225ec808b01a..b76fc2b56227 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -1151,46 +1151,38 @@ static const struct dss_features dra7xx_dss_feats = {
.has_lcd_clk_src = true,
};
-static int dss_init_ports(struct dss_device *dss)
+static void __dss_uninit_ports(struct dss_device *dss, unsigned int num_ports)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
unsigned int i;
- int r;
- for (i = 0; i < dss->feat->num_ports; i++) {
+ for (i = 0; i < num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
if (!port)
continue;
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- r = dpi_init_port(dss, pdev, port, dss->feat->model);
- if (r)
- return r;
+ dpi_uninit_port(port);
break;
-
case OMAP_DISPLAY_TYPE_SDI:
- r = sdi_init_port(dss, pdev, port);
- if (r)
- return r;
+ sdi_uninit_port(port);
break;
-
default:
break;
}
}
-
- return 0;
}
-static void dss_uninit_ports(struct dss_device *dss)
+static int dss_init_ports(struct dss_device *dss)
{
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int i;
+ unsigned int i;
+ int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
@@ -1199,15 +1191,32 @@ static void dss_uninit_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_uninit_port(port);
+ r = dpi_init_port(dss, pdev, port, dss->feat->model);
+ if (r)
+ goto error;
break;
+
case OMAP_DISPLAY_TYPE_SDI:
- sdi_uninit_port(port);
+ r = sdi_init_port(dss, pdev, port);
+ if (r)
+ goto error;
break;
+
default:
break;
}
}
+
+ return 0;
+
+error:
+ __dss_uninit_ports(dss, i);
+ return r;
+}
+
+static void dss_uninit_ports(struct dss_device *dss)
+{
+ __dss_uninit_ports(dss, dss->feat->num_ports);
}
static int dss_video_pll_probe(struct dss_device *dss)
@@ -1543,7 +1552,8 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
for_each_dss_output(dssdev) {
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
+ if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE &&
+ dssdev->ops && dssdev->ops->disable)
dssdev->ops->disable(dssdev);
}
}
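
dss_init_ports() gains proper error unwinding here: on failure the loop index i is handed to __dss_uninit_ports(), which tears down only the ports initialised so far, and dss_uninit_ports() reuses the same helper with the full port count. A standalone sketch of that partial-cleanup idiom (struct unit and the init_one/uninit_one pair are placeholders):

struct unit { int live; };

static int init_one(struct unit *u)	{ u->live = 1; return 0; }
static void uninit_one(struct unit *u)	{ u->live = 0; }

/* Initialise n sub-units; on failure undo exactly the ones that
 * succeeded, mirroring dss_init_ports()/__dss_uninit_ports(). */
static int init_all(struct unit *u, unsigned int n)
{
	unsigned int i;
	int r;

	for (i = 0; i < n; i++) {
		r = init_one(&u[i]);
		if (r)
			goto error;
	}
	return 0;

error:
	while (i--)		/* units [0, i) were initialised */
		uninit_one(&u[i]);
	return r;
}
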
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index c867552c925c..3a40833d3368 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -14,6 +14,7 @@
#include <linux/hdmi.h>
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_bridge.h>
#include "omapdss.h"
#include "dss.h"
@@ -364,6 +365,7 @@ struct omap_hdmi {
bool core_enabled;
struct omap_dss_device output;
+ struct drm_bridge bridge;
struct platform_device *audio_pdev;
void (*audio_abort_cb)(struct device *dev);
@@ -378,6 +380,6 @@ struct omap_hdmi {
bool display_enabled;
};
-#define dssdev_to_hdmi(dssdev) container_of(dssdev, struct omap_hdmi, output)
+#define drm_bridge_to_hdmi(b) container_of(b, struct omap_hdmi, bridge)
#endif
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 0f557fad4513..2578c95570f6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -28,6 +28,9 @@
#include <sound/omap-hdmi-audio.h>
#include <media/cec.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi4_core.h"
#include "hdmi4_cec.h"
@@ -237,20 +240,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -272,57 +261,139 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi4_audio_start(&hd->core, &hd->wp);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi4_audio_stop(&hd->core, &hd->wp);
+ hdmi_wp_audio_enable(&hd->wp, false);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+int hdmi4_core_enable(struct hdmi_core_data *core)
+{
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+ int r = 0;
- r = hdmi4_read_edid(&hdmi->core, buf, len);
+ DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+void hdmi4_core_disable(struct hdmi_core_data *core)
{
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi4_audio_start(&hd->core, &hd->wp);
+ struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
+
+ DSSDBG("Enter omapdss_hdmi4_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi4_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi4_audio_stop(&hd->core, &hd->wp);
- hdmi_wp_audio_enable(&hd->wp, false);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
+
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi4_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
unsigned long flags;
- int r;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
- DSSDBG("ENTER hdmi_display_enable\n");
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi4_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -338,13 +409,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi4_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -357,58 +427,21 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-int hdmi4_core_enable(struct hdmi_core_data *core)
+static void hdmi4_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status)
{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi4_core_enable\n");
-
- mutex_lock(&hdmi->lock);
-
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
- }
-
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-void hdmi4_core_disable(struct hdmi_core_data *core)
-{
- struct omap_hdmi *hdmi = container_of(core, struct omap_hdmi, core);
-
- DSSDBG("Enter omapdss_hdmi4_core_disable\n");
-
- mutex_lock(&hdmi->lock);
-
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
+ if (status == connector_status_disconnected)
+ hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
}
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
+static struct edid *hdmi4_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid = NULL;
+ unsigned int cec_addr;
bool need_enable;
int r;
@@ -417,63 +450,65 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev,
if (need_enable) {
r = hdmi4_core_enable(&hdmi->core);
if (r)
- return r;
+ return NULL;
}
- r = read_edid(hdmi, edid, len);
- if (r >= 256)
- hdmi4_cec_set_phys_addr(&hdmi->core,
- cec_get_edid_phys_addr(edid, r, NULL));
- else
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
- if (need_enable)
- hdmi4_core_disable(&hdmi->core);
+ mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- return r;
-}
+ r = hdmi4_core_ddc_init(&hdmi->core);
+ if (r)
+ goto done;
-static void hdmi_lost_hotplug(struct omap_dss_device *dssdev)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ edid = drm_do_get_edid(connector, hdmi4_core_ddc_read, &hdmi->core);
- hdmi4_cec_set_phys_addr(&hdmi->core, CEC_PHYS_ADDR_INVALID);
-}
+done:
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ if (edid && edid->extensions) {
+ unsigned int len = (edid->extensions + 1) * EDID_LENGTH;
- hdmi->cfg.infoframe = *avi;
- return 0;
-}
+ cec_addr = cec_get_edid_phys_addr((u8 *)edid, len, NULL);
+ } else {
+ cec_addr = CEC_PHYS_ADDR_INVALID;
+ }
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi4_cec_set_phys_addr(&hdmi->core, cec_addr);
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
-}
+ if (need_enable)
+ hdmi4_core_disable(&hdmi->core);
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
+ return edid;
+}
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
+static const struct drm_bridge_funcs hdmi4_bridge_funcs = {
+ .attach = hdmi4_bridge_attach,
+ .mode_set = hdmi4_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi4_bridge_enable,
+ .atomic_disable = hdmi4_bridge_disable,
+ .hpd_notify = hdmi4_bridge_hpd_notify,
+ .get_edid = hdmi4_bridge_get_edid,
+};
- .set_timings = hdmi_display_set_timings,
+static void hdmi4_bridge_init(struct omap_hdmi *hdmi)
+{
+ hdmi->bridge.funcs = &hdmi4_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- .read_edid = hdmi_read_edid,
+ drm_bridge_add(&hdmi->bridge);
+}
- .hdmi = {
- .lost_hotplug = hdmi_lost_hotplug,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
+static void hdmi4_bridge_cleanup(struct omap_hdmi *hdmi)
+{
+ drm_bridge_remove(&hdmi->bridge);
+}
/* -----------------------------------------------------------------------------
* Audio Callbacks
@@ -666,19 +701,21 @@ static int hdmi4_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi4_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi4_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -691,6 +728,8 @@ static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi4_bridge_cleanup(hdmi);
}
static int hdmi4_probe_of(struct omap_hdmi *hdmi)
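
hdmi4.c moves to the atomic bridge hooks: atomic_enable() digs the connector and CRTC state out of drm_atomic_state to pick HDMI versus DVI operation and build the AVI infoframe, hpd_notify() invalidates the CEC physical address on unplug, and get_edid() defers to drm_do_get_edid(). A distilled sketch of the state lookup atomic enable relies on (example_atomic_enable is hypothetical; the calls are the ones used above):

#include <drm/drm_atomic.h>
#include <drm/drm_bridge.h>

static void example_atomic_enable(struct drm_bridge *bridge,
				  struct drm_bridge_state *bridge_state)
{
	struct drm_atomic_state *state = bridge_state->base.state;
	struct drm_connector_state *conn_state;
	struct drm_connector *connector;

	/* A bridge is only enabled on a fully populated CRTC-to-connector
	 * path, so these lookups are expected to succeed. */
	connector = drm_atomic_get_new_connector_for_encoder(state,
							     bridge->encoder);
	if (WARN_ON(!connector))
		return;

	conn_state = drm_atomic_get_new_connector_state(state, connector);
	if (WARN_ON(!conn_state))
		return;

	/* connector->display_info.is_hdmi then selects HDMI vs DVI. */
}
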
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
index ea5d5c228534..751985a2679a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.c
@@ -32,7 +32,7 @@ static inline void __iomem *hdmi_av_base(struct hdmi_core_data *core)
return core->base + HDMI_CORE_AV;
}
-static int hdmi_core_ddc_init(struct hdmi_core_data *core)
+int hdmi4_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -74,13 +74,11 @@ static int hdmi_core_ddc_init(struct hdmi_core_data *core)
return 0;
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
- u8 *pedid, int ext)
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u32 i;
- char checksum;
- u32 offset = 0;
/* HDMI_CORE_DDC_STATUS_IN_PROG */
if (hdmi_wait_for_bit_change(base, HDMI_CORE_DDC_STATUS,
@@ -89,24 +87,21 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -ETIMEDOUT;
}
- if (ext % 2 != 0)
- offset = 0x80;
-
/* Load Segment Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, ext / 2, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_SEGM, block / 2, 7, 0);
/* Load Slave Address Register */
REG_FLD_MOD(base, HDMI_CORE_DDC_ADDR, 0xA0 >> 1, 7, 1);
/* Load Offset Address Register */
- REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, offset, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_OFFSET, block % 2 ? 0x80 : 0, 7, 0);
/* Load Byte Count */
- REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, 0x80, 7, 0);
+ REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT1, len, 7, 0);
REG_FLD_MOD(base, HDMI_CORE_DDC_COUNT2, 0x0, 1, 0);
/* Set DDC_CMD */
- if (ext)
+ if (block)
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x4, 3, 0);
else
REG_FLD_MOD(base, HDMI_CORE_DDC_CMD, 0x2, 3, 0);
@@ -122,7 +117,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
return -EIO;
}
- for (i = 0; i < 0x80; ++i) {
+ for (i = 0; i < len; ++i) {
int t;
/* IN_PROG */
@@ -141,48 +136,12 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core,
udelay(1);
}
- pedid[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
- }
-
- checksum = 0;
- for (i = 0; i < 0x80; ++i)
- checksum += pedid[i];
-
- if (checksum != 0) {
- DSSERR("E-EDID checksum failed!!\n");
- return -EIO;
+ buf[i] = REG_GET(base, HDMI_CORE_DDC_DATA, 7, 0);
}
return 0;
}
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, l;
-
- if (len < 128)
- return -EINVAL;
-
- r = hdmi_core_ddc_init(core);
- if (r)
- return r;
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- return r;
-
- l = 128;
-
- if (len >= 128 * 2 && edid[0x7e] > 0) {
- r = hdmi_core_ddc_edid(core, edid + 0x80, 1);
- if (r)
- return r;
- l += 128;
- }
-
- return l;
-}
-
static void hdmi_core_init(struct hdmi_core_video_config *video_cfg)
{
DSSDBG("Enter hdmi_core_init\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
index 11c4b7ba1eee..dc64ae2aa300 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4_core.h
@@ -249,7 +249,9 @@ struct hdmi_core_packet_enable_repeat {
u32 generic_pkt_repeat;
};
-int hdmi4_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+int hdmi4_core_ddc_init(struct hdmi_core_data *core);
+int hdmi4_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+
void hdmi4_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
struct hdmi_config *cfg);
void hdmi4_core_dump(struct hdmi_core_data *core, struct seq_file *s);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index d9463b332554..4d4c1fabd0a1 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -31,6 +31,9 @@
#include <linux/of_graph.h>
#include <sound/omap-hdmi-audio.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_state_helper.h>
+
#include "omapdss.h"
#include "hdmi5_core.h"
#include "dss.h"
@@ -236,20 +239,6 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- mutex_lock(&hdmi->lock);
-
- drm_display_mode_to_videomode(mode, &hdmi->cfg.vm);
-
- dispc_set_tv_pclk(hdmi->dss->dispc, mode->clock * 1000);
-
- mutex_unlock(&hdmi->lock);
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -271,66 +260,138 @@ static int hdmi_dump_regs(struct seq_file *s, void *p)
return 0;
}
-static int read_edid(struct omap_hdmi *hdmi, u8 *buf, int len)
+static void hdmi_start_audio_stream(struct omap_hdmi *hd)
{
- int r;
- int idlemode;
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ hdmi_wp_audio_enable(&hd->wp, true);
+ hdmi_wp_audio_core_req_enable(&hd->wp, true);
+}
- mutex_lock(&hdmi->lock);
+static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+{
+ hdmi_wp_audio_core_req_enable(&hd->wp, false);
+ hdmi_wp_audio_enable(&hd->wp, false);
+ REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+}
- r = hdmi_runtime_get(hdmi);
- BUG_ON(r);
+static int hdmi_core_enable(struct omap_hdmi *hdmi)
+{
+ int r = 0;
- idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
- /* No-idle mode */
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
+ DSSDBG("ENTER omapdss_hdmi_core_enable\n");
- r = hdmi5_read_edid(&hdmi->core, buf, len);
+ mutex_lock(&hdmi->lock);
- REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
+ r = hdmi_power_on_core(hdmi);
+ if (r) {
+ DSSERR("failed to power on device\n");
+ goto err0;
+ }
- hdmi_runtime_put(hdmi);
mutex_unlock(&hdmi->lock);
+ return 0;
+err0:
+ mutex_unlock(&hdmi->lock);
return r;
}
-static void hdmi_start_audio_stream(struct omap_hdmi *hd)
+static void hdmi_core_disable(struct omap_hdmi *hdmi)
{
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
- hdmi_wp_audio_enable(&hd->wp, true);
- hdmi_wp_audio_core_req_enable(&hd->wp, true);
+ DSSDBG("Enter omapdss_hdmi_core_disable\n");
+
+ mutex_lock(&hdmi->lock);
+
+ hdmi_power_off_core(hdmi);
+
+ mutex_unlock(&hdmi->lock);
}
-static void hdmi_stop_audio_stream(struct omap_hdmi *hd)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int hdmi5_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
- hdmi_wp_audio_core_req_enable(&hd->wp, false);
- hdmi_wp_audio_enable(&hd->wp, false);
- REG_FLD_MOD(hd->wp.base, HDMI_WP_SYSCONFIG, hd->wp_idlemode, 3, 2);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, hdmi->output.next_bridge,
+ bridge, flags);
}
-static void hdmi_display_enable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- unsigned long flags;
- int r;
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+
+ mutex_lock(&hdmi->lock);
- DSSDBG("ENTER hdmi_display_enable\n");
+ drm_display_mode_to_videomode(adjusted_mode, &hdmi->cfg.vm);
+
+ dispc_set_tv_pclk(hdmi->dss->dispc, adjusted_mode->clock * 1000);
+
+ mutex_unlock(&hdmi->lock);
+}
+
+static void hdmi5_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
+{
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct drm_atomic_state *state = bridge_state->base.state;
+ struct drm_connector_state *conn_state;
+ struct drm_connector *connector;
+ struct drm_crtc_state *crtc_state;
+ unsigned long flags;
+ int ret;
+
+ /*
+ * None of these should fail, as the bridge can't be enabled without a
+ * valid CRTC to connector path with fully populated new states.
+ */
+ connector = drm_atomic_get_new_connector_for_encoder(state,
+ bridge->encoder);
+ if (WARN_ON(!connector))
+ return;
+ conn_state = drm_atomic_get_new_connector_state(state, connector);
+ if (WARN_ON(!conn_state))
+ return;
+ crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc);
+ if (WARN_ON(!crtc_state))
+ return;
+
+ hdmi->cfg.hdmi_dvi_mode = connector->display_info.is_hdmi
+ ? HDMI_HDMI : HDMI_DVI;
+
+ if (connector->display_info.is_hdmi) {
+ const struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi;
+
+ mode = &crtc_state->adjusted_mode;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
+ mode);
+ if (ret == 0)
+ hdmi->cfg.infoframe = avi;
+ }
mutex_lock(&hdmi->lock);
- r = hdmi_power_on_full(hdmi);
- if (r) {
+ ret = hdmi_power_on_full(hdmi);
+ if (ret) {
DSSERR("failed to power on device\n");
goto done;
}
if (hdmi->audio_configured) {
- r = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
- &hdmi->audio_config,
- hdmi->cfg.vm.pixelclock);
- if (r) {
- DSSERR("Error restoring audio configuration: %d", r);
+ ret = hdmi5_audio_config(&hdmi->core, &hdmi->wp,
+ &hdmi->audio_config,
+ hdmi->cfg.vm.pixelclock);
+ if (ret) {
+ DSSERR("Error restoring audio configuration: %d", ret);
hdmi->audio_abort_cb(&hdmi->pdev->dev);
hdmi->audio_configured = false;
}
@@ -346,13 +407,12 @@ done:
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_disable(struct omap_dss_device *dssdev)
+static void hdmi5_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
unsigned long flags;
- DSSDBG("Enter hdmi_display_disable\n");
-
mutex_lock(&hdmi->lock);
spin_lock_irqsave(&hdmi->audio_playing_lock, flags);
@@ -365,109 +425,74 @@ static void hdmi_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_core_enable(struct omap_hdmi *hdmi)
+static struct edid *hdmi5_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector)
{
- int r = 0;
-
- DSSDBG("ENTER omapdss_hdmi_core_enable\n");
+ struct omap_hdmi *hdmi = drm_bridge_to_hdmi(bridge);
+ struct edid *edid;
+ bool need_enable;
+ int idlemode;
+ int r;
- mutex_lock(&hdmi->lock);
+ need_enable = hdmi->core_enabled == false;
- r = hdmi_power_on_core(hdmi);
- if (r) {
- DSSERR("failed to power on device\n");
- goto err0;
+ if (need_enable) {
+ r = hdmi_core_enable(hdmi);
+ if (r)
+ return NULL;
}
- mutex_unlock(&hdmi->lock);
- return 0;
-
-err0:
- mutex_unlock(&hdmi->lock);
- return r;
-}
-
-static void hdmi_core_disable(struct omap_hdmi *hdmi)
-{
- DSSDBG("Enter omapdss_hdmi_core_disable\n");
-
mutex_lock(&hdmi->lock);
+ r = hdmi_runtime_get(hdmi);
+ BUG_ON(r);
- hdmi_power_off_core(hdmi);
-
- mutex_unlock(&hdmi->lock);
-}
-
-static int hdmi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- return omapdss_device_connect(dst->dss, dst, dst->next);
-}
+ idlemode = REG_GET(hdmi->wp.base, HDMI_WP_SYSCONFIG, 3, 2);
+ /* No-idle mode */
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, 1, 3, 2);
-static void hdmi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
+ hdmi5_core_ddc_init(&hdmi->core);
-static int hdmi_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
- bool need_enable;
- int r;
+ edid = drm_do_get_edid(connector, hdmi5_core_ddc_read, &hdmi->core);
- need_enable = hdmi->core_enabled == false;
+ hdmi5_core_ddc_uninit(&hdmi->core);
- if (need_enable) {
- r = hdmi_core_enable(hdmi);
- if (r)
- return r;
- }
+ REG_FLD_MOD(hdmi->wp.base, HDMI_WP_SYSCONFIG, idlemode, 3, 2);
- r = read_edid(hdmi, edid, len);
+ hdmi_runtime_put(hdmi);
+ mutex_unlock(&hdmi->lock);
if (need_enable)
hdmi_core_disable(hdmi);
- return r;
+ return (struct edid *)edid;
}
-static int hdmi_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
+static const struct drm_bridge_funcs hdmi5_bridge_funcs = {
+ .attach = hdmi5_bridge_attach,
+ .mode_set = hdmi5_bridge_mode_set,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
+ .atomic_enable = hdmi5_bridge_enable,
+ .atomic_disable = hdmi5_bridge_disable,
+ .get_edid = hdmi5_bridge_get_edid,
+};
+
+static void hdmi5_bridge_init(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
+ hdmi->bridge.funcs = &hdmi5_bridge_funcs;
+ hdmi->bridge.of_node = hdmi->pdev->dev.of_node;
+ hdmi->bridge.ops = DRM_BRIDGE_OP_EDID;
+ hdmi->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
- hdmi->cfg.infoframe = *avi;
- return 0;
+ drm_bridge_add(&hdmi->bridge);
}
-static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
+static void hdmi5_bridge_cleanup(struct omap_hdmi *hdmi)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- hdmi->cfg.hdmi_dvi_mode = hdmi_mode ? HDMI_HDMI : HDMI_DVI;
- return 0;
+ drm_bridge_remove(&hdmi->bridge);
}
-static const struct omap_dss_device_ops hdmi_ops = {
- .connect = hdmi_connect,
- .disconnect = hdmi_disconnect,
-
- .enable = hdmi_display_enable,
- .disable = hdmi_display_disable,
-
- .set_timings = hdmi_display_set_timings,
-
- .read_edid = hdmi_read_edid,
-
- .hdmi = {
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
- },
-};
-
/* -----------------------------------------------------------------------------
* Audio Callbacks
*/
@@ -650,19 +675,21 @@ static int hdmi5_init_output(struct omap_hdmi *hdmi)
struct omap_dss_device *out = &hdmi->output;
int r;
+ hdmi5_bridge_init(hdmi);
+
out->dev = &hdmi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_HDMI;
out->type = OMAP_DISPLAY_TYPE_HDMI;
out->name = "hdmi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &hdmi_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
- out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+ out->of_port = 0;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &hdmi->bridge);
+ if (r < 0) {
+ hdmi5_bridge_cleanup(hdmi);
return r;
+ }
omapdss_device_register(out);
@@ -675,6 +702,8 @@ static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
omapdss_device_unregister(out);
omapdss_device_cleanup_output(out);
+
+ hdmi5_bridge_cleanup(hdmi);
}
static int hdmi5_probe_of(struct omap_hdmi *hdmi)
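The hdmi5 output now registers itself as a drm_bridge and advertises EDID support through bridge->ops, so the bridge-based connector (see the omap_drv.c change below) knows to call .get_edid. The registration pattern, reduced to its essentials (my_funcs, my_attach and my_get_edid are placeholders):

  static const struct drm_bridge_funcs my_funcs = {
          .attach = my_attach,            /* must chain to output.next_bridge */
          .get_edid = my_get_edid,
          /* atomic_* state helpers and enable/disable as in the patch */
  };

  bridge->funcs = &my_funcs;
  bridge->of_node = pdev->dev.of_node;    /* lets consumers find it in DT */
  bridge->ops = DRM_BRIDGE_OP_EDID;       /* .get_edid is implemented */
  bridge->type = DRM_MODE_CONNECTOR_HDMIA;
  drm_bridge_add(bridge);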
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index ff4d35c8771f..7dd587035160 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -23,7 +23,7 @@
#include "hdmi5_core.h"
-static void hdmi_core_ddc_init(struct hdmi_core_data *core)
+void hdmi5_core_ddc_init(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
const unsigned long long iclk = 266000000; /* DSS L3 ICLK */
@@ -102,7 +102,7 @@ static void hdmi_core_ddc_init(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x0, 2, 2);
}
-static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core)
{
void __iomem *base = core->base;
@@ -112,14 +112,14 @@ static void hdmi_core_ddc_uninit(struct hdmi_core_data *core)
REG_FLD_MOD(base, HDMI_CORE_I2CM_INT, 0x1, 2, 2);
}
-static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len)
{
+ struct hdmi_core_data *core = data;
void __iomem *base = core->base;
u8 cur_addr;
- char checksum = 0;
const int retries = 1000;
- u8 seg_ptr = ext / 2;
- u8 edidbase = ((ext % 2) * 0x80);
+ u8 seg_ptr = block / 2;
+ u8 edidbase = ((block % 2) * EDID_LENGTH);
REG_FLD_MOD(base, HDMI_CORE_I2CM_SEGPTR, seg_ptr, 7, 0);
@@ -127,7 +127,7 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
* TODO: We use polling here, although we probably should use proper
* interrupts.
*/
- for (cur_addr = 0; cur_addr < 128; ++cur_addr) {
+ for (cur_addr = 0; cur_addr < len; ++cur_addr) {
int i;
/* clear ERROR and DONE */
@@ -164,45 +164,13 @@ static int hdmi_core_ddc_edid(struct hdmi_core_data *core, u8 *pedid, u8 ext)
return -EIO;
}
- pedid[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
- checksum += pedid[cur_addr];
+ buf[cur_addr] = REG_GET(base, HDMI_CORE_I2CM_DATAI, 7, 0);
}
return 0;
}
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len)
-{
- int r, n, i;
- int max_ext_blocks = (len / 128) - 1;
-
- if (len < 128)
- return -EINVAL;
-
- hdmi_core_ddc_init(core);
-
- r = hdmi_core_ddc_edid(core, edid, 0);
- if (r)
- goto out;
-
- n = edid[0x7e];
-
- if (n > max_ext_blocks)
- n = max_ext_blocks;
-
- for (i = 1; i <= n; i++) {
- r = hdmi_core_ddc_edid(core, edid + i * EDID_LENGTH, i);
- if (r)
- goto out;
- }
-
-out:
- hdmi_core_ddc_uninit(core);
-
- return r ? r : len;
-}
-
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
{
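The block/segment arithmetic introduced in hdmi5_core_ddc_read() follows the E-DDC addressing scheme: each 256-byte segment selected through the segment pointer register holds two 128-byte EDID blocks. A standalone check of the mapping (plain userspace C, for illustration only):

  #include <stdio.h>

  #define EDID_LENGTH 128

  int main(void)
  {
          for (unsigned int block = 0; block < 4; block++)
                  printf("block %u -> segment %u, offset 0x%02x\n",
                         block, block / 2, (block % 2) * EDID_LENGTH);
          return 0;
          /* block 0 -> segment 0, offset 0x00
           * block 1 -> segment 0, offset 0x80
           * block 2 -> segment 1, offset 0x00
           * block 3 -> segment 1, offset 0x80 */
  }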
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
index f10b8a283011..65eadefdb3f9 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.h
@@ -281,7 +281,10 @@ struct csc_table {
u16 c1, c2, c3, c4;
};
-int hdmi5_read_edid(struct hdmi_core_data *core, u8 *edid, int len);
+void hdmi5_core_ddc_init(struct hdmi_core_data *core);
+int hdmi5_core_ddc_read(void *data, u8 *buf, unsigned int block, size_t len);
+void hdmi5_core_ddc_uninit(struct hdmi_core_data *core);
+
void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s);
int hdmi5_core_handle_irqs(struct hdmi_core_data *core);
void hdmi5_configure(struct hdmi_core_data *core, struct hdmi_wp_data *wp,
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
index 31502857f013..00372f4ce711 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
@@ -174,12 +174,7 @@ static const struct of_device_id omapdss_of_match[] __initconst = {
};
static const struct of_device_id omapdss_of_fixups_whitelist[] __initconst = {
- { .compatible = "composite-video-connector" },
- { .compatible = "hdmi-connector" },
{ .compatible = "panel-dsi-cm" },
- { .compatible = "svideo-connector" },
- { .compatible = "ti,opa362" },
- { .compatible = "ti,tpd12s015" },
{},
};
@@ -192,7 +187,7 @@ static int __init omapdss_boot_init(void)
dss = of_find_matching_node(NULL, omapdss_of_match);
if (dss == NULL || !of_device_is_available(dss))
- return 0;
+ goto put_node;
omapdss_walk_device(dss, true);
@@ -217,6 +212,8 @@ static int __init omapdss_boot_init(void)
kfree(n);
}
+put_node:
+ of_node_put(dss);
return 0;
}
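The omapdss-boot-init.c fix above plugs a device_node leak: of_find_matching_node() returns the node with an elevated refcount, and of_node_put() accepts NULL, so routing every early return through a single put label is safe. The general pattern:

  struct device_node *node;

  node = of_find_matching_node(NULL, match_table);
  if (!node || !of_device_is_available(node))
          goto put_node;

  /* ... use node ... */

  put_node:
          of_node_put(node);      /* NULL-safe, drops the reference */
          return 0;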
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 79f6b195c7cf..ab19d4af8de7 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -285,13 +285,6 @@ struct omap_dss_writeback_info {
u8 pre_mult_alpha;
};
-struct omapdss_hdmi_ops {
- void (*lost_hotplug)(struct omap_dss_device *dssdev);
- int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
- int (*set_infoframe)(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi);
-};
-
struct omapdss_dsi_ops {
void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
bool enter_ulps);
@@ -349,46 +342,23 @@ struct omap_dss_device_ops {
void (*disconnect)(struct omap_dss_device *dssdev,
struct omap_dss_device *dst);
- void (*pre_enable)(struct omap_dss_device *dssdev);
void (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev);
- void (*post_disable)(struct omap_dss_device *dssdev);
int (*check_timings)(struct omap_dss_device *dssdev,
struct drm_display_mode *mode);
- void (*set_timings)(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode);
-
- bool (*detect)(struct omap_dss_device *dssdev);
-
- void (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
int (*get_modes)(struct omap_dss_device *dssdev,
struct drm_connector *connector);
- union {
- const struct omapdss_hdmi_ops hdmi;
- const struct omapdss_dsi_ops dsi;
- };
+ const struct omapdss_dsi_ops dsi;
};
/**
* enum omap_dss_device_ops_flag - Indicates which device ops are supported
- * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
- * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
- * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
* @OMAP_DSS_DEVICE_OP_MODES: The device supports reading modes
*/
enum omap_dss_device_ops_flag {
- OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
- OMAP_DSS_DEVICE_OP_HPD = BIT(1),
- OMAP_DSS_DEVICE_OP_EDID = BIT(2),
OMAP_DSS_DEVICE_OP_MODES = BIT(3),
};
@@ -400,6 +370,7 @@ struct omap_dss_device {
struct dss_device *dss;
struct omap_dss_device *next;
struct drm_bridge *bridge;
+ struct drm_bridge *next_bridge;
struct drm_panel *panel;
struct list_head list;
@@ -436,8 +407,8 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
- /* bitmask of port numbers in DT */
- unsigned int of_ports;
+ /* port number in DT */
+ unsigned int of_port;
};
struct omap_dss_driver {
@@ -461,7 +432,6 @@ static inline bool omapdss_is_initialized(void)
}
void omapdss_display_init(struct omap_dss_device *dssdev);
-struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
int omapdss_display_get_modes(struct drm_connector *connector,
const struct videomode *vm);
@@ -475,10 +445,8 @@ int omapdss_device_connect(struct dss_device *dss,
struct omap_dss_device *dst);
void omapdss_device_disconnect(struct omap_dss_device *src,
struct omap_dss_device *dst);
-void omapdss_device_pre_enable(struct omap_dss_device *dssdev);
void omapdss_device_enable(struct omap_dss_device *dssdev);
void omapdss_device_disable(struct omap_dss_device *dssdev);
-void omapdss_device_post_disable(struct omap_dss_device *dssdev);
int omap_dss_get_num_overlay_managers(void);
@@ -487,7 +455,8 @@ int omap_dss_get_num_overlays(void);
#define for_each_dss_output(d) \
while ((d = omapdss_device_next_output(d)) != NULL)
struct omap_dss_device *omapdss_device_next_output(struct omap_dss_device *from);
-int omapdss_device_init_output(struct omap_dss_device *out);
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge);
void omapdss_device_cleanup_output(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
@@ -502,9 +471,6 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
return dssdev->state == OMAP_DSS_DISPLAY_ACTIVE;
}
-struct omap_dss_device *
-omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
-
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
DSS_WB_LCD2_MGR = 1,
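On the of_ports to of_port rename running through this series: the old field was a bitmask that in practice always had exactly one bit set, which is why output.c below can replace ffs(out->of_ports) - 1 with out->of_port directly. The two encodings relate as follows (illustrative userspace C):

  #include <stdio.h>
  #include <strings.h>    /* ffs() */

  int main(void)
  {
          unsigned int of_ports = 1u << 1;        /* old style: BIT(1), e.g. SDI */
          unsigned int of_port = ffs(of_ports) - 1;

          printf("of_port = %u\n", of_port);      /* prints 1 */
          return 0;
  }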
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 0693d34fca1b..ce21c798cca6 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -4,7 +4,6 @@
* Author: Archit Taneja <[email protected]>
*/
-#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
@@ -18,12 +17,14 @@
#include "dss.h"
#include "omapdss.h"
-int omapdss_device_init_output(struct omap_dss_device *out)
+int omapdss_device_init_output(struct omap_dss_device *out,
+ struct drm_bridge *local_bridge)
{
struct device_node *remote_node;
+ int ret;
remote_node = of_graph_get_remote_node(out->dev->of_node,
- ffs(out->of_ports) - 1, 0);
+ out->of_port, 0);
if (!remote_node) {
dev_dbg(out->dev, "failed to find video sink\n");
return 0;
@@ -39,17 +40,55 @@ int omapdss_device_init_output(struct omap_dss_device *out)
if (out->next && out->type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
- omapdss_device_put(out->next);
- out->next = NULL;
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
- return out->next || out->bridge || out->panel ? 0 : -EPROBE_DEFER;
+ if (out->panel) {
+ struct drm_bridge *bridge;
+
+ bridge = drm_panel_bridge_add(out->panel);
+ if (IS_ERR(bridge)) {
+ dev_err(out->dev,
+ "unable to create panel bridge (%ld)\n",
+ PTR_ERR(bridge));
+ ret = PTR_ERR(bridge);
+ goto error;
+ }
+
+ out->bridge = bridge;
+ }
+
+ if (local_bridge) {
+ if (!out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ out->next_bridge = out->bridge;
+ out->bridge = local_bridge;
+ }
+
+ if (!out->next && !out->bridge) {
+ ret = -EPROBE_DEFER;
+ goto error;
+ }
+
+ return 0;
+
+error:
+ omapdss_device_cleanup_output(out);
+ out->next = NULL;
+ return ret;
}
EXPORT_SYMBOL(omapdss_device_init_output);
void omapdss_device_cleanup_output(struct omap_dss_device *out)
{
+ if (out->bridge && out->panel)
+ drm_panel_bridge_remove(out->next_bridge ?
+ out->next_bridge : out->bridge);
+
if (out->next)
omapdss_device_put(out->next);
}
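The reworked omapdss_device_init_output() above sets up the bridge chain the rest of this series relies on: a downstream drm_panel is wrapped in a panel bridge, and when the internal encoder passes in its own local_bridge, the downstream bridge is demoted to next_bridge. A sketch of the resulting layout and of how each encoder's .attach handler consumes it (output stands in for the encoder's struct omap_dss_device):

  /*
   * out->bridge      -- the internal encoder's own bridge (local_bridge)
   * out->next_bridge -- the bridge found through the OF graph, or the
   *                     drm_panel_bridge wrapped around out->panel
   *
   * This is also why cleanup_output() removes the panel bridge from
   * next_bridge when a local bridge was installed, and from bridge
   * otherwise.
   */
  static int sketch_attach(struct drm_bridge *bridge,
                           enum drm_bridge_attach_flags flags)
  {
          if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
                  return -EINVAL;

          /* chain the downstream bridge behind this one */
          return drm_bridge_attach(bridge->encoder, output->next_bridge,
                                   bridge, flags);
  }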
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 3b447c01fa2a..417a8740ad0a 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -6,17 +6,19 @@
#define DSS_SUBSYS_NAME "SDI"
-#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/err.h>
-#include <linux/regulator/consumer.h>
#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
#include <linux/string.h>
-#include <linux/of.h>
-#include "omapdss.h"
+#include <drm/drm_bridge.h>
+
#include "dss.h"
+#include "omapdss.h"
struct sdi_device {
struct platform_device *pdev;
@@ -30,9 +32,11 @@ struct sdi_device {
int datapairs;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_sdi(dssdev) container_of(dssdev, struct sdi_device, output)
+#define drm_bridge_to_sdi(bridge) \
+ container_of(bridge, struct sdi_device, bridge)
struct sdi_clk_calc_ctx {
struct sdi_device *sdi;
@@ -118,9 +122,82 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
dss_mgr_set_lcd_config(&sdi->output, &sdi->mgr_config);
}
-static void sdi_display_enable(struct omap_dss_device *dssdev)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int sdi_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, sdi->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+sdi_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ int ret;
+
+ if (pixelclock == 0)
+ return MODE_NOCLOCK;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+static bool sdi_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+ unsigned long pixelclock = mode->clock * 1000;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ unsigned long pck;
+ int ret;
+
+ ret = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
+ if (ret < 0)
+ return false;
+
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
+
+ if (pck != pixelclock)
+ dev_dbg(&sdi->pdev->dev,
+ "pixel clock adjusted from %lu Hz to %lu Hz\n",
+ pixelclock, pck);
+
+ adjusted_mode->clock = pck / 1000;
+
+ return true;
+}
+
+static void sdi_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
+
+ sdi->pixelclock = adjusted_mode->clock * 1000;
+}
+
+static void sdi_bridge_enable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
struct dispc_clock_info dispc_cinfo;
unsigned long fck;
int r;
@@ -181,9 +258,10 @@ err_get_dispc:
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_display_disable(struct omap_dss_device *dssdev)
+static void sdi_bridge_disable(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ struct sdi_device *sdi = drm_bridge_to_sdi(bridge);
dss_mgr_disable(&sdi->output);
@@ -194,86 +272,56 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
regulator_disable(sdi->vdds_sdi_reg);
}
-static void sdi_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
- sdi->pixelclock = mode->clock * 1000;
-}
+static const struct drm_bridge_funcs sdi_bridge_funcs = {
+ .attach = sdi_bridge_attach,
+ .mode_valid = sdi_bridge_mode_valid,
+ .mode_fixup = sdi_bridge_mode_fixup,
+ .mode_set = sdi_bridge_mode_set,
+ .atomic_enable = sdi_bridge_enable,
+ .atomic_disable = sdi_bridge_disable,
+};
-static int sdi_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
+static void sdi_bridge_init(struct sdi_device *sdi)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- struct dispc_clock_info dispc_cinfo;
- unsigned long pixelclock = mode->clock * 1000;
- unsigned long fck;
- unsigned long pck;
- int r;
+ sdi->bridge.funcs = &sdi_bridge_funcs;
+ sdi->bridge.of_node = sdi->pdev->dev.of_node;
+ sdi->bridge.type = DRM_MODE_CONNECTOR_LVDS;
- if (pixelclock == 0)
- return -EINVAL;
-
- r = sdi_calc_clock_div(sdi, pixelclock, &fck, &dispc_cinfo);
- if (r)
- return r;
-
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
- if (pck != pixelclock) {
- DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
- pixelclock, pck);
-
- mode->clock = pck / 1000;
- }
-
- return 0;
+ drm_bridge_add(&sdi->bridge);
}
-static int sdi_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void sdi_bridge_cleanup(struct sdi_device *sdi)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ drm_bridge_remove(&sdi->bridge);
}
-static void sdi_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
-{
- omapdss_device_disconnect(dst, dst->next);
-}
-
-static const struct omap_dss_device_ops sdi_ops = {
- .connect = sdi_connect,
- .disconnect = sdi_disconnect,
-
- .enable = sdi_display_enable,
- .disable = sdi_display_disable,
-
- .check_timings = sdi_check_timings,
- .set_timings = sdi_set_timings,
-};
+/* -----------------------------------------------------------------------------
+ * Initialisation and Cleanup
+ */
static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
int r;
+ sdi_bridge_init(sdi);
+
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
out->type = OMAP_DISPLAY_TYPE_SDI;
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
- out->of_ports = BIT(1);
- out->ops = &sdi_ops;
+ out->of_port = 1;
out->owner = THIS_MODULE;
out->bus_flags = DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE /* 15.5.9.1.2 */
| DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &sdi->bridge);
+ if (r < 0) {
+ sdi_bridge_cleanup(sdi);
return r;
+ }
omapdss_device_register(out);
@@ -284,6 +332,8 @@ static void sdi_uninit_output(struct sdi_device *sdi)
{
omapdss_device_unregister(&sdi->output);
omapdss_device_cleanup_output(&sdi->output);
+
+ sdi_bridge_cleanup(sdi);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
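The SDI .mode_fixup above replaces the old check_timings path: instead of rejecting a mode whose pixel clock the dividers cannot produce exactly, the bridge snaps adjusted_mode->clock to the nearest achievable rate. A standalone illustration of the snapping arithmetic (the clock numbers are made up, not taken from OMAP3 documentation):

  #include <stdio.h>

  int main(void)
  {
          unsigned long fck = 154000000;          /* hypothetical DSS fclk */
          unsigned long lck_div = 1, pck_div = 2; /* dividers from clock calc */
          unsigned long requested = 70000000;     /* mode->clock * 1000 */
          unsigned long pck = fck / lck_div / pck_div;

          if (pck != requested)
                  printf("pixel clock adjusted from %lu Hz to %lu Hz\n",
                         requested, pck);
          printf("adjusted_mode->clock = %lu kHz\n", pck / 1000);
          return 0;
  }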
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index 596a297d5813..766553bb2f87 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -13,7 +13,6 @@
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
-#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/string.h>
@@ -26,6 +25,8 @@
#include <linux/component.h>
#include <linux/sys_soc.h>
+#include <drm/drm_bridge.h>
+
#include "omapdss.h"
#include "dss.h"
@@ -289,7 +290,6 @@ static const struct drm_display_mode omap_dss_ntsc_mode = {
struct venc_device {
struct platform_device *pdev;
void __iomem *base;
- struct mutex venc_lock;
struct regulator *vdda_dac_reg;
struct dss_device *dss;
@@ -303,9 +303,10 @@ struct venc_device {
bool requires_tv_dac_clk;
struct omap_dss_device output;
+ struct drm_bridge bridge;
};
-#define dssdev_to_venc(dssdev) container_of(dssdev, struct venc_device, output)
+#define drm_bridge_to_venc(b) container_of(b, struct venc_device, bridge)
static inline void venc_write_reg(struct venc_device *venc, int idx, u32 val)
{
@@ -477,56 +478,6 @@ static void venc_power_off(struct venc_device *venc)
venc_runtime_put(venc);
}
-static void venc_display_enable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_enable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_on(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static void venc_display_disable(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- DSSDBG("venc_display_disable\n");
-
- mutex_lock(&venc->venc_lock);
-
- venc_power_off(venc);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_get_modes(struct omap_dss_device *dssdev,
- struct drm_connector *connector)
-{
- static const struct drm_display_mode *modes[] = {
- &omap_dss_pal_mode,
- &omap_dss_ntsc_mode,
- };
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(modes); ++i) {
- struct drm_display_mode *mode;
-
- mode = drm_mode_duplicate(connector->dev, modes[i]);
- if (!mode)
- return i;
-
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
- }
-
- return ARRAY_SIZE(modes);
-}
-
static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mode)
{
if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
@@ -545,57 +496,6 @@ static enum venc_videomode venc_get_videomode(const struct drm_display_mode *mod
return VENC_MODE_UNKNOWN;
}
-static void venc_set_timings(struct omap_dss_device *dssdev,
- const struct drm_display_mode *mode)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
- enum venc_videomode venc_mode = venc_get_videomode(mode);
-
- DSSDBG("venc_set_timings\n");
-
- mutex_lock(&venc->venc_lock);
-
- switch (venc_mode) {
- default:
- WARN_ON_ONCE(1);
- /* Fall-through */
- case VENC_MODE_PAL:
- venc->config = &venc_config_pal_trm;
- break;
-
- case VENC_MODE_NTSC:
- venc->config = &venc_config_ntsc_trm;
- break;
- }
-
- dispc_set_tv_pclk(venc->dss->dispc, 13500000);
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static int venc_check_timings(struct omap_dss_device *dssdev,
- struct drm_display_mode *mode)
-{
- DSSDBG("venc_check_timings\n");
-
- switch (venc_get_videomode(mode)) {
- case VENC_MODE_PAL:
- drm_mode_copy(mode, &omap_dss_pal_mode);
- break;
-
- case VENC_MODE_NTSC:
- drm_mode_copy(mode, &omap_dss_ntsc_mode);
- break;
-
- default:
- return -EINVAL;
- }
-
- drm_mode_set_crtcinfo(mode, CRTC_INTERLACE_HALVE_V);
- drm_mode_set_name(mode);
- return 0;
-}
-
static int venc_dump_regs(struct seq_file *s, void *p)
{
struct venc_device *venc = s->private;
@@ -673,31 +573,149 @@ static int venc_get_clocks(struct venc_device *venc)
return 0;
}
-static int venc_connect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+/* -----------------------------------------------------------------------------
+ * DRM Bridge Operations
+ */
+
+static int venc_bridge_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR))
+ return -EINVAL;
+
+ return drm_bridge_attach(bridge->encoder, venc->output.next_bridge,
+ bridge, flags);
+}
+
+static enum drm_mode_status
+venc_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ switch (venc_get_videomode(mode)) {
+ case VENC_MODE_PAL:
+ case VENC_MODE_NTSC:
+ return MODE_OK;
+
+ default:
+ return MODE_BAD;
+ }
+}
+
+static bool venc_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ const struct drm_display_mode *venc_mode;
+
+ switch (venc_get_videomode(adjusted_mode)) {
+ case VENC_MODE_PAL:
+ venc_mode = &omap_dss_pal_mode;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc_mode = &omap_dss_ntsc_mode;
+ break;
+
+ default:
+ return false;
+ }
+
+ drm_mode_copy(adjusted_mode, venc_mode);
+ drm_mode_set_crtcinfo(adjusted_mode, CRTC_INTERLACE_HALVE_V);
+ drm_mode_set_name(adjusted_mode);
+
+ return true;
+}
+
+static void venc_bridge_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode)
+{
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+ enum venc_videomode venc_mode = venc_get_videomode(adjusted_mode);
+
+ switch (venc_mode) {
+ default:
+ WARN_ON_ONCE(1);
+ /* Fall-through */
+ case VENC_MODE_PAL:
+ venc->config = &venc_config_pal_trm;
+ break;
+
+ case VENC_MODE_NTSC:
+ venc->config = &venc_config_ntsc_trm;
+ break;
+ }
+
+ dispc_set_tv_pclk(venc->dss->dispc, 13500000);
+}
+
+static void venc_bridge_enable(struct drm_bridge *bridge)
{
- return omapdss_device_connect(dst->dss, dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_on(venc);
}
-static void venc_disconnect(struct omap_dss_device *src,
- struct omap_dss_device *dst)
+static void venc_bridge_disable(struct drm_bridge *bridge)
{
- omapdss_device_disconnect(dst, dst->next);
+ struct venc_device *venc = drm_bridge_to_venc(bridge);
+
+ venc_power_off(venc);
}
-static const struct omap_dss_device_ops venc_ops = {
- .connect = venc_connect,
- .disconnect = venc_disconnect,
+static int venc_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector)
+{
+ static const struct drm_display_mode *modes[] = {
+ &omap_dss_pal_mode,
+ &omap_dss_ntsc_mode,
+ };
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(modes); ++i) {
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, modes[i]);
+ if (!mode)
+ return i;
- .enable = venc_display_enable,
- .disable = venc_display_disable,
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+ }
- .check_timings = venc_check_timings,
- .set_timings = venc_set_timings,
+ return ARRAY_SIZE(modes);
+}
- .get_modes = venc_get_modes,
+static const struct drm_bridge_funcs venc_bridge_funcs = {
+ .attach = venc_bridge_attach,
+ .mode_valid = venc_bridge_mode_valid,
+ .mode_fixup = venc_bridge_mode_fixup,
+ .mode_set = venc_bridge_mode_set,
+ .enable = venc_bridge_enable,
+ .disable = venc_bridge_disable,
+ .get_modes = venc_bridge_get_modes,
};
+static void venc_bridge_init(struct venc_device *venc)
+{
+ venc->bridge.funcs = &venc_bridge_funcs;
+ venc->bridge.of_node = venc->pdev->dev.of_node;
+ venc->bridge.ops = DRM_BRIDGE_OP_MODES;
+ venc->bridge.type = DRM_MODE_CONNECTOR_SVIDEO;
+ venc->bridge.interlace_allowed = true;
+
+ drm_bridge_add(&venc->bridge);
+}
+
+static void venc_bridge_cleanup(struct venc_device *venc)
+{
+ drm_bridge_remove(&venc->bridge);
+}
+
/* -----------------------------------------------------------------------------
* Component Bind & Unbind
*/
@@ -747,19 +765,22 @@ static int venc_init_output(struct venc_device *venc)
struct omap_dss_device *out = &venc->output;
int r;
+ venc_bridge_init(venc);
+
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops = &venc_ops;
out->owner = THIS_MODULE;
- out->of_ports = BIT(0);
+ out->of_port = 0;
out->ops_flags = OMAP_DSS_DEVICE_OP_MODES;
- r = omapdss_device_init_output(out);
- if (r < 0)
+ r = omapdss_device_init_output(out, &venc->bridge);
+ if (r < 0) {
+ venc_bridge_cleanup(venc);
return r;
+ }
omapdss_device_register(out);
@@ -770,6 +791,8 @@ static void venc_uninit_output(struct venc_device *venc)
{
omapdss_device_unregister(&venc->output);
omapdss_device_cleanup_output(&venc->output);
+
+ venc_bridge_cleanup(venc);
}
static int venc_probe_of(struct venc_device *venc)
@@ -839,8 +862,6 @@ static int venc_probe(struct platform_device *pdev)
if (soc_device_match(venc_soc_devices))
venc->requires_tv_dac_clk = true;
- mutex_init(&venc->venc_lock);
-
venc->config = &venc_config_pal_trm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 94cded387174..528764566b17 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -6,7 +6,6 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
-#include <drm/drm_panel.h>
#include <drm/drm_probe_helper.h>
#include "omap_drv.h"
@@ -20,124 +19,12 @@
struct omap_connector {
struct drm_connector base;
struct omap_dss_device *output;
- struct omap_dss_device *hpd;
- bool hdmi_mode;
};
-static void omap_connector_hpd_notify(struct drm_connector *connector,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
-
- if (status != connector_status_disconnected)
- return;
-
- /*
- * Notify all devics in the pipeline of disconnection. This is required
- * to let the HDMI encoders reset their internal state related to
- * connection status, such as the CEC address.
- */
- for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops && dssdev->ops->hdmi.lost_hotplug)
- dssdev->ops->hdmi.lost_hotplug(dssdev);
- }
-}
-
-static void omap_connector_hpd_cb(void *cb_data,
- enum drm_connector_status status)
-{
- struct omap_connector *omap_connector = cb_data;
- struct drm_connector *connector = &omap_connector->base;
- struct drm_device *dev = connector->dev;
- enum drm_connector_status old_status;
-
- mutex_lock(&dev->mode_config.mutex);
- old_status = connector->status;
- connector->status = status;
- mutex_unlock(&dev->mode_config.mutex);
-
- if (old_status == status)
- return;
-
- omap_connector_hpd_notify(connector, status);
-
- drm_kms_helper_hotplug_event(dev);
-}
-
-void omap_connector_enable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
- omap_connector);
-}
-
-void omap_connector_disable_hpd(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- if (hpd)
- hpd->ops->unregister_hpd_cb(hpd);
-}
-
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
-
- return omap_connector->hdmi_mode;
-}
-
-static struct omap_dss_device *
-omap_connector_find_device(struct drm_connector *connector,
- enum omap_dss_device_ops_flag op)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = NULL;
- struct omap_dss_device *d;
-
- for (d = omap_connector->output; d; d = d->next) {
- if (d->ops_flags & op)
- dssdev = d;
- }
-
- return dssdev;
-}
-
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
- struct omap_dss_device *dssdev;
- enum drm_connector_status status;
-
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
-
- if (dssdev) {
- status = dssdev->ops->detect(dssdev)
- ? connector_status_connected
- : connector_status_disconnected;
-
- omap_connector_hpd_notify(connector, status);
- } else {
- switch (connector->connector_type) {
- case DRM_MODE_CONNECTOR_DPI:
- case DRM_MODE_CONNECTOR_LVDS:
- case DRM_MODE_CONNECTOR_DSI:
- status = connector_status_connected;
- break;
- default:
- status = connector_status_unknown;
- break;
- }
- }
-
- VERB("%s: %d (force=%d)", connector->name, status, force);
-
- return status;
+ return connector_status_connected;
}
static void omap_connector_destroy(struct drm_connector *connector)
@@ -146,14 +33,6 @@ static void omap_connector_destroy(struct drm_connector *connector)
DBG("%s", connector->name);
- if (omap_connector->hpd) {
- struct omap_dss_device *hpd = omap_connector->hpd;
-
- hpd->ops->unregister_hpd_cb(hpd);
- omapdss_device_put(hpd);
- omap_connector->hpd = NULL;
- }
-
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
@@ -162,81 +41,27 @@ static void omap_connector_destroy(struct drm_connector *connector)
kfree(omap_connector);
}
-#define MAX_EDID 512
-
-static int omap_connector_get_modes_edid(struct drm_connector *connector,
- struct omap_dss_device *dssdev)
-{
- struct omap_connector *omap_connector = to_omap_connector(connector);
- enum drm_connector_status status;
- void *edid;
- int n;
-
- status = omap_connector_detect(connector, false);
- if (status != connector_status_connected)
- goto no_edid;
-
- edid = kzalloc(MAX_EDID, GFP_KERNEL);
- if (!edid)
- goto no_edid;
-
- if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
- !drm_edid_is_valid(edid)) {
- kfree(edid);
- goto no_edid;
- }
-
- drm_connector_update_edid_property(connector, edid);
- n = drm_add_edid_modes(connector, edid);
-
- omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
-
- kfree(edid);
- return n;
-
-no_edid:
- drm_connector_update_edid_property(connector, NULL);
- return 0;
-}
-
static int omap_connector_get_modes(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *dssdev = NULL;
+ struct omap_dss_device *d;
DBG("%s", connector->name);
/*
- * If display exposes EDID, then we parse that in the normal way to
- * build table of supported modes.
+ * If the display pipeline reports modes (e.g. with a fixed resolution
+ * panel or an analog TV output), query it.
*/
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_EDID);
- if (dssdev)
- return omap_connector_get_modes_edid(connector, dssdev);
+ for (d = omap_connector->output; d; d = d->next) {
+ if (d->ops_flags & OMAP_DSS_DEVICE_OP_MODES)
+ dssdev = d;
+ }
- /*
- * Otherwise if the display pipeline reports modes (e.g. with a fixed
- * resolution panel or an analog TV output), query it.
- */
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_MODES);
if (dssdev)
return dssdev->ops->get_modes(dssdev, connector);
- /*
- * Otherwise if the display pipeline uses a drm_panel, we delegate the
- * operation to the panel API.
- */
- if (omap_connector->output->panel)
- return drm_panel_get_modes(omap_connector->output->panel,
- connector);
-
- /*
- * We can't retrieve modes, which can happen for instance for a DVI or
- * VGA output with the DDC bus unconnected. The KMS core will add the
- * default modes.
- */
+ /* We can't retrieve modes. The KMS core will add the default modes. */
return 0;
}
@@ -249,7 +74,7 @@ enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
drm_mode_copy(adjusted_mode, mode);
for (; dssdev; dssdev = dssdev->next) {
- if (!dssdev->ops->check_timings)
+ if (!dssdev->ops || !dssdev->ops->check_timings)
continue;
ret = dssdev->ops->check_timings(dssdev, adjusted_mode);
@@ -298,35 +123,6 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
-static int omap_connector_get_type(struct omap_dss_device *output)
-{
- struct omap_dss_device *display;
- enum omap_display_type type;
-
- display = omapdss_display_get(output);
- type = display->type;
- omapdss_device_put(display);
-
- switch (type) {
- case OMAP_DISPLAY_TYPE_HDMI:
- return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DVI:
- return DRM_MODE_CONNECTOR_DVID;
- case OMAP_DISPLAY_TYPE_DSI:
- return DRM_MODE_CONNECTOR_DSI;
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- return DRM_MODE_CONNECTOR_DPI;
- case OMAP_DISPLAY_TYPE_VENC:
- /* TODO: This could also be composite */
- return DRM_MODE_CONNECTOR_SVIDEO;
- case OMAP_DISPLAY_TYPE_SDI:
- return DRM_MODE_CONNECTOR_LVDS;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
- }
-}
-
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
@@ -334,7 +130,6 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
- struct omap_dss_device *dssdev;
DBG("%s", output->name);
@@ -349,27 +144,9 @@ struct drm_connector *omap_connector_init(struct drm_device *dev,
connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- omap_connector_get_type(output));
+ DRM_MODE_CONNECTOR_DSI);
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- /*
- * Initialize connector status handling. First try to find a device that
- * supports hot-plug reporting. If it fails, fall back to a device that
- * support polling. If that fails too, we don't support hot-plug
- * detection at all.
- */
- dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
- if (dssdev) {
- omap_connector->hpd = omapdss_device_get(dssdev);
- connector->polled = DRM_CONNECTOR_POLL_HPD;
- } else {
- dssdev = omap_connector_find_device(connector,
- OMAP_DSS_DEVICE_OP_DETECT);
- if (dssdev)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- }
-
return connector;
fail:
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 13607bda33d8..0ecd4f1655b7 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -21,9 +21,6 @@ struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
struct omap_dss_device *output,
struct drm_encoder *encoder);
-bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
-void omap_connector_enable_hpd(struct drm_connector *connector);
-void omap_connector_disable_hpd(struct drm_connector *connector);
enum drm_mode_status omap_connector_mode_fixup(struct omap_dss_device *dssdev,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode);
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 3c5ddbf30e97..fce7e944a280 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -831,7 +831,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
* OMAP_DSS_CHANNEL_DIGIT. X server assumes 256 element gamma
* tables so lets use that. Size of HW gamma table can be
* extracted with dispc_mgr_gamma_size(). If it returns 0
- * gamma table is not supprted.
+ * gamma table is not supported.
*/
if (priv->dispc_ops->mgr_gamma_size(priv->dispc, channel)) {
unsigned int gamma_lut_size = 256;
diff --git a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
index 252f5ebb1acc..42ec51bb7b1b 100644
--- a/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
+++ b/drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
@@ -82,12 +82,11 @@ static const u32 reg[][4] = {
static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
{
- struct dma_device *dma_dev = dmm->wa_dma_chan->device;
struct dma_async_tx_descriptor *tx;
enum dma_status status;
dma_cookie_t cookie;
- tx = dma_dev->device_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
+ tx = dmaengine_prep_dma_memcpy(dmm->wa_dma_chan, dst, src, 4, 0);
if (!tx) {
dev_err(dmm->dev, "Failed to prepare DMA memcpy\n");
return -EIO;
@@ -99,7 +98,6 @@ static int dmm_dma_copy(struct dmm *dmm, dma_addr_t src, dma_addr_t dst)
return -EIO;
}
- dma_async_issue_pending(dmm->wa_dma_chan);
status = dma_sync_wait(dmm->wa_dma_chan, cookie);
if (status != DMA_COMPLETE)
dev_err(dmm->dev, "i878 wa DMA copy failure\n");
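The omap_dmm_tiler.c hunk swaps the raw device_prep_dma_memcpy method call for the dmaengine_prep_dma_memcpy() client wrapper, and can drop the explicit dma_async_issue_pending() because dma_sync_wait() issues pending transactions before polling. The canonical client-side sequence, for reference:

  struct dma_async_tx_descriptor *tx;
  dma_cookie_t cookie;

  tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
  if (!tx)
          return -EIO;

  cookie = dmaengine_submit(tx);
  if (dma_submit_error(cookie))
          return -EIO;

  /* dma_sync_wait() calls dma_async_issue_pending() internally,
   * then polls for completion. */
  if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
          return -EIO;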
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index d2750f60f519..cdafd7ef1c32 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -12,6 +12,7 @@
#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_bridge.h>
+#include <drm/drm_bridge_connector.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
@@ -134,9 +135,6 @@ static void omap_disconnect_pipelines(struct drm_device *ddev)
for (i = 0; i < priv->num_pipes; i++) {
struct omap_drm_pipeline *pipe = &priv->pipes[i];
- if (pipe->output->panel)
- drm_panel_detach(pipe->output->panel);
-
omapdss_device_disconnect(NULL, pipe->output);
omapdss_device_put(pipe->output);
@@ -209,11 +207,12 @@ static int omap_display_id(struct omap_dss_device *output)
struct device_node *node = NULL;
if (output->next) {
- struct omap_dss_device *display;
+ struct omap_dss_device *display = output;
+
+ while (display->next)
+ display = display->next;
- display = omapdss_display_get(output);
node = display->dev->of_node;
- omapdss_device_put(display);
} else if (output->bridge) {
struct drm_bridge *bridge = output->bridge;
@@ -221,8 +220,6 @@ static int omap_display_id(struct omap_dss_device *output)
bridge = drm_bridge_get_next_bridge(bridge);
node = bridge->of_node;
- } else if (output->panel) {
- node = output->panel->dev->of_node;
}
return node ? of_alias_get_id(node, "display") : -ENODEV;
@@ -297,9 +294,14 @@ static int omap_modeset_init(struct drm_device *dev)
if (pipe->output->bridge) {
ret = drm_bridge_attach(pipe->encoder,
- pipe->output->bridge, NULL);
- if (ret < 0)
+ pipe->output->bridge, NULL,
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR);
+ if (ret < 0) {
+ dev_err(priv->dev,
+ "unable to attach bridge %pOF\n",
+ pipe->output->bridge->of_node);
return ret;
+ }
}
id = omap_display_id(pipe->output);
@@ -330,20 +332,28 @@ static int omap_modeset_init(struct drm_device *dev)
struct drm_encoder *encoder = pipe->encoder;
struct drm_crtc *crtc;
- if (!pipe->output->bridge) {
+ if (pipe->output->next) {
pipe->connector = omap_connector_init(dev, pipe->output,
encoder);
if (!pipe->connector)
return -ENOMEM;
+ } else {
+ pipe->connector = drm_bridge_connector_init(dev, encoder);
+ if (IS_ERR(pipe->connector)) {
+ dev_err(priv->dev,
+ "unable to create bridge connector for %s\n",
+ pipe->output->name);
+ return PTR_ERR(pipe->connector);
+ }
+ }
- drm_connector_attach_encoder(pipe->connector, encoder);
+ drm_connector_attach_encoder(pipe->connector, encoder);
- if (pipe->output->panel) {
- ret = drm_panel_attach(pipe->output->panel,
- pipe->connector);
- if (ret < 0)
- return ret;
- }
+ if (pipe->output->panel) {
+ ret = drm_panel_attach(pipe->output->panel,
+ pipe->connector);
+ if (ret < 0)
+ return ret;
}
crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
@@ -382,6 +392,23 @@ static int omap_modeset_init(struct drm_device *dev)
return 0;
}
+static void omap_modeset_fini(struct drm_device *ddev)
+{
+ struct omap_drm_private *priv = ddev->dev_private;
+ unsigned int i;
+
+ omap_drm_irq_uninstall(ddev);
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+ if (pipe->output->panel)
+ drm_panel_detach(pipe->output->panel);
+ }
+
+ drm_mode_config_cleanup(ddev);
+}
+
/*
* Enable the HPD in external components if supported
*/
@@ -391,8 +418,13 @@ static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_enable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_enable_hpd(connector);
}
}
@@ -405,8 +437,13 @@ static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
unsigned int i;
for (i = 0; i < priv->num_pipes; i++) {
- if (priv->pipes[i].connector)
- omap_connector_disable_hpd(priv->pipes[i].connector);
+ struct drm_connector *connector = priv->pipes[i].connector;
+
+ if (!connector)
+ continue;
+
+ if (priv->pipes[i].output->bridge)
+ drm_bridge_connector_disable_hpd(connector);
}
}
@@ -629,8 +666,7 @@ err_cleanup_helpers:
omap_fbdev_fini(ddev);
err_cleanup_modeset:
- drm_mode_config_cleanup(ddev);
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
err_gem_deinit:
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
@@ -655,9 +691,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_atomic_helper_shutdown(ddev);
- drm_mode_config_cleanup(ddev);
-
- omap_drm_irq_uninstall(ddev);
+ omap_modeset_fini(ddev);
omap_gem_deinit(ddev);
destroy_workqueue(priv->wq);
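omap_drv.c is where the two connector models meet: pipelines that still carry an omap_dss_device chain (DSI only, after this series) keep omap_connector, everything else gets a connector synthesized from the bridge chain. The bridge-connector path in isolation:

  struct drm_connector *connector;
  int ret;

  /* All bridges must have been attached with
   * DRM_BRIDGE_ATTACH_NO_CONNECTOR before this point. */
  connector = drm_bridge_connector_init(ddev, encoder);
  if (IS_ERR(connector))
          return PTR_ERR(connector);

  ret = drm_connector_attach_encoder(connector, encoder);
  if (ret)
          return ret;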
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index 4f2165a37795..ae4b867a67a3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -10,7 +10,6 @@
#include <drm/drm_crtc.h>
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_edid.h>
-#include <drm/drm_panel.h>
#include "omap_drv.h"
@@ -70,30 +69,6 @@ static void omap_encoder_update_videomode_flags(struct videomode *vm,
}
}
-static void omap_encoder_hdmi_mode_set(struct drm_connector *connector,
- struct drm_encoder *encoder,
- struct drm_display_mode *adjusted_mode)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->output;
- bool hdmi_mode;
-
- hdmi_mode = omap_connector_get_hdmi_mode(connector);
-
- if (dssdev->ops->hdmi.set_hdmi_mode)
- dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
-
- if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
- struct hdmi_avi_infoframe avi;
- int r;
-
- r = drm_hdmi_avi_infoframe_from_display_mode(&avi, connector,
- adjusted_mode);
- if (r == 0)
- dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
- }
-}
-
static void omap_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
@@ -138,17 +113,8 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
bus_flags = connector->display_info.bus_flags;
omap_encoder_update_videomode_flags(&vm, bus_flags);
- /* Set timings for all devices in the display pipeline. */
+ /* Set timings for the dss manager. */
dss_mgr_set_timings(output, &vm);
-
- for (dssdev = output; dssdev; dssdev = dssdev->next) {
- if (dssdev->ops->set_timings)
- dssdev->ops->set_timings(dssdev, adjusted_mode);
- }
-
- /* Set the HDMI mode and HDMI infoframe if applicable. */
- if (output->type == OMAP_DISPLAY_TYPE_HDMI)
- omap_encoder_hdmi_mode_set(connector, encoder, adjusted_mode);
}
static void omap_encoder_disable(struct drm_encoder *encoder)
@@ -159,33 +125,12 @@ static void omap_encoder_disable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "disable(%s)\n", dssdev->name);
- /* Disable the panel if present. */
- if (dssdev->panel) {
- drm_panel_disable(dssdev->panel);
- drm_panel_unprepare(dssdev->panel);
- }
-
/*
* Disable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_disable(dssdev->next);
-
- /*
- * Disable the internal encoder. This will disable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->disable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
- }
-
- /*
- * Perform the post-disable operations on the chain of external devices
- * to complete the display pipeline disable.
- */
- omapdss_device_post_disable(dssdev->next);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
@@ -196,30 +141,12 @@ static void omap_encoder_enable(struct drm_encoder *encoder)
dev_dbg(dev->dev, "enable(%s)\n", dssdev->name);
- /* Prepare the chain of external devices for pipeline enable. */
- omapdss_device_pre_enable(dssdev->next);
-
- /*
- * Enable the internal encoder. This will enable the DSS output. The
- * DSI is treated as an exception as DSI pipelines still use the legacy
- * flow where the pipeline output controls the encoder.
- */
- if (dssdev->type != OMAP_DISPLAY_TYPE_DSI) {
- dssdev->ops->enable(dssdev);
- dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
- }
-
/*
* Enable the chain of external devices, starting at the one at the
- * internal encoder's output.
+ * internal encoder's output. This is used for DSI outputs only, as
+ * dssdev->next is NULL for all other outputs.
*/
omapdss_device_enable(dssdev->next);
-
- /* Enable the panel if present. */
- if (dssdev->panel) {
- drm_panel_prepare(dssdev->panel);
- drm_panel_enable(dssdev->panel);
- }
}
static int omap_encoder_atomic_check(struct drm_encoder *encoder,
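With the outputs converted to bridges, omap_encoder no longer sequences the pipeline by hand; the atomic helpers walk the bridge chain instead. The direction of that walk is worth keeping in mind when reading the simplified enable/disable above (summarized from the drm_bridge chain helpers, for orientation only):

  /*
   * For an encoder -> B1 -> B2 (connector side) chain:
   *
   *   pre_enable:   B2, then B1   (last bridge to first)
   *   enable:       B1, then B2   (first bridge to last)
   *   disable:      B2, then B1   (last bridge to first)
   *   post_disable: B1, then B2   (first bridge to last)
   */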
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index b06e5cbfd03a..09a84919ef73 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -242,14 +242,10 @@ void omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
+ ret = drm_fb_helper_init(dev, helper);
if (ret)
goto fail;
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret)
- goto fini;
-
ret = drm_fb_helper_initial_config(helper, 32);
if (ret)
goto fini;
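The two hunks above track a drm_fb_helper API change: drm_fb_helper_init() no longer takes a connector count, and the fbdev helper now picks up connectors by itself during drm_fb_helper_initial_config(), so the explicit drm_fb_helper_single_add_all_connectors() step is gone. A minimal sketch of the resulting init flow (error handling simplified relative to the driver's goto labels):

static int example_fbdev_init(struct drm_device *dev,
			      struct drm_fb_helper *helper,
			      const struct drm_fb_helper_funcs *funcs)
{
	int ret;

	drm_fb_helper_prepare(dev, helper, funcs);

	/* Note: no connector-count argument anymore. */
	ret = drm_fb_helper_init(dev, helper);
	if (ret)
		return ret;

	/* Connectors are registered automatically by the core here. */
	ret = drm_fb_helper_initial_config(helper, 32);
	if (ret) {
		drm_fb_helper_fini(helper);
		return ret;
	}

	return 0;
}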
diff --git a/drivers/gpu/drm/panel/Kconfig b/drivers/gpu/drm/panel/Kconfig
index ae44ac2ec106..a1723c1b5fbf 100644
--- a/drivers/gpu/drm/panel/Kconfig
+++ b/drivers/gpu/drm/panel/Kconfig
@@ -29,6 +29,15 @@ config DRM_PANEL_BOE_HIMAX8279D
24 bit RGB per pixel. It provides a MIPI DSI interface to
the host and has a built-in LED backlight.
+config DRM_PANEL_BOE_TV101WUM_NL6
+ tristate "BOE TV101WUM and AUO KD101N80 45NA 1200x1920 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+	  Say Y here if you want to enable support for the BOE TV101WUM and
+	  AUO KD101N80 45NA WUXGA (1200x1920) DSI video mode panels.
+
config DRM_PANEL_LVDS
tristate "Generic LVDS panel driver"
depends on OF
@@ -50,6 +59,25 @@ config DRM_PANEL_SIMPLE
that it can be automatically turned off when the panel goes into a
low power state.
+config DRM_PANEL_ELIDA_KD35T133
+ tristate "Elida KD35T133 panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Elida
+ KD35T133 controller for 320x480 LCD panels with MIPI-DSI
+ system interfaces.
+
+config DRM_PANEL_FEIXIN_K101_IM2BA02
+ tristate "Feixin K101 IM2BA02 panel"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the Feixin K101 IM2BA02
+ 4-lane 800x1280 MIPI DSI panel.
+
config DRM_PANEL_FEIYANG_FY07024DI26A30D
tristate "Feiyang FY07024DI26A30-D MIPI-DSI LCD panel"
depends on OF
@@ -149,6 +177,16 @@ config DRM_PANEL_NEC_NL8048HL11
panel (found on the Zoom2/3/3630 SDP boards). To compile this driver
as a module, choose M here.
+config DRM_PANEL_NOVATEK_NT35510
+ tristate "Novatek NT35510 RGB panel driver"
+ depends on OF
+ depends on DRM_MIPI_DSI
+ depends on BACKLIGHT_CLASS_DEVICE
+ help
+ Say Y here if you want to enable support for the panels built
+ around the Novatek NT35510 display controller, such as some
+ Hydis panels.
+
config DRM_PANEL_NOVATEK_NT39016
tristate "Novatek NT39016 RGB/SPI panel"
depends on OF && SPI
@@ -275,6 +313,12 @@ config DRM_PANEL_SAMSUNG_S6E63M0
Say Y here if you want to enable support for Samsung S6E63M0
AMOLED LCD panel.
+config DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01
+ tristate "Samsung AMS452EF01 panel with S6E88A0 DSI video mode controller"
+ depends on OF
+ select DRM_MIPI_DSI
+	select VIDEOMODE_HELPERS
+	help
+	  Say Y here if you want to enable support for the Samsung AMS452EF01
+	  panel, which is driven by the Samsung S6E88A0 DSI video mode
+	  controller.
+
config DRM_PANEL_SAMSUNG_S6E8AA0
tristate "Samsung S6E8AA0 DSI video mode panel"
depends on OF
diff --git a/drivers/gpu/drm/panel/Makefile b/drivers/gpu/drm/panel/Makefile
index 7c4d3c581fd4..96a883cd6630 100644
--- a/drivers/gpu/drm/panel/Makefile
+++ b/drivers/gpu/drm/panel/Makefile
@@ -1,8 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_DRM_PANEL_ARM_VERSATILE) += panel-arm-versatile.o
obj-$(CONFIG_DRM_PANEL_BOE_HIMAX8279D) += panel-boe-himax8279d.o
+obj-$(CONFIG_DRM_PANEL_BOE_TV101WUM_NL6) += panel-boe-tv101wum-nl6.o
obj-$(CONFIG_DRM_PANEL_LVDS) += panel-lvds.o
obj-$(CONFIG_DRM_PANEL_SIMPLE) += panel-simple.o
+obj-$(CONFIG_DRM_PANEL_ELIDA_KD35T133) += panel-elida-kd35t133.o
+obj-$(CONFIG_DRM_PANEL_FEIXIN_K101_IM2BA02) += panel-feixin-k101-im2ba02.o
obj-$(CONFIG_DRM_PANEL_FEIYANG_FY07024DI26A30D) += panel-feiyang-fy07024di26a30d.o
obj-$(CONFIG_DRM_PANEL_ILITEK_IL9322) += panel-ilitek-ili9322.o
obj-$(CONFIG_DRM_PANEL_ILITEK_ILI9881C) += panel-ilitek-ili9881c.o
@@ -13,6 +16,7 @@ obj-$(CONFIG_DRM_PANEL_LEADTEK_LTK500HD1829) += panel-leadtek-ltk500hd1829.o
obj-$(CONFIG_DRM_PANEL_LG_LB035Q02) += panel-lg-lb035q02.o
obj-$(CONFIG_DRM_PANEL_LG_LG4573) += panel-lg-lg4573.o
obj-$(CONFIG_DRM_PANEL_NEC_NL8048HL11) += panel-nec-nl8048hl11.o
+obj-$(CONFIG_DRM_PANEL_NOVATEK_NT35510) += panel-novatek-nt35510.o
obj-$(CONFIG_DRM_PANEL_NOVATEK_NT39016) += panel-novatek-nt39016.o
obj-$(CONFIG_DRM_PANEL_OLIMEX_LCD_OLINUXINO) += panel-olimex-lcd-olinuxino.o
obj-$(CONFIG_DRM_PANEL_ORISETECH_OTM8009A) += panel-orisetech-otm8009a.o
@@ -28,6 +32,7 @@ obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6D16D0) += panel-samsung-s6d16d0.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E3HA2) += panel-samsung-s6e3ha2.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63J0X03) += panel-samsung-s6e63j0x03.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E63M0) += panel-samsung-s6e63m0.o
+obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E88A0_AMS452EF01) += panel-samsung-s6e88a0-ams452ef01.o
obj-$(CONFIG_DRM_PANEL_SAMSUNG_S6E8AA0) += panel-samsung-s6e8aa0.o
obj-$(CONFIG_DRM_PANEL_SEIKO_43WVF1G) += panel-seiko-43wvf1g.o
obj-$(CONFIG_DRM_PANEL_SHARP_LQ101R1SX01) += panel-sharp-lq101r1sx01.o
diff --git a/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
new file mode 100644
index 000000000000..48a164257d18
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-boe-tv101wum-nl6.c
@@ -0,0 +1,854 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Jitao Shi <[email protected]>
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_panel.h>
+
+#include <video/mipi_display.h>
+
+struct panel_desc {
+ const struct drm_display_mode *modes;
+ unsigned int bpc;
+
+ /**
+ * @width_mm: width of the panel's active display area
+ * @height_mm: height of the panel's active display area
+ */
+ struct {
+ unsigned int width_mm;
+ unsigned int height_mm;
+ } size;
+
+ unsigned long mode_flags;
+ enum mipi_dsi_pixel_format format;
+ const struct panel_init_cmd *init_cmds;
+ unsigned int lanes;
+ bool discharge_on_disable;
+};
+
+struct boe_panel {
+ struct drm_panel base;
+ struct mipi_dsi_device *dsi;
+
+ const struct panel_desc *desc;
+
+ struct regulator *pp1800;
+ struct regulator *avee;
+ struct regulator *avdd;
+ struct gpio_desc *enable_gpio;
+
+ bool prepared;
+};
+
+enum dsi_cmd_type {
+ INIT_DCS_CMD,
+ DELAY_CMD,
+};
+
+struct panel_init_cmd {
+ enum dsi_cmd_type type;
+ size_t len;
+ const char *data;
+};
+
+#define _INIT_DCS_CMD(...) { \
+ .type = INIT_DCS_CMD, \
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
+
+#define _INIT_DELAY_CMD(...) { \
+ .type = DELAY_CMD,\
+ .len = sizeof((char[]){__VA_ARGS__}), \
+ .data = (char[]){__VA_ARGS__} }
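For reference, each table entry is built with a C99 compound literal, so the payload length is computed at compile time by sizeof. An entry such as _INIT_DCS_CMD(0xB0, 0x05) expands to roughly:

/* First byte is the DCS command, any remaining bytes are the payload. */
{
	.type = INIT_DCS_CMD,
	.len  = 2,			/* sizeof((char[]){0xB0, 0x05}) */
	.data = (char[]){0xB0, 0x05}
}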
+
+static const struct panel_init_cmd boe_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x05),
+ _INIT_DCS_CMD(0xB1, 0xE5),
+ _INIT_DCS_CMD(0xB3, 0x52),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x88),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB6, 0x03),
+ _INIT_DCS_CMD(0xBA, 0x8B),
+ _INIT_DCS_CMD(0xBF, 0x1A),
+ _INIT_DCS_CMD(0xC0, 0x0F),
+ _INIT_DCS_CMD(0xC2, 0x0C),
+ _INIT_DCS_CMD(0xC3, 0x02),
+ _INIT_DCS_CMD(0xC4, 0x0C),
+ _INIT_DCS_CMD(0xC5, 0x02),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xE0, 0x26),
+ _INIT_DCS_CMD(0xE1, 0x26),
+ _INIT_DCS_CMD(0xDC, 0x00),
+ _INIT_DCS_CMD(0xDD, 0x00),
+ _INIT_DCS_CMD(0xCC, 0x26),
+ _INIT_DCS_CMD(0xCD, 0x26),
+ _INIT_DCS_CMD(0xC8, 0x00),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xD2, 0x03),
+ _INIT_DCS_CMD(0xD3, 0x03),
+ _INIT_DCS_CMD(0xE6, 0x04),
+ _INIT_DCS_CMD(0xE7, 0x04),
+ _INIT_DCS_CMD(0xC4, 0x09),
+ _INIT_DCS_CMD(0xC5, 0x09),
+ _INIT_DCS_CMD(0xD8, 0x0A),
+ _INIT_DCS_CMD(0xD9, 0x0A),
+ _INIT_DCS_CMD(0xC2, 0x0B),
+ _INIT_DCS_CMD(0xC3, 0x0B),
+ _INIT_DCS_CMD(0xD6, 0x0C),
+ _INIT_DCS_CMD(0xD7, 0x0C),
+ _INIT_DCS_CMD(0xC0, 0x05),
+ _INIT_DCS_CMD(0xC1, 0x05),
+ _INIT_DCS_CMD(0xD4, 0x06),
+ _INIT_DCS_CMD(0xD5, 0x06),
+ _INIT_DCS_CMD(0xCA, 0x07),
+ _INIT_DCS_CMD(0xCB, 0x07),
+ _INIT_DCS_CMD(0xDE, 0x08),
+ _INIT_DCS_CMD(0xDF, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x02),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xC1, 0x0D),
+ _INIT_DCS_CMD(0xC2, 0x17),
+ _INIT_DCS_CMD(0xC3, 0x26),
+ _INIT_DCS_CMD(0xC4, 0x31),
+ _INIT_DCS_CMD(0xC5, 0x1C),
+ _INIT_DCS_CMD(0xC6, 0x2C),
+ _INIT_DCS_CMD(0xC7, 0x33),
+ _INIT_DCS_CMD(0xC8, 0x31),
+ _INIT_DCS_CMD(0xC9, 0x37),
+ _INIT_DCS_CMD(0xCA, 0x37),
+ _INIT_DCS_CMD(0xCB, 0x37),
+ _INIT_DCS_CMD(0xCC, 0x39),
+ _INIT_DCS_CMD(0xCD, 0x2E),
+ _INIT_DCS_CMD(0xCE, 0x2F),
+ _INIT_DCS_CMD(0xCF, 0x2F),
+ _INIT_DCS_CMD(0xD0, 0x07),
+ _INIT_DCS_CMD(0xD2, 0x00),
+ _INIT_DCS_CMD(0xD3, 0x0D),
+ _INIT_DCS_CMD(0xD4, 0x17),
+ _INIT_DCS_CMD(0xD5, 0x26),
+ _INIT_DCS_CMD(0xD6, 0x31),
+ _INIT_DCS_CMD(0xD7, 0x3F),
+ _INIT_DCS_CMD(0xD8, 0x3F),
+ _INIT_DCS_CMD(0xD9, 0x3F),
+ _INIT_DCS_CMD(0xDA, 0x3F),
+ _INIT_DCS_CMD(0xDB, 0x37),
+ _INIT_DCS_CMD(0xDC, 0x37),
+ _INIT_DCS_CMD(0xDD, 0x37),
+ _INIT_DCS_CMD(0xDE, 0x39),
+ _INIT_DCS_CMD(0xDF, 0x2E),
+ _INIT_DCS_CMD(0xE0, 0x2F),
+ _INIT_DCS_CMD(0xE1, 0x2F),
+ _INIT_DCS_CMD(0xE2, 0x07),
+ _INIT_DCS_CMD(0xB0, 0x03),
+ _INIT_DCS_CMD(0xC8, 0x0B),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xC3, 0x00),
+ _INIT_DCS_CMD(0xE7, 0x00),
+ _INIT_DCS_CMD(0xC5, 0x2A),
+ _INIT_DCS_CMD(0xDE, 0x2A),
+ _INIT_DCS_CMD(0xCA, 0x43),
+ _INIT_DCS_CMD(0xC9, 0x07),
+ _INIT_DCS_CMD(0xE4, 0xC0),
+ _INIT_DCS_CMD(0xE5, 0x0D),
+ _INIT_DCS_CMD(0xCB, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x06),
+ _INIT_DCS_CMD(0xB8, 0xA5),
+ _INIT_DCS_CMD(0xC0, 0xA5),
+ _INIT_DCS_CMD(0xC7, 0x0F),
+ _INIT_DCS_CMD(0xD5, 0x32),
+ _INIT_DCS_CMD(0xB8, 0x00),
+ _INIT_DCS_CMD(0xC0, 0x00),
+ _INIT_DCS_CMD(0xBC, 0x00),
+ _INIT_DCS_CMD(0xB0, 0x07),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x08),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x09),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0A),
+ _INIT_DCS_CMD(0xB1, 0x00),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x0F),
+ _INIT_DCS_CMD(0xB4, 0x25),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4E),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x97),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x22),
+ _INIT_DCS_CMD(0xBB, 0xA4),
+ _INIT_DCS_CMD(0xBC, 0x2B),
+ _INIT_DCS_CMD(0xBD, 0x2F),
+ _INIT_DCS_CMD(0xBE, 0xA9),
+ _INIT_DCS_CMD(0xBF, 0x25),
+ _INIT_DCS_CMD(0xC0, 0x61),
+ _INIT_DCS_CMD(0xC1, 0x97),
+ _INIT_DCS_CMD(0xC2, 0xB2),
+ _INIT_DCS_CMD(0xC3, 0xCD),
+ _INIT_DCS_CMD(0xC4, 0xD9),
+ _INIT_DCS_CMD(0xC5, 0xE7),
+ _INIT_DCS_CMD(0xC6, 0xF4),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0B),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x05),
+ _INIT_DCS_CMD(0xB3, 0x11),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x39),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x72),
+ _INIT_DCS_CMD(0xB8, 0x98),
+ _INIT_DCS_CMD(0xB9, 0xDC),
+ _INIT_DCS_CMD(0xBA, 0x23),
+ _INIT_DCS_CMD(0xBB, 0xA6),
+ _INIT_DCS_CMD(0xBC, 0x2C),
+ _INIT_DCS_CMD(0xBD, 0x30),
+ _INIT_DCS_CMD(0xBE, 0xAA),
+ _INIT_DCS_CMD(0xBF, 0x26),
+ _INIT_DCS_CMD(0xC0, 0x62),
+ _INIT_DCS_CMD(0xC1, 0x9B),
+ _INIT_DCS_CMD(0xC2, 0xB5),
+ _INIT_DCS_CMD(0xC3, 0xCF),
+ _INIT_DCS_CMD(0xC4, 0xDB),
+ _INIT_DCS_CMD(0xC5, 0xE8),
+ _INIT_DCS_CMD(0xC6, 0xF5),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x0C),
+ _INIT_DCS_CMD(0xB1, 0x04),
+ _INIT_DCS_CMD(0xB2, 0x02),
+ _INIT_DCS_CMD(0xB3, 0x16),
+ _INIT_DCS_CMD(0xB4, 0x24),
+ _INIT_DCS_CMD(0xB5, 0x3B),
+ _INIT_DCS_CMD(0xB6, 0x4F),
+ _INIT_DCS_CMD(0xB7, 0x73),
+ _INIT_DCS_CMD(0xB8, 0x99),
+ _INIT_DCS_CMD(0xB9, 0xE0),
+ _INIT_DCS_CMD(0xBA, 0x26),
+ _INIT_DCS_CMD(0xBB, 0xAD),
+ _INIT_DCS_CMD(0xBC, 0x36),
+ _INIT_DCS_CMD(0xBD, 0x3A),
+ _INIT_DCS_CMD(0xBE, 0xAE),
+ _INIT_DCS_CMD(0xBF, 0x2A),
+ _INIT_DCS_CMD(0xC0, 0x66),
+ _INIT_DCS_CMD(0xC1, 0x9E),
+ _INIT_DCS_CMD(0xC2, 0xB8),
+ _INIT_DCS_CMD(0xC3, 0xD1),
+ _INIT_DCS_CMD(0xC4, 0xDD),
+ _INIT_DCS_CMD(0xC5, 0xE9),
+ _INIT_DCS_CMD(0xC6, 0xF6),
+ _INIT_DCS_CMD(0xC7, 0xFA),
+ _INIT_DCS_CMD(0xC8, 0xFC),
+ _INIT_DCS_CMD(0xC9, 0x00),
+ _INIT_DCS_CMD(0xCA, 0x00),
+ _INIT_DCS_CMD(0xCB, 0x16),
+ _INIT_DCS_CMD(0xCC, 0xAF),
+ _INIT_DCS_CMD(0xCD, 0xFF),
+ _INIT_DCS_CMD(0xCE, 0xFF),
+ _INIT_DCS_CMD(0xB0, 0x00),
+ _INIT_DCS_CMD(0xB3, 0x08),
+ _INIT_DCS_CMD(0xB0, 0x04),
+ _INIT_DCS_CMD(0xB8, 0x68),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static const struct panel_init_cmd auo_kd101n80_45na_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0x11),
+ _INIT_DELAY_CMD(120),
+ _INIT_DCS_CMD(0x29),
+ _INIT_DELAY_CMD(120),
+ {},
+};
+
+static const struct panel_init_cmd auo_b101uan08_3_init_cmd[] = {
+ _INIT_DELAY_CMD(24),
+ _INIT_DCS_CMD(0xB0, 0x01),
+ _INIT_DCS_CMD(0xC0, 0x48),
+ _INIT_DCS_CMD(0xC1, 0x48),
+ _INIT_DCS_CMD(0xC2, 0x47),
+ _INIT_DCS_CMD(0xC3, 0x47),
+ _INIT_DCS_CMD(0xC4, 0x46),
+ _INIT_DCS_CMD(0xC5, 0x46),
+ _INIT_DCS_CMD(0xC6, 0x45),
+ _INIT_DCS_CMD(0xC7, 0x45),
+ _INIT_DCS_CMD(0xC8, 0x64),
+ _INIT_DCS_CMD(0xC9, 0x64),
+ _INIT_DCS_CMD(0xCA, 0x4F),
+ _INIT_DCS_CMD(0xCB, 0x4F),
+ _INIT_DCS_CMD(0xCC, 0x40),
+ _INIT_DCS_CMD(0xCD, 0x40),
+ _INIT_DCS_CMD(0xCE, 0x66),
+ _INIT_DCS_CMD(0xCF, 0x66),
+ _INIT_DCS_CMD(0xD0, 0x4F),
+ _INIT_DCS_CMD(0xD1, 0x4F),
+ _INIT_DCS_CMD(0xD2, 0x41),
+ _INIT_DCS_CMD(0xD3, 0x41),
+ _INIT_DCS_CMD(0xD4, 0x48),
+ _INIT_DCS_CMD(0xD5, 0x48),
+ _INIT_DCS_CMD(0xD6, 0x47),
+ _INIT_DCS_CMD(0xD7, 0x47),
+ _INIT_DCS_CMD(0xD8, 0x46),
+ _INIT_DCS_CMD(0xD9, 0x46),
+ _INIT_DCS_CMD(0xDA, 0x45),
+ _INIT_DCS_CMD(0xDB, 0x45),
+ _INIT_DCS_CMD(0xDC, 0x64),
+ _INIT_DCS_CMD(0xDD, 0x64),
+ _INIT_DCS_CMD(0xDE, 0x4F),
+ _INIT_DCS_CMD(0xDF, 0x4F),
+ _INIT_DCS_CMD(0xE0, 0x40),
+ _INIT_DCS_CMD(0xE1, 0x40),
+ _INIT_DCS_CMD(0xE2, 0x66),
+ _INIT_DCS_CMD(0xE3, 0x66),
+ _INIT_DCS_CMD(0xE4, 0x4F),
+ _INIT_DCS_CMD(0xE5, 0x4F),
+ _INIT_DCS_CMD(0xE6, 0x41),
+ _INIT_DCS_CMD(0xE7, 0x41),
+ _INIT_DELAY_CMD(150),
+ {},
+};
+
+static inline struct boe_panel *to_boe_panel(struct drm_panel *panel)
+{
+ return container_of(panel, struct boe_panel, base);
+}
+
+static int boe_panel_init_dcs_cmd(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ struct drm_panel *panel = &boe->base;
+ int i, err = 0;
+
+ if (boe->desc->init_cmds) {
+ const struct panel_init_cmd *init_cmds = boe->desc->init_cmds;
+
+ for (i = 0; init_cmds[i].len != 0; i++) {
+ const struct panel_init_cmd *cmd = &init_cmds[i];
+
+ switch (cmd->type) {
+ case DELAY_CMD:
+ msleep(cmd->data[0]);
+ err = 0;
+ break;
+
+ case INIT_DCS_CMD:
+ err = mipi_dsi_dcs_write(dsi, cmd->data[0],
+ cmd->len <= 1 ? NULL :
+ &cmd->data[1],
+ cmd->len - 1);
+ break;
+
+ default:
+ err = -EINVAL;
+ }
+
+ if (err < 0) {
+ dev_err(panel->dev,
+ "failed to write command %u\n", i);
+ return err;
+ }
+ }
+ }
+ return 0;
+}
+
+static int boe_panel_enter_sleep_mode(struct boe_panel *boe)
+{
+ struct mipi_dsi_device *dsi = boe->dsi;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ return ret;
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static int boe_panel_unprepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (!boe->prepared)
+ return 0;
+
+ ret = boe_panel_enter_sleep_mode(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to set panel off: %d\n", ret);
+ return ret;
+ }
+
+ msleep(150);
+
+ if (boe->desc->discharge_on_disable) {
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ } else {
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(500, 1000);
+ regulator_disable(boe->avee);
+ regulator_disable(boe->avdd);
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ }
+
+ boe->prepared = false;
+
+ return 0;
+}
+
+static int boe_panel_prepare(struct drm_panel *panel)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ int ret;
+
+ if (boe->prepared)
+ return 0;
+
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 1500);
+
+ ret = regulator_enable(boe->pp1800);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(3000, 5000);
+
+ ret = regulator_enable(boe->avdd);
+ if (ret < 0)
+ goto poweroff1v8;
+ ret = regulator_enable(boe->avee);
+ if (ret < 0)
+ goto poweroffavdd;
+
+ usleep_range(5000, 10000);
+
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value(boe->enable_gpio, 1);
+ usleep_range(6000, 10000);
+
+ ret = boe_panel_init_dcs_cmd(boe);
+ if (ret < 0) {
+ dev_err(panel->dev, "failed to init panel: %d\n", ret);
+ goto poweroff;
+ }
+
+ boe->prepared = true;
+
+ return 0;
+
+poweroff:
+ regulator_disable(boe->avee);
+poweroffavdd:
+ regulator_disable(boe->avdd);
+poweroff1v8:
+ usleep_range(5000, 7000);
+ regulator_disable(boe->pp1800);
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ return ret;
+}
+
+static int boe_panel_enable(struct drm_panel *panel)
+{
+ msleep(130);
+ return 0;
+}
+
+static const struct drm_display_mode boe_tv101wum_nl6_default_mode = {
+ .clock = 159425,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 100,
+ .hsync_end = 1200 + 100 + 40,
+ .htotal = 1200 + 100 + 40 + 24,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 10,
+ .vsync_end = 1920 + 10 + 14,
+ .vtotal = 1920 + 10 + 14 + 4,
+ .vrefresh = 60,
+};
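As a cross-check on the timings above: htotal = 1200 + 100 + 40 + 24 = 1364 and vtotal = 1920 + 10 + 14 + 4 = 1948, so 1364 * 1948 * 60 Hz = 159,424,320 Hz, i.e. about 159425 kHz, which matches the .clock value (drm_display_mode.clock is expressed in kHz).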
+
+static const struct panel_desc boe_tv101wum_nl6_desc = {
+ .modes = &boe_tv101wum_nl6_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+ .discharge_on_disable = false,
+};
+
+static const struct drm_display_mode auo_kd101n80_45na_default_mode = {
+ .clock = 157000,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 36,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 16,
+ .vsync_end = 1920 + 16 + 4,
+ .vtotal = 1920 + 16 + 4 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc auo_kd101n80_45na_desc = {
+ .modes = &auo_kd101n80_45na_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_kd101n80_45na_init_cmd,
+ .discharge_on_disable = true,
+};
+
+static const struct drm_display_mode boe_tv101wum_n53_default_mode = {
+ .clock = 159916,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 80,
+ .hsync_end = 1200 + 80 + 24,
+ .htotal = 1200 + 80 + 24 + 60,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 20,
+ .vsync_end = 1920 + 20 + 4,
+ .vtotal = 1920 + 20 + 4 + 10,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc boe_tv101wum_n53_desc = {
+ .modes = &boe_tv101wum_n53_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = boe_init_cmd,
+};
+
+static const struct drm_display_mode auo_b101uan08_3_default_mode = {
+ .clock = 159667,
+ .hdisplay = 1200,
+ .hsync_start = 1200 + 60,
+ .hsync_end = 1200 + 60 + 4,
+ .htotal = 1200 + 60 + 4 + 80,
+ .vdisplay = 1920,
+ .vsync_start = 1920 + 34,
+ .vsync_end = 1920 + 34 + 2,
+ .vtotal = 1920 + 34 + 2 + 24,
+ .vrefresh = 60,
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+};
+
+static const struct panel_desc auo_b101uan08_3_desc = {
+ .modes = &auo_b101uan08_3_default_mode,
+ .bpc = 8,
+ .size = {
+ .width_mm = 135,
+ .height_mm = 216,
+ },
+ .lanes = 4,
+ .format = MIPI_DSI_FMT_RGB888,
+ .mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_LPM,
+ .init_cmds = auo_b101uan08_3_init_cmd,
+};
+
+static int boe_panel_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct boe_panel *boe = to_boe_panel(panel);
+ const struct drm_display_mode *m = boe->desc->modes;
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, m);
+ if (!mode) {
+ dev_err(panel->dev, "failed to add mode %ux%u@%u\n",
+ m->hdisplay, m->vdisplay, m->vrefresh);
+ return -ENOMEM;
+ }
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ connector->display_info.width_mm = boe->desc->size.width_mm;
+ connector->display_info.height_mm = boe->desc->size.height_mm;
+ connector->display_info.bpc = boe->desc->bpc;
+
+ return 1;
+}
+
+static const struct drm_panel_funcs boe_panel_funcs = {
+ .unprepare = boe_panel_unprepare,
+ .prepare = boe_panel_prepare,
+ .enable = boe_panel_enable,
+ .get_modes = boe_panel_get_modes,
+};
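Callers of the drm_panel API invoke these callbacks in the order prepare (power rails and init sequence), then enable (pipe running, first frame about to show), and disable then unprepare (sleep mode and power-down) on the way back down. That is why all of the heavy lifting sits in prepare()/unprepare() here, while boe_panel_enable() is only a settling delay.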
+
+static int boe_panel_add(struct boe_panel *boe)
+{
+ struct device *dev = &boe->dsi->dev;
+ int err;
+
+ boe->avdd = devm_regulator_get(dev, "avdd");
+ if (IS_ERR(boe->avdd))
+ return PTR_ERR(boe->avdd);
+
+ boe->avee = devm_regulator_get(dev, "avee");
+ if (IS_ERR(boe->avee))
+ return PTR_ERR(boe->avee);
+
+ boe->pp1800 = devm_regulator_get(dev, "pp1800");
+ if (IS_ERR(boe->pp1800))
+ return PTR_ERR(boe->pp1800);
+
+ boe->enable_gpio = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW);
+ if (IS_ERR(boe->enable_gpio)) {
+ dev_err(dev, "cannot get reset-gpios %ld\n",
+ PTR_ERR(boe->enable_gpio));
+ return PTR_ERR(boe->enable_gpio);
+ }
+
+ gpiod_set_value(boe->enable_gpio, 0);
+
+ drm_panel_init(&boe->base, dev, &boe_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ err = drm_panel_of_backlight(&boe->base);
+ if (err)
+ return err;
+
+ return drm_panel_add(&boe->base);
+}
+
+static int boe_panel_probe(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe;
+ int ret;
+ const struct panel_desc *desc;
+
+ boe = devm_kzalloc(&dsi->dev, sizeof(*boe), GFP_KERNEL);
+ if (!boe)
+ return -ENOMEM;
+
+ desc = of_device_get_match_data(&dsi->dev);
+ dsi->lanes = desc->lanes;
+ dsi->format = desc->format;
+ dsi->mode_flags = desc->mode_flags;
+ boe->desc = desc;
+ boe->dsi = dsi;
+ ret = boe_panel_add(boe);
+ if (ret < 0)
+ return ret;
+
+ mipi_dsi_set_drvdata(dsi, boe);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret)
+ drm_panel_remove(&boe->base);
+
+ return ret;
+}
+
+static void boe_panel_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+
+ drm_panel_disable(&boe->base);
+ drm_panel_unprepare(&boe->base);
+}
+
+static int boe_panel_remove(struct mipi_dsi_device *dsi)
+{
+ struct boe_panel *boe = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ boe_panel_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "failed to detach from DSI host: %d\n", ret);
+
+ if (boe->base.dev)
+ drm_panel_remove(&boe->base);
+
+ return 0;
+}
+
+static const struct of_device_id boe_of_match[] = {
+ { .compatible = "boe,tv101wum-nl6",
+ .data = &boe_tv101wum_nl6_desc
+ },
+ { .compatible = "auo,kd101n80-45na",
+ .data = &auo_kd101n80_45na_desc
+ },
+ { .compatible = "boe,tv101wum-n53",
+ .data = &boe_tv101wum_n53_desc
+ },
+ { .compatible = "auo,b101uan08.3",
+ .data = &auo_b101uan08_3_desc
+ },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, boe_of_match);
+
+static struct mipi_dsi_driver boe_panel_driver = {
+ .driver = {
+ .name = "panel-boe-tv101wum-nl6",
+ .of_match_table = boe_of_match,
+ },
+ .probe = boe_panel_probe,
+ .remove = boe_panel_remove,
+ .shutdown = boe_panel_shutdown,
+};
+module_mipi_dsi_driver(boe_panel_driver);
+
+MODULE_AUTHOR("Jitao Shi <[email protected]>");
+MODULE_DESCRIPTION("BOE tv101wum-nl6 1200x1920 video mode panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-elida-kd35t133.c b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
new file mode 100644
index 000000000000..711ded453c44
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-elida-kd35t133.c
@@ -0,0 +1,352 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Elida kd35t133 3.5" MIPI-DSI panel driver
+ * Copyright (C) 2020 Theobroma Systems Design und Consulting GmbH
+ *
+ * based on
+ *
+ * Rocktech jh057n00900 5.5" MIPI-DSI panel driver
+ * Copyright (C) Purism SPC 2019
+ */
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/media-bus-format.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/display_timing.h>
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+/* Manufacturer specific commands sent via DSI */
+#define KD35T133_CMD_INTERFACEMODECTRL 0xb0
+#define KD35T133_CMD_FRAMERATECTRL 0xb1
+#define KD35T133_CMD_DISPLAYINVERSIONCTRL 0xb4
+#define KD35T133_CMD_DISPLAYFUNCTIONCTRL 0xb6
+#define KD35T133_CMD_POWERCONTROL1 0xc0
+#define KD35T133_CMD_POWERCONTROL2 0xc1
+#define KD35T133_CMD_VCOMCONTROL 0xc5
+#define KD35T133_CMD_POSITIVEGAMMA 0xe0
+#define KD35T133_CMD_NEGATIVEGAMMA 0xe1
+#define KD35T133_CMD_SETIMAGEFUNCTION 0xe9
+#define KD35T133_CMD_ADJUSTCONTROL3 0xf7
+
+struct kd35t133 {
+ struct device *dev;
+ struct drm_panel panel;
+ struct gpio_desc *reset_gpio;
+ struct regulator *vdd;
+ struct regulator *iovcc;
+ bool prepared;
+};
+
+static inline struct kd35t133 *panel_to_kd35t133(struct drm_panel *panel)
+{
+ return container_of(panel, struct kd35t133, panel);
+}
+
+#define dsi_dcs_write_seq(dsi, cmd, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write(dsi, cmd, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
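The do/while (0) wrapper keeps the macro usable as a single statement after an if, and declaring d static const gives the payload static storage for mipi_dsi_dcs_write(); note the embedded return exits the calling function on error. One invocation, dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41), expands to roughly:

do {
	static const u8 d[] = { 0x41 };
	int ret;

	ret = mipi_dsi_dcs_write(dsi, KD35T133_CMD_POWERCONTROL2,
				 d, ARRAY_SIZE(d));
	if (ret < 0)
		return ret;	/* returns from the enclosing function */
} while (0);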
+
+static int kd35t133_init_sequence(struct kd35t133 *ctx)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ struct device *dev = ctx->dev;
+
+ /*
+ * Init sequence was supplied by the panel vendor with minimal
+ * documentation.
+ */
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POSITIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x04, 0x0f, 0x06, 0x3a, 0x56,
+ 0x4d, 0x03, 0x0a, 0x06, 0x30, 0x3e, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_NEGATIVEGAMMA,
+ 0x00, 0x13, 0x18, 0x01, 0x11, 0x06, 0x38, 0x34,
+ 0x4d, 0x06, 0x0d, 0x0b, 0x31, 0x37, 0x0f);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL1, 0x18, 0x17);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_POWERCONTROL2, 0x41);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_VCOMCONTROL, 0x00, 0x1a, 0x80);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_ADDRESS_MODE, 0x48);
+ dsi_dcs_write_seq(dsi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_INTERFACEMODECTRL, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_FRAMERATECTRL, 0xa0);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYINVERSIONCTRL, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_DISPLAYFUNCTIONCTRL,
+ 0x20, 0x02);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_SETIMAGEFUNCTION, 0x00);
+ dsi_dcs_write_seq(dsi, KD35T133_CMD_ADJUSTCONTROL3,
+ 0xa9, 0x51, 0x2c, 0x82);
+ mipi_dsi_dcs_write(dsi, MIPI_DCS_ENTER_INVERT_MODE, NULL, 0);
+
+ DRM_DEV_DEBUG_DRIVER(dev, "Panel init sequence done\n");
+ return 0;
+}
+
+static int kd35t133_unprepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(ctx->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "failed to enter sleep mode: %d\n",
+ ret);
+ return ret;
+ }
+
+ regulator_disable(ctx->iovcc);
+ regulator_disable(ctx->vdd);
+
+ ctx->prepared = false;
+
+ return 0;
+}
+
+static int kd35t133_prepare(struct drm_panel *panel)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ DRM_DEV_DEBUG_DRIVER(ctx->dev, "Resetting the panel\n");
+ ret = regulator_enable(ctx->vdd);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable vdd supply: %d\n", ret);
+ return ret;
+ }
+
+ ret = regulator_enable(ctx->iovcc);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev,
+ "Failed to enable iovcc supply: %d\n", ret);
+ goto disable_vdd;
+ }
+
+ msleep(20);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10, 20);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+
+ msleep(20);
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to exit sleep mode: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(250);
+
+ ret = kd35t133_init_sequence(ctx);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Panel init sequence failed: %d\n",
+ ret);
+ goto disable_iovcc;
+ }
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to set display on: %d\n", ret);
+ goto disable_iovcc;
+ }
+
+ msleep(50);
+
+ ctx->prepared = true;
+
+ return 0;
+
+disable_iovcc:
+ regulator_disable(ctx->iovcc);
+disable_vdd:
+ regulator_disable(ctx->vdd);
+ return ret;
+}
+
+static const struct drm_display_mode default_mode = {
+ .hdisplay = 320,
+ .hsync_start = 320 + 130,
+ .hsync_end = 320 + 130 + 4,
+ .htotal = 320 + 130 + 4 + 130,
+ .vdisplay = 480,
+ .vsync_start = 480 + 2,
+ .vsync_end = 480 + 2 + 1,
+ .vtotal = 480 + 2 + 1 + 2,
+ .vrefresh = 60,
+ .clock = 17000,
+ .width_mm = 42,
+ .height_mm = 82,
+};
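The same timing cross-check holds here: htotal = 320 + 130 + 4 + 130 = 584 and vtotal = 480 + 2 + 1 + 2 = 485, so 584 * 485 * 60 Hz = 16,994,400 Hz, consistent with the 17000 kHz .clock value (the implied refresh rate is just over 60 Hz).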
+
+static int kd35t133_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct kd35t133 *ctx = panel_to_kd35t133(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &default_mode);
+ if (!mode) {
+ DRM_DEV_ERROR(ctx->dev, "Failed to add mode %ux%u@%u\n",
+ default_mode.hdisplay, default_mode.vdisplay,
+ default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs kd35t133_funcs = {
+ .unprepare = kd35t133_unprepare,
+ .prepare = kd35t133_prepare,
+ .get_modes = kd35t133_get_modes,
+};
+
+static int kd35t133_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct kd35t133 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ DRM_DEV_ERROR(dev, "cannot get reset gpio\n");
+ return PTR_ERR(ctx->reset_gpio);
+ }
+
+ ctx->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(ctx->vdd)) {
+ ret = PTR_ERR(ctx->vdd);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request vdd regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ ctx->iovcc = devm_regulator_get(dev, "iovcc");
+ if (IS_ERR(ctx->iovcc)) {
+ ret = PTR_ERR(ctx->iovcc);
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev,
+ "Failed to request iovcc regulator: %d\n",
+ ret);
+ return ret;
+ }
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 1;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST |
+ MIPI_DSI_MODE_LPM | MIPI_DSI_MODE_EOT_PACKET;
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &kd35t133_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ drm_panel_add(&ctx->panel);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_DEV_ERROR(dev, "mipi_dsi_attach failed: %d\n", ret);
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void kd35t133_shutdown(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = drm_panel_unprepare(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to unprepare panel: %d\n",
+ ret);
+
+ ret = drm_panel_disable(&ctx->panel);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to disable panel: %d\n",
+ ret);
+}
+
+static int kd35t133_remove(struct mipi_dsi_device *dsi)
+{
+ struct kd35t133 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ kd35t133_shutdown(dsi);
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(&dsi->dev, "Failed to detach from DSI host: %d\n",
+ ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id kd35t133_of_match[] = {
+ { .compatible = "elida,kd35t133" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, kd35t133_of_match);
+
+static struct mipi_dsi_driver kd35t133_driver = {
+ .driver = {
+ .name = "panel-elida-kd35t133",
+ .of_match_table = kd35t133_of_match,
+ },
+ .probe = kd35t133_probe,
+ .remove = kd35t133_remove,
+ .shutdown = kd35t133_shutdown,
+};
+module_mipi_dsi_driver(kd35t133_driver);
+
+MODULE_AUTHOR("Heiko Stuebner <[email protected]>");
+MODULE_DESCRIPTION("DRM driver for Elida kd35t133 MIPI DSI panel");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
new file mode 100644
index 000000000000..fddbfddf6566
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-feixin-k101-im2ba02.c
@@ -0,0 +1,526 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019-2020 Icenowy Zheng <[email protected]>
+ */
+
+#include <linux/gpio/consumer.h>
+#include <linux/delay.h>
+#include <linux/mod_devicetable.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regulator/consumer.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define K101_IM2BA02_INIT_CMD_LEN 2
+
+static const char * const regulator_names[] = {
+ "dvdd",
+ "avdd",
+ "cvdd"
+};
+
+struct k101_im2ba02 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+
+ struct regulator_bulk_data supplies[ARRAY_SIZE(regulator_names)];
+ struct gpio_desc *reset;
+};
+
+static inline struct k101_im2ba02 *panel_to_k101_im2ba02(struct drm_panel *panel)
+{
+ return container_of(panel, struct k101_im2ba02, panel);
+}
+
+struct k101_im2ba02_init_cmd {
+ u8 data[K101_IM2BA02_INIT_CMD_LEN];
+};
+
+static const struct k101_im2ba02_init_cmd k101_im2ba02_init_cmds[] = {
+ /* Switch to page 0 */
+ { .data = { 0xE0, 0x00 } },
+
+ /* Seems to be some password */
+	{ .data = { 0xE1, 0x93 } },
+ { .data = { 0xE2, 0x65 } },
+ { .data = { 0xE3, 0xF8 } },
+
+ /* Lane number, 0x02 - 3 lanes, 0x03 - 4 lanes */
+ { .data = { 0x80, 0x03 } },
+
+ /* Sequence control */
+ { .data = { 0x70, 0x02 } },
+ { .data = { 0x71, 0x23 } },
+ { .data = { 0x72, 0x06 } },
+
+ /* Switch to page 1 */
+ { .data = { 0xE0, 0x01 } },
+
+ /* Set VCOM */
+ { .data = { 0x00, 0x00 } },
+ { .data = { 0x01, 0x66 } },
+ /* Set VCOM_Reverse */
+ { .data = { 0x03, 0x00 } },
+ { .data = { 0x04, 0x25 } },
+
+ /* Set Gamma Power, VG[MS][PN] */
+ { .data = { 0x17, 0x00 } },
+ { .data = { 0x18, 0x6D } },
+ { .data = { 0x19, 0x00 } },
+ { .data = { 0x1A, 0x00 } },
+ { .data = { 0x1B, 0xBF } }, /* VGMN = -4.5V */
+ { .data = { 0x1C, 0x00 } },
+
+ /* Set Gate Power */
+ { .data = { 0x1F, 0x3E } }, /* VGH_R = 15V */
+ { .data = { 0x20, 0x28 } }, /* VGL_R = -11V */
+ { .data = { 0x21, 0x28 } }, /* VGL_R2 = -11V */
+ { .data = { 0x22, 0x0E } }, /* PA[6:4] = 0, PA[0] = 0 */
+
+ /* Set Panel */
+ { .data = { 0x37, 0x09 } }, /* SS = 1, BGR = 1 */
+
+ /* Set RGBCYC */
+ { .data = { 0x38, 0x04 } }, /* JDT = 100 column inversion */
+ { .data = { 0x39, 0x08 } }, /* RGB_N_EQ1 */
+ { .data = { 0x3A, 0x12 } }, /* RGB_N_EQ2 */
+ { .data = { 0x3C, 0x78 } }, /* set EQ3 for TE_H */
+ { .data = { 0x3D, 0xFF } }, /* set CHGEN_ON */
+ { .data = { 0x3E, 0xFF } }, /* set CHGEN_OFF */
+ { .data = { 0x3F, 0x7F } }, /* set CHGEN_OFF2 */
+
+ /* Set TCON parameter */
+ { .data = { 0x40, 0x06 } }, /* RSO = 800 points */
+ { .data = { 0x41, 0xA0 } }, /* LN = 1280 lines */
+
+ /* Set power voltage */
+ { .data = { 0x55, 0x0F } }, /* DCDCM */
+ { .data = { 0x56, 0x01 } },
+ { .data = { 0x57, 0x69 } },
+ { .data = { 0x58, 0x0A } },
+ { .data = { 0x59, 0x0A } },
+ { .data = { 0x5A, 0x45 } },
+ { .data = { 0x5B, 0x15 } },
+
+ /* Set gamma */
+ { .data = { 0x5D, 0x7C } },
+ { .data = { 0x5E, 0x65 } },
+ { .data = { 0x5F, 0x55 } },
+ { .data = { 0x60, 0x49 } },
+ { .data = { 0x61, 0x44 } },
+ { .data = { 0x62, 0x35 } },
+ { .data = { 0x63, 0x3A } },
+ { .data = { 0x64, 0x23 } },
+ { .data = { 0x65, 0x3D } },
+ { .data = { 0x66, 0x3C } },
+ { .data = { 0x67, 0x3D } },
+ { .data = { 0x68, 0x5D } },
+ { .data = { 0x69, 0x4D } },
+ { .data = { 0x6A, 0x56 } },
+ { .data = { 0x6B, 0x48 } },
+ { .data = { 0x6C, 0x45 } },
+ { .data = { 0x6D, 0x38 } },
+ { .data = { 0x6E, 0x25 } },
+ { .data = { 0x6F, 0x00 } },
+ { .data = { 0x70, 0x7C } },
+ { .data = { 0x71, 0x65 } },
+ { .data = { 0x72, 0x55 } },
+ { .data = { 0x73, 0x49 } },
+ { .data = { 0x74, 0x44 } },
+ { .data = { 0x75, 0x35 } },
+ { .data = { 0x76, 0x3A } },
+ { .data = { 0x77, 0x23 } },
+ { .data = { 0x78, 0x3D } },
+ { .data = { 0x79, 0x3C } },
+ { .data = { 0x7A, 0x3D } },
+ { .data = { 0x7B, 0x5D } },
+ { .data = { 0x7C, 0x4D } },
+ { .data = { 0x7D, 0x56 } },
+ { .data = { 0x7E, 0x48 } },
+ { .data = { 0x7F, 0x45 } },
+ { .data = { 0x80, 0x38 } },
+ { .data = { 0x81, 0x25 } },
+ { .data = { 0x82, 0x00 } },
+
+ /* Switch to page 2, for GIP */
+ { .data = { 0xE0, 0x02 } },
+
+ { .data = { 0x00, 0x1E } },
+ { .data = { 0x01, 0x1E } },
+ { .data = { 0x02, 0x41 } },
+ { .data = { 0x03, 0x41 } },
+ { .data = { 0x04, 0x43 } },
+ { .data = { 0x05, 0x43 } },
+ { .data = { 0x06, 0x1F } },
+ { .data = { 0x07, 0x1F } },
+ { .data = { 0x08, 0x1F } },
+ { .data = { 0x09, 0x1F } },
+ { .data = { 0x0A, 0x1E } },
+ { .data = { 0x0B, 0x1E } },
+ { .data = { 0x0C, 0x1F } },
+ { .data = { 0x0D, 0x47 } },
+ { .data = { 0x0E, 0x47 } },
+ { .data = { 0x0F, 0x45 } },
+ { .data = { 0x10, 0x45 } },
+ { .data = { 0x11, 0x4B } },
+ { .data = { 0x12, 0x4B } },
+ { .data = { 0x13, 0x49 } },
+ { .data = { 0x14, 0x49 } },
+ { .data = { 0x15, 0x1F } },
+
+ { .data = { 0x16, 0x1E } },
+ { .data = { 0x17, 0x1E } },
+ { .data = { 0x18, 0x40 } },
+ { .data = { 0x19, 0x40 } },
+ { .data = { 0x1A, 0x42 } },
+ { .data = { 0x1B, 0x42 } },
+ { .data = { 0x1C, 0x1F } },
+ { .data = { 0x1D, 0x1F } },
+ { .data = { 0x1E, 0x1F } },
+	{ .data = { 0x1F, 0x1F } },
+ { .data = { 0x20, 0x1E } },
+ { .data = { 0x21, 0x1E } },
+	{ .data = { 0x22, 0x1F } },
+ { .data = { 0x23, 0x46 } },
+ { .data = { 0x24, 0x46 } },
+ { .data = { 0x25, 0x44 } },
+ { .data = { 0x26, 0x44 } },
+ { .data = { 0x27, 0x4A } },
+ { .data = { 0x28, 0x4A } },
+ { .data = { 0x29, 0x48 } },
+ { .data = { 0x2A, 0x48 } },
+	{ .data = { 0x2B, 0x1F } },
+
+ { .data = { 0x2C, 0x1F } },
+ { .data = { 0x2D, 0x1F } },
+ { .data = { 0x2E, 0x42 } },
+ { .data = { 0x2F, 0x42 } },
+ { .data = { 0x30, 0x40 } },
+ { .data = { 0x31, 0x40 } },
+ { .data = { 0x32, 0x1E } },
+ { .data = { 0x33, 0x1E } },
+ { .data = { 0x34, 0x1F } },
+ { .data = { 0x35, 0x1F } },
+ { .data = { 0x36, 0x1E } },
+ { .data = { 0x37, 0x1E } },
+ { .data = { 0x38, 0x1F } },
+ { .data = { 0x39, 0x48 } },
+ { .data = { 0x3A, 0x48 } },
+ { .data = { 0x3B, 0x4A } },
+ { .data = { 0x3C, 0x4A } },
+ { .data = { 0x3D, 0x44 } },
+ { .data = { 0x3E, 0x44 } },
+ { .data = { 0x3F, 0x46 } },
+ { .data = { 0x40, 0x46 } },
+ { .data = { 0x41, 0x1F } },
+
+ { .data = { 0x42, 0x1F } },
+ { .data = { 0x43, 0x1F } },
+ { .data = { 0x44, 0x43 } },
+ { .data = { 0x45, 0x43 } },
+ { .data = { 0x46, 0x41 } },
+ { .data = { 0x47, 0x41 } },
+ { .data = { 0x48, 0x1E } },
+ { .data = { 0x49, 0x1E } },
+ { .data = { 0x4A, 0x1E } },
+ { .data = { 0x4B, 0x1F } },
+ { .data = { 0x4C, 0x1E } },
+ { .data = { 0x4D, 0x1E } },
+ { .data = { 0x4E, 0x1F } },
+ { .data = { 0x4F, 0x49 } },
+ { .data = { 0x50, 0x49 } },
+ { .data = { 0x51, 0x4B } },
+ { .data = { 0x52, 0x4B } },
+ { .data = { 0x53, 0x45 } },
+ { .data = { 0x54, 0x45 } },
+ { .data = { 0x55, 0x47 } },
+ { .data = { 0x56, 0x47 } },
+ { .data = { 0x57, 0x1F } },
+
+ { .data = { 0x58, 0x10 } },
+ { .data = { 0x59, 0x00 } },
+ { .data = { 0x5A, 0x00 } },
+ { .data = { 0x5B, 0x30 } },
+ { .data = { 0x5C, 0x02 } },
+ { .data = { 0x5D, 0x40 } },
+ { .data = { 0x5E, 0x01 } },
+ { .data = { 0x5F, 0x02 } },
+ { .data = { 0x60, 0x30 } },
+ { .data = { 0x61, 0x01 } },
+ { .data = { 0x62, 0x02 } },
+ { .data = { 0x63, 0x6A } },
+ { .data = { 0x64, 0x6A } },
+ { .data = { 0x65, 0x05 } },
+ { .data = { 0x66, 0x12 } },
+ { .data = { 0x67, 0x74 } },
+ { .data = { 0x68, 0x04 } },
+ { .data = { 0x69, 0x6A } },
+ { .data = { 0x6A, 0x6A } },
+ { .data = { 0x6B, 0x08 } },
+
+ { .data = { 0x6C, 0x00 } },
+ { .data = { 0x6D, 0x04 } },
+ { .data = { 0x6E, 0x04 } },
+ { .data = { 0x6F, 0x88 } },
+ { .data = { 0x70, 0x00 } },
+ { .data = { 0x71, 0x00 } },
+ { .data = { 0x72, 0x06 } },
+ { .data = { 0x73, 0x7B } },
+ { .data = { 0x74, 0x00 } },
+ { .data = { 0x75, 0x07 } },
+ { .data = { 0x76, 0x00 } },
+ { .data = { 0x77, 0x5D } },
+ { .data = { 0x78, 0x17 } },
+ { .data = { 0x79, 0x1F } },
+ { .data = { 0x7A, 0x00 } },
+ { .data = { 0x7B, 0x00 } },
+ { .data = { 0x7C, 0x00 } },
+ { .data = { 0x7D, 0x03 } },
+ { .data = { 0x7E, 0x7B } },
+
+ { .data = { 0xE0, 0x04 } },
+ { .data = { 0x2B, 0x2B } },
+ { .data = { 0x2E, 0x44 } },
+
+ { .data = { 0xE0, 0x01 } },
+ { .data = { 0x0E, 0x01 } },
+
+ { .data = { 0xE0, 0x03 } },
+ { .data = { 0x98, 0x2F } },
+
+ { .data = { 0xE0, 0x00 } },
+ { .data = { 0xE6, 0x02 } },
+ { .data = { 0xE7, 0x02 } },
+
+ { .data = { 0x11, 0x00 } },
+};
+
+static const struct k101_im2ba02_init_cmd timed_cmds[] = {
+ { .data = { 0x29, 0x00 } },
+ { .data = { 0x35, 0x00 } },
+};
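Both entries here are standard DCS opcodes: 0x29 is MIPI_DCS_SET_DISPLAY_ON and 0x35 is MIPI_DCS_SET_TEAR_ON. In the enable path below only timed_cmds[1] (tear-on) is written through this table; display-on is issued via the mipi_dsi_dcs_set_display_on() helper instead.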
+
+static int k101_im2ba02_prepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ unsigned int i;
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret)
+ return ret;
+
+ msleep(30);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ gpiod_set_value(ctx->reset, 1);
+ msleep(200);
+
+ for (i = 0; i < ARRAY_SIZE(k101_im2ba02_init_cmds); i++) {
+ const struct k101_im2ba02_init_cmd *cmd = &k101_im2ba02_init_cmds[i];
+
+ ret = mipi_dsi_dcs_write_buffer(dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+ if (ret < 0)
+ goto powerdown;
+ }
+
+ return 0;
+
+powerdown:
+ gpiod_set_value(ctx->reset, 0);
+ msleep(50);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static int k101_im2ba02_enable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ const struct k101_im2ba02_init_cmd *cmd = &timed_cmds[1];
+ int ret;
+
+ msleep(150);
+
+ ret = mipi_dsi_dcs_set_display_on(ctx->dsi);
+ if (ret < 0)
+ return ret;
+
+ msleep(50);
+
+ return mipi_dsi_dcs_write_buffer(ctx->dsi, cmd->data, K101_IM2BA02_INIT_CMD_LEN);
+}
+
+static int k101_im2ba02_disable(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+
+ return mipi_dsi_dcs_set_display_off(ctx->dsi);
+}
+
+static int k101_im2ba02_unprepare(struct drm_panel *panel)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to set display off: %d\n",
+ ret);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(ctx->dsi);
+ if (ret < 0)
+ DRM_DEV_ERROR(panel->dev, "failed to enter sleep mode: %d\n",
+ ret);
+
+ msleep(200);
+
+ gpiod_set_value(ctx->reset, 0);
+ msleep(20);
+
+ return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+}
+
+static const struct drm_display_mode k101_im2ba02_default_mode = {
+ .clock = 70000,
+ .vrefresh = 60,
+
+ .hdisplay = 800,
+ .hsync_start = 800 + 20,
+ .hsync_end = 800 + 20 + 20,
+ .htotal = 800 + 20 + 20 + 20,
+
+ .vdisplay = 1280,
+ .vsync_start = 1280 + 16,
+ .vsync_end = 1280 + 16 + 4,
+ .vtotal = 1280 + 16 + 4 + 4,
+
+ .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED,
+ .width_mm = 136,
+ .height_mm = 217,
+};
+
+static int k101_im2ba02_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct k101_im2ba02 *ctx = panel_to_k101_im2ba02(panel);
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &k101_im2ba02_default_mode);
+ if (!mode) {
+		DRM_DEV_ERROR(&ctx->dsi->dev, "failed to add mode %ux%u@%u\n",
+ k101_im2ba02_default_mode.hdisplay,
+ k101_im2ba02_default_mode.vdisplay,
+ k101_im2ba02_default_mode.vrefresh);
+ return -ENOMEM;
+ }
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs k101_im2ba02_funcs = {
+ .disable = k101_im2ba02_disable,
+ .unprepare = k101_im2ba02_unprepare,
+ .prepare = k101_im2ba02_prepare,
+ .enable = k101_im2ba02_enable,
+ .get_modes = k101_im2ba02_get_modes,
+};
+
+static int k101_im2ba02_dsi_probe(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx;
+ unsigned int i;
+ int ret;
+
+ ctx = devm_kzalloc(&dsi->dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+ ctx->dsi = dsi;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); i++)
+ ctx->supplies[i].supply = regulator_names[i];
+
+ ret = devm_regulator_bulk_get(&dsi->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get regulators\n");
+ return ret;
+ }
+
+ ctx->reset = devm_gpiod_get(&dsi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset)) {
+ DRM_DEV_ERROR(&dsi->dev, "Couldn't get our reset GPIO\n");
+ return PTR_ERR(ctx->reset);
+ }
+
+ drm_panel_init(&ctx->panel, &dsi->dev, &k101_im2ba02_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_of_backlight(&ctx->panel);
+ if (ret)
+ return ret;
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0)
+ return ret;
+
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->lanes = 4;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_panel_remove(&ctx->panel);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int k101_im2ba02_dsi_remove(struct mipi_dsi_device *dsi)
+{
+ struct k101_im2ba02 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id k101_im2ba02_of_match[] = {
+ { .compatible = "feixin,k101-im2ba02", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, k101_im2ba02_of_match);
+
+static struct mipi_dsi_driver k101_im2ba02_driver = {
+ .probe = k101_im2ba02_dsi_probe,
+ .remove = k101_im2ba02_dsi_remove,
+ .driver = {
+ .name = "feixin-k101-im2ba02",
+ .of_match_table = k101_im2ba02_of_match,
+ },
+};
+module_mipi_dsi_driver(k101_im2ba02_driver);
+
+MODULE_AUTHOR("Icenowy Zheng <[email protected]>");
+MODULE_DESCRIPTION("Feixin K101 IM2BA02 MIPI-DSI LCD panel");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
index f394d53a7da4..09935520e606 100644
--- a/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
+++ b/drivers/gpu/drm/panel/panel-ilitek-ili9322.c
@@ -540,7 +540,7 @@ static int ili9322_enable(struct drm_panel *panel)
/* Serial RGB modes */
static const struct drm_display_mode srgb_320x240_mode = {
- .clock = 2453500,
+ .clock = 24535,
.hdisplay = 320,
.hsync_start = 320 + 359,
.hsync_end = 320 + 359 + 1,
@@ -554,7 +554,7 @@ static const struct drm_display_mode srgb_320x240_mode = {
};
static const struct drm_display_mode srgb_360x240_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 360,
.hsync_start = 360 + 35,
.hsync_end = 360 + 35 + 1,
@@ -569,7 +569,7 @@ static const struct drm_display_mode srgb_360x240_mode = {
/* This is the only mode listed for parallel RGB in the datasheet */
static const struct drm_display_mode prgb_320x240_mode = {
- .clock = 6400000,
+ .clock = 64000,
.hdisplay = 320,
.hsync_start = 320 + 38,
.hsync_end = 320 + 38 + 1,
@@ -584,7 +584,7 @@ static const struct drm_display_mode prgb_320x240_mode = {
/* YUV modes */
static const struct drm_display_mode yuv_640x320_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 252,
.hsync_end = 640 + 252 + 1,
@@ -598,7 +598,7 @@ static const struct drm_display_mode yuv_640x320_mode = {
};
static const struct drm_display_mode yuv_720x360_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 252,
.hsync_end = 720 + 252 + 1,
@@ -613,7 +613,7 @@ static const struct drm_display_mode yuv_720x360_mode = {
/* BT.656 VGA mode, 640x480 */
static const struct drm_display_mode itu_r_bt_656_640_mode = {
- .clock = 2454000,
+ .clock = 24540,
.hdisplay = 640,
.hsync_start = 640 + 3,
.hsync_end = 640 + 3 + 1,
@@ -628,7 +628,7 @@ static const struct drm_display_mode itu_r_bt_656_640_mode = {
/* BT.656 D1 mode 720x480 */
static const struct drm_display_mode itu_r_bt_656_720_mode = {
- .clock = 2700000,
+ .clock = 27000,
.hdisplay = 720,
.hsync_start = 720 + 3,
.hsync_end = 720 + 3 + 1,
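All six hunks above correct the same unit error: drm_display_mode.clock is specified in kilohertz, so the old Hz-scale values (e.g. 2453500) described impossible multi-GHz pixel clocks, while 24535 means 24.535 MHz. A minimal sketch of the relation the core relies on, roughly what drm_mode_vrefresh() computes for plain progressive modes:

static int mode_refresh_hz(const struct drm_display_mode *mode)
{
	/* .clock is in kHz; htotal * vtotal is pixels per frame. */
	return mode->clock * 1000 / (mode->htotal * mode->vtotal);
}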
diff --git a/drivers/gpu/drm/panel/panel-lg-lg4573.c b/drivers/gpu/drm/panel/panel-lg-lg4573.c
index b262b53dbd85..5907f2503755 100644
--- a/drivers/gpu/drm/panel/panel-lg-lg4573.c
+++ b/drivers/gpu/drm/panel/panel-lg-lg4573.c
@@ -197,7 +197,7 @@ static int lg4573_enable(struct drm_panel *panel)
}
static const struct drm_display_mode default_mode = {
- .clock = 27000,
+ .clock = 28341,
.hdisplay = 480,
.hsync_start = 480 + 10,
.hsync_end = 480 + 10 + 59,
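The corrected value is the exact product of the mode's timings rather than a round nominal figure: assuming the htotal and vtotal continuations of this mode (480 + 10 + 59 + 10 = 559 and 800 + 15 + 15 + 15 = 845, which fall outside this hunk), 559 * 845 * 60 Hz = 28,341,300 Hz, i.e. 28341 kHz.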
diff --git a/drivers/gpu/drm/panel/panel-novatek-nt35510.c b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
new file mode 100644
index 000000000000..4a8fa908a2cf
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-novatek-nt35510.c
@@ -0,0 +1,1098 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Novatek NT35510 panel driver
+ * Copyright (C) 2020 Linus Walleij <[email protected]>
+ * Based on code by Robert Teather (C) 2012 Samsung
+ *
+ * The NT35510 display controller (the physical component, not this
+ * Linux kernel software driver) can handle:
+ * 480x864, 480x854, 480x800, 480x720 and 480x640 pixel displays.
+ * It has 480x840x24bit SRAM embedded for storing a frame.
+ * When powered on, the display defaults to 480x800 mode.
+ *
+ * The actual panels using this component have different names, but
+ * the code needed to set up and configure the panel will be similar,
+ * so they should all use the NT35510 driver with appropriate configuration
+ * per-panel, e.g. for physical size.
+ *
+ * This driver is for the DSI interface to panels using the NT35510.
+ *
+ * The NT35510 can also use an RGB (DPI) interface combined with an
+ * I2C or SPI interface for setting up the NT35510. If this is needed
+ * this panel driver should be refactored to also support that use
+ * case.
+ */
+#include <linux/backlight.h>
+#include <linux/bitops.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of_device.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
+
+#define MCS_CMD_MAUCCTR 0xF0 /* Manufacturer command enable */
+#define MCS_CMD_READ_ID1 0xDA
+#define MCS_CMD_READ_ID2 0xDB
+#define MCS_CMD_READ_ID3 0xDC
+#define MCS_CMD_MTP_READ_SETTING 0xF8 /* Uncertain about name */
+#define MCS_CMD_MTP_READ_PARAM 0xFF /* Uncertain about name */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 0.
+ */
+#define NT35510_P0_DOPCTR 0xB1
+#define NT35510_P0_SDHDTCTR 0xB6
+#define NT35510_P0_GSEQCTR 0xB7
+#define NT35510_P0_SDEQCTR 0xB8
+#define NT35510_P0_SDVPCTR 0xBA
+#define NT35510_P0_DPFRCTR1 0xBD
+#define NT35510_P0_DPFRCTR2 0xBE
+#define NT35510_P0_DPFRCTR3 0xBF
+#define NT35510_P0_DPMCTR12 0xCC
+
+#define NT35510_P0_DOPCTR_LEN 2
+#define NT35510_P0_GSEQCTR_LEN 2
+#define NT35510_P0_SDEQCTR_LEN 4
+#define NT35510_P0_SDVPCTR_LEN 1
+#define NT35510_P0_DPFRCTR1_LEN 5
+#define NT35510_P0_DPFRCTR2_LEN 5
+#define NT35510_P0_DPFRCTR3_LEN 5
+#define NT35510_P0_DPMCTR12_LEN 3
+
+#define NT35510_DOPCTR_0_RAMKP BIT(7) /* Contents kept in sleep */
+#define NT35510_DOPCTR_0_DSITE BIT(6) /* Enable TE signal */
+#define NT35510_DOPCTR_0_DSIG BIT(5) /* Enable generic read/write */
+#define NT35510_DOPCTR_0_DSIM BIT(4) /* Enable video mode on DSI */
+#define NT35510_DOPCTR_0_EOTP BIT(3) /* Support EoTP */
+#define NT35510_DOPCTR_0_N565 BIT(2) /* RGB or BGR pixel format */
+#define NT35510_DOPCTR_1_TW_PWR_SEL BIT(4) /* TE power selector */
+#define NT35510_DOPCTR_1_CRGB BIT(3) /* RGB or BGR byte order */
+#define NT35510_DOPCTR_1_CTB BIT(2) /* Vertical scanning direction */
+#define NT35510_DOPCTR_1_CRL BIT(1) /* Source driver data shift */
+#define NT35510_P0_SDVPCTR_PRG BIT(2) /* 0 = normal operation, 1 = VGLO */
+#define NT35510_P0_SDVPCTR_AVDD 0 /* source driver output = AVDD */
+#define NT35510_P0_SDVPCTR_OFFCOL 1 /* source driver output = off color */
+#define NT35510_P0_SDVPCTR_AVSS 2 /* source driver output = AVSS */
+#define NT35510_P0_SDVPCTR_HI_Z 3 /* source driver output = High impedance */
+
+/*
+ * These manufacturer commands are available after we enable manufacturer
+ * command set (MCS) for page 1.
+ */
+#define NT35510_P1_SETAVDD 0xB0
+#define NT35510_P1_SETAVEE 0xB1
+#define NT35510_P1_SETVCL 0xB2
+#define NT35510_P1_SETVGH 0xB3
+#define NT35510_P1_SETVRGH 0xB4
+#define NT35510_P1_SETVGL 0xB5
+#define NT35510_P1_BT1CTR 0xB6
+#define NT35510_P1_BT2CTR 0xB7
+#define NT35510_P1_BT3CTR 0xB8
+#define NT35510_P1_BT4CTR 0xB9 /* VGH boosting times/freq */
+#define NT35510_P1_BT5CTR 0xBA
+#define NT35510_P1_PFMCTR 0xBB
+#define NT35510_P1_SETVGP 0xBC
+#define NT35510_P1_SETVGN 0xBD
+#define NT35510_P1_SETVCMOFF 0xBE
+#define NT35510_P1_VGHCTR 0xBF /* VGH output ctrl */
+#define NT35510_P1_SET_GAMMA_RED_POS 0xD1
+#define NT35510_P1_SET_GAMMA_GREEN_POS 0xD2
+#define NT35510_P1_SET_GAMMA_BLUE_POS 0xD3
+#define NT35510_P1_SET_GAMMA_RED_NEG 0xD4
+#define NT35510_P1_SET_GAMMA_GREEN_NEG 0xD5
+#define NT35510_P1_SET_GAMMA_BLUE_NEG 0xD6
+
+/* AVDD and AVEE setting 3 bytes */
+#define NT35510_P1_AVDD_LEN 3
+#define NT35510_P1_AVEE_LEN 3
+#define NT35510_P1_VGH_LEN 3
+#define NT35510_P1_VGL_LEN 3
+#define NT35510_P1_VGP_LEN 3
+#define NT35510_P1_VGN_LEN 3
+/* BT1CTR thru BT5CTR setting 3 bytes */
+#define NT35510_P1_BT1CTR_LEN 3
+#define NT35510_P1_BT2CTR_LEN 3
+#define NT35510_P1_BT4CTR_LEN 3
+#define NT35510_P1_BT5CTR_LEN 3
+/* 52 gamma parameters times two per color: positive and negative */
+#define NT35510_P1_GAMMA_LEN 52
+
+/**
+ * struct nt35510_config - the display-specific NT35510 configuration
+ *
+ * Some of the settings provide an array of bytes, A, B, C which mean:
+ * A = normal / idle off mode
+ * B = idle on mode
+ * C = partial / idle off mode
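+ * For example, an entry such as { 0x09, 0x09, 0x09 } (as used in the
+ * Hydis configuration below) programs the same value for all three modes.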
+ *
+ * Gamma correction arrays are 10bit numbers, where two consecutive
+ * bytes make up one point on the gamma correction curve. The points
+ * are not linearly placed along the X axis: we get points 0, 1, 3, 5,
+ * 7, 11, 15, 23, 31, 47, 63, 95, 127, 128, 160, 192, 208, 224, 232,
+ * 240, 244, 248, 250, 252, 254, 255. The voltage tuples form
+ * V0, V1, V3 ... V255, with 0x0000 being the lowest voltage and
+ * 0x03FF being the highest voltage.
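+ * For example, the byte pair 0x01, 0x27 in the default tables below
+ * encodes the 10-bit value 0x127 = 295.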
+ *
+ * Each value must be strictly higher than the previous value forming
+ * a rising curve like this:
+ *
+ * ^
+ * | V255
+ * | V254
+ * | ....
+ * | V5
+ * | V3
+ * | V1
+ * | V0
+ * +------------------------------------------->
+ *
+ * The details about all settings can be found in the NT35510 Application
+ * Note.
+ */
+struct nt35510_config {
+ /**
+ * @width_mm: physical panel width [mm]
+ */
+ u32 width_mm;
+ /**
+ * @height_mm: physical panel height [mm]
+ */
+ u32 height_mm;
+ /**
+ * @mode: the display mode. This is only relevant outside the panel
+ * in video mode: in command mode this is configuring the internal
+ * timing in the display controller.
+ */
+ const struct drm_display_mode mode;
+ /**
+ * @avdd: setting for AVDD ranging from 0x00 = 6.5V to 0x14 = 4.5V
+	 * in 0.1V steps; the default is 0x05, which means 6.0V
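+	 * (e.g. 0x09 = 6.5V - 9 x 0.1V = 5.6V, as used below)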
+ */
+ u8 avdd[NT35510_P1_AVDD_LEN];
+ /**
+ * @bt1ctr: setting for boost power control for the AVDD step-up
+ * circuit (1)
+	 * bits 0..2 in the lower nibble control PCK, the booster clock
+ * frequency for the step-up circuit:
+ * 0 = Hsync/32
+ * 1 = Hsync/16
+ * 2 = Hsync/8
+ * 3 = Hsync/4
+ * 4 = Hsync/2
+ * 5 = Hsync
+ * 6 = Hsync x 2
+ * 7 = Hsync x 4
+	 * bits 4..6 in the upper nibble control BTP, the boosting
+	 * amplification for the step-up circuit:
+ * 0 = Disable
+ * 1 = 1.5 x VDDB
+ * 2 = 1.66 x VDDB
+ * 3 = 2 x VDDB
+ * 4 = 2.5 x VDDB
+ * 5 = 3 x VDDB
+ * The defaults are 4 and 4 yielding 0x44
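+	 * As an example, the value 0x34 used below decodes as PCK = 4
+	 * (Hsync/2) in the low nibble and BTP = 3 (2 x VDDB) in the
+	 * high nibble.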
+ */
+ u8 bt1ctr[NT35510_P1_BT1CTR_LEN];
+ /**
+ * @avee: setting for AVEE ranging from 0x00 = -6.5V to 0x14 = -4.5V
+	 * in 0.1V steps; the default is 0x05, which means -6.0V
+ */
+ u8 avee[NT35510_P1_AVEE_LEN];
+ /**
+ * @bt2ctr: setting for boost power control for the AVEE step-up
+ * circuit (2)
+	 * bits 0..2 in the lower nibble control NCK, the booster clock
+	 * frequency; the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble control BTN, the boosting
+	 * amplification for the step-up circuit.
+ * 0 = Disable
+ * 1 = -1.5 x VDDB
+ * 2 = -2 x VDDB
+ * 3 = -2.5 x VDDB
+ * 4 = -3 x VDDB
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt2ctr[NT35510_P1_BT2CTR_LEN];
+ /**
+ * @vgh: setting for VGH ranging from 0x00 = 7.0V to 0x0B = 18.0V
+ * in 1V steps, the default is 0x08 which means 15V
+ */
+ u8 vgh[NT35510_P1_VGH_LEN];
+ /**
+ * @bt4ctr: setting for boost power control for the VGH step-up
+ * circuit (4)
+	 * bits 0..2 in the lower nibble control HCK, the booster clock
+	 * frequency; the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble control BTH, the boosting
+	 * amplification for the step-up circuit.
+ * 0 = AVDD + VDDB
+ * 1 = AVDD - AVEE
+ * 2 = AVDD - AVEE + VDDB
+ * 3 = AVDD x 2 - AVEE
+ * The defaults are 4 and 3 yielding 0x34
+ */
+ u8 bt4ctr[NT35510_P1_BT4CTR_LEN];
+ /**
+ * @vgl: setting for VGL ranging from 0x00 = -2V to 0x0f = -15V in
+ * 1V steps, the default is 0x08 which means -10V
+ */
+ u8 vgl[NT35510_P1_VGL_LEN];
+ /**
+ * @bt5ctr: setting for boost power control for the VGL step-up
+ * circuit (5)
+	 * bits 0..2 in the lower nibble control LCK, the booster clock
+	 * frequency; the values are the same as for PCK in @bt1ctr.
+	 * bits 4..5 in the upper nibble control BTL, the boosting
+	 * amplification for the step-up circuit.
+ * 0 = AVEE + VCL
+ * 1 = AVEE - AVDD
+ * 2 = AVEE + VCL - AVDD
+ * 3 = AVEE x 2 - AVDD
+ * The defaults are 3 and 2 yielding 0x32
+ */
+ u8 bt5ctr[NT35510_P1_BT5CTR_LEN];
+ /**
+ * @vgp: setting for VGP, the positive gamma divider voltages
+ * VGMP the high voltage and VGSP the low voltage.
+ * The first byte contains bit 8 of VGMP and VGSP in bits 4 and 0
+ * The second byte contains bit 0..7 of VGMP
+ * The third byte contains bit 0..7 of VGSP
+ * VGMP 0x00 = 3.0V .. 0x108 = 6.3V in steps of 12.5mV
+ * VGSP 0x00 = 0V .. 0x111 = 3.7V in steps of 12.5mV
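+	 * For example { 0x00, 0xA3, 0x00 } used below encodes
+	 * VGMP = 0x0A3 = 163, i.e. 3.0V + 163 x 12.5mV = 5.0375V, and
+	 * VGSP = 0V.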
+ */
+ u8 vgp[NT35510_P1_VGP_LEN];
+ /**
+ * @vgn: setting for VGN, the negative gamma divider voltages,
+ * same layout of bytes as @vgp.
+ */
+ u8 vgn[NT35510_P1_VGN_LEN];
+ /**
+ * @sdeqctr: Source driver control settings, first byte is
+ * 0 for mode 1 and 1 for mode 2. Mode 1 uses two steps and
+	 * mode 2 uses three steps, meaning EQS3 is not used in mode
+ * 1. Mode 2 is default. The last three parameters are EQS1, EQS2
+ * and EQS3, setting the rise time for each equalizer step:
+ * 0x00 = 0.0 us to 0x0f = 7.5 us in steps of 0.5us. The default
+ * is 0x07 = 3.5 us.
+ */
+ u8 sdeqctr[NT35510_P0_SDEQCTR_LEN];
+ /**
+ * @sdvpctr: power/voltage behaviour during vertical porch time
+ */
+ u8 sdvpctr;
+ /**
+	 * @t1: the number of pixel clocks on one scanline minus one,
+	 * range 0x100 (257 ticks) .. 0x3FF (1024 ticks), i.e. the
+	 * register value + 1 clock ticks.
+ */
+ u16 t1;
+ /**
+	 * @vbp: vertical back porch toward the PANEL. Note: not toward
+	 * the DSI host; these are separate interfaces, in from the DSI
+	 * host and out to the panel.
+ */
+ u8 vbp;
+ /**
+ * @vfp: vertical front porch toward the PANEL.
+ */
+ u8 vfp;
+ /**
+ * @psel: pixel clock divisor: 0 = 1, 1 = 2, 2 = 4, 3 = 8.
+ */
+ u8 psel;
+ /**
+ * @dpmctr12: Display timing control 12
+ * Byte 1 bit 4 selects LVGL voltage level: 0 = VGLX, 1 = VGL_REG
+ * Byte 1 bit 1 selects gate signal mode: 0 = non-overlap, 1 = overlap
+ * Byte 1 bit 0 selects output signal control R/L swap, 0 = normal
+ * 1 = swap all O->E, L->R
+ * Byte 2 is CLW delay clock for CK O/E and CKB O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ * Byte 3 is FTI_H0 delay time for STP O/E signals:
+ * 0x00 = 0us .. 0xFF = 12.75us in 0.05us steps
+ */
+ u8 dpmctr12[NT35510_P0_DPMCTR12_LEN];
+ /**
+ * @gamma_corr_pos_r: Red gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_g: Green gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_pos_b: Blue gamma correction parameters, positive
+ */
+ u8 gamma_corr_pos_b[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_r: Red gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_r[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_g: Green gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_g[NT35510_P1_GAMMA_LEN];
+ /**
+ * @gamma_corr_neg_b: Blue gamma correction parameters, negative
+ */
+ u8 gamma_corr_neg_b[NT35510_P1_GAMMA_LEN];
+};
+
+/**
+ * struct nt35510 - state container for the NT35510 panel
+ */
+struct nt35510 {
+ /**
+ * @dev: the container device
+ */
+ struct device *dev;
+ /**
+ * @conf: the specific panel configuration, as the NT35510
+ * can be combined with many physical panels, they can have
+ * different physical dimensions and gamma correction etc,
+ * so this is stored in the config.
+ */
+ const struct nt35510_config *conf;
+ /**
+ * @panel: the DRM panel object for the instance
+ */
+ struct drm_panel panel;
+ /**
+ * @supplies: regulators supplying the panel
+ */
+ struct regulator_bulk_data supplies[2];
+ /**
+ * @reset_gpio: the reset line
+ */
+ struct gpio_desc *reset_gpio;
+};
+
+/* Manufacturer command has strictly this byte sequence */
+static const u8 nt35510_mauc_select_page_0[] = { 0x55, 0xAA, 0x52, 0x08, 0x00 };
+static const u8 nt35510_mauc_select_page_1[] = { 0x55, 0xAA, 0x52, 0x08, 0x01 };
+static const u8 nt35510_vgh_on[] = { 0x01 };
+
+static inline struct nt35510 *panel_to_nt35510(struct drm_panel *panel)
+{
+ return container_of(panel, struct nt35510, panel);
+}
+
+#define NT35510_ROTATE_0_SETTING 0x02
+#define NT35510_ROTATE_180_SETTING 0x00
+
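+/*
+ * Send a command with a payload of arbitrary length. Writes are
+ * chunked into at most 15 bytes each: the first chunk goes out as a
+ * DCS write carrying the command byte, and any remaining bytes are
+ * streamed as generic writes continuing the same sequence.
+ */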
+static int nt35510_send_long(struct nt35510 *nt, struct mipi_dsi_device *dsi,
+ u8 cmd, u8 cmdlen, const u8 *seq)
+{
+ const u8 *seqp = seq;
+ int cmdwritten = 0;
+ int chunk = cmdlen;
+ int ret;
+
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_dcs_write(dsi, cmd, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending DCS command seq cmd %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+
+ while (cmdwritten < cmdlen) {
+ chunk = cmdlen - cmdwritten;
+ if (chunk > 15)
+ chunk = 15;
+ ret = mipi_dsi_generic_write(dsi, seqp, chunk);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev,
+ "error sending generic write seq %02x\n",
+ cmd);
+ return ret;
+ }
+ cmdwritten += chunk;
+ seqp += chunk;
+ }
+ DRM_DEV_DEBUG(nt->dev, "sent command %02x %02x bytes\n",
+ cmd, cmdlen);
+ return 0;
+}
+
+static int nt35510_read_id(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 id1, id2, id3;
+ int ret;
+
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID1, &id1, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID1\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID2, &id2, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID2\n");
+ return ret;
+ }
+ ret = mipi_dsi_dcs_read(dsi, MCS_CMD_READ_ID3, &id3, 1);
+ if (ret < 0) {
+ DRM_DEV_ERROR(nt->dev, "could not read MTP ID3\n");
+ return ret;
+ }
+
+ /*
+ * Multi-Time Programmable (?) memory contains manufacturer
+ * ID (e.g. Hydis 0x55), driver ID (e.g. NT35510 0xc0) and
+ * version.
+ */
+ DRM_DEV_INFO(nt->dev,
+ "MTP ID manufacturer: %02x version: %02x driver: %02x\n",
+ id1, id2, id3);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_power() - set up power config in page 1
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_power(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVDD,
+ NT35510_P1_AVDD_LEN,
+ nt->conf->avdd);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT1CTR,
+ NT35510_P1_BT1CTR_LEN,
+ nt->conf->bt1ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETAVEE,
+ NT35510_P1_AVEE_LEN,
+ nt->conf->avee);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT2CTR,
+ NT35510_P1_BT2CTR_LEN,
+ nt->conf->bt2ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGH,
+ NT35510_P1_VGH_LEN,
+ nt->conf->vgh);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT4CTR,
+ NT35510_P1_BT4CTR_LEN,
+ nt->conf->bt4ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_VGHCTR,
+ ARRAY_SIZE(nt35510_vgh_on),
+ nt35510_vgh_on);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGL,
+ NT35510_P1_VGL_LEN,
+ nt->conf->vgl);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_BT5CTR,
+ NT35510_P1_BT5CTR_LEN,
+ nt->conf->bt5ctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGP,
+ NT35510_P1_VGP_LEN,
+ nt->conf->vgp);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SETVGN,
+ NT35510_P1_VGN_LEN,
+ nt->conf->vgn);
+ if (ret)
+ return ret;
+
+ /* Typically 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+/**
+ * nt35510_setup_display() - set up display config in page 0
+ * @nt: the display instance to set up
+ */
+static int nt35510_setup_display(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ const struct nt35510_config *conf = nt->conf;
+ u8 dopctr[NT35510_P0_DOPCTR_LEN];
+ u8 gseqctr[NT35510_P0_GSEQCTR_LEN];
+ u8 dpfrctr[NT35510_P0_DPFRCTR1_LEN];
+ /* FIXME: set up any rotation (assume none for now) */
+ u8 addr_mode = NT35510_ROTATE_0_SETTING;
+ u8 val;
+ int ret;
+
+ /* Enable TE, EoTP and RGB pixel format */
+ dopctr[0] = NT35510_DOPCTR_0_DSITE | NT35510_DOPCTR_0_EOTP |
+ NT35510_DOPCTR_0_N565;
+ dopctr[1] = NT35510_DOPCTR_1_CTB;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DOPCTR,
+ NT35510_P0_DOPCTR_LEN,
+ dopctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_ADDRESS_MODE, &addr_mode,
+ sizeof(addr_mode));
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Source data hold time, default 0x05 = 2.5us
+ * 0x00..0x3F = 0 .. 31.5us in steps of 0.5us
+ * 0x0A = 5us
+ */
+ val = 0x0A;
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDHDTCTR, &val,
+ sizeof(val));
+ if (ret < 0)
+ return ret;
+
+ /* EQ control for gate signals, 0x00 = 0 us */
+ gseqctr[0] = 0x00;
+ gseqctr[1] = 0x00;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_GSEQCTR,
+ NT35510_P0_GSEQCTR_LEN,
+ gseqctr);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_SDEQCTR,
+ NT35510_P0_SDEQCTR_LEN,
+ conf->sdeqctr);
+ if (ret)
+ return ret;
+
+ ret = mipi_dsi_dcs_write(dsi, NT35510_P0_SDVPCTR,
+ &conf->sdvpctr, 1);
+ if (ret < 0)
+ return ret;
+
+	/*
+	 * Display timing control for active and idle off mode:
+	 * the first byte contains the two high bits of T1A and the
+	 * second byte the low 8 bits; the valid range is 0x100 (256)
+	 * to 0x3FF (1023), representing 257..1024 (value + 1) pixel
+	 * clock ticks for one scanline. At 20MHz pixel clock this
+	 * covers the range of 12.85us .. 51.20us in steps of 0.05us;
+	 * the default is 0x184 (388), representing 389 ticks.
+	 * The third byte is VBPDA, vertical back porch display active,
+	 * and the fourth VFPDA, vertical front porch display active,
+	 * both given in number of scanlines in the range 0x02..0xFF
+	 * for 2..255 scanlines. The fifth byte is 2 bits selecting
+	 * PSEL for active and idle off mode, dividing the 20MHz clock
+	 * by 1, 2, 4 or 8 (values 0..3). This needs to be adjusted to
+	 * get the right frame rate.
+	 */
+ dpfrctr[0] = (conf->t1 >> 8) & 0xFF;
+ dpfrctr[1] = conf->t1 & 0xFF;
+ /* Vertical back porch */
+ dpfrctr[2] = conf->vbp;
+ /* Vertical front porch */
+ dpfrctr[3] = conf->vfp;
+ dpfrctr[4] = conf->psel;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR1,
+ NT35510_P0_DPFRCTR1_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ /* For idle and partial idle off mode we decrease front porch by one */
+ dpfrctr[3]--;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR2,
+ NT35510_P0_DPFRCTR2_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPFRCTR3,
+ NT35510_P0_DPFRCTR3_LEN,
+ dpfrctr);
+ if (ret)
+ return ret;
+
+ /* Enable TE on vblank */
+ ret = mipi_dsi_dcs_set_tear_on(dsi, MIPI_DSI_DCS_TEAR_MODE_VBLANK);
+ if (ret)
+ return ret;
+
+ /* Turn on the pads? */
+ ret = nt35510_send_long(nt, dsi, NT35510_P0_DPMCTR12,
+ NT35510_P0_DPMCTR12_LEN,
+ conf->dpmctr12);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_set_brightness(struct backlight_device *bl)
+{
+ struct nt35510 *nt = bl_get_data(bl);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ u8 brightness = bl->props.brightness;
+ int ret;
+
+ DRM_DEV_DEBUG(nt->dev, "set brightness %d\n", brightness);
+ ret = mipi_dsi_dcs_write(dsi, MIPI_DCS_SET_DISPLAY_BRIGHTNESS,
+ &brightness,
+ sizeof(brightness));
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static const struct backlight_ops nt35510_bl_ops = {
+ .update_status = nt35510_set_brightness,
+};
+
+/*
+ * The power-on sequence for the panel: enable the supplies, toggle
+ * the reset line, read back the panel ID, then program the power and
+ * display settings.
+ */
+static int nt35510_power_on(struct nt35510 *nt)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret < 0) {
+ dev_err(nt->dev, "unable to enable regulators\n");
+ return ret;
+ }
+
+ /* Toggle RESET in accordance with datasheet page 370 */
+ if (nt->reset_gpio) {
+ gpiod_set_value(nt->reset_gpio, 1);
+ /* Active min 10 us according to datasheet, let's say 20 */
+ usleep_range(20, 1000);
+ gpiod_set_value(nt->reset_gpio, 0);
+ /*
+ * 5 ms during sleep mode, 120 ms during sleep out mode
+ * according to datasheet, let's use 120-140 ms.
+ */
+ usleep_range(120000, 140000);
+ }
+
+ ret = nt35510_read_id(nt);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 1 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_1),
+ nt35510_mauc_select_page_1);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_power(nt);
+ if (ret)
+ return ret;
+
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_POS,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_pos_b);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_RED_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_r);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_GREEN_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_g);
+ if (ret)
+ return ret;
+ ret = nt35510_send_long(nt, dsi, NT35510_P1_SET_GAMMA_BLUE_NEG,
+ NT35510_P1_GAMMA_LEN,
+ nt->conf->gamma_corr_neg_b);
+ if (ret)
+ return ret;
+
+ /* Set up stuff in manufacturer control, page 0 */
+ ret = nt35510_send_long(nt, dsi, MCS_CMD_MAUCCTR,
+ ARRAY_SIZE(nt35510_mauc_select_page_0),
+ nt35510_mauc_select_page_0);
+ if (ret)
+ return ret;
+
+ ret = nt35510_setup_display(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_power_off(struct nt35510 *nt)
+{
+ int ret;
+
+ ret = regulator_bulk_disable(ARRAY_SIZE(nt->supplies), nt->supplies);
+ if (ret)
+ return ret;
+
+ if (nt->reset_gpio)
+ gpiod_set_value(nt->reset_gpio, 1);
+
+ return 0;
+}
+
+static int nt35510_unprepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display off (%d)\n",
+ ret);
+ return ret;
+ }
+ usleep_range(10000, 20000);
+
+ /* Enter sleep mode */
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to enter sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+
+	/* Wait 4 frames; the vendor driver uses a 5ms delay for this */
+ usleep_range(5000, 10000);
+
+ ret = nt35510_power_off(nt);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static int nt35510_prepare(struct drm_panel *panel)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(nt->dev);
+ int ret;
+
+ ret = nt35510_power_on(nt);
+ if (ret)
+ return ret;
+
+ /* Exit sleep mode */
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to exit sleep mode (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Up to 120 ms */
+ usleep_range(120000, 150000);
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret) {
+ DRM_DEV_ERROR(nt->dev, "failed to turn display on (%d)\n",
+ ret);
+ return ret;
+ }
+ /* Some 10 ms */
+ usleep_range(10000, 20000);
+
+ return 0;
+}
+
+static int nt35510_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct nt35510 *nt = panel_to_nt35510(panel);
+ struct drm_display_mode *mode;
+ struct drm_display_info *info;
+
+ info = &connector->display_info;
+ info->width_mm = nt->conf->width_mm;
+ info->height_mm = nt->conf->height_mm;
+ mode = drm_mode_duplicate(connector->dev, &nt->conf->mode);
+ if (!mode) {
+ DRM_ERROR("bad mode or failed to add mode\n");
+ return -EINVAL;
+ }
+ drm_mode_set_name(mode);
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+
+ mode->width_mm = nt->conf->width_mm;
+ mode->height_mm = nt->conf->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1; /* Number of modes */
+}
+
+static const struct drm_panel_funcs nt35510_drm_funcs = {
+ .unprepare = nt35510_unprepare,
+ .prepare = nt35510_prepare,
+ .get_modes = nt35510_get_modes,
+};
+
+static int nt35510_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct nt35510 *nt;
+ int ret;
+
+ nt = devm_kzalloc(dev, sizeof(struct nt35510), GFP_KERNEL);
+ if (!nt)
+ return -ENOMEM;
+ mipi_dsi_set_drvdata(dsi, nt);
+ nt->dev = dev;
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ /*
+ * Datasheet suggests max HS rate for NT35510 is 250 MHz
+ * (period time 4ns, see figure 7.6.4 page 365) and max LP rate is
+ * 20 MHz (period time 50ns, see figure 7.6.6. page 366).
+ * However these frequencies appear in source code for the Hydis
+ * HVA40WV1 panel and setting up the LP frequency makes the panel
+ * not work.
+ *
+ * TODO: if other panels prove to be closer to the datasheet,
+ * maybe make this a per-panel config in struct nt35510_config?
+ */
+ dsi->hs_rate = 349440000;
+ dsi->lp_rate = 9600000;
+ dsi->mode_flags = MIPI_DSI_CLOCK_NON_CONTINUOUS |
+ MIPI_DSI_MODE_EOT_PACKET;
+
+ /*
+ * Every new incarnation of this display must have a unique
+ * data entry for the system in this driver.
+ */
+ nt->conf = of_device_get_match_data(dev);
+ if (!nt->conf) {
+ dev_err(dev, "missing device configuration\n");
+ return -ENODEV;
+ }
+
+ nt->supplies[0].supply = "vdd"; /* 2.3-4.8 V */
+	nt->supplies[1].supply = "vddi"; /* 1.65-3.3 V */
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(nt->supplies),
+ nt->supplies);
+ if (ret < 0)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[0].consumer,
+ 2300000, 4800000);
+ if (ret)
+ return ret;
+ ret = regulator_set_voltage(nt->supplies[1].consumer,
+ 1650000, 3300000);
+ if (ret)
+ return ret;
+
+ nt->reset_gpio = devm_gpiod_get_optional(dev, "reset", GPIOD_ASIS);
+ if (IS_ERR(nt->reset_gpio)) {
+ dev_err(dev, "error getting RESET GPIO\n");
+ return PTR_ERR(nt->reset_gpio);
+ }
+
+ drm_panel_init(&nt->panel, dev, &nt35510_drm_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ /*
+ * First, try to locate an external backlight (such as on GPIO)
+ * if this fails, assume we will want to use the internal backlight
+ * control.
+ */
+ ret = drm_panel_of_backlight(&nt->panel);
+ if (ret) {
+ dev_err(dev, "error getting external backlight %d\n", ret);
+ return ret;
+ }
+ if (!nt->panel.backlight) {
+ struct backlight_device *bl;
+
+ bl = devm_backlight_device_register(dev, "nt35510", dev, nt,
+ &nt35510_bl_ops, NULL);
+ if (IS_ERR(bl)) {
+ DRM_DEV_ERROR(dev, "failed to register backlight device\n");
+ return PTR_ERR(bl);
+ }
+ bl->props.max_brightness = 255;
+ bl->props.brightness = 255;
+ bl->props.power = FB_BLANK_POWERDOWN;
+ nt->panel.backlight = bl;
+ }
+
+ ret = drm_panel_add(&nt->panel);
+ if (ret < 0)
+ return ret;
+
+	ret = mipi_dsi_attach(dsi);
+	if (ret < 0) {
+		drm_panel_remove(&nt->panel);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int nt35510_remove(struct mipi_dsi_device *dsi)
+{
+ struct nt35510 *nt = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ mipi_dsi_detach(dsi);
+ /* Power off */
+ ret = nt35510_power_off(nt);
+ drm_panel_remove(&nt->panel);
+
+ return ret;
+}
+
+/*
+ * These gamma correction values are 10bit tuples, so only bits 0 and 1 are
+ * ever used in the first byte. They form a positive and a negative gamma
+ * correction curve for each color; values must be strictly higher for each
+ * step on the curve. As can be seen, these default curves go from 0x0001
+ * to 0x03FE.
+ */
+#define NT35510_GAMMA_POS_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x83, 0x02, 0x78, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+#define NT35510_GAMMA_NEG_DEFAULT 0x00, 0x01, 0x00, 0x43, 0x00, \
+ 0x6B, 0x00, 0x87, 0x00, 0xA3, 0x00, 0xCE, 0x00, 0xF1, 0x01, \
+ 0x27, 0x01, 0x53, 0x01, 0x98, 0x01, 0xCE, 0x02, 0x22, 0x02, \
+ 0x43, 0x02, 0x50, 0x02, 0x9E, 0x02, 0xDD, 0x03, 0x00, 0x03, \
+ 0x2E, 0x03, 0x54, 0x03, 0x7F, 0x03, 0x95, 0x03, 0xB3, 0x03, \
+ 0xC2, 0x03, 0xE1, 0x03, 0xF1, 0x03, 0xFE
+
+/*
+ * The Hydis HVA40WV1 panel
+ */
+static const struct nt35510_config nt35510_hydis_hva40wv1 = {
+ .width_mm = 52,
+ .height_mm = 86,
+	/*
+ * As the Hydis panel is used in command mode, the porches etc
+ * are settings programmed internally into the NT35510 controller
+ * and generated toward the physical display. As the panel is not
+ * used in video mode, these are not really exposed to the DSI
+ * host.
+ *
+ * Display frame rate control:
+ * Frame rate = (20 MHz / 1) / (389 * (7 + 50 + 800)) ~= 60 Hz
+ */
+ .mode = {
+ /* The internal pixel clock of the NT35510 is 20 MHz */
+ .clock = 20000,
+ .hdisplay = 480,
+ .hsync_start = 480 + 2, /* HFP = 2 */
+ .hsync_end = 480 + 2 + 0, /* HSync = 0 */
+		.htotal = 480 + 2 + 0 + 5, /* HBP = 5 */
+ .vdisplay = 800,
+ .vsync_start = 800 + 2, /* VFP = 2 */
+ .vsync_end = 800 + 2 + 0, /* VSync = 0 */
+ .vtotal = 800 + 2 + 0 + 5, /* VBP = 5 */
+ .vrefresh = 60, /* Calculated */
+ .flags = 0,
+ },
+ /* 0x09: AVDD = 5.6V */
+ .avdd = { 0x09, 0x09, 0x09 },
+ /* 0x34: PCK = Hsync/2, BTP = 2 x VDDB */
+ .bt1ctr = { 0x34, 0x34, 0x34 },
+ /* 0x09: AVEE = -5.6V */
+ .avee = { 0x09, 0x09, 0x09 },
+ /* 0x24: NCK = Hsync/2, BTN = -2 x VDDB */
+ .bt2ctr = { 0x24, 0x24, 0x24 },
+ /* 0x05 = 12V */
+ .vgh = { 0x05, 0x05, 0x05 },
+	/* 0x24: HCK = Hsync/2, BTH: VGH = AVDD - AVEE + VDDB */
+ .bt4ctr = { 0x24, 0x24, 0x24 },
+ /* 0x0B = -13V */
+ .vgl = { 0x0B, 0x0B, 0x0B },
+	/* 0x24: LCK = Hsync/2, BTL: VGL = AVEE + VCL - AVDD */
+ .bt5ctr = { 0x24, 0x24, 0x24 },
+ /* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+ .vgp = { 0x00, 0xA3, 0x00 },
+ /* VGMP: 0x0A3 = 5.0375V, VGSP = 0V */
+ .vgn = { 0x00, 0xA3, 0x00 },
+ /* SDEQCTR: source driver EQ mode 2, 2.5 us rise time on each step */
+ .sdeqctr = { 0x01, 0x05, 0x05, 0x05 },
+ /* SDVPCTR: Normal operation off color during v porch */
+ .sdvpctr = 0x01,
+ /* T1: number of pixel clocks on one scanline: 0x184 = 389 clocks */
+ .t1 = 0x0184,
+ /* VBP: vertical back porch toward the panel */
+ .vbp = 7,
+ /* VFP: vertical front porch toward the panel */
+ .vfp = 50,
+ /* PSEL: divide pixel clock 20MHz with 1 (no clock downscaling) */
+ .psel = 0,
+ /* DPTMCTR12: 0x03: LVGL = VGLX, overlap mode, swap R->L O->E */
+ .dpmctr12 = { 0x03, 0x00, 0x00, },
+ /* Default gamma correction values */
+ .gamma_corr_pos_r = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_g = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_pos_b = { NT35510_GAMMA_POS_DEFAULT },
+ .gamma_corr_neg_r = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_g = { NT35510_GAMMA_NEG_DEFAULT },
+ .gamma_corr_neg_b = { NT35510_GAMMA_NEG_DEFAULT },
+};
+
+static const struct of_device_id nt35510_of_match[] = {
+ {
+ .compatible = "hydis,hva40wv1",
+ .data = &nt35510_hydis_hva40wv1,
+ },
+ { }
+};
+MODULE_DEVICE_TABLE(of, nt35510_of_match);
+
+static struct mipi_dsi_driver nt35510_driver = {
+ .probe = nt35510_probe,
+ .remove = nt35510_remove,
+ .driver = {
+ .name = "panel-novatek-nt35510",
+ .of_match_table = nt35510_of_match,
+ },
+};
+module_mipi_dsi_driver(nt35510_driver);
+
+MODULE_AUTHOR("Linus Walleij <[email protected]>");
+MODULE_DESCRIPTION("NT35510-based panel driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-samsung-ld9040.c b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
index 3c52f15f7a1c..9bb2e8c7934a 100644
--- a/drivers/gpu/drm/panel/panel-samsung-ld9040.c
+++ b/drivers/gpu/drm/panel/panel-samsung-ld9040.c
@@ -373,6 +373,12 @@ static const struct of_device_id ld9040_of_match[] = {
};
MODULE_DEVICE_TABLE(of, ld9040_of_match);
+static const struct spi_device_id ld9040_ids[] = {
+ { "ld9040", },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(spi, ld9040_ids);
+
static struct spi_driver ld9040_driver = {
.probe = ld9040_probe,
.remove = ld9040_remove,
diff --git a/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
new file mode 100644
index 000000000000..9d843fcc3a22
--- /dev/null
+++ b/drivers/gpu/drm/panel/panel-samsung-s6e88a0-ams452ef01.c
@@ -0,0 +1,293 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// Copyright (C) 2019, Michael Srba
+
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_modes.h>
+#include <drm/drm_panel.h>
+
+struct s6e88a0_ams452ef01 {
+ struct drm_panel panel;
+ struct mipi_dsi_device *dsi;
+ struct regulator_bulk_data supplies[2];
+ struct gpio_desc *reset_gpio;
+
+ bool prepared;
+};
+
+static inline struct
+s6e88a0_ams452ef01 *to_s6e88a0_ams452ef01(struct drm_panel *panel)
+{
+ return container_of(panel, struct s6e88a0_ams452ef01, panel);
+}
+
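+/*
+ * Send a fixed DCS command sequence. Note that the macro returns from
+ * the enclosing function on a write error, so it may only be used
+ * inside functions returning int.
+ */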
+#define dsi_dcs_write_seq(dsi, seq...) do { \
+ static const u8 d[] = { seq }; \
+ int ret; \
+ ret = mipi_dsi_dcs_write_buffer(dsi, d, ARRAY_SIZE(d)); \
+ if (ret < 0) \
+ return ret; \
+ } while (0)
+
+static void s6e88a0_ams452ef01_reset(struct s6e88a0_ams452ef01 *ctx)
+{
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(5000, 6000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ usleep_range(1000, 2000);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 1);
+ usleep_range(10000, 11000);
+}
+
+static int s6e88a0_ams452ef01_on(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags |= MIPI_DSI_MODE_LPM;
+
+ dsi_dcs_write_seq(dsi, 0xf0, 0x5a, 0x5a); // enable LEVEL2 commands
+ dsi_dcs_write_seq(dsi, 0xcc, 0x4c); // set Pixel Clock Divider polarity
+
+ ret = mipi_dsi_dcs_exit_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to exit sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+	// set default brightness/gamma
+ dsi_dcs_write_seq(dsi, 0xca,
+ 0x01, 0x00, 0x01, 0x00, 0x01, 0x00, // V255 RR,GG,BB
+ 0x80, 0x80, 0x80, // V203 R,G,B
+ 0x80, 0x80, 0x80, // V151 R,G,B
+ 0x80, 0x80, 0x80, // V87 R,G,B
+ 0x80, 0x80, 0x80, // V51 R,G,B
+ 0x80, 0x80, 0x80, // V35 R,G,B
+ 0x80, 0x80, 0x80, // V23 R,G,B
+ 0x80, 0x80, 0x80, // V11 R,G,B
+ 0x6b, 0x68, 0x71, // V3 R,G,B
+ 0x00, 0x00, 0x00); // V1 R,G,B
+	// set default AMOLED off ratio
+	dsi_dcs_write_seq(dsi, 0xb2, 0x40, 0x0a, 0x17, 0x00, 0x0a);
+	dsi_dcs_write_seq(dsi, 0xb6, 0x2c, 0x0b); // set default ELVSS voltage
+ dsi_dcs_write_seq(dsi, MIPI_DCS_WRITE_POWER_SAVE, 0x00);
+ dsi_dcs_write_seq(dsi, 0xf7, 0x03); // gamma/aor update
+ dsi_dcs_write_seq(dsi, 0xf0, 0xa5, 0xa5); // disable LEVEL2 commands
+
+ ret = mipi_dsi_dcs_set_display_on(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display on: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_off(struct s6e88a0_ams452ef01 *ctx)
+{
+ struct mipi_dsi_device *dsi = ctx->dsi;
+ struct device *dev = &dsi->dev;
+ int ret;
+
+ dsi->mode_flags &= ~MIPI_DSI_MODE_LPM;
+
+ ret = mipi_dsi_dcs_set_display_off(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to set display off: %d\n", ret);
+ return ret;
+ }
+ msleep(35);
+
+ ret = mipi_dsi_dcs_enter_sleep_mode(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enter sleep mode: %d\n", ret);
+ return ret;
+ }
+ msleep(120);
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_prepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (ctx->prepared)
+ return 0;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to enable regulators: %d\n", ret);
+ return ret;
+ }
+
+ s6e88a0_ams452ef01_reset(ctx);
+
+ ret = s6e88a0_ams452ef01_on(ctx);
+ if (ret < 0) {
+ dev_err(dev, "Failed to initialize panel: %d\n", ret);
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ return ret;
+ }
+
+ ctx->prepared = true;
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_unprepare(struct drm_panel *panel)
+{
+ struct s6e88a0_ams452ef01 *ctx = to_s6e88a0_ams452ef01(panel);
+ struct device *dev = &ctx->dsi->dev;
+ int ret;
+
+ if (!ctx->prepared)
+ return 0;
+
+ ret = s6e88a0_ams452ef01_off(ctx);
+ if (ret < 0)
+ dev_err(dev, "Failed to un-initialize panel: %d\n", ret);
+
+ gpiod_set_value_cansleep(ctx->reset_gpio, 0);
+ regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+
+ ctx->prepared = false;
+ return 0;
+}
+
+static const struct drm_display_mode s6e88a0_ams452ef01_mode = {
+ .clock = (540 + 88 + 4 + 20) * (960 + 14 + 2 + 8) * 60 / 1000,
+ .hdisplay = 540,
+ .hsync_start = 540 + 88,
+ .hsync_end = 540 + 88 + 4,
+ .htotal = 540 + 88 + 4 + 20,
+ .vdisplay = 960,
+ .vsync_start = 960 + 14,
+ .vsync_end = 960 + 14 + 2,
+ .vtotal = 960 + 14 + 2 + 8,
+ .vrefresh = 60,
+ .width_mm = 56,
+ .height_mm = 100,
+};
+
+static int s6e88a0_ams452ef01_get_modes(struct drm_panel *panel,
+ struct drm_connector *connector)
+{
+ struct drm_display_mode *mode;
+
+ mode = drm_mode_duplicate(connector->dev, &s6e88a0_ams452ef01_mode);
+ if (!mode)
+ return -ENOMEM;
+
+ drm_mode_set_name(mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ connector->display_info.width_mm = mode->width_mm;
+ connector->display_info.height_mm = mode->height_mm;
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
+}
+
+static const struct drm_panel_funcs s6e88a0_ams452ef01_panel_funcs = {
+ .unprepare = s6e88a0_ams452ef01_unprepare,
+ .prepare = s6e88a0_ams452ef01_prepare,
+ .get_modes = s6e88a0_ams452ef01_get_modes,
+};
+
+static int s6e88a0_ams452ef01_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct s6e88a0_ams452ef01 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ ctx->supplies[0].supply = "vdd3";
+ ctx->supplies[1].supply = "vci";
+ ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0) {
+ dev_err(dev, "Failed to get regulators: %d\n", ret);
+ return ret;
+ }
+
+ ctx->reset_gpio = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->reset_gpio)) {
+ ret = PTR_ERR(ctx->reset_gpio);
+ dev_err(dev, "Failed to get reset-gpios: %d\n", ret);
+ return ret;
+ }
+
+ ctx->dsi = dsi;
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ dsi->lanes = 2;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST;
+
+ drm_panel_init(&ctx->panel, dev, &s6e88a0_ams452ef01_panel_funcs,
+ DRM_MODE_CONNECTOR_DSI);
+
+ ret = drm_panel_add(&ctx->panel);
+ if (ret < 0) {
+ dev_err(dev, "Failed to add panel: %d\n", ret);
+ return ret;
+ }
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ dev_err(dev, "Failed to attach to DSI host: %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static int s6e88a0_ams452ef01_remove(struct mipi_dsi_device *dsi)
+{
+ struct s6e88a0_ams452ef01 *ctx = mipi_dsi_get_drvdata(dsi);
+ int ret;
+
+ ret = mipi_dsi_detach(dsi);
+ if (ret < 0)
+ dev_err(&dsi->dev, "Failed to detach from DSI host: %d\n", ret);
+
+ drm_panel_remove(&ctx->panel);
+
+ return 0;
+}
+
+static const struct of_device_id s6e88a0_ams452ef01_of_match[] = {
+ { .compatible = "samsung,s6e88a0-ams452ef01" },
+ { /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, s6e88a0_ams452ef01_of_match);
+
+static struct mipi_dsi_driver s6e88a0_ams452ef01_driver = {
+ .probe = s6e88a0_ams452ef01_probe,
+ .remove = s6e88a0_ams452ef01_remove,
+ .driver = {
+ .name = "panel-s6e88a0-ams452ef01",
+ .of_match_table = s6e88a0_ams452ef01_of_match,
+ },
+};
+module_mipi_dsi_driver(s6e88a0_ams452ef01_driver);
+
+MODULE_AUTHOR("Michael Srba <[email protected]>");
+MODULE_DESCRIPTION("MIPI-DSI based Panel Driver for AMS452EF01 AMOLED LCD with a S6E88A0 controller");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index e14c14ac62b5..0ce81b1f36af 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -351,6 +351,65 @@ static const struct drm_panel_funcs panel_simple_funcs = {
.get_timings = panel_simple_get_timings,
};
+static struct panel_desc panel_dpi;
+
+static int panel_dpi_probe(struct device *dev,
+ struct panel_simple *panel)
+{
+ struct display_timing *timing;
+ const struct device_node *np;
+ struct panel_desc *desc;
+ unsigned int bus_flags;
+ struct videomode vm;
+ const char *mapping;
+ int ret;
+
+ np = dev->of_node;
+ desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
+ if (!desc)
+ return -ENOMEM;
+
+ timing = devm_kzalloc(dev, sizeof(*timing), GFP_KERNEL);
+ if (!timing)
+ return -ENOMEM;
+
+ ret = of_get_display_timing(np, "panel-timing", timing);
+ if (ret < 0) {
+ dev_err(dev, "%pOF: no panel-timing node found for \"panel-dpi\" binding\n",
+ np);
+ return ret;
+ }
+
+ desc->timings = timing;
+ desc->num_timings = 1;
+
+ of_property_read_u32(np, "width-mm", &desc->size.width);
+ of_property_read_u32(np, "height-mm", &desc->size.height);
+
+	/* The data-mapping property is optional: only parse it if present */
+	ret = of_property_read_string(np, "data-mapping", &mapping);
+	if (!ret) {
+		if (!strcmp(mapping, "rgb24"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+		else if (!strcmp(mapping, "rgb565"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB565_1X16;
+		else if (!strcmp(mapping, "bgr666"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB666_1X18;
+		else if (!strcmp(mapping, "lvds666"))
+			desc->bus_format = MEDIA_BUS_FMT_RGB666_1X24_CPADHI;
+	}
+
+ /* Extract bus_flags from display_timing */
+ bus_flags = 0;
+ vm.flags = timing->flags;
+ drm_bus_flags_from_videomode(&vm, &bus_flags);
+ desc->bus_flags = bus_flags;
+
+ /* We do not know the connector for the DT node, so guess it */
+ desc->connector_type = DRM_MODE_CONNECTOR_DPI;
+
+ panel->desc = desc;
+
+ return 0;
+}
+
#define PANEL_SIMPLE_BOUNDS_CHECK(to_check, bounds, field) \
(to_check->field.typ >= bounds->field.min && \
to_check->field.typ <= bounds->field.max)
@@ -437,8 +496,15 @@ static int panel_simple_probe(struct device *dev, const struct panel_desc *desc)
return -EPROBE_DEFER;
}
- if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
- panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ if (desc == &panel_dpi) {
+ /* Handle the generic panel-dpi binding */
+ err = panel_dpi_probe(dev, panel);
+ if (err)
+ goto free_ddc;
+ } else {
+ if (!of_get_display_timing(dev->of_node, "panel-timing", &dt))
+ panel_simple_parse_panel_timing_node(dev, panel, &dt);
+ }
drm_panel_init(&panel->base, dev, &panel_simple_funcs,
desc->connector_type);
@@ -1301,6 +1367,37 @@ static const struct panel_desc edt_et035012dm6 = {
.bus_flags = DRM_BUS_FLAG_DE_LOW | DRM_BUS_FLAG_PIXDATA_NEGEDGE,
};
+static const struct drm_display_mode edt_etm043080dh6gp_mode = {
+ .clock = 10870,
+ .hdisplay = 480,
+ .hsync_start = 480 + 8,
+ .hsync_end = 480 + 8 + 4,
+ .htotal = 480 + 8 + 4 + 41,
+
+ /*
+	 * IWG22M: Y resolution changed to avoid the "dc_linuxfb" module
+	 * crashing in fb_align
+ */
+
+ .vdisplay = 288,
+ .vsync_start = 288 + 2,
+ .vsync_end = 288 + 2 + 4,
+ .vtotal = 288 + 2 + 4 + 10,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc edt_etm043080dh6gp = {
+ .modes = &edt_etm043080dh6gp_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 100,
+ .height = 65,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode edt_etm0430g0dh6_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -1440,6 +1537,33 @@ static const struct panel_desc foxlink_fl500wvr00_a0t = {
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
};
+static const struct drm_display_mode frida_frd350h54004_mode = {
+ .clock = 6000,
+ .hdisplay = 320,
+ .hsync_start = 320 + 44,
+ .hsync_end = 320 + 44 + 16,
+ .htotal = 320 + 44 + 16 + 20,
+ .vdisplay = 240,
+ .vsync_start = 240 + 2,
+ .vsync_end = 240 + 2 + 6,
+ .vtotal = 240 + 2 + 6 + 2,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_PHSYNC | DRM_MODE_FLAG_PVSYNC,
+};
+
+static const struct panel_desc frida_frd350h54004 = {
+ .modes = &frida_frd350h54004_mode,
+ .num_modes = 1,
+ .bpc = 8,
+ .size = {
+ .width = 77,
+ .height = 64,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
static const struct drm_display_mode friendlyarm_hd702e_mode = {
.clock = 67185,
.hdisplay = 800,
@@ -2080,6 +2204,64 @@ static const struct panel_desc lg_lp129qe = {
},
};
+static const struct display_timing logictechno_lt161010_2nh_timing = {
+ .pixelclock = { 26400000, 33300000, 46800000 },
+ .hactive = { 800, 800, 800 },
+ .hfront_porch = { 16, 210, 354 },
+ .hback_porch = { 46, 46, 46 },
+ .hsync_len = { 1, 20, 40 },
+ .vactive = { 480, 480, 480 },
+ .vfront_porch = { 7, 22, 147 },
+ .vback_porch = { 23, 23, 23 },
+ .vsync_len = { 1, 10, 20 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt161010_2nh = {
+ .timings = &logictechno_lt161010_2nh_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 154,
+ .height = 86,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
+};
+
+static const struct display_timing logictechno_lt170410_2whc_timing = {
+ .pixelclock = { 68900000, 71100000, 73400000 },
+ .hactive = { 1280, 1280, 1280 },
+ .hfront_porch = { 23, 60, 71 },
+ .hback_porch = { 23, 60, 71 },
+ .hsync_len = { 15, 40, 47 },
+ .vactive = { 800, 800, 800 },
+ .vfront_porch = { 5, 7, 10 },
+ .vback_porch = { 5, 7, 10 },
+ .vsync_len = { 6, 9, 12 },
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
+ DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_SYNC_POSEDGE,
+};
+
+static const struct panel_desc logictechno_lt170410_2whc = {
+ .timings = &logictechno_lt170410_2whc_timing,
+ .num_timings = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH |
+ DRM_BUS_FLAG_PIXDATA_SAMPLE_NEGEDGE |
+ DRM_BUS_FLAG_SYNC_SAMPLE_NEGEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
.clock = 30400,
.hdisplay = 800,
@@ -2095,7 +2277,7 @@ static const struct drm_display_mode mitsubishi_aa070mc01_mode = {
};
static const struct drm_display_mode logicpd_type_28_mode = {
- .clock = 9000,
+ .clock = 9107,
.hdisplay = 480,
.hsync_start = 480 + 3,
.hsync_end = 480 + 3 + 42,
@@ -2224,6 +2406,51 @@ static const struct panel_desc netron_dy_e231732 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode neweast_wjfh116008a_modes[] = {
+ {
+ .clock = 138500,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 60,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }, {
+ .clock = 110920,
+ .hdisplay = 1920,
+ .hsync_start = 1920 + 48,
+ .hsync_end = 1920 + 48 + 32,
+ .htotal = 1920 + 48 + 32 + 80,
+ .vdisplay = 1080,
+ .vsync_start = 1080 + 3,
+ .vsync_end = 1080 + 3 + 5,
+ .vtotal = 1080 + 3 + 5 + 23,
+ .vrefresh = 48,
+ .flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
+ }
+};
+
+static const struct panel_desc neweast_wjfh116008a = {
+ .modes = neweast_wjfh116008a_modes,
+ .num_modes = 2,
+ .bpc = 6,
+ .size = {
+ .width = 260,
+ .height = 150,
+ },
+ .delay = {
+ .prepare = 110,
+ .enable = 20,
+ .unprepare = 500,
+ },
+ .bus_format = MEDIA_BUS_FMT_RGB666_1X18,
+ .connector_type = DRM_MODE_CONNECTOR_eDP,
+};
+
static const struct drm_display_mode newhaven_nhd_43_480272ef_atxl_mode = {
.clock = 9000,
.hdisplay = 480,
@@ -2390,15 +2617,15 @@ static const struct panel_desc ontat_yx700wv03 = {
};
static const struct drm_display_mode ortustech_com37h3m_mode = {
- .clock = 22153,
+ .clock = 22230,
.hdisplay = 480,
- .hsync_start = 480 + 8,
- .hsync_end = 480 + 8 + 10,
- .htotal = 480 + 8 + 10 + 10,
+ .hsync_start = 480 + 40,
+ .hsync_end = 480 + 40 + 10,
+ .htotal = 480 + 40 + 10 + 40,
.vdisplay = 640,
.vsync_start = 640 + 4,
- .vsync_end = 640 + 4 + 3,
- .vtotal = 640 + 4 + 3 + 4,
+ .vsync_end = 640 + 4 + 2,
+ .vtotal = 640 + 4 + 2 + 4,
.vrefresh = 60,
.flags = DRM_MODE_FLAG_NVSYNC | DRM_MODE_FLAG_NHSYNC,
};
@@ -2439,6 +2666,7 @@ static const struct panel_desc ortustech_com43h4m85ulc = {
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .connector_type = DRM_MODE_CONNECTOR_DPI,
};
static const struct drm_display_mode osddisplays_osd070t1718_19ts_mode = {
@@ -2464,7 +2692,8 @@ static const struct panel_desc osddisplays_osd070t1718_19ts = {
.height = 91,
},
.bus_format = MEDIA_BUS_FMT_RGB888_1X24,
- .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE |
+ DRM_BUS_FLAG_SYNC_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_DPI,
};
@@ -2546,6 +2775,35 @@ static const struct panel_desc rocktech_rk070er9427 = {
.bus_format = MEDIA_BUS_FMT_RGB666_1X18,
};
+static const struct drm_display_mode rocktech_rk101ii01d_ct_mode = {
+ .clock = 71100,
+ .hdisplay = 1280,
+ .hsync_start = 1280 + 48,
+ .hsync_end = 1280 + 48 + 32,
+ .htotal = 1280 + 48 + 32 + 80,
+ .vdisplay = 800,
+ .vsync_start = 800 + 2,
+ .vsync_end = 800 + 2 + 5,
+ .vtotal = 800 + 2 + 5 + 16,
+ .vrefresh = 60,
+};
+
+static const struct panel_desc rocktech_rk101ii01d_ct = {
+ .modes = &rocktech_rk101ii01d_ct_mode,
+ .num_modes = 1,
+ .size = {
+ .width = 217,
+ .height = 136,
+ },
+ .delay = {
+ .prepare = 50,
+ .disable = 50,
+ },
+ .bus_flags = DRM_BUS_FLAG_DE_HIGH,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_SPWG,
+ .connector_type = DRM_MODE_CONNECTOR_LVDS,
+};
+
static const struct drm_display_mode samsung_lsn122dl01_c01_mode = {
.clock = 271560,
.hdisplay = 2560,
@@ -2768,30 +3026,6 @@ static const struct panel_desc sharp_lq123p1jx31 = {
},
};
-static const struct drm_display_mode sharp_lq150x1lg11_mode = {
- .clock = 71100,
- .hdisplay = 1024,
- .hsync_start = 1024 + 168,
- .hsync_end = 1024 + 168 + 64,
- .htotal = 1024 + 168 + 64 + 88,
- .vdisplay = 768,
- .vsync_start = 768 + 37,
- .vsync_end = 768 + 37 + 2,
- .vtotal = 768 + 37 + 2 + 8,
- .vrefresh = 60,
-};
-
-static const struct panel_desc sharp_lq150x1lg11 = {
- .modes = &sharp_lq150x1lg11_mode,
- .num_modes = 1,
- .bpc = 6,
- .size = {
- .width = 304,
- .height = 228,
- },
- .bus_format = MEDIA_BUS_FMT_RGB565_1X16,
-};
-
static const struct display_timing sharp_ls020b1dd01d_timing = {
.pixelclock = { 2000000, 4200000, 5000000 },
.hactive = { 240, 240, 240 },
@@ -3023,7 +3257,7 @@ static const struct panel_desc toshiba_lt089ac29000 = {
.width = 194,
.height = 116,
},
- .bus_format = MEDIA_BUS_FMT_RGB888_1X24,
+ .bus_format = MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA,
.bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE,
.connector_type = DRM_MODE_CONNECTOR_LVDS,
};
@@ -3286,6 +3520,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "edt,et035012dm6",
.data = &edt_et035012dm6,
}, {
+ .compatible = "edt,etm043080dh6gp",
+ .data = &edt_etm043080dh6gp,
+ }, {
.compatible = "edt,etm0430g0dh6",
.data = &edt_etm0430g0dh6,
}, {
@@ -3310,6 +3547,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "foxlink,fl500wvr00-a0t",
.data = &foxlink_fl500wvr00_a0t,
}, {
+ .compatible = "frida,frd350h54004",
+ .data = &frida_frd350h54004,
+ }, {
.compatible = "friendlyarm,hd702e",
.data = &friendlyarm_hd702e,
}, {
@@ -3388,6 +3628,15 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "logicpd,type28",
.data = &logicpd_type_28,
}, {
+ .compatible = "logictechno,lt161010-2nhc",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt161010-2nhr",
+ .data = &logictechno_lt161010_2nh,
+ }, {
+ .compatible = "logictechno,lt170410-2whc",
+ .data = &logictechno_lt170410_2whc,
+ }, {
.compatible = "mitsubishi,aa070mc01-ca1",
.data = &mitsubishi_aa070mc01,
}, {
@@ -3400,6 +3649,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "netron-dy,e231732",
.data = &netron_dy_e231732,
}, {
+ .compatible = "neweast,wjfh116008a",
+ .data = &neweast_wjfh116008a,
+ }, {
.compatible = "newhaven,nhd-4.3-480272ef-atxl",
.data = &newhaven_nhd_43_480272ef_atxl,
}, {
@@ -3439,6 +3691,9 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "rocktech,rk070er9427",
.data = &rocktech_rk070er9427,
}, {
+ .compatible = "rocktech,rk101ii01d-ct",
+ .data = &rocktech_rk101ii01d_ct,
+ }, {
.compatible = "samsung,lsn122dl01-c01",
.data = &samsung_lsn122dl01_c01,
}, {
@@ -3466,9 +3721,6 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "sharp,lq123p1jx31",
.data = &sharp_lq123p1jx31,
}, {
- .compatible = "sharp,lq150x1lg11",
- .data = &sharp_lq150x1lg11,
- }, {
.compatible = "sharp,ls020b1dd01d",
.data = &sharp_ls020b1dd01d,
}, {
@@ -3526,6 +3778,10 @@ static const struct of_device_id platform_of_match[] = {
.compatible = "winstar,wf35ltiacd",
.data = &winstar_wf35ltiacd,
}, {
+ /* Must be the last entry */
+ .compatible = "panel-dpi",
+ .data = &panel_dpi,
+ }, {
/* sentinel */
}
};
diff --git a/drivers/gpu/drm/panel/panel-sony-acx424akp.c b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
index de0abf76ae6f..c91e55b2d7a3 100644
--- a/drivers/gpu/drm/panel/panel-sony-acx424akp.c
+++ b/drivers/gpu/drm/panel/panel-sony-acx424akp.c
@@ -48,7 +48,7 @@ struct acx424akp {
};
static const struct drm_display_mode sony_acx424akp_vid_mode = {
- .clock = 330000,
+ .clock = 27234,
.hdisplay = 480,
.hsync_start = 480 + 15,
.hsync_end = 480 + 15 + 0,
@@ -68,7 +68,7 @@ static const struct drm_display_mode sony_acx424akp_vid_mode = {
* command mode using the maximum HS frequency.
*/
static const struct drm_display_mode sony_acx424akp_cmd_mode = {
- .clock = 420160,
+ .clock = 35478,
.hdisplay = 480,
.hsync_start = 480 + 154,
.hsync_end = 480 + 154 + 16,
diff --git a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
index cf29405a2dbe..aeca15dfeb3c 100644
--- a/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/panel/panel-tpo-td028ttec1.c
@@ -86,7 +86,12 @@ struct td028ttec1_panel {
#define to_td028ttec1_device(p) container_of(p, struct td028ttec1_panel, panel)
-static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
+/*
+ * noinline_for_stack so we don't get multiple copies of tx_buf
+ * on the stack in case of gcc-plugin-structleak
+ */
+static int noinline_for_stack
+jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf = JBT_COMMAND | reg;
@@ -105,8 +110,9 @@ static int jbt_ret_write_0(struct td028ttec1_panel *lcd, u8 reg, int *err)
return ret;
}
-static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
- u8 reg, u8 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_1(struct td028ttec1_panel *lcd,
+ u8 reg, u8 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[2];
@@ -128,8 +134,9 @@ static int jbt_reg_write_1(struct td028ttec1_panel *lcd,
return ret;
}
-static int jbt_reg_write_2(struct td028ttec1_panel *lcd,
- u8 reg, u16 data, int *err)
+static int noinline_for_stack
+jbt_reg_write_2(struct td028ttec1_panel *lcd,
+ u8 reg, u16 data, int *err)
{
struct spi_device *spi = lcd->spi;
u16 tx_buf[3];
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.c b/drivers/gpu/drm/panfrost/panfrost_device.c
index 238fb6d54df4..8136babd3ba9 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.c
+++ b/drivers/gpu/drm/panfrost/panfrost_device.c
@@ -5,6 +5,7 @@
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
+#include <linux/pm_domain.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
@@ -87,18 +88,27 @@ static void panfrost_clk_fini(struct panfrost_device *pfdev)
static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
- int ret;
+ int ret, i;
- pfdev->regulator = devm_regulator_get(pfdev->dev, "mali");
- if (IS_ERR(pfdev->regulator)) {
- ret = PTR_ERR(pfdev->regulator);
- dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
+ if (WARN(pfdev->comp->num_supplies > ARRAY_SIZE(pfdev->regulators),
+ "Too many supplies in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < pfdev->comp->num_supplies; i++)
+ pfdev->regulators[i].supply = pfdev->comp->supply_names[i];
+
+ ret = devm_regulator_bulk_get(pfdev->dev,
+ pfdev->comp->num_supplies,
+ pfdev->regulators);
+ if (ret < 0) {
+ dev_err(pfdev->dev, "failed to get regulators: %d\n", ret);
return ret;
}
- ret = regulator_enable(pfdev->regulator);
+ ret = regulator_bulk_enable(pfdev->comp->num_supplies,
+ pfdev->regulators);
if (ret < 0) {
- dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
+ dev_err(pfdev->dev, "failed to enable regulators: %d\n", ret);
return ret;
}
@@ -107,7 +117,81 @@ static int panfrost_regulator_init(struct panfrost_device *pfdev)
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
- regulator_disable(pfdev->regulator);
+ regulator_bulk_disable(pfdev->comp->num_supplies,
+ pfdev->regulators);
+}
+
+static void panfrost_pm_domain_fini(struct panfrost_device *pfdev)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pfdev->pm_domain_devs); i++) {
+ if (!pfdev->pm_domain_devs[i])
+ break;
+
+ if (pfdev->pm_domain_links[i])
+ device_link_del(pfdev->pm_domain_links[i]);
+
+ dev_pm_domain_detach(pfdev->pm_domain_devs[i], true);
+ }
+}
+
+static int panfrost_pm_domain_init(struct panfrost_device *pfdev)
+{
+ int err;
+ int i, num_domains;
+
+ num_domains = of_count_phandle_with_args(pfdev->dev->of_node,
+ "power-domains",
+ "#power-domain-cells");
+
+ /*
+	 * A single domain is handled by the core, and, if only a single
+	 * power domain is requested, the property is optional.
+ */
+ if (num_domains < 2 && pfdev->comp->num_pm_domains < 2)
+ return 0;
+
+ if (num_domains != pfdev->comp->num_pm_domains) {
+ dev_err(pfdev->dev,
+ "Incorrect number of power domains: %d provided, %d needed\n",
+ num_domains, pfdev->comp->num_pm_domains);
+ return -EINVAL;
+ }
+
+ if (WARN(num_domains > ARRAY_SIZE(pfdev->pm_domain_devs),
+ "Too many supplies in compatible structure.\n"))
+ return -EINVAL;
+
+ for (i = 0; i < num_domains; i++) {
+ pfdev->pm_domain_devs[i] =
+ dev_pm_domain_attach_by_name(pfdev->dev,
+ pfdev->comp->pm_domain_names[i]);
+ if (IS_ERR_OR_NULL(pfdev->pm_domain_devs[i])) {
+ err = PTR_ERR(pfdev->pm_domain_devs[i]) ? : -ENODATA;
+ pfdev->pm_domain_devs[i] = NULL;
+ dev_err(pfdev->dev,
+ "failed to get pm-domain %s(%d): %d\n",
+ pfdev->comp->pm_domain_names[i], i, err);
+ goto err;
+ }
+
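+		/*
+		 * Link each power domain to the GPU device so that the
+		 * domains' runtime PM state follows that of the device.
+		 */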
+ pfdev->pm_domain_links[i] = device_link_add(pfdev->dev,
+ pfdev->pm_domain_devs[i], DL_FLAG_PM_RUNTIME |
+ DL_FLAG_STATELESS | DL_FLAG_RPM_ACTIVE);
+ if (!pfdev->pm_domain_links[i]) {
+ dev_err(pfdev->pm_domain_devs[i],
+ "adding device link failed!\n");
+ err = -ENODEV;
+ goto err;
+ }
+ }
+
+ return 0;
+
+err:
+ panfrost_pm_domain_fini(pfdev);
+ return err;
}
int panfrost_device_init(struct panfrost_device *pfdev)
@@ -140,37 +224,43 @@ int panfrost_device_init(struct panfrost_device *pfdev)
goto err_out1;
}
+ err = panfrost_pm_domain_init(pfdev);
+ if (err)
+ goto err_out2;
+
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
- goto err_out2;
+ goto err_out3;
}
err = panfrost_gpu_init(pfdev);
if (err)
- goto err_out2;
+ goto err_out3;
err = panfrost_mmu_init(pfdev);
if (err)
- goto err_out3;
+ goto err_out4;
err = panfrost_job_init(pfdev);
if (err)
- goto err_out4;
+ goto err_out5;
err = panfrost_perfcnt_init(pfdev);
if (err)
- goto err_out5;
+ goto err_out6;
return 0;
-err_out5:
+err_out6:
panfrost_job_fini(pfdev);
-err_out4:
+err_out5:
panfrost_mmu_fini(pfdev);
-err_out3:
+err_out4:
panfrost_gpu_fini(pfdev);
+err_out3:
+ panfrost_pm_domain_fini(pfdev);
err_out2:
panfrost_reset_fini(pfdev);
err_out1:
@@ -186,6 +276,7 @@ void panfrost_device_fini(struct panfrost_device *pfdev)
panfrost_job_fini(pfdev);
panfrost_mmu_fini(pfdev);
panfrost_gpu_fini(pfdev);
+ panfrost_pm_domain_fini(pfdev);
panfrost_reset_fini(pfdev);
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
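The panfrost_device.c hunks above replace a single devm_regulator_get() with the consumer bulk API: fill a regulator_bulk_data array with supply names, then get and enable the whole set in one call. A minimal sketch of that pattern, with hypothetical names (my_supplies_init), assuming only the standard regulator consumer API:

#include <linux/device.h>
#include <linux/regulator/consumer.h>

/* Acquire and enable 'num' named supplies as one unit. */
static int my_supplies_init(struct device *dev,
			    struct regulator_bulk_data *bulk,
			    const char * const *names, int num)
{
	int i, ret;

	for (i = 0; i < num; i++)
		bulk[i].supply = names[i];

	/* devm_ variant: supplies are released on driver detach. */
	ret = devm_regulator_bulk_get(dev, num, bulk);
	if (ret < 0)
		return ret;

	/* On failure this disables any supplies already enabled. */
	return regulator_bulk_enable(num, bulk);
}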
diff --git a/drivers/gpu/drm/panfrost/panfrost_device.h b/drivers/gpu/drm/panfrost/panfrost_device.h
index 06713811b92c..c30c719a8059 100644
--- a/drivers/gpu/drm/panfrost/panfrost_device.h
+++ b/drivers/gpu/drm/panfrost/panfrost_device.h
@@ -7,6 +7,7 @@
#include <linux/atomic.h>
#include <linux/io-pgtable.h>
+#include <linux/regulator/consumer.h>
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
@@ -19,6 +20,8 @@ struct panfrost_job;
struct panfrost_perfcnt;
#define NUM_JOB_SLOTS 3
+#define MAX_REGULATORS 2
+#define MAX_PM_DOMAINS 3
struct panfrost_features {
u16 id;
@@ -51,6 +54,23 @@ struct panfrost_features {
unsigned long hw_issues[64 / BITS_PER_LONG];
};
+/*
+ * Features that cannot be automatically detected and need matching using the
+ * compatible string, typically SoC-specific.
+ */
+struct panfrost_compatible {
+ /* Supplies count and names. */
+ int num_supplies;
+ const char * const *supply_names;
+ /*
+ * Number of power domains required; note that values 0 and 1 are
+ * handled identically, as only values > 1 need special handling.
+ */
+ int num_pm_domains;
+ /* Only required if num_pm_domains > 1. */
+ const char * const *pm_domain_names;
+};
+
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
@@ -59,10 +79,14 @@ struct panfrost_device {
void __iomem *iomem;
struct clk *clock;
struct clk *bus_clock;
- struct regulator *regulator;
+ struct regulator_bulk_data regulators[MAX_REGULATORS];
struct reset_control *rstc;
+ /* pm_domains for devices with more than one. */
+ struct device *pm_domain_devs[MAX_PM_DOMAINS];
+ struct device_link *pm_domain_links[MAX_PM_DOMAINS];
struct panfrost_features features;
+ const struct panfrost_compatible *comp;
spinlock_t as_lock;
unsigned long as_in_use_mask;
diff --git a/drivers/gpu/drm/panfrost/panfrost_drv.c b/drivers/gpu/drm/panfrost/panfrost_drv.c
index 6da59f476aba..882fecc33fdb 100644
--- a/drivers/gpu/drm/panfrost/panfrost_drv.c
+++ b/drivers/gpu/drm/panfrost/panfrost_drv.c
@@ -166,6 +166,7 @@ panfrost_lookup_bos(struct drm_device *dev,
break;
}
+ atomic_inc(&bo->gpu_usecount);
job->mappings[i] = mapping;
}
@@ -583,6 +584,10 @@ static int panfrost_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, pfdev);
+ pfdev->comp = of_device_get_match_data(&pdev->dev);
+ if (!pfdev->comp)
+ return -ENODEV;
+
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&panfrost_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
@@ -654,16 +659,24 @@ static int panfrost_remove(struct platform_device *pdev)
return 0;
}
+static const char * const default_supplies[] = { "mali" };
+static const struct panfrost_compatible default_data = {
+ .num_supplies = ARRAY_SIZE(default_supplies),
+ .supply_names = default_supplies,
+ .num_pm_domains = 1, /* optional */
+ .pm_domain_names = NULL,
+};
+
static const struct of_device_id dt_match[] = {
- { .compatible = "arm,mali-t604" },
- { .compatible = "arm,mali-t624" },
- { .compatible = "arm,mali-t628" },
- { .compatible = "arm,mali-t720" },
- { .compatible = "arm,mali-t760" },
- { .compatible = "arm,mali-t820" },
- { .compatible = "arm,mali-t830" },
- { .compatible = "arm,mali-t860" },
- { .compatible = "arm,mali-t880" },
+ { .compatible = "arm,mali-t604", .data = &default_data, },
+ { .compatible = "arm,mali-t624", .data = &default_data, },
+ { .compatible = "arm,mali-t628", .data = &default_data, },
+ { .compatible = "arm,mali-t720", .data = &default_data, },
+ { .compatible = "arm,mali-t760", .data = &default_data, },
+ { .compatible = "arm,mali-t820", .data = &default_data, },
+ { .compatible = "arm,mali-t830", .data = &default_data, },
+ { .compatible = "arm,mali-t860", .data = &default_data, },
+ { .compatible = "arm,mali-t880", .data = &default_data, },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
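The dt_match table above attaches per-SoC data to every compatible entry; probe then retrieves it with of_device_get_match_data() and refuses to bind without it. A sketch of the same lookup pattern, with hypothetical names (my_soc_data, "vendor,my-gpu"):

#include <linux/kernel.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

struct my_soc_data {
	int num_supplies;
	const char * const *supply_names;
};

static const char * const my_supplies[] = { "mali" };

static const struct my_soc_data my_default_data = {
	.num_supplies = ARRAY_SIZE(my_supplies),
	.supply_names = my_supplies,
};

static const struct of_device_id my_match[] = {
	{ .compatible = "vendor,my-gpu", .data = &my_default_data },
	{ /* sentinel */ }
};

static int my_probe(struct platform_device *pdev)
{
	const struct my_soc_data *data;

	/* Returns the .data of the matched of_device_id, or NULL. */
	data = of_device_get_match_data(&pdev->dev);
	if (!data)
		return -ENODEV;

	/* ... use data->supply_names / data->num_supplies ... */
	return 0;
}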
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem.h b/drivers/gpu/drm/panfrost/panfrost_gem.h
index ca1bc9019600..b3517ff9630c 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem.h
+++ b/drivers/gpu/drm/panfrost/panfrost_gem.h
@@ -30,6 +30,12 @@ struct panfrost_gem_object {
struct mutex lock;
} mappings;
+ /*
+ * Count the number of jobs referencing this BO so we don't let the
+ * shrinker reclaim this object prematurely.
+ */
+ atomic_t gpu_usecount;
+
bool noexec :1;
bool is_heap :1;
};
diff --git a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
index f5dd7b29bc95..288e46c40673 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gem_shrinker.c
@@ -41,6 +41,9 @@ static bool panfrost_gem_purge(struct drm_gem_object *obj)
struct drm_gem_shmem_object *shmem = to_drm_gem_shmem_obj(obj);
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
+ if (atomic_read(&bo->gpu_usecount))
+ return false;
+
if (!mutex_trylock(&shmem->pages_lock))
return false;
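The shrinker hunk above bails out while gpu_usecount is non-zero, so a BO referenced by an in-flight job is never purged underneath it. The guard reduces to one atomic read; a sketch with a hypothetical my_bo type:

#include <linux/atomic.h>

struct my_bo {
	atomic_t gpu_usecount;	/* incremented per job that maps this BO */
};

static bool my_bo_can_purge(struct my_bo *bo)
{
	/* Non-zero means a queued job still uses the BO: skip reclaim. */
	return atomic_read(&bo->gpu_usecount) == 0;
}

The counter is raised in panfrost_lookup_bos() when a job takes a mapping and dropped in panfrost_job_cleanup(), as the surrounding hunks show.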
diff --git a/drivers/gpu/drm/panfrost/panfrost_gpu.c b/drivers/gpu/drm/panfrost/panfrost_gpu.c
index 8822ec13a0d6..f2c1ddc41a9b 100644
--- a/drivers/gpu/drm/panfrost/panfrost_gpu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_gpu.c
@@ -308,28 +308,26 @@ void panfrost_gpu_power_on(struct panfrost_device *pfdev)
gpu_write(pfdev, L2_PWRON_LO, pfdev->features.l2_present);
ret = readl_relaxed_poll_timeout(pfdev->iomem + L2_READY_LO,
val, val == pfdev->features.l2_present, 100, 1000);
-
- gpu_write(pfdev, STACK_PWRON_LO, pfdev->features.stack_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + STACK_READY_LO,
- val, val == pfdev->features.stack_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu L2");
gpu_write(pfdev, SHADER_PWRON_LO, pfdev->features.shader_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + SHADER_READY_LO,
val, val == pfdev->features.shader_present, 100, 1000);
+ if (ret)
+ dev_err(pfdev->dev, "error powering up gpu shader");
gpu_write(pfdev, TILER_PWRON_LO, pfdev->features.tiler_present);
- ret |= readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
+ ret = readl_relaxed_poll_timeout(pfdev->iomem + TILER_READY_LO,
val, val == pfdev->features.tiler_present, 100, 1000);
-
if (ret)
- dev_err(pfdev->dev, "error powering up gpu");
+ dev_err(pfdev->dev, "error powering up gpu tiler");
}
void panfrost_gpu_power_off(struct panfrost_device *pfdev)
{
gpu_write(pfdev, TILER_PWROFF_LO, 0);
gpu_write(pfdev, SHADER_PWROFF_LO, 0);
- gpu_write(pfdev, STACK_PWROFF_LO, 0);
gpu_write(pfdev, L2_PWROFF_LO, 0);
}
@@ -351,7 +349,7 @@ int panfrost_gpu_init(struct panfrost_device *pfdev)
return -ENODEV;
err = devm_request_irq(pfdev->dev, irq, panfrost_gpu_irq_handler,
- IRQF_SHARED, "gpu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-gpu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request gpu irq");
return err;
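The power-on rework above polls each unit's READY register separately so a timeout names the block (L2, shader, tiler) that failed, instead of OR-ing the results into one anonymous error. A minimal sketch of the per-block wait, with a hypothetical my_wait_ready():

#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>

static int my_wait_ready(struct device *dev, void __iomem *ready_reg,
			 u32 expected, const char *what)
{
	u32 val;
	int ret;

	/* Poll every 100us, time out after 1000us, as the driver does. */
	ret = readl_relaxed_poll_timeout(ready_reg, val, val == expected,
					 100, 1000);
	if (ret)
		dev_err(dev, "error powering up gpu %s\n", what);

	return ret;
}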
diff --git a/drivers/gpu/drm/panfrost/panfrost_job.c b/drivers/gpu/drm/panfrost/panfrost_job.c
index 7c36ec675b73..7914b1570841 100644
--- a/drivers/gpu/drm/panfrost/panfrost_job.c
+++ b/drivers/gpu/drm/panfrost/panfrost_job.c
@@ -269,18 +269,19 @@ static void panfrost_job_cleanup(struct kref *ref)
dma_fence_put(job->render_done_fence);
if (job->mappings) {
- for (i = 0; i < job->bo_count; i++)
+ for (i = 0; i < job->bo_count; i++) {
+ if (!job->mappings[i])
+ break;
+
+ atomic_dec(&job->mappings[i]->obj->gpu_usecount);
panfrost_gem_mapping_put(job->mappings[i]);
+ }
kvfree(job->mappings);
}
if (job->bos) {
- struct panfrost_gem_object *bo;
-
- for (i = 0; i < job->bo_count; i++) {
- bo = to_panfrost_bo(job->bos[i]);
+ for (i = 0; i < job->bo_count; i++)
drm_gem_object_put_unlocked(job->bos[i]);
- }
kvfree(job->bos);
}
@@ -507,7 +508,7 @@ int panfrost_job_init(struct panfrost_device *pfdev)
return -ENODEV;
ret = devm_request_irq(pfdev->dev, irq, panfrost_job_irq_handler,
- IRQF_SHARED, "job", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-job", pfdev);
if (ret) {
dev_err(pfdev->dev, "failed to request job irq");
return ret;
diff --git a/drivers/gpu/drm/panfrost/panfrost_mmu.c b/drivers/gpu/drm/panfrost/panfrost_mmu.c
index 763cfca886a7..ed28aeba6d59 100644
--- a/drivers/gpu/drm/panfrost/panfrost_mmu.c
+++ b/drivers/gpu/drm/panfrost/panfrost_mmu.c
@@ -151,7 +151,12 @@ u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
as = mmu->as;
if (as >= 0) {
int en = atomic_inc_return(&mmu->as_count);
- WARN_ON(en >= NUM_JOB_SLOTS);
+
+ /*
+ * AS can be retained by active jobs or a perfcnt context,
+ * hence the '+ 1' here.
+ */
+ WARN_ON(en >= (NUM_JOB_SLOTS + 1));
list_move(&mmu->list, &pfdev->as_lru_list);
goto out;
@@ -596,33 +601,27 @@ static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
source_id = (fault_status >> 16);
/* Page fault only */
- if ((status & mask) == BIT(i)) {
- WARN_ON(exception_type < 0xC1 || exception_type > 0xC4);
-
+ ret = -1;
+ if ((status & mask) == BIT(i) && (exception_type & 0xF8) == 0xC0)
ret = panfrost_mmu_map_fault_addr(pfdev, i, addr);
- if (!ret) {
- mmu_write(pfdev, MMU_INT_CLEAR, BIT(i));
- status &= ~mask;
- continue;
- }
- }
- /* terminal fault, print info about the fault */
- dev_err(pfdev->dev,
- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "Reason: %s\n"
- "raw fault status: 0x%X\n"
- "decoded fault status: %s\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n",
- i, addr,
- "TODO",
- fault_status,
- (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
- exception_type, panfrost_exception_name(pfdev, exception_type),
- access_type, access_type_name(pfdev, fault_status),
- source_id);
+ if (ret)
+ /* terminal fault, print info about the fault */
+ dev_err(pfdev->dev,
+ "Unhandled Page fault in AS%d at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "decoded fault status: %s\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n",
+ i, addr,
+ "TODO",
+ fault_status,
+ (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
+ exception_type, panfrost_exception_name(pfdev, exception_type),
+ access_type, access_type_name(pfdev, fault_status),
+ source_id);
mmu_write(pfdev, MMU_INT_CLEAR, mask);
@@ -641,9 +640,11 @@ int panfrost_mmu_init(struct panfrost_device *pfdev)
if (irq <= 0)
return -ENODEV;
- err = devm_request_threaded_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
+ err = devm_request_threaded_irq(pfdev->dev, irq,
+ panfrost_mmu_irq_handler,
panfrost_mmu_irq_handler_thread,
- IRQF_SHARED, "mmu", pfdev);
+ IRQF_SHARED, KBUILD_MODNAME "-mmu",
+ pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
diff --git a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
index 684820448be3..6913578d5aa7 100644
--- a/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
+++ b/drivers/gpu/drm/panfrost/panfrost_perfcnt.c
@@ -73,7 +73,7 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
struct panfrost_file_priv *user = file_priv->driver_priv;
struct panfrost_perfcnt *perfcnt = pfdev->perfcnt;
struct drm_gem_shmem_object *bo;
- u32 cfg;
+ u32 cfg, as;
int ret;
if (user == perfcnt->user)
@@ -126,12 +126,8 @@ static int panfrost_perfcnt_enable_locked(struct panfrost_device *pfdev,
perfcnt->user = user;
- /*
- * Always use address space 0 for now.
- * FIXME: this needs to be updated when we start using different
- * address space.
- */
- cfg = GPU_PERFCNT_CFG_AS(0) |
+ as = panfrost_mmu_as_get(pfdev, perfcnt->mapping->mmu);
+ cfg = GPU_PERFCNT_CFG_AS(as) |
GPU_PERFCNT_CFG_MODE(GPU_PERFCNT_CFG_MODE_MANUAL);
/*
@@ -195,6 +191,7 @@ static int panfrost_perfcnt_disable_locked(struct panfrost_device *pfdev,
drm_gem_shmem_vunmap(&perfcnt->mapping->obj->base.base, perfcnt->buf);
perfcnt->buf = NULL;
panfrost_gem_close(&perfcnt->mapping->obj->base.base, file_priv);
+ panfrost_mmu_as_put(pfdev, perfcnt->mapping->mmu);
panfrost_gem_mapping_put(perfcnt->mapping);
perfcnt->mapping = NULL;
pm_runtime_mark_last_busy(pfdev->dev);
diff --git a/drivers/gpu/drm/pl111/pl111_versatile.c b/drivers/gpu/drm/pl111/pl111_versatile.c
index 09aeaffb7660..4f325c410b5d 100644
--- a/drivers/gpu/drm/pl111/pl111_versatile.c
+++ b/drivers/gpu/drm/pl111/pl111_versatile.c
@@ -19,6 +19,7 @@ static struct regmap *versatile_syscon_map;
* We detect the different syscon types from the compatible strings.
*/
enum versatile_clcd {
+ INTEGRATOR_IMPD1,
INTEGRATOR_CLCD_CM,
VERSATILE_CLCD,
REALVIEW_CLCD_EB,
@@ -65,6 +66,14 @@ static const struct of_device_id versatile_clcd_of_match[] = {
{},
};
+static const struct of_device_id impd1_clcd_of_match[] = {
+ {
+ .compatible = "arm,im-pd1-syscon",
+ .data = (void *)INTEGRATOR_IMPD1,
+ },
+ {},
+};
+
/*
* Core module CLCD control on the Integrator/CP, bits
* 8 thru 19 of the CM_CONTROL register controls a bunch
@@ -125,6 +134,36 @@ static void pl111_integrator_enable(struct drm_device *drm, u32 format)
val);
}
+#define IMPD1_CTRL_OFFSET 0x18
+#define IMPD1_CTRL_DISP_LCD (0 << 0)
+#define IMPD1_CTRL_DISP_VGA (1 << 0)
+#define IMPD1_CTRL_DISP_LCD1 (2 << 0)
+#define IMPD1_CTRL_DISP_ENABLE (1 << 2)
+#define IMPD1_CTRL_DISP_MASK (7 << 0)
+
+static void pl111_impd1_enable(struct drm_device *drm, u32 format)
+{
+ u32 val;
+
+ dev_info(drm->dev, "enable IM-PD1 CLCD connectors\n");
+ val = IMPD1_CTRL_DISP_VGA | IMPD1_CTRL_DISP_ENABLE;
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ val);
+}
+
+static void pl111_impd1_disable(struct drm_device *drm)
+{
+ dev_info(drm->dev, "disable IM-PD1 CLCD connectors\n");
+
+ regmap_update_bits(versatile_syscon_map,
+ IMPD1_CTRL_OFFSET,
+ IMPD1_CTRL_DISP_MASK,
+ 0);
+}
+
/*
* This configuration register in the Versatile and RealView
* family is uniformly present but appears more and more
@@ -271,6 +310,20 @@ static const struct pl111_variant_data pl110_integrator = {
};
/*
+ * The IM-PD1 variant is a PL110 with a number of broken or not yet
+ * implemented features.
+ */
+static const struct pl111_variant_data pl110_impd1 = {
+ .name = "PL110 IM-PD1",
+ .is_pl110 = true,
+ .broken_clockdivider = true,
+ .broken_vblank = true,
+ .formats = pl110_integrator_pixel_formats,
+ .nformats = ARRAY_SIZE(pl110_integrator_pixel_formats),
+ .fb_bpp = 16,
+};
+
+/*
* This is the in-between PL110 variant found in the ARM Versatile,
* supporting RGB565/BGR565
*/
@@ -322,8 +375,21 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
/* Non-ARM reference designs, just bail out */
return 0;
}
+
versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ /*
+ * On the Integrator, check if we should use the IM-PD1 instead;
+ * if we find it, it takes precedence. This applies to the Integrator/AP,
+ * which only has this option for PL110 graphics.
+ */
+ if (versatile_clcd_type == INTEGRATOR_CLCD_CM) {
+ np = of_find_matching_node_and_match(NULL, impd1_clcd_of_match,
+ &clcd_id);
+ if (np)
+ versatile_clcd_type = (enum versatile_clcd)clcd_id->data;
+ }
+
/* Versatile Express special handling */
if (versatile_clcd_type == VEXPRESS_CLCD_V2M) {
struct platform_device *pdev;
@@ -367,6 +433,13 @@ int pl111_versatile_init(struct device *dev, struct pl111_drm_dev_private *priv)
priv->variant_display_enable = pl111_integrator_enable;
dev_info(dev, "set up callbacks for Integrator PL110\n");
break;
+ case INTEGRATOR_IMPD1:
+ versatile_syscon_map = map;
+ priv->variant = &pl110_impd1;
+ priv->variant_display_enable = pl111_impd1_enable;
+ priv->variant_display_disable = pl111_impd1_disable;
+ dev_info(dev, "set up callbacks for IM-PD1 PL110\n");
+ break;
case VERSATILE_CLCD:
versatile_syscon_map = map;
/* This can do RGB565 with external PLD */
diff --git a/drivers/gpu/drm/qxl/qxl_cmd.c b/drivers/gpu/drm/qxl/qxl_cmd.c
index ef09dc6bc635..d1086b2a6892 100644
--- a/drivers/gpu/drm/qxl/qxl_cmd.c
+++ b/drivers/gpu/drm/qxl/qxl_cmd.c
@@ -36,7 +36,7 @@ static int qxl_reap_surface_id(struct qxl_device *qdev, int max_to_reap);
struct ring {
struct qxl_ring_header header;
- uint8_t elements[0];
+ uint8_t elements[];
};
struct qxl_ring {
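The qxl_cmd.c hunk swaps the GNU zero-length array for a C99 flexible array member, which lets the compiler and fortify checks see the trailing buffer. Allocation of such a struct is typically done with struct_size(); a sketch with a hypothetical my_ring:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/types.h>

struct my_ring {
	u32 prod;
	u32 cons;
	u8 elements[];	/* flexible array member, must come last */
};

static struct my_ring *my_ring_alloc(size_t n_elements)
{
	struct my_ring *ring;

	/*
	 * struct_size() computes sizeof(*ring) +
	 * n_elements * sizeof(ring->elements[0]), with overflow checking.
	 */
	ring = kzalloc(struct_size(ring, elements, n_elements), GFP_KERNEL);
	return ring;
}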
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 16d73b22f3f5..09583a08e141 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -31,7 +31,7 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
+#include <drm/drm_simple_kms_helper.h>
#include "qxl_drv.h"
#include "qxl_object.h"
@@ -372,19 +372,6 @@ static void qxl_crtc_update_monitors_config(struct drm_crtc *crtc,
static void qxl_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_device *dev = crtc->dev;
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
qxl_crtc_update_monitors_config(crtc, "flush");
}
@@ -1021,9 +1008,6 @@ static struct drm_encoder *qxl_best_encoder(struct drm_connector *connector)
return &qxl_output->enc;
}
-static const struct drm_encoder_helper_funcs qxl_enc_helper_funcs = {
-};
-
static const struct drm_connector_helper_funcs qxl_connector_helper_funcs = {
.get_modes = qxl_conn_get_modes,
.mode_valid = qxl_conn_mode_valid,
@@ -1073,15 +1057,6 @@ static const struct drm_connector_funcs qxl_connector_funcs = {
.atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
};
-static void qxl_enc_destroy(struct drm_encoder *encoder)
-{
- drm_encoder_cleanup(encoder);
-}
-
-static const struct drm_encoder_funcs qxl_enc_funcs = {
- .destroy = qxl_enc_destroy,
-};
-
static int qxl_mode_create_hotplug_mode_update_property(struct qxl_device *qdev)
{
if (qdev->hotplug_mode_update_property)
@@ -1100,6 +1075,7 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
struct qxl_output *qxl_output;
struct drm_connector *connector;
struct drm_encoder *encoder;
+ int ret;
qxl_output = kzalloc(sizeof(struct qxl_output), GFP_KERNEL);
if (!qxl_output)
@@ -1112,15 +1088,19 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_connector_init(dev, &qxl_output->base,
&qxl_connector_funcs, DRM_MODE_CONNECTOR_VIRTUAL);
- drm_encoder_init(dev, &qxl_output->enc, &qxl_enc_funcs,
- DRM_MODE_ENCODER_VIRTUAL, NULL);
+ ret = drm_simple_encoder_init(dev, &qxl_output->enc,
+ DRM_MODE_ENCODER_VIRTUAL);
+ if (ret) {
+ drm_err(dev, "drm_simple_encoder_init() failed, error %d\n",
+ ret);
+ goto err_drm_connector_cleanup;
+ }
/* we get HPD via client monitors config */
connector->polled = DRM_CONNECTOR_POLL_HPD;
encoder->possible_crtcs = 1 << num_output;
drm_connector_attach_encoder(&qxl_output->base,
&qxl_output->enc);
- drm_encoder_helper_add(encoder, &qxl_enc_helper_funcs);
drm_connector_helper_add(connector, &qxl_connector_helper_funcs);
drm_object_attach_property(&connector->base,
@@ -1130,6 +1110,11 @@ static int qdev_output_init(struct drm_device *dev, int num_output)
drm_object_attach_property(&connector->base,
dev->mode_config.suggested_y_property, 0);
return 0;
+
+err_drm_connector_cleanup:
+ drm_connector_cleanup(&qxl_output->base);
+ kfree(qxl_output);
+ return ret;
}
static struct drm_framebuffer *
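The qxl_display.c changes drop the empty encoder funcs and helper funcs in favour of drm_simple_encoder_init(), which supplies the boilerplate (its .destroy is drm_encoder_cleanup), and add the error unwind the old code lacked. A sketch of the conversion for a virtual encoder, with hypothetical names:

#include <drm/drm_simple_kms_helper.h>

static int my_encoder_init(struct drm_device *dev, struct drm_encoder *enc,
			   int crtc_index)
{
	int ret;

	/* No custom funcs needed: the helper provides them. */
	ret = drm_simple_encoder_init(dev, enc, DRM_MODE_ENCODER_VIRTUAL);
	if (ret)
		return ret;

	enc->possible_crtcs = 1 << crtc_index;
	return 0;
}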
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 1d601f57a6ba..4fda3f9b29f4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -34,6 +34,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_modeset_helper.h>
@@ -132,21 +133,30 @@ free_dev:
return ret;
}
+static void qxl_drm_release(struct drm_device *dev)
+{
+ struct qxl_device *qdev = dev->dev_private;
+
+ /*
+ * TODO: qxl_device_fini() call should be in qxl_pci_remove(),
+ * reordering qxl_modeset_fini() + qxl_device_fini() calls is
+ * non-trivial though.
+ */
+ qxl_modeset_fini(qdev);
+ qxl_device_fini(qdev);
+ dev->dev_private = NULL;
+ kfree(qdev);
+}
+
static void
qxl_pci_remove(struct pci_dev *pdev)
{
struct drm_device *dev = pci_get_drvdata(pdev);
- struct qxl_device *qdev = dev->dev_private;
drm_dev_unregister(dev);
-
- qxl_modeset_fini(qdev);
- qxl_device_fini(qdev);
+ drm_atomic_helper_shutdown(dev);
if (is_vga(pdev))
vga_put(pdev, VGA_RSRC_LEGACY_IO);
-
- dev->dev_private = NULL;
- kfree(qdev);
drm_dev_put(dev);
}
@@ -279,6 +289,8 @@ static struct drm_driver qxl_driver = {
.major = 0,
.minor = 1,
.patchlevel = 0,
+
+ .release = qxl_drm_release,
};
static int __init qxl_init(void)
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index bfc1631093e9..70b20ee4741a 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -299,12 +299,12 @@ void qxl_device_fini(struct qxl_device *qdev)
{
qxl_bo_unref(&qdev->current_release_bo[0]);
qxl_bo_unref(&qdev->current_release_bo[1]);
+ qxl_gem_fini(qdev);
+ qxl_bo_fini(qdev);
flush_work(&qdev->gc_work);
qxl_ring_free(qdev->command_ring);
qxl_ring_free(qdev->cursor_ring);
qxl_ring_free(qdev->release_ring);
- qxl_gem_fini(qdev);
- qxl_bo_fini(qdev);
io_mapping_free(qdev->surface_mapping);
io_mapping_free(qdev->vram_mapping);
iounmap(qdev->ram_header);
diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
index 16a5e903533d..62a5e424971b 100644
--- a/drivers/gpu/drm/qxl/qxl_ttm.c
+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
@@ -48,11 +48,6 @@ static struct qxl_device *qxl_get_qdev(struct ttm_bo_device *bdev)
return qdev;
}
-static int qxl_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int qxl_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -256,7 +251,6 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo,
static struct ttm_bo_driver qxl_bo_driver = {
.ttm_tt_create = &qxl_ttm_tt_create,
- .invalidate_caches = &qxl_invalidate_caches,
.init_mem_type = &qxl_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &qxl_evict_flags,
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index be583695427a..91811757104c 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -2231,6 +2231,7 @@ static const struct drm_crtc_helper_funcs atombios_helper_funcs = {
.prepare = atombios_crtc_prepare,
.commit = atombios_crtc_commit,
.disable = atombios_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
void radeon_atombios_init_crtc(struct drm_device *dev,
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index a522e092038b..266e3cbbd09b 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -1263,7 +1263,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
* locking inversion with the driver load path. And the access here is
* completely racy anyway. So don't bother with locking for now.
*/
- return dev->open_count == 0;
+ return atomic_read(&dev->open_count) == 0;
}
static const struct vga_switcheroo_client_ops radeon_switcheroo_ops = {
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index d07c7db0c815..35db79a168bf 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -45,6 +45,10 @@
#include "atom.h"
#include "radeon.h"
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc);
+int radeon_enable_vblank_kms(struct drm_crtc *crtc);
+void radeon_disable_vblank_kms(struct drm_crtc *crtc);
+
static void avivo_crtc_load_lut(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
@@ -460,7 +464,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
(DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
(!ASIC_IS_AVIVO(rdev) ||
((int) (work->target_vblank -
- dev->driver->get_vblank_counter(dev, work->crtc_id)) > 0)))
+ crtc->funcs->get_vblank_counter(crtc)) > 0)))
usleep_range(1000, 2000);
/* We borrow the event spin lock for protecting flip_status */
@@ -576,7 +580,7 @@ static int radeon_crtc_page_flip_target(struct drm_crtc *crtc,
}
work->base = base;
work->target_vblank = target - (uint32_t)drm_crtc_vblank_count(crtc) +
- dev->driver->get_vblank_counter(dev, work->crtc_id);
+ crtc->funcs->get_vblank_counter(crtc);
/* We borrow the event spin lock for protecting flip_work */
spin_lock_irqsave(&crtc->dev->event_lock, flags);
@@ -668,6 +672,10 @@ static const struct drm_crtc_funcs radeon_crtc_funcs = {
.set_config = radeon_crtc_set_config,
.destroy = radeon_crtc_destroy,
.page_flip_target = radeon_crtc_page_flip_target,
+ .get_vblank_counter = radeon_get_vblank_counter_kms,
+ .enable_vblank = radeon_enable_vblank_kms,
+ .disable_vblank = radeon_disable_vblank_kms,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static void radeon_crtc_init(struct drm_device *dev, int index)
@@ -1973,3 +1981,16 @@ int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
return ret;
}
+
+bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
+{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
+
+ return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
+ stime, etime, mode);
+}
diff --git a/drivers/gpu/drm/radeon/radeon_dp_mst.c b/drivers/gpu/drm/radeon/radeon_dp_mst.c
index 28eef9282874..008308780443 100644
--- a/drivers/gpu/drm/radeon/radeon_dp_mst.c
+++ b/drivers/gpu/drm/radeon/radeon_dp_mst.c
@@ -301,35 +301,8 @@ static struct drm_connector *radeon_dp_add_mst_connector(struct drm_dp_mst_topol
return connector;
}
-static void radeon_dp_register_mst_connector(struct drm_connector *connector)
-{
- struct drm_device *dev = connector->dev;
- struct radeon_device *rdev = dev->dev_private;
-
- radeon_fb_add_connector(rdev, connector);
-
- drm_connector_register(connector);
-}
-
-static void radeon_dp_destroy_mst_connector(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_connector *connector)
-{
- struct radeon_connector *master = container_of(mgr, struct radeon_connector, mst_mgr);
- struct drm_device *dev = master->base.dev;
- struct radeon_device *rdev = dev->dev_private;
-
- drm_connector_unregister(connector);
- radeon_fb_remove_connector(rdev, connector);
- drm_connector_cleanup(connector);
-
- kfree(connector);
- DRM_DEBUG_KMS("\n");
-}
-
static const struct drm_dp_mst_topology_cbs mst_cbs = {
.add_connector = radeon_dp_add_mst_connector,
- .register_connector = radeon_dp_register_mst_connector,
- .destroy_connector = radeon_dp_destroy_mst_connector,
};
static struct
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index fd74e2611185..59f8186a2415 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -37,6 +37,7 @@
#include <linux/vga_switcheroo.h>
#include <linux/mmu_notifier.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_helper.h>
@@ -119,9 +120,6 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
int radeon_suspend_kms(struct drm_device *dev, bool suspend,
bool fbcon, bool freeze);
int radeon_resume_kms(struct drm_device *dev, bool resume, bool fbcon);
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe);
-int radeon_enable_vblank_kms(struct drm_device *dev, unsigned int pipe);
-void radeon_disable_vblank_kms(struct drm_device *dev, unsigned int pipe);
void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
@@ -325,6 +323,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
unsigned long flags = 0;
+ struct drm_device *dev;
int ret;
if (!ent)
@@ -365,7 +364,44 @@ static int radeon_pci_probe(struct pci_dev *pdev,
if (ret)
return ret;
- return drm_get_pci_dev(pdev, ent, &kms_driver);
+ dev = drm_dev_alloc(&kms_driver, &pdev->dev);
+ if (IS_ERR(dev))
+ return PTR_ERR(dev);
+
+ ret = pci_enable_device(pdev);
+ if (ret)
+ goto err_free;
+
+ dev->pdev = pdev;
+#ifdef __alpha__
+ dev->hose = pdev->sysdata;
+#endif
+
+ pci_set_drvdata(pdev, dev);
+
+ if (pci_find_capability(dev->pdev, PCI_CAP_ID_AGP))
+ dev->agp = drm_agp_init(dev);
+ if (dev->agp) {
+ dev->agp->agp_mtrr = arch_phys_wc_add(
+ dev->agp->agp_info.aper_base,
+ dev->agp->agp_info.aper_size *
+ 1024 * 1024);
+ }
+
+ ret = drm_dev_register(dev, ent->driver_data);
+ if (ret)
+ goto err_agp;
+
+ return 0;
+
+err_agp:
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ pci_disable_device(pdev);
+err_free:
+ drm_dev_put(dev);
+ return ret;
}
static void
@@ -563,29 +599,14 @@ static const struct file_operations radeon_driver_kms_fops = {
#endif
};
-static bool
-radeon_get_crtc_scanout_position(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
-{
- return radeon_get_crtc_scanoutpos(dev, pipe, 0, vpos, hpos,
- stime, etime, mode);
-}
-
static struct drm_driver kms_driver = {
.driver_features =
- DRIVER_USE_AGP | DRIVER_GEM | DRIVER_RENDER,
+ DRIVER_GEM | DRIVER_RENDER,
.load = radeon_driver_load_kms,
.open = radeon_driver_open_kms,
.postclose = radeon_driver_postclose_kms,
.lastclose = radeon_driver_lastclose_kms,
.unload = radeon_driver_unload_kms,
- .get_vblank_counter = radeon_get_vblank_counter_kms,
- .enable_vblank = radeon_enable_vblank_kms,
- .disable_vblank = radeon_disable_vblank_kms,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
- .get_scanout_position = radeon_get_crtc_scanout_position,
.irq_preinstall = radeon_driver_irq_preinstall_kms,
.irq_postinstall = radeon_driver_irq_postinstall_kms,
.irq_uninstall = radeon_driver_irq_uninstall_kms,
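The radeon_drv.c hunk removes the driver-wide vblank hooks; together with the radeon_display.c and radeon_kms.c changes they move into drm_crtc_funcs, taking a struct drm_crtc instead of a (dev, pipe) pair. A sketch of the per-CRTC shape these callbacks now have, with hypothetical names:

#include <drm/drm_crtc.h>
#include <drm/drm_vblank.h>

static u32 my_get_vblank_counter(struct drm_crtc *crtc)
{
	/* crtc->index replaces the old 'pipe' argument. */
	return 0;	/* read the hardware frame counter for crtc->index */
}

static int my_enable_vblank(struct drm_crtc *crtc)
{
	return 0;	/* unmask the vblank interrupt for crtc->index */
}

static void my_disable_vblank(struct drm_crtc *crtc)
{
	/* mask the vblank interrupt for crtc->index */
}

static const struct drm_crtc_funcs my_crtc_funcs = {
	.get_vblank_counter = my_get_vblank_counter,
	.enable_vblank = my_enable_vblank,
	.disable_vblank = my_disable_vblank,
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};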
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index ec0b7d6c994d..cf3156a65fc1 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -354,15 +354,10 @@ int radeon_fbdev_init(struct radeon_device *rdev)
drm_fb_helper_prepare(rdev->ddev, &rfbdev->helper,
&radeon_fb_helper_funcs);
- ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper,
- RADEONFB_CONN_LIMIT);
+ ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper);
if (ret)
goto free;
- ret = drm_fb_helper_single_add_all_connectors(&rfbdev->helper);
- if (ret)
- goto fini;
-
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(rdev->ddev);
@@ -404,15 +399,3 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
return true;
return false;
}
-
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_add_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
-
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector)
-{
- if (rdev->mode_info.rfbdev)
- drm_fb_helper_remove_one_connector(&rdev->mode_info.rfbdev->helper, connector);
-}
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index d24f23a81656..58176db85952 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -32,6 +32,7 @@
#include <linux/uaccess.h>
#include <linux/vga_switcheroo.h>
+#include <drm/drm_agpsupport.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_file.h>
#include <drm/drm_ioctl.h>
@@ -77,6 +78,11 @@ void radeon_driver_unload_kms(struct drm_device *dev)
radeon_modeset_fini(rdev);
radeon_device_fini(rdev);
+ if (dev->agp)
+ arch_phys_wc_del(dev->agp->agp_mtrr);
+ kfree(dev->agp);
+ dev->agp = NULL;
+
done_free:
kfree(rdev);
dev->dev_private = NULL;
@@ -739,14 +745,15 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
/**
* radeon_get_vblank_counter_kms - get frame count
*
- * @dev: drm dev pointer
- * @pipe: crtc to get the frame count from
+ * @crtc: crtc to get the frame count from
*
* Gets the frame count on the requested crtc (all asics).
* Returns frame count on success, -EINVAL on failure.
*/
-u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
+u32 radeon_get_vblank_counter_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
int vpos, hpos, stat;
u32 count;
struct radeon_device *rdev = dev->dev_private;
@@ -808,25 +815,26 @@ u32 radeon_get_vblank_counter_kms(struct drm_device *dev, unsigned int pipe)
/**
* radeon_enable_vblank_kms - enable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to enable vblank interrupt for
*
* Enable the interrupt on the requested crtc (all asics).
* Returns 0 on success, -EINVAL on failure.
*/
-int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
+int radeon_enable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
int r;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %u\n", pipe);
return -EINVAL;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = true;
+ rdev->irq.crtc_vblank_int[pipe] = true;
r = radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
return r;
@@ -835,23 +843,24 @@ int radeon_enable_vblank_kms(struct drm_device *dev, int crtc)
/**
* radeon_disable_vblank_kms - disable vblank interrupt
*
- * @dev: drm dev pointer
* @crtc: crtc to disable vblank interrupt for
*
* Disable the interrupt on the requested crtc (all asics).
*/
-void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
+void radeon_disable_vblank_kms(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct radeon_device *rdev = dev->dev_private;
unsigned long irqflags;
- if (crtc < 0 || crtc >= rdev->num_crtc) {
- DRM_ERROR("Invalid crtc %d\n", crtc);
+ if (pipe >= rdev->num_crtc) {
+ DRM_ERROR("Invalid crtc %u\n", pipe);
return;
}
spin_lock_irqsave(&rdev->irq.lock, irqflags);
- rdev->irq.crtc_vblank_int[crtc] = false;
+ rdev->irq.crtc_vblank_int[pipe] = false;
radeon_irq_set(rdev);
spin_unlock_irqrestore(&rdev->irq.lock, irqflags);
}
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index a1985a552794..8817fd033cd0 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -1111,7 +1111,8 @@ static const struct drm_crtc_helper_funcs legacy_helper_funcs = {
.mode_set_base_atomic = radeon_crtc_set_base_atomic,
.prepare = radeon_crtc_prepare,
.commit = radeon_crtc_commit,
- .disable = radeon_crtc_disable
+ .disable = radeon_crtc_disable,
+ .get_scanout_position = radeon_get_crtc_scanout_position,
};
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 96565171d13e..c7f223743d46 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -880,6 +880,12 @@ extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
ktime_t *stime, ktime_t *etime,
const struct drm_display_mode *mode);
+extern bool
+radeon_get_crtc_scanout_position(struct drm_crtc *crtc, bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
+
extern bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev);
extern struct edid *
radeon_bios_get_hardcoded_edid(struct radeon_device *rdev);
@@ -980,9 +986,6 @@ bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj)
void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id);
-void radeon_fb_add_connector(struct radeon_device *rdev, struct drm_connector *connector);
-void radeon_fb_remove_connector(struct radeon_device *rdev, struct drm_connector *connector);
-
void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id);
int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled);
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index 3b92311d30b9..badf1b6d1549 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -66,11 +66,6 @@ static struct radeon_device *radeon_get_rdev(struct ttm_bo_device *bdev)
return rdev;
}
-static int radeon_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int radeon_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -774,7 +769,6 @@ static struct ttm_bo_driver radeon_bo_driver = {
.ttm_tt_create = &radeon_ttm_tt_create,
.ttm_tt_populate = &radeon_ttm_tt_populate,
.ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate,
- .invalidate_caches = &radeon_invalidate_caches,
.init_mem_type = &radeon_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = &radeon_evict_flags,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
index 3cd83a030a04..c07c6a88aff0 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_encoder.c
@@ -121,7 +121,7 @@ int rcar_du_encoder_init(struct rcar_du_device *rcdu,
* Attach the bridge to the encoder. The bridge will create the
* connector.
*/
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return ret;
diff --git a/drivers/gpu/drm/rcar-du/rcar_lvds.c b/drivers/gpu/drm/rcar-du/rcar_lvds.c
index 8ffa4fbbdeb3..ab0d49618cf9 100644
--- a/drivers/gpu/drm/rcar-du/rcar_lvds.c
+++ b/drivers/gpu/drm/rcar-du/rcar_lvds.c
@@ -23,6 +23,7 @@
#include <drm/drm_bridge.h>
#include <drm/drm_of.h>
#include <drm/drm_panel.h>
+#include <drm/drm_print.h>
#include <drm/drm_probe_helper.h>
#include "rcar_lvds.h"
@@ -590,8 +591,9 @@ static void __rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
+ struct drm_atomic_state *state = old_bridge_state->base.state;
struct drm_connector *connector;
struct drm_crtc *crtc;
@@ -603,7 +605,7 @@ static void rcar_lvds_atomic_enable(struct drm_bridge *bridge,
}
static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state)
+ struct drm_bridge_state *old_bridge_state)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
@@ -618,7 +620,8 @@ static void rcar_lvds_atomic_disable(struct drm_bridge *bridge,
/* Disable the companion LVDS encoder in dual-link mode. */
if (lvds->link_type != RCAR_LVDS_SINGLE_LINK && lvds->companion)
- lvds->companion->funcs->atomic_disable(lvds->companion, state);
+ lvds->companion->funcs->atomic_disable(lvds->companion,
+ old_bridge_state);
clk_disable_unprepare(lvds->clocks.mod);
}
@@ -641,7 +644,8 @@ static bool rcar_lvds_mode_fixup(struct drm_bridge *bridge,
return true;
}
-static int rcar_lvds_attach(struct drm_bridge *bridge)
+static int rcar_lvds_attach(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags)
{
struct rcar_lvds *lvds = bridge_to_rcar_lvds(bridge);
struct drm_connector *connector = &lvds->connector;
@@ -651,7 +655,12 @@ static int rcar_lvds_attach(struct drm_bridge *bridge)
/* If we have a next bridge just attach it. */
if (lvds->next_bridge)
return drm_bridge_attach(bridge->encoder, lvds->next_bridge,
- bridge);
+ bridge, flags);
+
+ if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR) {
+ DRM_ERROR("Fix bridge driver to make connector optional!");
+ return -EINVAL;
+ }
/* Otherwise if we have a panel, create a connector. */
if (!lvds->panel)
@@ -682,6 +691,9 @@ static void rcar_lvds_detach(struct drm_bridge *bridge)
static const struct drm_bridge_funcs rcar_lvds_bridge_ops = {
.attach = rcar_lvds_attach,
.detach = rcar_lvds_detach,
+ .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
+ .atomic_reset = drm_atomic_helper_bridge_reset,
.atomic_enable = rcar_lvds_atomic_enable,
.atomic_disable = rcar_lvds_atomic_disable,
.mode_fixup = rcar_lvds_mode_fixup,
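The rcar_lvds.c changes adopt the new drm_bridge_attach() flags argument and the bridge-state helpers; a bridge that still creates its own connector must reject DRM_BRIDGE_ATTACH_NO_CONNECTOR, as the hunk above does. A sketch of an attach callback honouring the flag, with hypothetical names:

#include <drm/drm_bridge.h>

static int my_bridge_attach(struct drm_bridge *bridge,
			    enum drm_bridge_attach_flags flags)
{
	/*
	 * This bridge cannot (yet) leave connector creation to the
	 * display driver, so refuse the request up front.
	 */
	if (flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)
		return -EINVAL;

	/* ... create the connector and attach it to bridge->encoder ... */
	return 0;
}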
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
index 521fe42ac5e2..2fdc455c4ad7 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_fbdev.c
@@ -124,7 +124,7 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &rockchip_drm_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, ROCKCHIP_MAX_CONNECTOR);
+ ret = drm_fb_helper_init(dev, helper);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
"Failed to initialize drm fb helper - %d.\n",
@@ -132,13 +132,6 @@ int rockchip_drm_fbdev_init(struct drm_device *dev)
return ret;
}
- ret = drm_fb_helper_single_add_all_connectors(helper);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev,
- "Failed to add connectors - %d.\n", ret);
- goto err_drm_fb_helper_fini;
- }
-
ret = drm_fb_helper_initial_config(helper, PREFERRED_BPP);
if (ret < 0) {
DRM_DEV_ERROR(dev->dev,
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
index 7582d0e6a60a..0d1884684dcb 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_gem.c
@@ -6,6 +6,7 @@
#include <linux/dma-buf.h>
#include <linux/iommu.h>
+#include <linux/vmalloc.h>
#include <drm/drm.h>
#include <drm/drm_gem.h>
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index d04b3492bdac..cecb2cc781f5 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -724,7 +724,7 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
int max_scale = win->phy->scl ? FRAC_16_16(8, 1) :
DRM_PLANE_HELPER_NO_SCALING;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index 0b3d18c457b2..cc672620d6e0 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -328,7 +328,7 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
{
int act_height;
- act_height = (src_h + vskiplines - 1) / vskiplines;
+ act_height = DIV_ROUND_UP(src_h, vskiplines);
if (act_height == dst_h)
return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
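The vop.h hunk replaces an open-coded ceiling division with DIV_ROUND_UP(), which expands to ((n) + (d) - 1) / (d) and reads as intent rather than arithmetic. The two forms are equivalent for positive integers; a minimal illustration:

#include <linux/kernel.h>

static inline int my_act_height(int src_h, int vskiplines)
{
	/* (src_h + vskiplines - 1) / vskiplines, e.g. 1081/4 -> 271 */
	return DIV_ROUND_UP(src_h, vskiplines);
}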
diff --git a/drivers/gpu/drm/rockchip/rockchip_lvds.c b/drivers/gpu/drm/rockchip/rockchip_lvds.c
index f25a36743cbd..449a62908d21 100644
--- a/drivers/gpu/drm/rockchip/rockchip_lvds.c
+++ b/drivers/gpu/drm/rockchip/rockchip_lvds.c
@@ -646,7 +646,7 @@ static int rockchip_lvds_bind(struct device *dev, struct device *master,
goto err_free_connector;
}
} else {
- ret = drm_bridge_attach(encoder, lvds->bridge, NULL);
+ ret = drm_bridge_attach(encoder, lvds->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
index ae730275a34f..90784781e515 100644
--- a/drivers/gpu/drm/rockchip/rockchip_rgb.c
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -98,7 +98,8 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
if (of_property_read_u32(endpoint, "reg", &endpoint_id))
endpoint_id = 0;
- if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+ /* if subdriver (> 0) or error case (< 0), ignore entry */
+ if (rockchip_drm_endpoint_is_subdriver(endpoint) != 0)
continue;
child_count++;
@@ -144,7 +145,7 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
rgb->bridge = bridge;
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
DRM_DEV_ERROR(drm_dev->dev,
"failed to attach bridge: %d\n", ret);
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
index d79086498aff..877ce9b127f1 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler_trace.h
@@ -59,6 +59,33 @@ TRACE_EVENT(drm_sched_job,
__entry->job_count, __entry->hw_job_count)
);
+TRACE_EVENT(drm_run_job,
+ TP_PROTO(struct drm_sched_job *sched_job, struct drm_sched_entity *entity),
+ TP_ARGS(sched_job, entity),
+ TP_STRUCT__entry(
+ __field(struct drm_sched_entity *, entity)
+ __field(struct dma_fence *, fence)
+ __field(const char *, name)
+ __field(uint64_t, id)
+ __field(u32, job_count)
+ __field(int, hw_job_count)
+ ),
+
+ TP_fast_assign(
+ __entry->entity = entity;
+ __entry->id = sched_job->id;
+ __entry->fence = &sched_job->s_fence->finished;
+ __entry->name = sched_job->sched->name;
+ __entry->job_count = spsc_queue_count(&entity->job_queue);
+ __entry->hw_job_count = atomic_read(
+ &sched_job->sched->hw_rq_count);
+ ),
+ TP_printk("entity=%p, id=%llu, fence=%p, ring=%s, job count:%u, hw job count:%d",
+ __entry->entity, __entry->id,
+ __entry->fence, __entry->name,
+ __entry->job_count, __entry->hw_job_count)
+);
+
TRACE_EVENT(drm_sched_process_job,
TP_PROTO(struct drm_sched_fence *fence),
TP_ARGS(fence),
diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c
index 63bccd201b97..c803e14eed91 100644
--- a/drivers/gpu/drm/scheduler/sched_entity.c
+++ b/drivers/gpu/drm/scheduler/sched_entity.c
@@ -84,6 +84,24 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
+ * drm_sched_entity_modify_sched - Modify sched of an entity
+ * @entity: scheduler entity to modify
+ * @sched_list: the list of new drm scheds which will replace
+ * existing entity->sched_list
+ * @num_sched_list: number of drm scheds in sched_list
+ */
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ WARN_ON(!num_sched_list || !sched_list);
+
+ entity->sched_list = sched_list;
+ entity->num_sched_list = num_sched_list;
+}
+EXPORT_SYMBOL(drm_sched_entity_modify_sched);
+
+/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
@@ -120,38 +138,6 @@ bool drm_sched_entity_is_ready(struct drm_sched_entity *entity)
}
/**
- * drm_sched_entity_get_free_sched - Get the rq from rq_list with least load
- *
- * @entity: scheduler entity
- *
- * Return the pointer to the rq with least load.
- */
-static struct drm_sched_rq *
-drm_sched_entity_get_free_sched(struct drm_sched_entity *entity)
-{
- struct drm_sched_rq *rq = NULL;
- unsigned int min_score = UINT_MAX, num_score;
- int i;
-
- for (i = 0; i < entity->num_sched_list; ++i) {
- struct drm_gpu_scheduler *sched = entity->sched_list[i];
-
- if (!entity->sched_list[i]->ready) {
- DRM_WARN("sched%s is not ready, skipping", sched->name);
- continue;
- }
-
- num_score = atomic_read(&sched->score);
- if (num_score < min_score) {
- min_score = num_score;
- rq = &entity->sched_list[i]->sched_rq[entity->priority];
- }
- }
-
- return rq;
-}
-
-/**
* drm_sched_entity_flush - Flush a context entity
*
* @entity: scheduler entity
@@ -461,6 +447,7 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity)
void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
{
struct dma_fence *fence;
+ struct drm_gpu_scheduler *sched;
struct drm_sched_rq *rq;
if (spsc_queue_count(&entity->job_queue) || entity->num_sched_list <= 1)
@@ -471,7 +458,8 @@ void drm_sched_entity_select_rq(struct drm_sched_entity *entity)
return;
spin_lock(&entity->rq_lock);
- rq = drm_sched_entity_get_free_sched(entity);
+ sched = drm_sched_pick_best(entity->sched_list, entity->num_sched_list);
+ rq = sched ? &sched->sched_rq[entity->priority] : NULL;
if (rq != entity->rq) {
drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
@@ -498,7 +486,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
bool first;
trace_drm_sched_job(sched_job, entity);
- atomic_inc(&entity->rq->sched->score);
+ atomic_inc(&entity->rq->sched->num_jobs);
WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
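The sched_entity.c rework delegates run-queue selection to drm_sched_pick_best() (added in sched_main.c below), which scans the scheduler list and returns the one with the fewest queued jobs, skipping schedulers that are not ready. The core of that selection is a plain least-loaded scan; a reduced sketch over integers:

#include <linux/kernel.h>

/* Return the index with the smallest job count, or -1 if n == 0. */
static int my_pick_least_loaded(const unsigned int *num_jobs, int n)
{
	unsigned int min_jobs = UINT_MAX;
	int i, best = -1;

	for (i = 0; i < n; i++) {
		if (num_jobs[i] < min_jobs) {
			min_jobs = num_jobs[i];
			best = i;
		}
	}

	return best;
}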
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index 71ce6215956f..a18eabf692e4 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -92,7 +92,6 @@ void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
if (!list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_inc(&rq->sched->score);
list_add_tail(&entity->list, &rq->entities);
spin_unlock(&rq->lock);
}
@@ -111,7 +110,6 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
if (list_empty(&entity->list))
return;
spin_lock(&rq->lock);
- atomic_dec(&rq->sched->score);
list_del_init(&entity->list);
if (rq->current_entity == entity)
rq->current_entity = NULL;
@@ -222,8 +220,7 @@ EXPORT_SYMBOL(drm_sched_fault);
*
* Suspend the delayed work timeout for the scheduler. This is done by
* modifying the delayed work timeout to an arbitrary large value,
- * MAX_SCHEDULE_TIMEOUT in this case. Note that this function can be
- * called from an IRQ context.
+ * MAX_SCHEDULE_TIMEOUT in this case.
*
* Returns the timeout remaining
*
@@ -252,46 +249,41 @@ EXPORT_SYMBOL(drm_sched_suspend_timeout);
* @sched: scheduler instance for which to resume the timeout
* @remaining: remaining timeout
*
- * Resume the delayed work timeout for the scheduler. Note that
- * this function can be called from an IRQ context.
+ * Resume the delayed work timeout for the scheduler.
*/
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining)
{
- unsigned long flags;
-
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
if (list_empty(&sched->ring_mirror_list))
cancel_delayed_work(&sched->work_tdr);
else
mod_delayed_work(system_wq, &sched->work_tdr, remaining);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
EXPORT_SYMBOL(drm_sched_resume_timeout);
static void drm_sched_job_begin(struct drm_sched_job *s_job)
{
struct drm_gpu_scheduler *sched = s_job->sched;
- unsigned long flags;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_add_tail(&s_job->node, &sched->ring_mirror_list);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
static void drm_sched_job_timedout(struct work_struct *work)
{
struct drm_gpu_scheduler *sched;
struct drm_sched_job *job;
- unsigned long flags;
sched = container_of(work, struct drm_gpu_scheduler, work_tdr.work);
/* Protects against concurrent deletion in drm_sched_get_cleanup_job */
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -302,7 +294,7 @@ static void drm_sched_job_timedout(struct work_struct *work)
* is parked at which point it's safe.
*/
list_del_init(&job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
job->sched->ops->timedout_job(job);
@@ -315,12 +307,12 @@ static void drm_sched_job_timedout(struct work_struct *work)
sched->free_guilty = false;
}
} else {
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
/**
@@ -383,7 +375,6 @@ EXPORT_SYMBOL(drm_sched_increase_karma);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
kthread_park(sched->thread);
@@ -417,9 +408,9 @@ void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad)
* remove job from ring_mirror_list.
* Locking here is for concurrent resume timeout
*/
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
list_del_init(&s_job->node);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
/*
* Wait for job's HW fence callback to finish using s_job
@@ -462,7 +453,6 @@ EXPORT_SYMBOL(drm_sched_stop);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
{
struct drm_sched_job *s_job, *tmp;
- unsigned long flags;
int r;
/*
@@ -491,9 +481,9 @@ void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery)
}
if (full_recovery) {
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
drm_sched_start_timeout(sched);
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
}
kthread_unpark(sched->thread);
@@ -657,7 +647,7 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
struct drm_gpu_scheduler *sched = s_fence->sched;
atomic_dec(&sched->hw_rq_count);
- atomic_dec(&sched->score);
+ atomic_dec(&sched->num_jobs);
trace_drm_sched_process_job(s_fence);
@@ -677,7 +667,6 @@ static struct drm_sched_job *
drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
{
struct drm_sched_job *job;
- unsigned long flags;
/*
* Don't destroy jobs while the timeout worker is running OR thread
@@ -688,7 +677,7 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
__kthread_should_park(sched->thread))
return NULL;
- spin_lock_irqsave(&sched->job_list_lock, flags);
+ spin_lock(&sched->job_list_lock);
job = list_first_entry_or_null(&sched->ring_mirror_list,
struct drm_sched_job, node);
@@ -702,12 +691,48 @@ drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
drm_sched_start_timeout(sched);
}
- spin_unlock_irqrestore(&sched->job_list_lock, flags);
+ spin_unlock(&sched->job_list_lock);
return job;
}
/**
+ * drm_sched_pick_best - Get a drm sched from a sched_list with the least load
+ * @sched_list: list of drm_gpu_schedulers
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list
+ *
+ * Returns a pointer to the sched with the least load, or NULL if none of
+ * the drm_gpu_schedulers is ready.
+ */
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list)
+{
+ struct drm_gpu_scheduler *sched, *picked_sched = NULL;
+ int i;
+ unsigned int min_jobs = UINT_MAX, num_jobs;
+
+ for (i = 0; i < num_sched_list; ++i) {
+ sched = sched_list[i];
+
+ if (!sched->ready) {
+ DRM_WARN("scheduler %s is not ready, skipping",
+ sched->name);
+ continue;
+ }
+
+ num_jobs = atomic_read(&sched->num_jobs);
+ if (num_jobs < min_jobs) {
+ min_jobs = num_jobs;
+ picked_sched = sched;
+ }
+ }
+
+ return picked_sched;
+}
+EXPORT_SYMBOL(drm_sched_pick_best);
+
+/**
* drm_sched_blocked - check if the scheduler is blocked
*
* @sched: scheduler instance
@@ -773,6 +798,7 @@ static int drm_sched_main(void *param)
atomic_inc(&sched->hw_rq_count);
drm_sched_job_begin(sched_job);
+ trace_drm_run_job(sched_job, entity);
fence = sched->ops->run_job(sched_job);
drm_sched_fence_scheduled(s_fence);
@@ -832,7 +858,7 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
spin_lock_init(&sched->job_list_lock);
atomic_set(&sched->hw_rq_count, 0);
INIT_DELAYED_WORK(&sched->work_tdr, drm_sched_job_timedout);
- atomic_set(&sched->score, 0);
+ atomic_set(&sched->num_jobs, 0);
atomic64_set(&sched->job_id_count, 0);
/* Each scheduler will run on a separate kernel thread */
diff --git a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
index ceac7af9a172..29e367db6118 100644
--- a/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
+++ b/drivers/gpu/drm/selftests/drm_cmdline_selftests.h
@@ -53,6 +53,7 @@ cmdline_test(drm_cmdline_test_rotate_0)
cmdline_test(drm_cmdline_test_rotate_90)
cmdline_test(drm_cmdline_test_rotate_180)
cmdline_test(drm_cmdline_test_rotate_270)
+cmdline_test(drm_cmdline_test_rotate_multiple)
cmdline_test(drm_cmdline_test_rotate_invalid_val)
cmdline_test(drm_cmdline_test_rotate_truncated)
cmdline_test(drm_cmdline_test_hmirror)
diff --git a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
index 520f3e66a384..d96cd890def6 100644
--- a/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
+++ b/drivers/gpu/drm/selftests/test-drm_cmdline_parser.c
@@ -856,6 +856,17 @@ static int drm_cmdline_test_rotate_270(void *ignored)
return 0;
}
+static int drm_cmdline_test_rotate_multiple(void *ignored)
+{
+ struct drm_cmdline_mode mode = { };
+
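+ /* Passing rotate twice is invalid, so parsing this string must fail. */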
+ FAIL_ON(drm_mode_parse_command_line_for_connector("720x480,rotate=0,rotate=90",
+ &no_connector,
+ &mode));
+
+ return 0;
+}
+
static int drm_cmdline_test_rotate_invalid_val(void *ignored)
{
struct drm_cmdline_mode mode = { };
@@ -888,7 +899,7 @@ static int drm_cmdline_test_hmirror(void *ignored)
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_X);
+ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_X));
FAIL_ON(mode.refresh_specified);
@@ -913,7 +924,7 @@ static int drm_cmdline_test_vmirror(void *ignored)
FAIL_ON(!mode.specified);
FAIL_ON(mode.xres != 720);
FAIL_ON(mode.yres != 480);
- FAIL_ON(mode.rotation_reflection != DRM_MODE_REFLECT_Y);
+ FAIL_ON(mode.rotation_reflection != (DRM_MODE_ROTATE_0 | DRM_MODE_REFLECT_Y));
FAIL_ON(mode.refresh_specified);
diff --git a/drivers/gpu/drm/sti/sti_crtc.c b/drivers/gpu/drm/sti/sti_crtc.c
index dc64fbfc4e61..49e6cb8f5836 100644
--- a/drivers/gpu/drm/sti/sti_crtc.c
+++ b/drivers/gpu/drm/sti/sti_crtc.c
@@ -279,12 +279,13 @@ int sti_crtc_vblank_cb(struct notifier_block *nb,
return 0;
}
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
+static int sti_crtc_enable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *dev_priv = dev->dev_private;
struct sti_compositor *compo = dev_priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
- struct drm_crtc *crtc = &compo->mixer[pipe]->drm_crtc;
struct sti_vtg *vtg = compo->vtg[pipe];
DRM_DEBUG_DRIVER("\n");
@@ -297,8 +298,10 @@ int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe)
return 0;
}
-void sti_crtc_disable_vblank(struct drm_device *drm_dev, unsigned int pipe)
+static void sti_crtc_disable_vblank(struct drm_crtc *crtc)
{
+ struct drm_device *drm_dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct sti_private *priv = drm_dev->dev_private;
struct sti_compositor *compo = priv->compo;
struct notifier_block *vtg_vblank_nb = &compo->vtg_vblank_nb[pipe];
@@ -330,6 +333,8 @@ static const struct drm_crtc_funcs sti_crtc_funcs = {
.atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.late_register = sti_crtc_late_register,
+ .enable_vblank = sti_crtc_enable_vblank,
+ .disable_vblank = sti_crtc_disable_vblank,
};
bool sti_crtc_is_main(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/sti/sti_crtc.h b/drivers/gpu/drm/sti/sti_crtc.h
index df489ab14e2b..1132b4586712 100644
--- a/drivers/gpu/drm/sti/sti_crtc.h
+++ b/drivers/gpu/drm/sti/sti_crtc.h
@@ -15,8 +15,6 @@ struct sti_mixer;
int sti_crtc_init(struct drm_device *drm_dev, struct sti_mixer *mixer,
struct drm_plane *primary, struct drm_plane *cursor);
-int sti_crtc_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void sti_crtc_disable_vblank(struct drm_device *dev, unsigned int pipe);
int sti_crtc_vblank_cb(struct notifier_block *nb,
unsigned long event, void *data);
bool sti_crtc_is_main(struct drm_crtc *drm_crtc);
diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c
index a39fc36f815b..50870d8cbb76 100644
--- a/drivers/gpu/drm/sti/sti_drv.c
+++ b/drivers/gpu/drm/sti/sti_drv.c
@@ -21,7 +21,6 @@
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
-#include "sti_crtc.h"
#include "sti_drv.h"
#include "sti_plane.h"
@@ -146,9 +145,6 @@ static struct drm_driver sti_driver = {
.dumb_create = drm_gem_cma_dumb_create,
.fops = &sti_driver_fops,
- .enable_vblank = sti_crtc_enable_vblank,
- .disable_vblank = sti_crtc_disable_vblank,
-
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_get_sg_table = drm_gem_cma_prime_get_sg_table,
diff --git a/drivers/gpu/drm/sti/sti_dvo.c b/drivers/gpu/drm/sti/sti_dvo.c
index b2778ec1cdd7..3d04bfca21a0 100644
--- a/drivers/gpu/drm/sti/sti_dvo.c
+++ b/drivers/gpu/drm/sti/sti_dvo.c
@@ -467,7 +467,7 @@ static int sti_dvo_bind(struct device *dev, struct device *master, void *data)
bridge->of_node = dvo->dev.of_node;
drm_bridge_add(bridge);
- err = drm_bridge_attach(encoder, bridge, NULL);
+ err = drm_bridge_attach(encoder, bridge, NULL, 0);
if (err) {
DRM_ERROR("Failed to attach bridge\n");
return err;
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 2bb32009d117..f3f28d79b0e4 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -701,7 +701,7 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hda;
bridge->funcs = &sti_hda_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 64ed102033c8..18eaf786ffa4 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1281,7 +1281,7 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
bridge->driver_private = hdmi;
bridge->funcs = &sti_hdmi_bridge_funcs;
- drm_bridge_attach(encoder, bridge, NULL);
+ drm_bridge_attach(encoder, bridge, NULL, 0);
connector->encoder = encoder;
diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c
index 5a9f9aca8bc2..ea9fcbdc68b3 100644
--- a/drivers/gpu/drm/stm/drv.c
+++ b/drivers/gpu/drm/stm/drv.c
@@ -72,8 +72,6 @@ static struct drm_driver drv_driver = {
.gem_prime_vmap = drm_gem_cma_prime_vmap,
.gem_prime_vunmap = drm_gem_cma_prime_vunmap,
.gem_prime_mmap = drm_gem_cma_prime_mmap,
- .get_scanout_position = ltdc_crtc_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
};
static int drv_load(struct drm_device *ddev)
diff --git a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
index 4b165635b2d4..2e1f2664495d 100644
--- a/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
+++ b/drivers/gpu/drm/stm/dw_mipi_dsi-stm.c
@@ -377,7 +377,9 @@ static int dw_mipi_dsi_stm_probe(struct platform_device *pdev)
dsi->pllref_clk = devm_clk_get(dev, "ref");
if (IS_ERR(dsi->pllref_clk)) {
ret = PTR_ERR(dsi->pllref_clk);
- DRM_ERROR("Unable to get pll reference clock: %d\n", ret);
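+ /* Probe deferral is expected and retried, so only log real failures. */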
+ if (ret != -EPROBE_DEFER)
+ DRM_ERROR("Unable to get pll reference clock: %d\n",
+ ret);
goto err_clk_get;
}
diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c
index c2815e8ae1da..df585fe64f61 100644
--- a/drivers/gpu/drm/stm/ltdc.c
+++ b/drivers/gpu/drm/stm/ltdc.c
@@ -636,38 +636,13 @@ static void ltdc_crtc_atomic_flush(struct drm_crtc *crtc,
}
}
-static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
- .mode_valid = ltdc_crtc_mode_valid,
- .mode_fixup = ltdc_crtc_mode_fixup,
- .mode_set_nofb = ltdc_crtc_mode_set_nofb,
- .atomic_flush = ltdc_crtc_atomic_flush,
- .atomic_enable = ltdc_crtc_atomic_enable,
- .atomic_disable = ltdc_crtc_atomic_disable,
-};
-
-static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_set(ldev->regs, LTDC_IER, IER_LIE);
-
- return 0;
-}
-
-static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
-{
- struct ltdc_device *ldev = crtc_to_ltdc(crtc);
-
- DRM_DEBUG_DRIVER("\n");
- reg_clear(ldev->regs, LTDC_IER, IER_LIE);
-}
-
-bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool ltdc_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *ddev = crtc->dev;
struct ltdc_device *ldev = ddev->dev_private;
int line, vactive_start, vactive_end, vtotal;
@@ -710,6 +685,39 @@ bool ltdc_crtc_scanoutpos(struct drm_device *ddev, unsigned int pipe,
return true;
}
+static const struct drm_crtc_helper_funcs ltdc_crtc_helper_funcs = {
+ .mode_valid = ltdc_crtc_mode_valid,
+ .mode_fixup = ltdc_crtc_mode_fixup,
+ .mode_set_nofb = ltdc_crtc_mode_set_nofb,
+ .atomic_flush = ltdc_crtc_atomic_flush,
+ .atomic_enable = ltdc_crtc_atomic_enable,
+ .atomic_disable = ltdc_crtc_atomic_disable,
+ .get_scanout_position = ltdc_crtc_get_scanout_position,
+};
+
+static int ltdc_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+ struct drm_crtc_state *state = crtc->state;
+
+ DRM_DEBUG_DRIVER("\n");
+
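+ /* The line interrupt is only meaningful while the CRTC is enabled. */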
+ if (state->enable)
+ reg_set(ldev->regs, LTDC_IER, IER_LIE);
+ else
+ return -EPERM;
+
+ return 0;
+}
+
+static void ltdc_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct ltdc_device *ldev = crtc_to_ltdc(crtc);
+
+ DRM_DEBUG_DRIVER("\n");
+ reg_clear(ldev->regs, LTDC_IER, IER_LIE);
+}
+
static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.destroy = drm_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
@@ -719,6 +727,7 @@ static const struct drm_crtc_funcs ltdc_crtc_funcs = {
.atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = ltdc_crtc_enable_vblank,
.disable_vblank = ltdc_crtc_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
.gamma_set = drm_atomic_helper_legacy_gamma_set,
};
@@ -1100,7 +1109,7 @@ static int ltdc_encoder_init(struct drm_device *ddev, struct drm_bridge *bridge)
drm_encoder_helper_add(encoder, &ltdc_encoder_helper_funcs);
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
drm_encoder_cleanup(encoder);
return -EINVAL;
@@ -1146,12 +1155,14 @@ static int ltdc_get_caps(struct drm_device *ddev)
ldev->caps.pad_max_freq_hz = 90000000;
if (ldev->caps.hw_version == HWVER_10200)
ldev->caps.pad_max_freq_hz = 65000000;
+ ldev->caps.nb_irq = 2;
break;
case HWVER_20101:
ldev->caps.reg_ofs = REG_OFS_4;
ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a1;
ldev->caps.non_alpha_only_l1 = false;
ldev->caps.pad_max_freq_hz = 150000000;
+ ldev->caps.nb_irq = 4;
break;
default:
return -ENODEV;
@@ -1251,13 +1262,21 @@ int ltdc_load(struct drm_device *ddev)
reg_clear(ldev->regs, LTDC_IER,
IER_LIE | IER_RRIE | IER_FUIE | IER_TERRIE);
- for (i = 0; i < MAX_IRQ; i++) {
+ ret = ltdc_get_caps(ddev);
+ if (ret) {
+ DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
+ ldev->caps.hw_version);
+ goto err;
+ }
+
+ DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
+
+ for (i = 0; i < ldev->caps.nb_irq; i++) {
irq = platform_get_irq(pdev, i);
- if (irq == -EPROBE_DEFER)
+ if (irq < 0) {
+ ret = irq;
goto err;
-
- if (irq < 0)
- continue;
+ }
ret = devm_request_threaded_irq(dev, irq, ltdc_irq,
ltdc_irq_thread, IRQF_ONESHOT,
@@ -1268,16 +1287,6 @@ int ltdc_load(struct drm_device *ddev)
}
}
-
- ret = ltdc_get_caps(ddev);
- if (ret) {
- DRM_ERROR("hardware identifier (0x%08x) not supported!\n",
- ldev->caps.hw_version);
- goto err;
- }
-
- DRM_DEBUG_DRIVER("ltdc hw version 0x%08x\n", ldev->caps.hw_version);
-
/* Add endpoints panels or bridges if any */
for (i = 0; i < MAX_ENDPOINTS; i++) {
if (panel[i]) {
diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h
index a1ad0ae3b006..f153b908c70e 100644
--- a/drivers/gpu/drm/stm/ltdc.h
+++ b/drivers/gpu/drm/stm/ltdc.h
@@ -19,6 +19,7 @@ struct ltdc_caps {
const u32 *pix_fmt_hw; /* supported pixel formats */
bool non_alpha_only_l1; /* non-native no-alpha formats on layer 1 */
int pad_max_freq_hz; /* max frequency supported by pad */
+ int nb_irq; /* number of hardware interrupts */
};
#define LTDC_MAX_LAYER 4
@@ -39,11 +40,6 @@ struct ltdc_device {
struct drm_atomic_state *suspend_state;
};
-bool ltdc_crtc_scanoutpos(struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
int ltdc_load(struct drm_device *ddev);
void ltdc_unload(struct drm_device *ddev);
void ltdc_suspend(struct drm_device *ddev);
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index 5ae67d526b1d..328272ff77d8 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -85,7 +85,6 @@ static int sun4i_drv_bind(struct device *dev)
}
drm_mode_config_init(drm);
- drm->mode_config.allow_fb_modifiers = true;
ret = component_bind_all(drm->dev, drm);
if (ret) {
diff --git a/drivers/gpu/drm/sun4i/sun4i_lvds.c b/drivers/gpu/drm/sun4i/sun4i_lvds.c
index 65b7a8739666..26e5c7ceb8ff 100644
--- a/drivers/gpu/drm/sun4i/sun4i_lvds.c
+++ b/drivers/gpu/drm/sun4i/sun4i_lvds.c
@@ -156,7 +156,7 @@ int sun4i_lvds_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(encoder, bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_rgb.c b/drivers/gpu/drm/sun4i/sun4i_rgb.c
index b27f16af50f5..3b23d5be3cf3 100644
--- a/drivers/gpu/drm/sun4i/sun4i_rgb.c
+++ b/drivers/gpu/drm/sun4i/sun4i_rgb.c
@@ -253,7 +253,7 @@ int sun4i_rgb_init(struct drm_device *drm, struct sun4i_tcon *tcon)
}
if (rgb->bridge) {
- ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL, 0);
if (ret) {
dev_err(drm->dev, "Couldn't attach our bridge\n");
goto err_cleanup_connector;
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index c81cdce6ed55..624437b27cdc 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -114,46 +114,71 @@ static void sun4i_tcon_channel_set_status(struct sun4i_tcon *tcon, int channel,
}
}
+static void sun4i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_CK_EN |
+ SUN4I_TCON0_LVDS_ANA0_REG_V |
+ SUN4I_TCON0_LVDS_ANA0_REG_C |
+ SUN4I_TCON0_LVDS_ANA0_EN_MB |
+ SUN4I_TCON0_LVDS_ANA0_PD |
+ SUN4I_TCON0_LVDS_ANA0_DCHS);
+
+ udelay(2); /* delay at least 1200 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_INIT,
+ SUN4I_TCON0_LVDS_ANA1_INIT);
+ udelay(1); /* delay at least 120 ns */
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA1_REG,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE,
+ SUN4I_TCON0_LVDS_ANA1_UPDATE);
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB,
+ SUN4I_TCON0_LVDS_ANA0_EN_MB);
+}
+
+static void sun6i_tcon_setup_lvds_phy(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ u8 val;
+
+ regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_C(2) |
+ SUN6I_TCON0_LVDS_ANA0_V(3) |
+ SUN6I_TCON0_LVDS_ANA0_PD(2) |
+ SUN6I_TCON0_LVDS_ANA0_EN_LDO);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB,
+ SUN6I_TCON0_LVDS_ANA0_EN_MB);
+ udelay(2);
+
+ regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
+
+ if (sun4i_tcon_get_pixel_depth(encoder) == 18)
+ val = 7;
+ else
+ val = 0xf;
+
+ regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
+ SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
+}
+
static void sun4i_tcon_lvds_set_status(struct sun4i_tcon *tcon,
const struct drm_encoder *encoder,
bool enabled)
{
if (enabled) {
- u8 val;
-
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN,
SUN4I_TCON0_LVDS_IF_EN);
-
- /*
- * As their name suggest, these values only apply to the A31
- * and later SoCs. We'll have to rework this when merging
- * support for the older SoCs.
- */
- regmap_write(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_C(2) |
- SUN6I_TCON0_LVDS_ANA0_V(3) |
- SUN6I_TCON0_LVDS_ANA0_PD(2) |
- SUN6I_TCON0_LVDS_ANA0_EN_LDO);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_MB,
- SUN6I_TCON0_LVDS_ANA0_EN_MB);
- udelay(2);
-
- regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVC);
-
- if (sun4i_tcon_get_pixel_depth(encoder) == 18)
- val = 7;
- else
- val = 0xf;
-
- regmap_write_bits(tcon->regs, SUN4I_TCON0_LVDS_ANA0_REG,
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(0xf),
- SUN6I_TCON0_LVDS_ANA0_EN_DRVD(val));
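+ /* Run the SoC-specific LVDS PHY bring-up, if this TCON variant has one. */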
+ if (tcon->quirks->setup_lvds_phy)
+ tcon->quirks->setup_lvds_phy(tcon, encoder);
} else {
regmap_update_bits(tcon->regs, SUN4I_TCON0_LVDS_IF_REG,
SUN4I_TCON0_LVDS_IF_EN, 0);
@@ -1453,6 +1478,16 @@ static const struct sun4i_tcon_quirks sun6i_a31s_quirks = {
.dclk_min_div = 1,
};
+static const struct sun4i_tcon_quirks sun7i_a20_tcon0_quirks = {
+ .supports_lvds = true,
+ .has_channel_0 = true,
+ .has_channel_1 = true,
+ .dclk_min_div = 4,
+ /* Same display pipeline structure as A10 */
+ .set_mux = sun4i_a10_tcon_set_mux,
+ .setup_lvds_phy = sun4i_tcon_setup_lvds_phy,
+};
+
static const struct sun4i_tcon_quirks sun7i_a20_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
@@ -1465,12 +1500,15 @@ static const struct sun4i_tcon_quirks sun8i_a33_quirks = {
.has_channel_0 = true,
.has_lvds_alt = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
+ .supports_lvds = true,
};
static const struct sun4i_tcon_quirks sun8i_a83t_lcd_quirks = {
.supports_lvds = true,
.has_channel_0 = true,
.dclk_min_div = 1,
+ .setup_lvds_phy = sun6i_tcon_setup_lvds_phy,
};
static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
@@ -1505,6 +1543,8 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-tcon", .data = &sun6i_a31_quirks },
{ .compatible = "allwinner,sun6i-a31s-tcon", .data = &sun6i_a31s_quirks },
{ .compatible = "allwinner,sun7i-a20-tcon", .data = &sun7i_a20_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon0", .data = &sun7i_a20_tcon0_quirks },
+ { .compatible = "allwinner,sun7i-a20-tcon1", .data = &sun7i_a20_quirks },
{ .compatible = "allwinner,sun8i-a23-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.h b/drivers/gpu/drm/sun4i/sun4i_tcon.h
index a62ec826ae71..cfbf4e6c1679 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.h
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.h
@@ -193,6 +193,13 @@
#define SUN4I_TCON_MUX_CTRL_REG 0x200
#define SUN4I_TCON0_LVDS_ANA0_REG 0x220
+#define SUN4I_TCON0_LVDS_ANA0_DCHS BIT(16)
+#define SUN4I_TCON0_LVDS_ANA0_PD (BIT(20) | BIT(21))
+#define SUN4I_TCON0_LVDS_ANA0_EN_MB BIT(22)
+#define SUN4I_TCON0_LVDS_ANA0_REG_C (BIT(24) | BIT(25))
+#define SUN4I_TCON0_LVDS_ANA0_REG_V (BIT(26) | BIT(27))
+#define SUN4I_TCON0_LVDS_ANA0_CK_EN (BIT(29) | BIT(28))
+
#define SUN6I_TCON0_LVDS_ANA0_EN_MB BIT(31)
#define SUN6I_TCON0_LVDS_ANA0_EN_LDO BIT(30)
#define SUN6I_TCON0_LVDS_ANA0_EN_DRVC BIT(24)
@@ -201,6 +208,10 @@
#define SUN6I_TCON0_LVDS_ANA0_V(x) (((x) & 3) << 8)
#define SUN6I_TCON0_LVDS_ANA0_PD(x) (((x) & 3) << 4)
+#define SUN4I_TCON0_LVDS_ANA1_REG 0x224
+#define SUN4I_TCON0_LVDS_ANA1_INIT (0x1f << 26 | 0x1f << 10)
+#define SUN4I_TCON0_LVDS_ANA1_UPDATE (0x1f << 16 | 0x1f << 00)
+
#define SUN4I_TCON1_FILL_CTL_REG 0x300
#define SUN4I_TCON1_FILL_BEG0_REG 0x304
#define SUN4I_TCON1_FILL_END0_REG 0x308
@@ -228,6 +239,9 @@ struct sun4i_tcon_quirks {
/* callback to handle tcon muxing options */
int (*set_mux)(struct sun4i_tcon *, const struct drm_encoder *);
+ /* handler for LVDS setup routine */
+ void (*setup_lvds_phy)(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder);
};
struct sun4i_tcon {
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
index a75fcb113172..059939789730 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.c
@@ -14,7 +14,6 @@
#include <linux/phy/phy-mipi-dphy.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
@@ -27,7 +26,6 @@
#include <drm/drm_probe_helper.h>
#include "sun4i_crtc.h"
-#include "sun4i_drv.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
@@ -722,10 +720,31 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
union phy_configure_opts opts = { 0 };
struct phy_configure_opts_mipi_dphy *cfg = &opts.mipi_dphy;
u16 delay;
+ int err;
DRM_DEBUG_DRIVER("Enabling DSI output\n");
- pm_runtime_get_sync(dsi->dev);
+ err = regulator_enable(dsi->regulator);
+ if (err)
+ dev_warn(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
+
+ reset_control_deassert(dsi->reset);
+ clk_prepare_enable(dsi->mod_clk);
+
+ /*
+ * Enable the DSI block.
+ */
+ regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
+ SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
+
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
+ regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
+
+ sun6i_dsi_inst_init(dsi, dsi->device);
+
+ regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
delay = sun6i_dsi_get_video_start_delay(dsi, mode);
regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL1_REG,
@@ -749,7 +768,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
phy_configure(dsi->dphy, &opts);
phy_power_on(dsi->dphy);
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_prepare(dsi->panel);
/*
@@ -764,7 +783,7 @@ static void sun6i_dsi_encoder_enable(struct drm_encoder *encoder)
* ordering on the panels I've tested it with, so I guess this
* will do for now, until that IP is better understood.
*/
- if (!IS_ERR(dsi->panel))
+ if (dsi->panel)
drm_panel_enable(dsi->panel);
sun6i_dsi_start(dsi, DSI_START_HSC);
@@ -780,7 +799,7 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
DRM_DEBUG_DRIVER("Disabling DSI output\n");
- if (!IS_ERR(dsi->panel)) {
+ if (dsi->panel) {
drm_panel_disable(dsi->panel);
drm_panel_unprepare(dsi->panel);
}
@@ -788,7 +807,9 @@ static void sun6i_dsi_encoder_disable(struct drm_encoder *encoder)
phy_power_off(dsi->dphy);
phy_exit(dsi->dphy);
- pm_runtime_put(dsi->dev);
+ clk_disable_unprepare(dsi->mod_clk);
+ reset_control_assert(dsi->reset);
+ regulator_disable(dsi->regulator);
}
static int sun6i_dsi_get_modes(struct drm_connector *connector)
@@ -805,7 +826,10 @@ static struct drm_connector_helper_funcs sun6i_dsi_connector_helper_funcs = {
static enum drm_connector_status
sun6i_dsi_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_connected;
+ struct sun6i_dsi *dsi = connector_to_sun6i_dsi(connector);
+
+ return dsi->panel ? connector_status_connected :
+ connector_status_disconnected;
}
static const struct drm_connector_funcs sun6i_dsi_connector_funcs = {
@@ -942,11 +966,18 @@ static int sun6i_dsi_attach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = of_drm_find_panel(device->dev.of_node);
+ if (IS_ERR(panel))
+ return PTR_ERR(panel);
+ if (!dsi->drm || !dsi->drm->registered)
+ return -EPROBE_DEFER;
+
+ dsi->panel = panel;
dsi->device = device;
- dsi->panel = of_drm_find_panel(device->dev.of_node);
- if (IS_ERR(dsi->panel))
- return PTR_ERR(dsi->panel);
+
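+ /* Bind the panel to our connector and tell userspace about the hotplug. */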
+ drm_panel_attach(dsi->panel, &dsi->connector);
+ drm_kms_helper_hotplug_event(dsi->drm);
dev_info(host->dev, "Attached device %s\n", device->name);
@@ -957,10 +988,14 @@ static int sun6i_dsi_detach(struct mipi_dsi_host *host,
struct mipi_dsi_device *device)
{
struct sun6i_dsi *dsi = host_to_sun6i_dsi(host);
+ struct drm_panel *panel = dsi->panel;
dsi->panel = NULL;
dsi->device = NULL;
+ drm_panel_detach(panel);
+ drm_kms_helper_hotplug_event(dsi->drm);
+
return 0;
}
@@ -1022,15 +1057,9 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
void *data)
{
struct drm_device *drm = data;
- struct sun4i_drv *drv = drm->dev_private;
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
int ret;
- if (!dsi->panel)
- return -EPROBE_DEFER;
-
- dsi->drv = drv;
-
drm_encoder_helper_add(&dsi->encoder,
&sun6i_dsi_enc_helper_funcs);
ret = drm_encoder_init(drm,
@@ -1056,7 +1085,8 @@ static int sun6i_dsi_bind(struct device *dev, struct device *master,
}
drm_connector_attach_encoder(&dsi->connector, &dsi->encoder);
- drm_panel_attach(dsi->panel, &dsi->connector);
+
+ dsi->drm = drm;
return 0;
@@ -1070,7 +1100,7 @@ static void sun6i_dsi_unbind(struct device *dev, struct device *master,
{
struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- drm_panel_detach(dsi->panel);
+ dsi->drm = NULL;
}
static const struct component_ops sun6i_dsi_ops = {
@@ -1157,12 +1187,10 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
goto err_unprotect_clk;
}
- pm_runtime_enable(dev);
-
ret = mipi_dsi_host_register(&dsi->host);
if (ret) {
dev_err(dev, "Couldn't register MIPI-DSI host\n");
- goto err_pm_disable;
+ goto err_unprotect_clk;
}
ret = component_add(&pdev->dev, &sun6i_dsi_ops);
@@ -1175,8 +1203,6 @@ static int sun6i_dsi_probe(struct platform_device *pdev)
err_remove_dsi_host:
mipi_dsi_host_unregister(&dsi->host);
-err_pm_disable:
- pm_runtime_disable(dev);
err_unprotect_clk:
clk_rate_exclusive_put(dsi->mod_clk);
err_attach_clk:
@@ -1192,7 +1218,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
component_del(&pdev->dev, &sun6i_dsi_ops);
mipi_dsi_host_unregister(&dsi->host);
- pm_runtime_disable(dev);
clk_rate_exclusive_put(dsi->mod_clk);
if (!IS_ERR(dsi->bus_clk))
@@ -1201,59 +1226,6 @@ static int sun6i_dsi_remove(struct platform_device *pdev)
return 0;
}
-static int __maybe_unused sun6i_dsi_runtime_resume(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
- int err;
-
- err = regulator_enable(dsi->regulator);
- if (err) {
- dev_err(dsi->dev, "failed to enable VCC-DSI supply: %d\n", err);
- return err;
- }
-
- reset_control_deassert(dsi->reset);
- clk_prepare_enable(dsi->mod_clk);
-
- /*
- * Enable the DSI block.
- *
- * Some part of it can only be done once we get a number of
- * lanes, see sun6i_dsi_inst_init
- */
- regmap_write(dsi->regs, SUN6I_DSI_CTL_REG, SUN6I_DSI_CTL_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_BASIC_CTL0_REG,
- SUN6I_DSI_BASIC_CTL0_ECC_EN | SUN6I_DSI_BASIC_CTL0_CRC_EN);
-
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_START_REG, 10);
- regmap_write(dsi->regs, SUN6I_DSI_TRANS_ZERO_REG, 0);
-
- if (dsi->device)
- sun6i_dsi_inst_init(dsi, dsi->device);
-
- regmap_write(dsi->regs, SUN6I_DSI_DEBUG_DATA_REG, 0xff);
-
- return 0;
-}
-
-static int __maybe_unused sun6i_dsi_runtime_suspend(struct device *dev)
-{
- struct sun6i_dsi *dsi = dev_get_drvdata(dev);
-
- clk_disable_unprepare(dsi->mod_clk);
- reset_control_assert(dsi->reset);
- regulator_disable(dsi->regulator);
-
- return 0;
-}
-
-static const struct dev_pm_ops sun6i_dsi_pm_ops = {
- SET_RUNTIME_PM_OPS(sun6i_dsi_runtime_suspend,
- sun6i_dsi_runtime_resume,
- NULL)
-};
-
static const struct of_device_id sun6i_dsi_of_table[] = {
{ .compatible = "allwinner,sun6i-a31-mipi-dsi" },
{ .compatible = "allwinner,sun50i-a64-mipi-dsi" },
@@ -1267,7 +1239,6 @@ static struct platform_driver sun6i_dsi_platform_driver = {
.driver = {
.name = "sun6i-mipi-dsi",
.of_match_table = sun6i_dsi_of_table,
- .pm = &sun6i_dsi_pm_ops,
},
};
module_platform_driver(sun6i_dsi_platform_driver);
diff --git a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
index 3f4846f581ef..c863900ae3b4 100644
--- a/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
+++ b/drivers/gpu/drm/sun4i/sun6i_mipi_dsi.h
@@ -28,8 +28,8 @@ struct sun6i_dsi {
struct phy *dphy;
struct device *dev;
- struct sun4i_drv *drv;
struct mipi_dsi_device *device;
+ struct drm_device *drm;
struct drm_panel *panel;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index 7c24f8f832a5..4a64f7ae437a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -107,48 +107,128 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XRGB4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ABGR4444,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XBGR4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_RGBA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_RGBX4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_BGRA4444,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_BGRX4444,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA4444,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ARGB1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XRGB1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_ABGR1555,
.de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_XBGR1555,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR1555,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_RGBA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_RGBX5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_BGRA5551,
.de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
.rgb = true,
.csc = SUN8I_CSC_MODE_OFF,
},
{
+ /* for DE2 VI layer which ignores alpha */
+ .drm_fmt = DRM_FORMAT_BGRX5551,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA5551,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ARGB2101010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ARGB2101010,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_ABGR2101010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_ABGR2101010,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_RGBA1010102,
+ .de2_fmt = SUN8I_MIXER_FBFMT_RGBA1010102,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_BGRA1010102,
+ .de2_fmt = SUN8I_MIXER_FBFMT_BGRA1010102,
+ .rgb = true,
+ .csc = SUN8I_CSC_MODE_OFF,
+ },
+ {
.drm_fmt = DRM_FORMAT_UYVY,
.de2_fmt = SUN8I_MIXER_FBFMT_UYVY,
.rgb = false,
@@ -197,12 +277,6 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
- .drm_fmt = DRM_FORMAT_YUV444,
- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_YUV2RGB,
- },
- {
.drm_fmt = DRM_FORMAT_YUV422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
.rgb = false,
@@ -221,12 +295,6 @@ static const struct de2_fmt_info de2_formats[] = {
.csc = SUN8I_CSC_MODE_YUV2RGB,
},
{
- .drm_fmt = DRM_FORMAT_YVU444,
- .de2_fmt = SUN8I_MIXER_FBFMT_RGB888,
- .rgb = true,
- .csc = SUN8I_CSC_MODE_YVU2RGB,
- },
- {
.drm_fmt = DRM_FORMAT_YVU422,
.de2_fmt = SUN8I_MIXER_FBFMT_YUV422,
.rgb = false,
@@ -244,6 +312,18 @@ static const struct de2_fmt_info de2_formats[] = {
.rgb = false,
.csc = SUN8I_CSC_MODE_YVU2RGB,
},
+ {
+ .drm_fmt = DRM_FORMAT_P010,
+ .de2_fmt = SUN8I_MIXER_FBFMT_P010_YUV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
+ {
+ .drm_fmt = DRM_FORMAT_P210,
+ .de2_fmt = SUN8I_MIXER_FBFMT_P210_YUV,
+ .rgb = false,
+ .csc = SUN8I_CSC_MODE_YUV2RGB,
+ },
};
const struct de2_fmt_info *sun8i_mixer_format_info(u32 format)
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.h b/drivers/gpu/drm/sun4i/sun8i_mixer.h
index c6cc94057faf..345b28b0a80a 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.h
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.h
@@ -93,6 +93,10 @@
#define SUN8I_MIXER_FBFMT_ABGR1555 17
#define SUN8I_MIXER_FBFMT_RGBA5551 18
#define SUN8I_MIXER_FBFMT_BGRA5551 19
+#define SUN8I_MIXER_FBFMT_ARGB2101010 20
+#define SUN8I_MIXER_FBFMT_ABGR2101010 21
+#define SUN8I_MIXER_FBFMT_RGBA1010102 22
+#define SUN8I_MIXER_FBFMT_BGRA1010102 23
#define SUN8I_MIXER_FBFMT_YUYV 0
#define SUN8I_MIXER_FBFMT_UYVY 1
@@ -109,6 +113,13 @@
/* format 12 is semi-planar YUV411 UVUV */
/* format 13 is semi-planar YUV411 VUVU */
#define SUN8I_MIXER_FBFMT_YUV411 14
+/* format 15 doesn't exist */
+/* format 16 is P010 YVU */
+#define SUN8I_MIXER_FBFMT_P010_YUV 17
+/* format 18 is P210 YVU */
+#define SUN8I_MIXER_FBFMT_P210_YUV 19
+/* format 20 is packed YVU444 10-bit */
+/* format 21 is packed YUV444 10-bit */
/*
* Sub-engines listed below are unused for now. The EN registers are here only
diff --git a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
index 42d445d23773..b8398ca18b0f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_vi_layer.c
@@ -398,24 +398,66 @@ static const struct drm_plane_funcs sun8i_vi_layer_funcs = {
};
/*
- * While all RGB formats are supported, VI planes don't support
- * alpha blending, so there is no point having formats with alpha
- * channel if their opaque analog exist.
+ * While the DE2 VI layer supports the same RGB formats as the UI layer,
+ * the alpha channel is ignored. This list contains all unique variants
+ * where the alpha channel is replaced with a "don't care" (X) channel.
*/
static const u32 sun8i_vi_layer_formats[] = {
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRX4444,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XRGB8888,
+
+ DRM_FORMAT_NV16,
+ DRM_FORMAT_NV12,
+ DRM_FORMAT_NV21,
+ DRM_FORMAT_NV61,
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_YUV411,
+ DRM_FORMAT_YUV420,
+ DRM_FORMAT_YUV422,
+ DRM_FORMAT_YVU411,
+ DRM_FORMAT_YVU420,
+ DRM_FORMAT_YVU422,
+};
+
+static const u32 sun8i_vi_layer_de3_formats[] = {
DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_ABGR2101010,
DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_ABGR8888,
DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ARGB2101010,
DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ARGB8888,
DRM_FORMAT_BGR565,
DRM_FORMAT_BGR888,
+ DRM_FORMAT_BGRA1010102,
DRM_FORMAT_BGRA5551,
DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_BGRA8888,
DRM_FORMAT_BGRX8888,
DRM_FORMAT_RGB565,
DRM_FORMAT_RGB888,
+ DRM_FORMAT_RGBA1010102,
DRM_FORMAT_RGBA4444,
DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_RGBA8888,
DRM_FORMAT_RGBX8888,
DRM_FORMAT_XBGR8888,
DRM_FORMAT_XRGB8888,
@@ -424,6 +466,8 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_NV12,
DRM_FORMAT_NV21,
DRM_FORMAT_NV61,
+ DRM_FORMAT_P010,
+ DRM_FORMAT_P210,
DRM_FORMAT_UYVY,
DRM_FORMAT_VYUY,
DRM_FORMAT_YUYV,
@@ -431,11 +475,9 @@ static const u32 sun8i_vi_layer_formats[] = {
DRM_FORMAT_YUV411,
DRM_FORMAT_YUV420,
DRM_FORMAT_YUV422,
- DRM_FORMAT_YUV444,
DRM_FORMAT_YVU411,
DRM_FORMAT_YVU420,
DRM_FORMAT_YVU422,
- DRM_FORMAT_YVU444,
};
struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
@@ -443,19 +485,27 @@ struct sun8i_vi_layer *sun8i_vi_layer_init_one(struct drm_device *drm,
int index)
{
u32 supported_encodings, supported_ranges;
+ unsigned int plane_cnt, format_count;
struct sun8i_vi_layer *layer;
- unsigned int plane_cnt;
+ const u32 *formats;
int ret;
layer = devm_kzalloc(drm->dev, sizeof(*layer), GFP_KERNEL);
if (!layer)
return ERR_PTR(-ENOMEM);
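+ /* DE3 VI layers also take alpha and 10-bit formats, hence the wider list. */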
+ if (mixer->cfg->is_de3) {
+ formats = sun8i_vi_layer_de3_formats;
+ format_count = ARRAY_SIZE(sun8i_vi_layer_de3_formats);
+ } else {
+ formats = sun8i_vi_layer_formats;
+ format_count = ARRAY_SIZE(sun8i_vi_layer_formats);
+ }
+
/* possible crtcs are set later */
ret = drm_universal_plane_init(drm, &layer->plane, 0,
&sun8i_vi_layer_funcs,
- sun8i_vi_layer_formats,
- ARRAY_SIZE(sun8i_vi_layer_formats),
+ formats, format_count,
NULL, DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
dev_err(drm->dev, "Couldn't initialize layer\n");
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 7c70fd31a4c2..1a7b08f35776 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2503,7 +2503,6 @@ static int tegra_dc_couple(struct tegra_dc *dc)
static int tegra_dc_probe(struct platform_device *pdev)
{
- struct resource *regs;
struct tegra_dc *dc;
int err;
@@ -2560,8 +2559,7 @@ static int tegra_dc_probe(struct platform_device *pdev)
tegra_powergate_power_off(dc->powergate);
}
- regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- dc->regs = devm_ioremap_resource(&pdev->dev, regs);
+ dc->regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(dc->regs))
return PTR_ERR(dc->regs);
@@ -2573,7 +2571,13 @@ static int tegra_dc_probe(struct platform_device *pdev)
err = tegra_dc_rgb_probe(dc);
if (err < 0 && err != -ENODEV) {
- dev_err(&pdev->dev, "failed to probe RGB output: %d\n", err);
+ const char *level = KERN_ERR;
+
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, dc->dev, "failed to probe RGB output: %d\n",
+ err);
return err;
}
@@ -2588,10 +2592,16 @@ static int tegra_dc_probe(struct platform_device *pdev)
if (err < 0) {
dev_err(&pdev->dev, "failed to register host1x client: %d\n",
err);
- return err;
+ goto disable_pm;
}
return 0;
+
+disable_pm:
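+ /* Unwind what probe set up before host1x registration failed. */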
+ pm_runtime_disable(&pdev->dev);
+ tegra_dc_rgb_remove(dc);
+
+ return err;
}
static int tegra_dc_remove(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/tegra/fb.c b/drivers/gpu/drm/tegra/fb.c
index 84f0e01e3428..b8a328f53862 100644
--- a/drivers/gpu/drm/tegra/fb.c
+++ b/drivers/gpu/drm/tegra/fb.c
@@ -314,19 +314,13 @@ static int tegra_fbdev_init(struct tegra_fbdev *fbdev,
struct drm_device *drm = fbdev->base.dev;
int err;
- err = drm_fb_helper_init(drm, &fbdev->base, max_connectors);
+ err = drm_fb_helper_init(drm, &fbdev->base);
if (err < 0) {
dev_err(drm->dev, "failed to initialize DRM FB helper: %d\n",
err);
return err;
}
- err = drm_fb_helper_single_add_all_connectors(&fbdev->base);
- if (err < 0) {
- dev_err(drm->dev, "failed to add connectors: %d\n", err);
- goto fini;
- }
-
err = drm_fb_helper_initial_config(&fbdev->base, preferred_bpp);
if (err < 0) {
dev_err(drm->dev, "failed to set initial configuration: %d\n",
diff --git a/drivers/gpu/drm/tegra/hdmi.c b/drivers/gpu/drm/tegra/hdmi.c
index 6f117628f257..38252c0f068d 100644
--- a/drivers/gpu/drm/tegra/hdmi.c
+++ b/drivers/gpu/drm/tegra/hdmi.c
@@ -1648,6 +1648,7 @@ static irqreturn_t tegra_hdmi_irq(int irq, void *data)
static int tegra_hdmi_probe(struct platform_device *pdev)
{
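+ /* Demoted to KERN_DEBUG on probe deferral, which is expected and retried. */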
+ const char *level = KERN_ERR;
struct tegra_hdmi *hdmi;
struct resource *regs;
int err;
@@ -1686,21 +1687,36 @@ static int tegra_hdmi_probe(struct platform_device *pdev)
}
hdmi->hdmi = devm_regulator_get(&pdev->dev, "hdmi");
- if (IS_ERR(hdmi->hdmi)) {
- dev_err(&pdev->dev, "failed to get HDMI regulator\n");
- return PTR_ERR(hdmi->hdmi);
+ err = PTR_ERR_OR_ZERO(hdmi->hdmi);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get HDMI regulator: %d\n", err);
+ return err;
}
hdmi->pll = devm_regulator_get(&pdev->dev, "pll");
- if (IS_ERR(hdmi->pll)) {
- dev_err(&pdev->dev, "failed to get PLL regulator\n");
- return PTR_ERR(hdmi->pll);
+ err = PTR_ERR_OR_ZERO(hdmi->pll);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get PLL regulator: %d\n", err);
+ return err;
}
hdmi->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(hdmi->vdd)) {
- dev_err(&pdev->dev, "failed to get VDD regulator\n");
- return PTR_ERR(hdmi->vdd);
+ err = PTR_ERR_OR_ZERO(hdmi->vdd);
+ if (err) {
+ if (err == -EPROBE_DEFER)
+ level = KERN_DEBUG;
+
+ dev_printk(level, &pdev->dev,
+ "failed to get VDD regulator: %d\n", err);
+ return err;
}
hdmi->output.dev = &pdev->dev;
diff --git a/drivers/gpu/drm/tidss/Kconfig b/drivers/gpu/drm/tidss/Kconfig
new file mode 100644
index 000000000000..f790a5215302
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Kconfig
@@ -0,0 +1,14 @@
+config DRM_TIDSS
+ tristate "DRM Support for TI Keystone"
+ depends on DRM && OF
+ depends on ARM || ARM64 || COMPILE_TEST
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_GEM_CMA_HELPER
+ help
+ The TI Keystone family of SoCs introduced a new generation of
+ Display SubSystem. There are currently three Keystone family
+ SoCs released with DSS, each with a somewhat different version
+ of it: 66AK2Gx, AM65x, and J721E. Set this to Y or M to add
+ display support for TI Keystone family platforms.
diff --git a/drivers/gpu/drm/tidss/Makefile b/drivers/gpu/drm/tidss/Makefile
new file mode 100644
index 000000000000..312645271014
--- /dev/null
+++ b/drivers/gpu/drm/tidss/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+
+tidss-y := tidss_crtc.o \
+ tidss_drv.o \
+ tidss_encoder.o \
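+ /* caps.nb_irq is only known after ltdc_get_caps(), hence the reordering. */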
+ tidss_kms.o \
+ tidss_irq.o \
+ tidss_plane.o \
+ tidss_scale_coefs.o \
+ tidss_dispc.o
+
+obj-$(CONFIG_DRM_TIDSS) += tidss.o
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.c b/drivers/gpu/drm/tidss/tidss_crtc.c
new file mode 100644
index 000000000000..d4ce9bab8c7e
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.c
@@ -0,0 +1,432 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* Page flip and frame done IRQs */
+
+static void tidss_crtc_finish_page_flip(struct tidss_crtc *tcrtc)
+{
+ struct drm_device *ddev = tcrtc->crtc.dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct drm_pending_vblank_event *event;
+ unsigned long flags;
+ bool busy;
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ /*
+ * New settings are taken into use at VFP, and the GO bit is cleared at
+ * the same time. This happens before the vertical blank interrupt.
+ * So there is a small chance that the driver sets the GO bit after VFP,
+ * but before vblank, and we have to check for that case here.
+ */
+ busy = dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport);
+ if (busy) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ event = tcrtc->event;
+ tcrtc->event = NULL;
+
+ if (!event) {
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+ return;
+ }
+
+ drm_crtc_send_vblank_event(&tcrtc->crtc, event);
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_put(&tcrtc->crtc);
+}
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ drm_crtc_handle_vblank(crtc);
+
+ tidss_crtc_finish_page_flip(tcrtc);
+}
+
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ complete(&tcrtc->framedone_completion);
+}
+
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+
+ dev_err_ratelimited(crtc->dev->dev, "CRTC%u SYNC LOST: (irq %llx)\n",
+ tcrtc->hw_videoport, irqstatus);
+}
+
+/* drm_crtc_helper_funcs */
+
+static int tidss_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct dispc_device *dispc = tidss->dispc;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ const struct drm_display_mode *mode;
+ enum drm_mode_status ok;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->enable)
+ return 0;
+
+ mode = &state->adjusted_mode;
+
+ ok = dispc_vp_mode_valid(dispc, hw_videoport, mode);
+ if (ok != MODE_OK) {
+ dev_dbg(ddev->dev, "%s: bad mode: %ux%u pclk %u kHz\n",
+ __func__, mode->hdisplay, mode->vdisplay, mode->clock);
+ return -EINVAL;
+ }
+
+ return dispc_vp_bus_check(dispc, hw_videoport, state);
+}
+
+/*
+ * This needs all affected planes to be present in the atomic
+ * state. The untouched planes are added to the state in
+ * tidss_atomic_check().
+ */
+static void tidss_crtc_position_planes(struct tidss_device *tidss,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state,
+ bool newmodeset)
+{
+ struct drm_atomic_state *ostate = old_state->state;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_crtc_state *cstate = crtc->state;
+ int layer;
+
+ if (!newmodeset && !cstate->zpos_changed &&
+ !to_tidss_crtc_state(cstate)->plane_pos_changed)
+ return;
+
+ for (layer = 0; layer < tidss->feat->num_planes; layer++) {
+ struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ bool layer_active = false;
+ int i;
+
+ for_each_new_plane_in_state(ostate, plane, pstate, i) {
+ if (pstate->crtc != crtc || !pstate->visible)
+ continue;
+
+ if (pstate->normalized_zpos == layer) {
+ layer_active = true;
+ break;
+ }
+ }
+
+ if (layer_active) {
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dispc_ovr_set_plane(tidss->dispc, tplane->hw_plane_id,
+ tcrtc->hw_videoport,
+ pstate->crtc_x, pstate->crtc_y,
+ layer);
+ }
+ dispc_ovr_enable_layer(tidss->dispc, tcrtc->hw_videoport, layer,
+ layer_active);
+ }
+}
+
+static void tidss_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev,
+ "%s: %s enabled %d, needs modeset %d, event %p\n", __func__,
+ crtc->name, crtc->state->enable,
+ drm_atomic_crtc_needs_modeset(crtc->state), crtc->state->event);
+
+ /* There is nothing to do if CRTC is not going to be enabled. */
+ if (!crtc->state->enable)
+ return;
+
+ /*
+ * Flush CRTC changes with the GO bit only if a new modeset is not
+ * coming, so the CRTC stays enabled throughout the commit.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc->state))
+ return;
+
+ /* If the GO bit is stuck we better quit here. */
+ if (WARN_ON(dispc_vp_go_busy(tidss->dispc, tcrtc->hw_videoport)))
+ return;
+
+ /* We should have an event if the CRTC is enabled throughout this commit. */
+ if (WARN_ON(!crtc->state->event))
+ return;
+
+ /* Write vp properties to HW if needed. */
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, false);
+
+ /* Update plane positions if needed. */
+ tidss_crtc_position_planes(tidss, crtc, old_crtc_state, false);
+
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ dispc_vp_go(tidss->dispc, tcrtc->hw_videoport);
+
+ WARN_ON(tcrtc->event);
+
+ tcrtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ const struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+ unsigned long flags;
+ int r;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ tidss_runtime_get(tidss);
+
+ r = dispc_vp_set_clk_rate(tidss->dispc, tcrtc->hw_videoport,
+ mode->clock * 1000);
+ if (r != 0)
+ return;
+
+ r = dispc_vp_enable_clk(tidss->dispc, tcrtc->hw_videoport);
+ if (r != 0)
+ return;
+
+ dispc_vp_setup(tidss->dispc, tcrtc->hw_videoport, crtc->state, true);
+ tidss_crtc_position_planes(tidss, crtc, old_state, true);
+
+ /* Turn vertical blanking interrupt reporting on. */
+ drm_crtc_vblank_on(crtc);
+
+ dispc_vp_prepare(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ dispc_vp_enable(tidss->dispc, tcrtc->hw_videoport, crtc->state);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+}
+
+static void tidss_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+
+ dev_dbg(ddev->dev, "%s, event %p\n", __func__, crtc->state->event);
+
+ reinit_completion(&tcrtc->framedone_completion);
+
+ dispc_vp_disable(tidss->dispc, tcrtc->hw_videoport);
+
+ if (!wait_for_completion_timeout(&tcrtc->framedone_completion,
+ msecs_to_jiffies(500)))
+ dev_err(tidss->dev, "Timeout waiting for framedone on crtc %d",
+ tcrtc->hw_videoport);
+
+ dispc_vp_unprepare(tidss->dispc, tcrtc->hw_videoport);
+
+ spin_lock_irqsave(&ddev->event_lock, flags);
+ if (crtc->state->event) {
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ }
+ spin_unlock_irqrestore(&ddev->event_lock, flags);
+
+ drm_crtc_vblank_off(crtc);
+
+ dispc_vp_disable_clk(tidss->dispc, tcrtc->hw_videoport);
+
+ tidss_runtime_put(tidss);
+}
+
+static
+enum drm_mode_status tidss_crtc_mode_valid(struct drm_crtc *crtc,
+ const struct drm_display_mode *mode)
+{
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ return dispc_vp_mode_valid(tidss->dispc, tcrtc->hw_videoport, mode);
+}
+
+static const struct drm_crtc_helper_funcs tidss_crtc_helper_funcs = {
+ .atomic_check = tidss_crtc_atomic_check,
+ .atomic_flush = tidss_crtc_atomic_flush,
+ .atomic_enable = tidss_crtc_atomic_enable,
+ .atomic_disable = tidss_crtc_atomic_disable,
+
+ .mode_valid = tidss_crtc_mode_valid,
+};
+
+/* drm_crtc_funcs */
+
+static int tidss_crtc_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ tidss_irq_enable_vblank(crtc);
+
+ return 0;
+}
+
+static void tidss_crtc_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_irq_disable_vblank(crtc);
+
+ tidss_runtime_put(tidss);
+}
+
+static void tidss_crtc_reset(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *tcrtc;
+
+ if (crtc->state)
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+
+ kfree(crtc->state);
+
+ tcrtc = kzalloc(sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc) {
+ crtc->state = NULL;
+ return;
+ }
+
+ crtc->state = &tcrtc->base;
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *tidss_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct tidss_crtc_state *state, *current_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ current_state = to_tidss_crtc_state(crtc->state);
+
+ state = kmalloc(sizeof(*state), GFP_KERNEL);
+ if (!state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &state->base);
+
+ state->plane_pos_changed = false;
+
+ state->bus_format = current_state->bus_format;
+ state->bus_flags = current_state->bus_flags;
+
+ return &state->base;
+}
+
+static const struct drm_crtc_funcs tidss_crtc_funcs = {
+ .reset = tidss_crtc_reset,
+ .destroy = drm_crtc_cleanup,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = tidss_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .enable_vblank = tidss_crtc_enable_vblank,
+ .disable_vblank = tidss_crtc_disable_vblank,
+};
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary)
+{
+ struct tidss_crtc *tcrtc;
+ struct drm_crtc *crtc;
+ unsigned int gamma_lut_size = 0;
+ bool has_ctm = tidss->feat->vp_feat.color.has_ctm;
+ int ret;
+
+ tcrtc = devm_kzalloc(tidss->dev, sizeof(*tcrtc), GFP_KERNEL);
+ if (!tcrtc)
+ return ERR_PTR(-ENOMEM);
+
+ tcrtc->hw_videoport = hw_videoport;
+ init_completion(&tcrtc->framedone_completion);
+
+ crtc = &tcrtc->crtc;
+
+ ret = drm_crtc_init_with_planes(&tidss->ddev, crtc, primary,
+ NULL, &tidss_crtc_funcs, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_crtc_helper_add(crtc, &tidss_crtc_helper_funcs);
+
+ /*
+ * The dispc gamma functions adapt to whatever size we ask for,
+ * no matter what the HW supports. The X server assumes 256-element
+ * gamma tables, so let's use that.
+ */
+ if (tidss->feat->vp_feat.color.gamma_size)
+ gamma_lut_size = 256;
+
+ drm_crtc_enable_color_mgmt(crtc, 0, has_ctm, gamma_lut_size);
+ if (gamma_lut_size)
+ drm_mode_crtc_set_gamma_size(crtc, gamma_lut_size);
+
+ return tcrtc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_crtc.h b/drivers/gpu/drm/tidss/tidss_crtc.h
new file mode 100644
index 000000000000..09e773666228
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_crtc.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_CRTC_H__
+#define __TIDSS_CRTC_H__
+
+#include <linux/completion.h>
+#include <linux/wait.h>
+
+#include <drm/drm_crtc.h>
+
+#define to_tidss_crtc(c) container_of((c), struct tidss_crtc, crtc)
+
+struct tidss_device;
+
+struct tidss_crtc {
+ struct drm_crtc crtc;
+
+ u32 hw_videoport;
+
+ struct drm_pending_vblank_event *event;
+
+ struct completion framedone_completion;
+};
+
+#define to_tidss_crtc_state(x) container_of(x, struct tidss_crtc_state, base)
+
+struct tidss_crtc_state {
+ /* Must be first. */
+ struct drm_crtc_state base;
+
+ bool plane_pos_changed;
+
+ u32 bus_format;
+ u32 bus_flags;
+};
+
+void tidss_crtc_vblank_irq(struct drm_crtc *crtc);
+void tidss_crtc_framedone_irq(struct drm_crtc *crtc);
+void tidss_crtc_error_irq(struct drm_crtc *crtc, u64 irqstatus);
+
+struct tidss_crtc *tidss_crtc_create(struct tidss_device *tidss,
+ u32 hw_videoport,
+ struct drm_plane *primary);
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c
new file mode 100644
index 000000000000..29f42768e294
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.c
@@ -0,0 +1,2753 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+#include <linux/err.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/mfd/syscon.h>
+#include <linux/of.h>
+#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_panel.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+#include "tidss_dispc_regs.h"
+#include "tidss_scale_coefs.h"
+
+static const u16 tidss_k2g_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x00,
+ [DSS_SYSCONFIG_OFF] = 0x04,
+ [DSS_SYSSTATUS_OFF] = 0x08,
+ [DISPC_IRQ_EOI_OFF] = 0x20,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x24,
+ [DISPC_IRQSTATUS_OFF] = 0x28,
+ [DISPC_IRQENABLE_SET_OFF] = 0x2c,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x30,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x40,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x44,
+
+ [DISPC_DBG_CONTROL_OFF] = 0x4c,
+ [DISPC_DBG_STATUS_OFF] = 0x50,
+
+ [DISPC_CLKGATING_DISABLE_OFF] = 0x54,
+};
+
+const struct dispc_features dispc_k2g_feats = {
+ .min_pclk_khz = 4375,
+
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 150000,
+ },
+
+	/*
+	 * XXX According to the TRM, an RGB input buffer width of up to
+	 * 2560 should work on 3 taps, but in practice it only works up
+	 * to 1280.
+	 */
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 1280,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 2560,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_K2G,
+
+ .common = "common",
+
+ .common_regs = tidss_k2g_common_regs,
+
+ .num_vps = 1,
+ .vp_name = { "vp1" },
+ .ovr_name = { "ovr1" },
+ .vpclk_name = { "vp1" },
+ .vp_bus_type = { DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 1,
+ .vid_name = { "vid1" },
+ .vid_lite = { false },
+ .vid_order = { 0 },
+};
+
+static const u16 tidss_am65x_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x24,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x40,
+ [DISPC_VID_IRQENABLE_OFF] = 0x44,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x58,
+ [DISPC_VP_IRQENABLE_OFF] = 0x70,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x7c,
+
+ [WB_IRQENABLE_OFF] = 0x88,
+ [WB_IRQSTATUS_OFF] = 0x8c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x90,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x94,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0x98,
+ [DSS_CBA_CFG_OFF] = 0x9c,
+ [DISPC_DBG_CONTROL_OFF] = 0xa0,
+ [DISPC_DBG_STATUS_OFF] = 0xa4,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xa8,
+ [DISPC_SECURE_DISABLE_OFF] = 0xac,
+};
+
+const struct dispc_features dispc_am65x_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 165000,
+ [DISPC_VP_OLDI] = 165000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 1280,
+ .in_width_max_3tap_rgb = 2560,
+ .in_width_max_5tap_yuv = 2560,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_AM65X,
+
+ .common = "common",
+ .common_regs = tidss_am65x_common_regs,
+
+ .num_vps = 2,
+ .vp_name = { "vp1", "vp2" },
+ .ovr_name = { "ovr1", "ovr2" },
+ .vpclk_name = { "vp1", "vp2" },
+ .vp_bus_type = { DISPC_VP_OLDI, DISPC_VP_DPI },
+
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 256,
+ .gamma_type = TIDSS_GAMMA_8BIT,
+ },
+ },
+
+ .num_planes = 2,
+ /* note: vid is plane_id 0 and vidl1 is plane_id 1 */
+ .vid_name = { "vid", "vidl1" },
+ .vid_lite = { false, true, },
+ .vid_order = { 1, 0 },
+
+ .errata = {
+ .i2000 = true,
+ },
+};
+
+static const u16 tidss_j721e_common_regs[DISPC_COMMON_REG_TABLE_LEN] = {
+ [DSS_REVISION_OFF] = 0x4,
+ [DSS_SYSCONFIG_OFF] = 0x8,
+ [DSS_SYSSTATUS_OFF] = 0x20,
+ [DISPC_IRQ_EOI_OFF] = 0x80,
+ [DISPC_IRQSTATUS_RAW_OFF] = 0x28,
+ [DISPC_IRQSTATUS_OFF] = 0x2c,
+ [DISPC_IRQENABLE_SET_OFF] = 0x30,
+ [DISPC_IRQENABLE_CLR_OFF] = 0x34,
+ [DISPC_VID_IRQENABLE_OFF] = 0x38,
+ [DISPC_VID_IRQSTATUS_OFF] = 0x48,
+ [DISPC_VP_IRQENABLE_OFF] = 0x58,
+ [DISPC_VP_IRQSTATUS_OFF] = 0x68,
+
+ [WB_IRQENABLE_OFF] = 0x78,
+ [WB_IRQSTATUS_OFF] = 0x7c,
+
+ [DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF] = 0x98,
+ [DISPC_GLOBAL_OUTPUT_ENABLE_OFF] = 0x9c,
+ [DISPC_GLOBAL_BUFFER_OFF] = 0xa0,
+ [DSS_CBA_CFG_OFF] = 0xa4,
+ [DISPC_DBG_CONTROL_OFF] = 0xa8,
+ [DISPC_DBG_STATUS_OFF] = 0xac,
+ [DISPC_CLKGATING_DISABLE_OFF] = 0xb0,
+ [DISPC_SECURE_DISABLE_OFF] = 0x90,
+
+ [FBDC_REVISION_1_OFF] = 0xb8,
+ [FBDC_REVISION_2_OFF] = 0xbc,
+ [FBDC_REVISION_3_OFF] = 0xc0,
+ [FBDC_REVISION_4_OFF] = 0xc4,
+ [FBDC_REVISION_5_OFF] = 0xc8,
+ [FBDC_REVISION_6_OFF] = 0xcc,
+ [FBDC_COMMON_CONTROL_OFF] = 0xd0,
+ [FBDC_CONSTANT_COLOR_0_OFF] = 0xd4,
+ [FBDC_CONSTANT_COLOR_1_OFF] = 0xd8,
+ [DISPC_CONNECTIONS_OFF] = 0xe4,
+ [DISPC_MSS_VP1_OFF] = 0xe8,
+ [DISPC_MSS_VP3_OFF] = 0xec,
+};
+
+const struct dispc_features dispc_j721e_feats = {
+ .max_pclk_khz = {
+ [DISPC_VP_DPI] = 170000,
+ [DISPC_VP_INTERNAL] = 600000,
+ },
+
+ .scaling = {
+ .in_width_max_5tap_rgb = 2048,
+ .in_width_max_3tap_rgb = 4096,
+ .in_width_max_5tap_yuv = 4096,
+ .in_width_max_3tap_yuv = 4096,
+ .upscale_limit = 16,
+ .downscale_limit_5tap = 4,
+ .downscale_limit_3tap = 2,
+ /*
+ * The max supported pixel inc value is 255. The value
+ * of pixel inc is calculated like this: 1+(xinc-1)*bpp.
+ * The maximum bpp of all formats supported by the HW
+ * is 8. So the maximum supported xinc value is 32,
+		 * because 1+(32-1)*8 < 255 < 1+(33-1)*8.
+ */
+ .xinc_max = 32,
+ },
+
+ .subrev = DISPC_J721E,
+
+ .common = "common_m",
+ .common_regs = tidss_j721e_common_regs,
+
+ .num_vps = 4,
+ .vp_name = { "vp1", "vp2", "vp3", "vp4" },
+ .ovr_name = { "ovr1", "ovr2", "ovr3", "ovr4" },
+ .vpclk_name = { "vp1", "vp2", "vp3", "vp4" },
+ /* Currently hard coded VP routing (see dispc_initial_config()) */
+ .vp_bus_type = { DISPC_VP_INTERNAL, DISPC_VP_DPI,
+ DISPC_VP_INTERNAL, DISPC_VP_DPI, },
+ .vp_feat = { .color = {
+ .has_ctm = true,
+ .gamma_size = 1024,
+ .gamma_type = TIDSS_GAMMA_10BIT,
+ },
+ },
+ .num_planes = 4,
+ .vid_name = { "vid1", "vidl1", "vid2", "vidl2" },
+ .vid_lite = { 0, 1, 0, 1, },
+ .vid_order = { 1, 3, 0, 2 },
+};
+
+static const u16 *dispc_common_regmap;
+
+struct dss_vp_data {
+ u32 *gamma_table;
+};
+
+struct dispc_device {
+ struct tidss_device *tidss;
+ struct device *dev;
+
+ void __iomem *base_common;
+ void __iomem *base_vid[TIDSS_MAX_PLANES];
+ void __iomem *base_ovr[TIDSS_MAX_PORTS];
+ void __iomem *base_vp[TIDSS_MAX_PORTS];
+
+ struct regmap *oldi_io_ctrl;
+
+ struct clk *vp_clk[TIDSS_MAX_PORTS];
+
+ const struct dispc_features *feat;
+
+ struct clk *fclk;
+
+ bool is_enabled;
+
+ struct dss_vp_data vp_data[TIDSS_MAX_PORTS];
+
+ u32 *fourccs;
+ u32 num_fourccs;
+
+ u32 memory_bandwidth_limit;
+};
+
+static void dispc_write(struct dispc_device *dispc, u16 reg, u32 val)
+{
+ iowrite32(val, dispc->base_common + reg);
+}
+
+static u32 dispc_read(struct dispc_device *dispc, u16 reg)
+{
+ return ioread32(dispc->base_common + reg);
+}
+
+static
+void dispc_vid_write(struct dispc_device *dispc, u32 hw_plane, u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vid_read(struct dispc_device *dispc, u32 hw_plane, u16 reg)
+{
+ void __iomem *base = dispc->base_vid[hw_plane];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_ovr_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_ovr_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_ovr[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+static void dispc_vp_write(struct dispc_device *dispc, u32 hw_videoport,
+ u16 reg, u32 val)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ iowrite32(val, base + reg);
+}
+
+static u32 dispc_vp_read(struct dispc_device *dispc, u32 hw_videoport, u16 reg)
+{
+ void __iomem *base = dispc->base_vp[hw_videoport];
+
+ return ioread32(base + reg);
+}
+
+/*
+ * The TRM gives bitfields as start:end, where start is the higher bit
+ * number, e.g. 7:0.
+ */
+
+static u32 FLD_MASK(u32 start, u32 end)
+{
+ return ((1 << (start - end + 1)) - 1) << end;
+}
+
+static u32 FLD_VAL(u32 val, u32 start, u32 end)
+{
+ return (val << end) & FLD_MASK(start, end);
+}
+
+static u32 FLD_GET(u32 val, u32 start, u32 end)
+{
+ return (val & FLD_MASK(start, end)) >> end;
+}
+
+static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end)
+{
+ return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end);
+}
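+
+/*
+ * Worked examples for the helpers above (plain arithmetic, easy to
+ * verify by hand):
+ *   FLD_MASK(7, 0)         == 0x000000ff
+ *   FLD_VAL(0x3, 7, 4)     == 0x00000030
+ *   FLD_GET(0x1230, 7, 4)  == 0x3
+ *   FLD_MOD(0xff, 0, 3, 0) == 0xf0
+ */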
+
+static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end)
+{
+ return FLD_GET(dispc_read(dispc, idx), start, end);
+}
+
+static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val,
+ start, end));
+}
+
+static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end);
+}
+
+static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_vid_write(dispc, hw_plane, idx,
+ FLD_MOD(dispc_vid_read(dispc, hw_plane, idx),
+ val, start, end));
+}
+
+static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end);
+}
+
+static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val,
+ u32 start, u32 end)
+{
+ dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx),
+ val, start, end));
+}
+
+__maybe_unused
+static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 start, u32 end)
+{
+ return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end);
+}
+
+static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx,
+ u32 val, u32 start, u32 end)
+{
+ dispc_ovr_write(dispc, ovr, idx,
+ FLD_MOD(dispc_ovr_read(dispc, ovr, idx),
+ val, start, end));
+}
+
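+/*
+ * Translate between the raw per-VP/per-plane interrupt register bits
+ * (as decoded below: FRAMEDONE, VSYNC even/odd and SYNCLOST for VPs,
+ * FIFO underflow for planes) and the device-wide dispc_irq_t encoding
+ * used by the rest of the driver.
+ */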
+static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport)
+{
+ dispc_irq_t vp_stat = 0;
+
+ if (stat & BIT(0))
+ vp_stat |= DSS_IRQ_VP_FRAME_DONE(hw_videoport);
+ if (stat & BIT(1))
+ vp_stat |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport);
+ if (stat & BIT(2))
+ vp_stat |= DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ if (stat & BIT(4))
+ vp_stat |= DSS_IRQ_VP_SYNC_LOST(hw_videoport);
+
+ return vp_stat;
+}
+
+static u32 dispc_vp_irq_to_raw(dispc_irq_t vpstat, u32 hw_videoport)
+{
+ u32 stat = 0;
+
+ if (vpstat & DSS_IRQ_VP_FRAME_DONE(hw_videoport))
+ stat |= BIT(0);
+ if (vpstat & DSS_IRQ_VP_VSYNC_EVEN(hw_videoport))
+ stat |= BIT(1);
+ if (vpstat & DSS_IRQ_VP_VSYNC_ODD(hw_videoport))
+ stat |= BIT(2);
+ if (vpstat & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ stat |= BIT(4);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_vid_irq_from_raw(u32 stat, u32 hw_plane)
+{
+ dispc_irq_t vid_stat = 0;
+
+ if (stat & BIT(0))
+ vid_stat |= DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane);
+
+ return vid_stat;
+}
+
+static u32 dispc_vid_irq_to_raw(dispc_irq_t vidstat, u32 hw_plane)
+{
+ u32 stat = 0;
+
+ if (vidstat & DSS_IRQ_PLANE_FIFO_UNDERFLOW(hw_plane))
+ stat |= BIT(0);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQSTATUS, stat);
+}
+
+static dispc_irq_t dispc_k2g_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_vp_read(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE);
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k2g_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_IRQENABLE, stat);
+}
+
+static dispc_irq_t dispc_k2g_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_vid_read(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE);
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k2g_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_IRQENABLE, stat);
+}
+
+static void dispc_k2g_clear_irqstatus(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ dispc_k2g_vp_write_irqstatus(dispc, 0, mask);
+ dispc_k2g_vid_write_irqstatus(dispc, 0, mask);
+}
+
+static
+dispc_irq_t dispc_k2g_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ /* always clear the top level irqstatus */
+ dispc_write(dispc, DISPC_IRQSTATUS,
+ dispc_read(dispc, DISPC_IRQSTATUS));
+
+ stat |= dispc_k2g_vp_read_irqstatus(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqstatus(dispc, 0);
+
+ dispc_k2g_clear_irqstatus(dispc, stat);
+
+ return stat;
+}
+
+static dispc_irq_t dispc_k2g_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t stat = 0;
+
+ stat |= dispc_k2g_vp_read_irqenable(dispc, 0);
+ stat |= dispc_k2g_vid_read_irqenable(dispc, 0);
+
+ return stat;
+}
+
+static
+void dispc_k2g_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ dispc_irq_t old_mask = dispc_k2g_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k2g_clear_irqstatus(dispc, (mask ^ old_mask) & mask);
+
+ dispc_k2g_vp_set_irqenable(dispc, 0, mask);
+ dispc_k2g_vid_set_irqenable(dispc, 0, mask);
+
+ dispc_write(dispc, DISPC_IRQENABLE_SET, (1 << 0) | (1 << 7));
+
+ /* flush posted write */
+ dispc_k2g_read_irqenable(dispc);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQSTATUS(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQSTATUS(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQSTATUS(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_write_irqstatus(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQSTATUS(hw_plane), stat);
+}
+
+static dispc_irq_t dispc_k3_vp_read_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 stat = dispc_read(dispc, DISPC_VP_IRQENABLE(hw_videoport));
+
+ return dispc_vp_irq_from_raw(stat, hw_videoport);
+}
+
+static void dispc_k3_vp_set_irqenable(struct dispc_device *dispc,
+ u32 hw_videoport, dispc_irq_t vpstat)
+{
+ u32 stat = dispc_vp_irq_to_raw(vpstat, hw_videoport);
+
+ dispc_write(dispc, DISPC_VP_IRQENABLE(hw_videoport), stat);
+}
+
+static dispc_irq_t dispc_k3_vid_read_irqenable(struct dispc_device *dispc,
+ u32 hw_plane)
+{
+ u32 stat = dispc_read(dispc, DISPC_VID_IRQENABLE(hw_plane));
+
+ return dispc_vid_irq_from_raw(stat, hw_plane);
+}
+
+static void dispc_k3_vid_set_irqenable(struct dispc_device *dispc,
+ u32 hw_plane, dispc_irq_t vidstat)
+{
+ u32 stat = dispc_vid_irq_to_raw(vidstat, hw_plane);
+
+ dispc_write(dispc, DISPC_VID_IRQENABLE(hw_plane), stat);
+}
+
+static
+void dispc_k3_clear_irqstatus(struct dispc_device *dispc, dispc_irq_t clearmask)
+{
+ unsigned int i;
+ u32 top_clear = 0;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ if (clearmask & DSS_IRQ_VP_MASK(i)) {
+ dispc_k3_vp_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(i);
+ }
+ }
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ if (clearmask & DSS_IRQ_PLANE_MASK(i)) {
+ dispc_k3_vid_write_irqstatus(dispc, i, clearmask);
+ top_clear |= BIT(4 + i);
+ }
+ }
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ dispc_write(dispc, DISPC_IRQSTATUS, top_clear);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQSTATUS);
+}
+
+static
+dispc_irq_t dispc_k3_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ dispc_irq_t status = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ status |= dispc_k3_vp_read_irqstatus(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ status |= dispc_k3_vid_read_irqstatus(dispc, i);
+
+ dispc_k3_clear_irqstatus(dispc, status);
+
+ return status;
+}
+
+static dispc_irq_t dispc_k3_read_irqenable(struct dispc_device *dispc)
+{
+ dispc_irq_t enable = 0;
+ unsigned int i;
+
+ for (i = 0; i < dispc->feat->num_vps; ++i)
+ enable |= dispc_k3_vp_read_irqenable(dispc, i);
+
+ for (i = 0; i < dispc->feat->num_planes; ++i)
+ enable |= dispc_k3_vid_read_irqenable(dispc, i);
+
+ return enable;
+}
+
+static void dispc_k3_set_irqenable(struct dispc_device *dispc,
+ dispc_irq_t mask)
+{
+ unsigned int i;
+ u32 main_enable = 0, main_disable = 0;
+ dispc_irq_t old_mask;
+
+ old_mask = dispc_k3_read_irqenable(dispc);
+
+ /* clear the irqstatus for newly enabled irqs */
+ dispc_k3_clear_irqstatus(dispc, (old_mask ^ mask) & mask);
+
+ for (i = 0; i < dispc->feat->num_vps; ++i) {
+ dispc_k3_vp_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_VP_MASK(i))
+ main_enable |= BIT(i); /* VP IRQ */
+ else
+ main_disable |= BIT(i); /* VP IRQ */
+ }
+
+ for (i = 0; i < dispc->feat->num_planes; ++i) {
+ dispc_k3_vid_set_irqenable(dispc, i, mask);
+ if (mask & DSS_IRQ_PLANE_MASK(i))
+ main_enable |= BIT(i + 4); /* VID IRQ */
+ else
+ main_disable |= BIT(i + 4); /* VID IRQ */
+ }
+
+ if (main_enable)
+ dispc_write(dispc, DISPC_IRQENABLE_SET, main_enable);
+
+ if (main_disable)
+ dispc_write(dispc, DISPC_IRQENABLE_CLR, main_disable);
+
+ /* Flush posted writes */
+ dispc_read(dispc, DISPC_IRQENABLE_SET);
+}
+
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ return dispc_k2g_read_and_clear_irqstatus(dispc);
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ return dispc_k3_read_and_clear_irqstatus(dispc);
+ default:
+ WARN_ON(1);
+ return 0;
+ }
+}
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_set_irqenable(dispc, mask);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_set_irqenable(dispc, mask);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+enum dispc_oldi_mode_reg_val { SPWG_18 = 0, JEIDA_24 = 1, SPWG_24 = 2 };
+
+struct dispc_bus_format {
+ u32 bus_fmt;
+ u32 data_width;
+ bool is_oldi_fmt;
+ enum dispc_oldi_mode_reg_val oldi_mode_reg_val;
+};
+
+static const struct dispc_bus_format dispc_bus_formats[] = {
+ { MEDIA_BUS_FMT_RGB444_1X12, 12, false, 0 },
+ { MEDIA_BUS_FMT_RGB565_1X16, 16, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X18, 18, false, 0 },
+ { MEDIA_BUS_FMT_RGB888_1X24, 24, false, 0 },
+ { MEDIA_BUS_FMT_RGB101010_1X30, 30, false, 0 },
+ { MEDIA_BUS_FMT_RGB121212_1X36, 36, false, 0 },
+ { MEDIA_BUS_FMT_RGB666_1X7X3_SPWG, 18, true, SPWG_18 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_SPWG, 24, true, SPWG_24 },
+ { MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA, 24, true, JEIDA_24 },
+};
+
+static const
+struct dispc_bus_format *dispc_vp_find_bus_fmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ u32 bus_fmt, u32 bus_flags)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_bus_formats); ++i) {
+ if (dispc_bus_formats[i].bus_fmt == bus_fmt)
+ return &dispc_bus_formats[i];
+ }
+
+ return NULL;
+}
+
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+ if (!fmt) {
+ dev_dbg(dispc->dev, "%s: Unsupported bus format: %u\n",
+ __func__, tstate->bus_format);
+ return -EINVAL;
+ }
+
+ if (dispc->feat->vp_bus_type[hw_videoport] != DISPC_VP_OLDI &&
+ fmt->is_oldi_fmt) {
+ dev_dbg(dispc->dev, "%s: %s is not OLDI-port\n",
+ __func__, dispc->feat->vp_name[hw_videoport]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dispc_oldi_tx_power(struct dispc_device *dispc, bool power)
+{
+ u32 val = power ? 0 : OLDI_PWRDN_TX;
+
+ if (WARN_ON(!dispc->oldi_io_ctrl))
+ return;
+
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT0_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT1_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT2_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_DAT3_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+ regmap_update_bits(dispc->oldi_io_ctrl, OLDI_CLK_IO_CTRL,
+ OLDI_PWRDN_TX, val);
+}
+
+static void dispc_set_num_datalines(struct dispc_device *dispc,
+ u32 hw_videoport, int num_lines)
+{
+ int v;
+
+ switch (num_lines) {
+ case 12:
+ v = 0; break;
+ case 16:
+ v = 1; break;
+ case 18:
+ v = 2; break;
+ case 24:
+ v = 3; break;
+ case 30:
+ v = 4; break;
+ case 36:
+ v = 5; break;
+ default:
+ WARN_ON(1);
+ v = 3;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8);
+}
+
+static void dispc_enable_oldi(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_bus_format *fmt)
+{
+ u32 oldi_cfg = 0;
+ u32 oldi_reset_bit = BIT(5 + hw_videoport);
+ int count = 0;
+
+ /*
+ * For the moment DUALMODESYNC, MASTERSLAVE, MODE, and SRC
+ * bits of DISPC_VP_DSS_OLDI_CFG are set statically to 0.
+ */
+
+ if (fmt->data_width == 24)
+ oldi_cfg |= BIT(8); /* MSB */
+ else if (fmt->data_width != 18)
+		dev_warn(dispc->dev, "%s: port width %d not supported\n",
+ __func__, fmt->data_width);
+
+ oldi_cfg |= BIT(7); /* DEPOL */
+
+ oldi_cfg = FLD_MOD(oldi_cfg, fmt->oldi_mode_reg_val, 3, 1);
+
+ oldi_cfg |= BIT(12); /* SOFTRST */
+
+ oldi_cfg |= BIT(0); /* ENABLE */
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, oldi_cfg);
+
+ while (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)) &&
+ count < 10000)
+ count++;
+
+ if (!(oldi_reset_bit & dispc_read(dispc, DSS_SYSSTATUS)))
+		dev_warn(dispc->dev, "%s: timeout waiting for OLDI reset done\n",
+ __func__);
+}
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ const struct dispc_bus_format *fmt;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_oldi_tx_power(dispc, true);
+
+ dispc_enable_oldi(dispc, hw_videoport, fmt);
+ }
+}
+
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state)
+{
+ const struct drm_display_mode *mode = &state->adjusted_mode;
+ const struct tidss_crtc_state *tstate = to_tidss_crtc_state(state);
+ bool align, onoff, rf, ieo, ipc, ihs, ivs;
+ const struct dispc_bus_format *fmt;
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+
+ fmt = dispc_vp_find_bus_fmt(dispc, hw_videoport, tstate->bus_format,
+ tstate->bus_flags);
+
+ if (WARN_ON(!fmt))
+ return;
+
+ dispc_set_num_datalines(dispc, hw_videoport, fmt->data_width);
+
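+	/*
+	 * The DRM mode gives absolute sync positions; the VP timing
+	 * registers take front porch, sync and back porch lengths, so
+	 * derive those here.
+	 */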
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H,
+ FLD_VAL(hsw - 1, 7, 0) |
+ FLD_VAL(hfp - 1, 19, 8) |
+ FLD_VAL(hbp - 1, 31, 20));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V,
+ FLD_VAL(vsw - 1, 7, 0) |
+ FLD_VAL(vfp, 19, 8) |
+ FLD_VAL(vbp, 31, 20));
+
+ ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC);
+
+ ihs = !!(mode->flags & DRM_MODE_FLAG_NHSYNC);
+
+ ieo = !!(tstate->bus_flags & DRM_BUS_FLAG_DE_LOW);
+
+ ipc = !!(tstate->bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE);
+
+ /* always use the 'rf' setting */
+ onoff = true;
+
+ rf = !!(tstate->bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE);
+
+ /* always use aligned syncs */
+ align = true;
+
+ /* always use DE_HIGH for OLDI */
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI)
+ ieo = false;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ,
+ FLD_VAL(align, 18, 18) |
+ FLD_VAL(onoff, 17, 17) |
+ FLD_VAL(rf, 16, 16) |
+ FLD_VAL(ieo, 15, 15) |
+ FLD_VAL(ipc, 14, 14) |
+ FLD_VAL(ihs, 13, 13) |
+ FLD_VAL(ivs, 12, 12));
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN,
+ FLD_VAL(mode->hdisplay - 1, 11, 0) |
+ FLD_VAL(mode->vdisplay - 1, 27, 16));
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0);
+}
+
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport)
+{
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0);
+}
+
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport)
+{
+ if (dispc->feat->vp_bus_type[hw_videoport] == DISPC_VP_OLDI) {
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_DSS_OLDI_CFG, 0);
+
+ dispc_oldi_tx_power(dispc, false);
+ }
+}
+
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport)
+{
+ return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5);
+}
+
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport)
+{
+ WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5));
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5);
+}
+
+enum c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN };
+
+static u16 c8_to_c12(u8 c8, enum c8_to_c12_mode mode)
+{
+ u16 c12;
+
+ c12 = c8 << 4;
+
+ switch (mode) {
+ case C8_TO_C12_REPLICATE:
+		/* Copy the 4 MSBs of c8 to the 4 LSBs for a full-scale c12 */
+ c12 |= c8 >> 4;
+ break;
+ case C8_TO_C12_MAX:
+ c12 |= 0xF;
+ break;
+ default:
+ case C8_TO_C12_MIN:
+ break;
+ }
+
+ return c12;
+}
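+
+/*
+ * Example: c8_to_c12(0xab, C8_TO_C12_REPLICATE) == 0xaba. Replication
+ * expands 0xff to a full-scale 0xfff, whereas C8_TO_C12_MIN would
+ * yield 0xff0.
+ */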
+
+static u64 argb8888_to_argb12121212(u32 argb8888, enum c8_to_c12_mode m)
+{
+ u8 a, r, g, b;
+ u64 v;
+
+ a = (argb8888 >> 24) & 0xff;
+ r = (argb8888 >> 16) & 0xff;
+ g = (argb8888 >> 8) & 0xff;
+ b = (argb8888 >> 0) & 0xff;
+
+ v = ((u64)c8_to_c12(a, m) << 36) | ((u64)c8_to_c12(r, m) << 24) |
+ ((u64)c8_to_c12(g, m) << 12) | (u64)c8_to_c12(b, m);
+
+ return v;
+}
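+
+/* Example: opaque white 0xffffffff maps to the 48-bit 0xffffffffffff. */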
+
+static void dispc_vp_set_default_color(struct dispc_device *dispc,
+ u32 hw_videoport, u32 default_color)
+{
+ u64 v;
+
+ v = argb8888_to_argb12121212(default_color, C8_TO_C12_REPLICATE);
+
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR, v & 0xffffffff);
+ dispc_ovr_write(dispc, hw_videoport,
+ DISPC_OVR_DEFAULT_COLOR2, (v >> 32) & 0xffff);
+}
+
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode)
+{
+ u32 hsw, hfp, hbp, vsw, vfp, vbp;
+ enum dispc_vp_bus_type bus_type;
+ int max_pclk;
+
+ bus_type = dispc->feat->vp_bus_type[hw_videoport];
+
+ max_pclk = dispc->feat->max_pclk_khz[bus_type];
+
+ if (WARN_ON(max_pclk == 0))
+ return MODE_BAD;
+
+ if (mode->clock < dispc->feat->min_pclk_khz)
+ return MODE_CLOCK_LOW;
+
+ if (mode->clock > max_pclk)
+ return MODE_CLOCK_HIGH;
+
+ if (mode->hdisplay > 4096)
+ return MODE_BAD;
+
+ if (mode->vdisplay > 4096)
+ return MODE_BAD;
+
+ /* TODO: add interlace support */
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ return MODE_NO_INTERLACE;
+
+	/*
+	 * Enforce that the output width is divisible by 2. Strictly this
+	 * is only needed in the following cases:
+	 * - YUV output selected (BT656, BT1120)
+	 * - Dithering enabled
+	 * - TDM with TDMCycleFormat == 3
+	 * But for simplicity we always enforce it.
+	 */
+ if ((mode->hdisplay % 2) != 0)
+ return MODE_BAD_HVALUE;
+
+ hfp = mode->hsync_start - mode->hdisplay;
+ hsw = mode->hsync_end - mode->hsync_start;
+ hbp = mode->htotal - mode->hsync_end;
+
+ vfp = mode->vsync_start - mode->vdisplay;
+ vsw = mode->vsync_end - mode->vsync_start;
+ vbp = mode->vtotal - mode->vsync_end;
+
+ if (hsw < 1 || hsw > 256 ||
+ hfp < 1 || hfp > 4096 ||
+ hbp < 1 || hbp > 4096)
+ return MODE_BAD_HVALUE;
+
+ if (vsw < 1 || vsw > 256 ||
+ vfp > 4095 || vbp > 4095)
+ return MODE_BAD_VVALUE;
+
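+	/*
+	 * Rough average-bandwidth estimate, assuming a full-screen plane
+	 * at 4 bytes/pixel: bytes/s = pclk (Hz) * bpp * active pixels /
+	 * total pixels.
+	 */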
+ if (dispc->memory_bandwidth_limit) {
+ const unsigned int bpp = 4;
+ u64 bandwidth;
+
+ bandwidth = 1000 * mode->clock;
+ bandwidth = bandwidth * mode->hdisplay * mode->vdisplay * bpp;
+ bandwidth = div_u64(bandwidth, mode->htotal * mode->vtotal);
+
+ if (dispc->memory_bandwidth_limit < bandwidth)
+ return MODE_BAD;
+ }
+
+ return MODE_OK;
+}
+
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ int ret = clk_prepare_enable(dispc->vp_clk[hw_videoport]);
+
+ if (ret)
+ dev_err(dispc->dev, "%s: enabling clk failed: %d\n", __func__,
+ ret);
+
+ return ret;
+}
+
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport)
+{
+ clk_disable_unprepare(dispc->vp_clk[hw_videoport]);
+}
+
+/*
+ * Calculate the percentage difference between the requested pixel clock rate
+ * and the effective rate resulting from calculating the clock divider value.
+ */
+static
+unsigned int dispc_pclk_diff(unsigned long rate, unsigned long real_rate)
+{
+ int r = rate / 100, rr = real_rate / 100;
+
+ return (unsigned int)(abs(((rr - r) * 100) / r));
+}
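+
+/* Example: dispc_pclk_diff(74250000, 72000000) == 3, i.e. ~3% off. */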
+
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate)
+{
+ int r;
+ unsigned long new_rate;
+
+ r = clk_set_rate(dispc->vp_clk[hw_videoport], rate);
+ if (r) {
+ dev_err(dispc->dev, "vp%d: failed to set clk rate to %lu\n",
+ hw_videoport, rate);
+ return r;
+ }
+
+ new_rate = clk_get_rate(dispc->vp_clk[hw_videoport]);
+
+ if (dispc_pclk_diff(rate, new_rate) > 5)
+ dev_warn(dispc->dev,
+ "vp%d: Clock rate %lu differs over 5%% from requested %lu\n",
+ hw_videoport, new_rate, rate);
+
+ dev_dbg(dispc->dev, "vp%d: new rate %lu Hz (requested %lu Hz)\n",
+ hw_videoport, clk_get_rate(dispc->vp_clk[hw_videoport]), rate);
+
+ return 0;
+}
+
+/* OVR */
+static void dispc_k2g_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+	/* On K2G there is only one plane and no need for the OVR */
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_K2G_POSITION,
+ x | (y << 16));
+}
+
+static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ x, 17, 6);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ y, 30, 19);
+}
+
+static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc,
+ u32 hw_plane, u32 hw_videoport,
+ u32 x, u32 y, u32 layer)
+{
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ hw_plane, 4, 1);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ x, 13, 0);
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer),
+ y, 29, 16);
+}
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_ovr_set_plane(dispc, hw_plane, hw_videoport,
+ x, y, layer);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable)
+{
+ if (dispc->feat->subrev == DISPC_K2G)
+ return;
+
+ OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer),
+ !!enable, 0, 0);
+}
+
+/* CSC */
+enum csc_ctm {
+ CSC_RR, CSC_RG, CSC_RB,
+ CSC_GR, CSC_GG, CSC_GB,
+ CSC_BR, CSC_BG, CSC_BB,
+};
+
+enum csc_yuv2rgb {
+ CSC_RY, CSC_RCB, CSC_RCR,
+ CSC_GY, CSC_GCB, CSC_GCR,
+ CSC_BY, CSC_BCB, CSC_BCR,
+};
+
+enum csc_rgb2yuv {
+ CSC_YR, CSC_YG, CSC_YB,
+ CSC_CBR, CSC_CBG, CSC_CBB,
+ CSC_CRR, CSC_CRG, CSC_CRB,
+};
+
+struct dispc_csc_coef {
+ void (*to_regval)(const struct dispc_csc_coef *csc, u32 *regval);
+ int m[9];
+ int preoffset[3];
+ int postoffset[3];
+ enum { CLIP_LIMITED_RANGE = 0, CLIP_FULL_RANGE = 1, } cliping;
+ const char *name;
+};
+
+#define DISPC_CSC_REGVAL_LEN 8
+
+static
+void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19))
+ regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]);
+ regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]);
+ regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]);
+#undef OVAL
+}
+
+#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16))
+static
+void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RY], csc->m[CSC_RCR]);
+ regval[1] = CVAL(csc->m[CSC_RCB], csc->m[CSC_GY]);
+ regval[2] = CVAL(csc->m[CSC_GCR], csc->m[CSC_GCB]);
+ regval[3] = CVAL(csc->m[CSC_BY], csc->m[CSC_BCR]);
+ regval[4] = CVAL(csc->m[CSC_BCB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+__maybe_unused static
+void dispc_csc_rgb2yuv_regval(const struct dispc_csc_coef *csc, u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_YR], csc->m[CSC_YG]);
+ regval[1] = CVAL(csc->m[CSC_YB], csc->m[CSC_CRR]);
+ regval[2] = CVAL(csc->m[CSC_CRG], csc->m[CSC_CRB]);
+ regval[3] = CVAL(csc->m[CSC_CBR], csc->m[CSC_CBG]);
+ regval[4] = CVAL(csc->m[CSC_CBB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+static void dispc_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_RR], csc->m[CSC_RG]);
+ regval[1] = CVAL(csc->m[CSC_RB], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_GG], csc->m[CSC_GB]);
+ regval[3] = CVAL(csc->m[CSC_BR], csc->m[CSC_BG]);
+ regval[4] = CVAL(csc->m[CSC_BB], 0);
+
+ dispc_csc_offset_regval(csc, regval);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), /* K2G has no post offset support */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ if (regval[7] != 0)
+ dev_warn(dispc->dev, "%s: No post offset support for %s\n",
+ __func__, csc->name);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vid_write_csc(struct dispc_device *dispc, u32 hw_plane,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vid_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VID_CSC_COEF(0), DISPC_VID_CSC_COEF(1),
+ DISPC_VID_CSC_COEF(2), DISPC_VID_CSC_COEF(3),
+ DISPC_VID_CSC_COEF(4), DISPC_VID_CSC_COEF(5),
+ DISPC_VID_CSC_COEF(6), DISPC_VID_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vid_csc_coef_reg); i++)
+ dispc_vid_write(dispc, hw_plane, dispc_vid_csc_coef_reg[i],
+ regval[i]);
+}
+
+/* YUV -> RGB, ITU-R BT.601, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 358, /* ry, rcb, rcr |1.000 0.000 1.402|*/
+ 256, -88, -182, /* gy, gcb, gcr |1.000 -0.344 -0.714|*/
+ 256, 452, 0, }, /* by, bcb, bcr |1.000 1.772 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.601, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt601_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 409, /* ry, rcb, rcr |1.164 0.000 1.596|*/
+ 298, -100, -208, /* gy, gcb, gcr |1.164 -0.392 -0.813|*/
+ 298, 516, 0, }, /* by, bcb, bcr |1.164 2.017 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.601 Limited",
+};
+
+/* YUV -> RGB, ITU-R BT.709, full range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_full = {
+ dispc_csc_yuv2rgb_regval,
+ { 256, 0, 402, /* ry, rcb, rcr |1.000 0.000 1.570|*/
+ 256, -48, -120, /* gy, gcb, gcr |1.000 -0.187 -0.467|*/
+ 256, 475, 0, }, /* by, bcb, bcr |1.000 1.856 0.000|*/
+ { 0, -2048, -2048, }, /* full range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Full",
+};
+
+/* YUV -> RGB, ITU-R BT.709, limited range */
+static const struct dispc_csc_coef csc_yuv2rgb_bt709_lim = {
+ dispc_csc_yuv2rgb_regval,
+ { 298, 0, 459, /* ry, rcb, rcr |1.164 0.000 1.793|*/
+ 298, -55, -136, /* gy, gcb, gcr |1.164 -0.213 -0.533|*/
+ 298, 541, 0, }, /* by, bcb, bcr |1.164 2.112 0.000|*/
+ { -256, -2048, -2048, }, /* limited range */
+ { 0, 0, 0, },
+ CLIP_FULL_RANGE,
+ "BT.709 Limited",
+};
+
+static const struct {
+ enum drm_color_encoding encoding;
+ enum drm_color_range range;
+ const struct dispc_csc_coef *csc;
+} dispc_csc_table[] = {
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt601_full, },
+ { DRM_COLOR_YCBCR_BT601, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt601_lim, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_FULL_RANGE,
+ &csc_yuv2rgb_bt709_full, },
+ { DRM_COLOR_YCBCR_BT709, DRM_COLOR_YCBCR_LIMITED_RANGE,
+ &csc_yuv2rgb_bt709_lim, },
+};
+
+static const
+struct dispc_csc_coef *dispc_find_csc(enum drm_color_encoding encoding,
+ enum drm_color_range range)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_csc_table); i++) {
+ if (dispc_csc_table[i].encoding == encoding &&
+ dispc_csc_table[i].range == range) {
+ return dispc_csc_table[i].csc;
+ }
+ }
+ return NULL;
+}
+
+static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state)
+{
+ const struct dispc_csc_coef *coef;
+
+ coef = dispc_find_csc(state->color_encoding, state->color_range);
+ if (!coef) {
+ dev_err(dispc->dev, "%s: CSC (%u,%u) not found\n",
+ __func__, state->color_encoding, state->color_range);
+ return;
+ }
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vid_write_csc(dispc, hw_plane, coef);
+ else
+ dispc_k3_vid_write_csc(dispc, hw_plane, coef);
+}
+
+static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane,
+ bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9);
+}
+
+/* SCALER */
+
+static u32 dispc_calc_fir_inc(u32 in, u32 out)
+{
+ return (u32)div_u64(0x200000ull * in, out);
+}
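+
+/*
+ * The FIR increment is the in/out ratio in fixed point with 21
+ * fractional bits: in == out gives 0x200000 (1.0), and e.g.
+ * dispc_calc_fir_inc(1920, 960) == 0x400000 (2x downscale).
+ */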
+
+enum dispc_vid_fir_coef_set {
+ DISPC_VID_FIR_COEF_HORIZ,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ DISPC_VID_FIR_COEF_VERT,
+ DISPC_VID_FIR_COEF_VERT_UV,
+};
+
+static void dispc_vid_write_fir_coefs(struct dispc_device *dispc,
+ u32 hw_plane,
+ enum dispc_vid_fir_coef_set coef_set,
+ const struct tidss_scale_coefs *coefs)
+{
+ static const u16 c0_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H0,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H0_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V0,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V0_C,
+ };
+
+ static const u16 c12_regs[] = {
+ [DISPC_VID_FIR_COEF_HORIZ] = DISPC_VID_FIR_COEFS_H12,
+ [DISPC_VID_FIR_COEF_HORIZ_UV] = DISPC_VID_FIR_COEFS_H12_C,
+ [DISPC_VID_FIR_COEF_VERT] = DISPC_VID_FIR_COEFS_V12,
+ [DISPC_VID_FIR_COEF_VERT_UV] = DISPC_VID_FIR_COEFS_V12_C,
+ };
+
+ const u16 c0_base = c0_regs[coef_set];
+ const u16 c12_base = c12_regs[coef_set];
+ int phase;
+
+ if (!coefs) {
+ dev_err(dispc->dev, "%s: No coefficients given.\n", __func__);
+ return;
+ }
+
+ for (phase = 0; phase <= 8; ++phase) {
+ u16 reg = c0_base + phase * 4;
+ u16 c0 = coefs->c0[phase];
+
+ dispc_vid_write(dispc, hw_plane, reg, c0);
+ }
+
+ for (phase = 0; phase <= 15; ++phase) {
+ u16 reg = c12_base + phase * 4;
+ s16 c1, c2;
+ u32 c12;
+
+ c1 = coefs->c1[phase];
+ c2 = coefs->c2[phase];
+ c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20);
+
+ dispc_vid_write(dispc, hw_plane, reg, c12);
+ }
+}
+
+static bool dispc_fourcc_is_yuv(u32 fourcc)
+{
+ switch (fourcc) {
+ case DRM_FORMAT_YUYV:
+ case DRM_FORMAT_UYVY:
+ case DRM_FORMAT_NV12:
+ return true;
+ default:
+ return false;
+ }
+}
+
+struct dispc_scaling_params {
+ int xinc, yinc;
+ u32 in_w, in_h, in_w_uv, in_h_uv;
+ u32 fir_xinc, fir_yinc, fir_xinc_uv, fir_yinc_uv;
+ bool scale_x, scale_y;
+ const struct tidss_scale_coefs *xcoef, *ycoef, *xcoef_uv, *ycoef_uv;
+ bool five_taps;
+};
+
+static int dispc_vid_calc_scaling(struct dispc_device *dispc,
+ const struct drm_plane_state *state,
+ struct dispc_scaling_params *sp,
+ bool lite_plane)
+{
+ const struct dispc_features_scaling *f = &dispc->feat->scaling;
+ u32 fourcc = state->fb->format->format;
+ u32 in_width_max_5tap = f->in_width_max_5tap_rgb;
+ u32 in_width_max_3tap = f->in_width_max_3tap_rgb;
+ u32 downscale_limit;
+ u32 in_width_max;
+
+ memset(sp, 0, sizeof(*sp));
+ sp->xinc = 1;
+ sp->yinc = 1;
+ sp->in_w = state->src_w >> 16;
+ sp->in_w_uv = sp->in_w;
+ sp->in_h = state->src_h >> 16;
+ sp->in_h_uv = sp->in_h;
+
+ sp->scale_x = sp->in_w != state->crtc_w;
+ sp->scale_y = sp->in_h != state->crtc_h;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ in_width_max_5tap = f->in_width_max_5tap_yuv;
+ in_width_max_3tap = f->in_width_max_3tap_yuv;
+
+ sp->in_w_uv >>= 1;
+ sp->scale_x = true;
+
+ if (fourcc == DRM_FORMAT_NV12) {
+ sp->in_h_uv >>= 1;
+ sp->scale_y = true;
+ }
+ }
+
+ /* Skip the rest if no scaling is used */
+ if ((!sp->scale_x && !sp->scale_y) || lite_plane)
+ return 0;
+
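+	/*
+	 * Inputs wider than the 5-tap limit fall back to 3-tap filtering,
+	 * which allows wider lines but has a tighter downscale limit.
+	 */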
+ if (sp->in_w > in_width_max_5tap) {
+ sp->five_taps = false;
+ in_width_max = in_width_max_3tap;
+ downscale_limit = f->downscale_limit_3tap;
+ } else {
+ sp->five_taps = true;
+ in_width_max = in_width_max_5tap;
+ downscale_limit = f->downscale_limit_5tap;
+ }
+
+ if (sp->scale_x) {
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+
+ if (sp->fir_xinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_w, state->src_w >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_xinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->xinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_w,
+ state->crtc_w),
+ downscale_limit);
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: X-scaling factor %u/%u < 1/%u\n",
+ __func__, state->crtc_w,
+ state->src_w >> 16,
+ downscale_limit * f->xinc_max);
+ return -EINVAL;
+ }
+
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ while (sp->in_w > in_width_max) {
+ sp->xinc++;
+ sp->in_w = (state->src_w >> 16) / sp->xinc;
+ }
+
+ if (sp->xinc > f->xinc_max) {
+ dev_dbg(dispc->dev,
+ "%s: Too wide input buffer %u > %u\n", __func__,
+ state->src_w >> 16, in_width_max * f->xinc_max);
+ return -EINVAL;
+ }
+
+		/*
+		 * We need an even line length for YUV formats. Decimation
+		 * can lead to an odd length, so we need to make it even
+		 * again.
+		 */
+ if (dispc_fourcc_is_yuv(fourcc))
+ sp->in_w &= ~1;
+
+ sp->fir_xinc = dispc_calc_fir_inc(sp->in_w, state->crtc_w);
+ }
+
+ if (sp->scale_y) {
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h, state->crtc_h);
+
+ if (sp->fir_yinc < dispc_calc_fir_inc(1, f->upscale_limit)) {
+ dev_dbg(dispc->dev,
+ "%s: Y-scaling factor %u/%u > %u\n",
+ __func__, state->crtc_h, state->src_h >> 16,
+ f->upscale_limit);
+ return -EINVAL;
+ }
+
+ if (sp->fir_yinc >= dispc_calc_fir_inc(downscale_limit, 1)) {
+ sp->yinc = DIV_ROUND_UP(DIV_ROUND_UP(sp->in_h,
+ state->crtc_h),
+ downscale_limit);
+
+ sp->in_h /= sp->yinc;
+ sp->fir_yinc = dispc_calc_fir_inc(sp->in_h,
+ state->crtc_h);
+ }
+ }
+
+ dev_dbg(dispc->dev,
+ "%s: %ux%u decim %ux%u -> %ux%u firinc %u.%03ux%u.%03u taps %u -> %ux%u\n",
+ __func__, state->src_w >> 16, state->src_h >> 16,
+ sp->xinc, sp->yinc, sp->in_w, sp->in_h,
+ sp->fir_xinc / 0x200000u,
+ ((sp->fir_xinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->fir_yinc / 0x200000u,
+ ((sp->fir_yinc & 0x1FFFFFu) * 999u) / 0x1FFFFFu,
+ sp->five_taps ? 5 : 3,
+ state->crtc_w, state->crtc_h);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ sp->in_w_uv /= sp->xinc;
+ sp->fir_xinc_uv = dispc_calc_fir_inc(sp->in_w_uv,
+ state->crtc_w);
+ sp->xcoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_xinc_uv,
+ true);
+ }
+ if (sp->scale_y) {
+ sp->in_h_uv /= sp->yinc;
+ sp->fir_yinc_uv = dispc_calc_fir_inc(sp->in_h_uv,
+ state->crtc_h);
+ sp->ycoef_uv = tidss_get_scale_coefs(dispc->dev,
+ sp->fir_yinc_uv,
+ sp->five_taps);
+ }
+ }
+
+ if (sp->scale_x)
+ sp->xcoef = tidss_get_scale_coefs(dispc->dev, sp->fir_xinc,
+ true);
+
+ if (sp->scale_y)
+ sp->ycoef = tidss_get_scale_coefs(dispc->dev, sp->fir_yinc,
+ sp->five_taps);
+
+ return 0;
+}
+
+static void dispc_vid_set_scaling(struct dispc_device *dispc,
+ u32 hw_plane,
+ struct dispc_scaling_params *sp,
+ u32 fourcc)
+{
+ /* HORIZONTAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_x, 7, 7);
+
+ /* VERTICAL RESIZE ENABLE */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->scale_y, 8, 8);
+
+ /* Skip the rest if no scaling is used */
+ if (!sp->scale_x && !sp->scale_y)
+ return;
+
+ /* VERTICAL 5-TAPS */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ sp->five_taps, 21, 21);
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH2,
+ sp->fir_xinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ_UV,
+ sp->xcoef_uv);
+ }
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV2,
+ sp->fir_yinc_uv);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT_UV,
+ sp->ycoef_uv);
+ }
+ }
+
+ if (sp->scale_x) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRH, sp->fir_xinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_HORIZ,
+ sp->xcoef);
+ }
+
+ if (sp->scale_y) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_FIRV, sp->fir_yinc);
+ dispc_vid_write_fir_coefs(dispc, hw_plane,
+ DISPC_VID_FIR_COEF_VERT, sp->ycoef);
+ }
+}
+
+/* OTHER */
+
+static const struct {
+ u32 fourcc;
+ u8 dss_code;
+} dispc_color_formats[] = {
+ { DRM_FORMAT_ARGB4444, 0x0, },
+ { DRM_FORMAT_ABGR4444, 0x1, },
+ { DRM_FORMAT_RGBA4444, 0x2, },
+
+ { DRM_FORMAT_RGB565, 0x3, },
+ { DRM_FORMAT_BGR565, 0x4, },
+
+ { DRM_FORMAT_ARGB1555, 0x5, },
+ { DRM_FORMAT_ABGR1555, 0x6, },
+
+ { DRM_FORMAT_ARGB8888, 0x7, },
+ { DRM_FORMAT_ABGR8888, 0x8, },
+ { DRM_FORMAT_RGBA8888, 0x9, },
+ { DRM_FORMAT_BGRA8888, 0xa, },
+
+ { DRM_FORMAT_RGB888, 0xb, },
+ { DRM_FORMAT_BGR888, 0xc, },
+
+ { DRM_FORMAT_ARGB2101010, 0xe, },
+ { DRM_FORMAT_ABGR2101010, 0xf, },
+
+ { DRM_FORMAT_XRGB4444, 0x20, },
+ { DRM_FORMAT_XBGR4444, 0x21, },
+ { DRM_FORMAT_RGBX4444, 0x22, },
+
+ { DRM_FORMAT_ARGB1555, 0x25, },
+ { DRM_FORMAT_ABGR1555, 0x26, },
+
+ { DRM_FORMAT_XRGB8888, 0x27, },
+ { DRM_FORMAT_XBGR8888, 0x28, },
+ { DRM_FORMAT_RGBX8888, 0x29, },
+ { DRM_FORMAT_BGRX8888, 0x2a, },
+
+ { DRM_FORMAT_XRGB2101010, 0x2e, },
+ { DRM_FORMAT_XBGR2101010, 0x2f, },
+
+ { DRM_FORMAT_YUYV, 0x3e, },
+ { DRM_FORMAT_UYVY, 0x3f, },
+
+ { DRM_FORMAT_NV12, 0x3d, },
+};
+
+static void dispc_plane_set_pixel_format(struct dispc_device *dispc,
+ u32 hw_plane, u32 fourcc)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (dispc_color_formats[i].fourcc == fourcc) {
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES,
+ dispc_color_formats[i].dss_code,
+ 6, 1);
+ return;
+ }
+ }
+
+ WARN_ON(1);
+}
+
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len)
+{
+ WARN_ON(!dispc->fourccs);
+
+ *len = dispc->num_fourccs;
+
+ return dispc->fourccs;
+}
+
+static s32 pixinc(int pixels, u8 ps)
+{
+ if (pixels == 1)
+ return 1;
+ else if (pixels > 1)
+ return 1 + (pixels - 1) * ps;
+ else if (pixels < 0)
+ return 1 - (-pixels + 1) * ps;
+
+ WARN_ON(1);
+ return 0;
+}
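+
+/*
+ * Examples: pixinc(1, 4) == 1 steps through consecutive 32-bit pixels;
+ * pixinc(2, 4) == 1 + (2 - 1) * 4 == 5 skips every other one.
+ */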
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ bool need_scaling = state->src_w >> 16 != state->crtc_w ||
+ state->src_h >> 16 != state->crtc_h;
+ struct dispc_scaling_params scaling;
+ int ret;
+
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ if (!dispc_find_csc(state->color_encoding,
+ state->color_range)) {
+ dev_dbg(dispc->dev,
+ "%s: Unsupported CSC (%u,%u) for HW plane %u\n",
+ __func__, state->color_encoding,
+ state->color_range, hw_plane);
+ return -EINVAL;
+ }
+ }
+
+ if (need_scaling) {
+ if (lite) {
+ dev_dbg(dispc->dev,
+ "%s: Lite plane %u can't scale %ux%u!=%ux%u\n",
+ __func__, hw_plane,
+ state->src_w >> 16, state->src_h >> 16,
+ state->crtc_w, state->crtc_h);
+ return -EINVAL;
+ }
+ ret = dispc_vid_calc_scaling(dispc, state, &scaling, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static
+dma_addr_t dispc_plane_state_paddr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ gem = drm_fb_cma_get_gem_obj(state->fb, 0);
+
+ return gem->paddr + fb->offsets[0] + x * fb->format->cpp[0] +
+ y * fb->pitches[0];
+}
+
+static
+dma_addr_t dispc_plane_state_p_uv_addr(const struct drm_plane_state *state)
+{
+ struct drm_framebuffer *fb = state->fb;
+ struct drm_gem_cma_object *gem;
+ u32 x = state->src_x >> 16;
+ u32 y = state->src_y >> 16;
+
+ if (WARN_ON(state->fb->format->num_planes != 2))
+ return 0;
+
+ gem = drm_fb_cma_get_gem_obj(fb, 1);
+
+ return gem->paddr + fb->offsets[1] +
+ (x * fb->format->cpp[1] / fb->format->hsub) +
+ (y * fb->pitches[1] / fb->format->vsub);
+}
+
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport)
+{
+ bool lite = dispc->feat->vid_lite[hw_plane];
+ u32 fourcc = state->fb->format->format;
+ u16 cpp = state->fb->format->cpp[0];
+ u32 fb_width = state->fb->pitches[0] / cpp;
+ dma_addr_t paddr = dispc_plane_state_paddr(state);
+ struct dispc_scaling_params scale;
+
+ dispc_vid_calc_scaling(dispc, state, &scale, lite);
+
+ dispc_plane_set_pixel_format(dispc, hw_plane, fourcc);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_0, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_0, (u64)paddr >> 32);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_1, paddr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)paddr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE,
+ (scale.in_w - 1) | ((scale.in_h - 1) << 16));
+
+ /* For YUV422 format we use the macropixel size for pixel inc */
+ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY)
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp * 2));
+ else
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PIXEL_INC,
+ pixinc(scale.xinc, cpp));
+
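+	/*
+	 * ROW_INC steps from the last fetched pixel of one line to the
+	 * first fetched pixel of the next, skipping yinc - 1 full lines
+	 * when decimating.
+	 */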
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC,
+ pixinc(1 + (scale.yinc * fb_width -
+ scale.xinc * scale.in_w),
+ cpp));
+
+ if (state->fb->format->num_planes == 2) {
+ u16 cpp_uv = state->fb->format->cpp[1];
+ u32 fb_width_uv = state->fb->pitches[1] / cpp_uv;
+ dma_addr_t p_uv_addr = dispc_plane_state_p_uv_addr(state);
+
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_0, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_0, (u64)p_uv_addr >> 32);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_1, p_uv_addr & 0xffffffff);
+ dispc_vid_write(dispc, hw_plane,
+ DISPC_VID_BA_UV_EXT_1, (u64)p_uv_addr >> 32);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_ROW_INC_UV,
+ pixinc(1 + (scale.yinc * fb_width_uv -
+ scale.xinc * scale.in_w_uv),
+ cpp_uv));
+ }
+
+ if (!lite) {
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE,
+ (state->crtc_w - 1) |
+ ((state->crtc_h - 1) << 16));
+
+ dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc);
+ }
+
+ /* enable YUV->RGB color conversion */
+ if (dispc_fourcc_is_yuv(fourcc)) {
+ dispc_vid_csc_setup(dispc, hw_plane, state);
+ dispc_vid_csc_enable(dispc, hw_plane, true);
+ } else {
+ dispc_vid_csc_enable(dispc, hw_plane, false);
+ }
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA,
+ 0xFF & (state->alpha >> 8));
+
+ if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI)
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 28, 28);
+ else
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 28, 28);
+
+ return 0;
+}
+
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable)
+{
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0);
+
+ return 0;
+}
+
+static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane)
+{
+ return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0);
+}
+
+static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_vid_set_buf_threshold(struct dispc_device *dispc,
+ u32 hw_plane, u32 low, u32 high)
+{
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD,
+ FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0));
+}
+
+static void dispc_k2g_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+		/*
+		 * Prefetch up to the FIFO high threshold to minimize the
+		 * possibility of underflows. Note that this means the PRELOAD
+		 * register is ignored.
+		 */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1,
+ 19, 19);
+ }
+}
+
+static void dispc_k3_plane_init(struct dispc_device *dispc)
+{
+ unsigned int hw_plane;
+ u32 cba_lo_pri = 1;
+ u32 cba_hi_pri = 0;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0);
+ REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3);
+
+ /* MFLAG_CTRL = ENABLED */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0);
+ /* MFLAG_START = MFLAGNORMALSTARTMODE */
+ REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6);
+
+ for (hw_plane = 0; hw_plane < dispc->feat->num_planes; hw_plane++) {
+ u32 size = dispc_vid_get_fifo_size(dispc, hw_plane);
+ u32 thr_low, thr_high;
+ u32 mflag_low, mflag_high;
+ u32 preload;
+
+ thr_high = size - 1;
+ thr_low = size / 2;
+
+ mflag_high = size * 2 / 3;
+ mflag_low = size / 3;
+
+ preload = thr_low;
+
+ dev_dbg(dispc->dev,
+ "%s: bufsize %u, buf_threshold %u/%u, mflag threshold %u/%u preload %u\n",
+ dispc->feat->vid_name[hw_plane],
+ size,
+ thr_high, thr_low,
+ mflag_high, mflag_low,
+ preload);
+
+ dispc_vid_set_buf_threshold(dispc, hw_plane,
+ thr_low, thr_high);
+ dispc_vid_set_mflag_threshold(dispc, hw_plane,
+ mflag_low, mflag_high);
+
+ dispc_vid_write(dispc, hw_plane, DISPC_VID_PRELOAD, preload);
+
+ /* Prefetch up to the PRELOAD value */
+ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0,
+ 19, 19);
+ }
+}
+
+static void dispc_plane_init(struct dispc_device *dispc)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_plane_init(dispc);
+ break;
+ case DISPC_AM65X:
+ case DISPC_J721E:
+ dispc_k3_plane_init(dispc);
+ break;
+ default:
+ WARN_ON(1);
+ }
+}
+
+static void dispc_vp_init(struct dispc_device *dispc)
+{
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s()\n", __func__);
+
+ /* Enable the gamma Shadow bit-field for all VPs */
+ for (i = 0; i < dispc->feat->num_vps; i++)
+ VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2);
+}
+
+static void dispc_initial_config(struct dispc_device *dispc)
+{
+ dispc_plane_init(dispc);
+ dispc_vp_init(dispc);
+
+ /* Note: Hardcoded DPI routing on J721E for now */
+ if (dispc->feat->subrev == DISPC_J721E) {
+ dispc_write(dispc, DISPC_CONNECTIONS,
+ FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */
+ FLD_VAL(8, 7, 4) /* VP3 to DPI1 */
+ );
+ }
+}
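+
+/*
+ * Informative note on the DISPC_CONNECTIONS write above: FLD_VAL(val,
+ * high, low) places val into bits high..low, so bits 3:0 are set to 2
+ * (VP1 to DPI0) and bits 7:4 to 8 (VP3 to DPI1). The routing values
+ * themselves presumably follow the J721E TRM encoding.
+ */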
+
+static void dispc_k2g_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_K2G_GAMMA_TABLE,
+ v);
+ }
+}
+
+static void dispc_am65x_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_8BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ v |= i << 24;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
+
+static void dispc_j721e_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d\n", __func__, hw_videoport);
+
+ if (WARN_ON(dispc->feat->vp_feat.color.gamma_type != TIDSS_GAMMA_10BIT))
+ return;
+
+ for (i = 0; i < hwlen; ++i) {
+ u32 v = table[i];
+
+ if (i == 0)
+ v |= 1 << 31;
+
+ dispc_vp_write(dispc, hw_videoport, DISPC_VP_GAMMA_TABLE, v);
+ }
+}
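+
+/*
+ * Informative summary of the three gamma variants above: K2G and AM65x
+ * use 8-bit entries and encode the table index into bits 31:24 of each
+ * write, while J721E uses 10-bit entries with an auto-incremented
+ * index; setting bit 31 on the first J721E write appears to reset that
+ * index (an assumption based on this code, not verified against the
+ * TRM).
+ */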
+
+static void dispc_vp_write_gamma_table(struct dispc_device *dispc,
+ u32 hw_videoport)
+{
+ switch (dispc->feat->subrev) {
+ case DISPC_K2G:
+ dispc_k2g_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_AM65X:
+ dispc_am65x_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ case DISPC_J721E:
+ dispc_j721e_vp_write_gamma_table(dispc, hw_videoport);
+ break;
+ default:
+ WARN_ON(1);
+ break;
+ }
+}
+
+static const struct drm_color_lut dispc_vp_gamma_default_lut[] = {
+ { .red = 0, .green = 0, .blue = 0, },
+ { .red = U16_MAX, .green = U16_MAX, .blue = U16_MAX, },
+};
+
+static void dispc_vp_set_gamma(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_color_lut *lut,
+ unsigned int length)
+{
+ u32 *table = dispc->vp_data[hw_videoport].gamma_table;
+ u32 hwlen = dispc->feat->vp_feat.color.gamma_size;
+ u32 hwbits;
+ unsigned int i;
+
+ dev_dbg(dispc->dev, "%s: hw_videoport %d, lut len %u, hw len %u\n",
+ __func__, hw_videoport, length, hwlen);
+
+ if (dispc->feat->vp_feat.color.gamma_type == TIDSS_GAMMA_10BIT)
+ hwbits = 10;
+ else
+ hwbits = 8;
+
+ if (!lut || length < 2) {
+ lut = dispc_vp_gamma_default_lut;
+ length = ARRAY_SIZE(dispc_vp_gamma_default_lut);
+ }
+
+ for (i = 0; i < length - 1; ++i) {
+ unsigned int first = i * (hwlen - 1) / (length - 1);
+ unsigned int last = (i + 1) * (hwlen - 1) / (length - 1);
+ unsigned int w = last - first;
+ u16 r, g, b;
+ unsigned int j;
+
+ if (w == 0)
+ continue;
+
+ for (j = 0; j <= w; j++) {
+ r = (lut[i].red * (w - j) + lut[i + 1].red * j) / w;
+ g = (lut[i].green * (w - j) + lut[i + 1].green * j) / w;
+ b = (lut[i].blue * (w - j) + lut[i + 1].blue * j) / w;
+
+ r >>= 16 - hwbits;
+ g >>= 16 - hwbits;
+ b >>= 16 - hwbits;
+
+ table[first + j] = (r << (hwbits * 2)) |
+ (g << hwbits) | b;
+ }
+ }
+
+ dispc_vp_write_gamma_table(dispc, hw_videoport);
+}
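+
+/*
+ * Worked example of the interpolation above (informative): with the
+ * two-entry default LUT and, say, hwlen = 256, i = 0 gives first = 0,
+ * last = 255, w = 255, so the inner loop writes a linear ramp from
+ * black to white; a 256-entry LUT on 256-entry hardware degenerates to
+ * w = 1 per segment, i.e. a direct copy of the LUT.
+ */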
+
+static s16 dispc_S31_32_to_s2_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x200);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x1FF);
+
+ return ret;
+}
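+
+/*
+ * Informative example: drm_color_ctm coefficients are S31.32 in
+ * sign-magnitude form (bit 63 is the sign, bits 62:0 the magnitude).
+ * Dropping 24 of the 32 fraction bits keeps 8, so 1.0 (1ULL << 32)
+ * becomes 0x100 and -0.5 ((1ULL << 63) | (1ULL << 31)) becomes -0x80,
+ * with saturation at the s2.8 clamp limits.
+ */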
+
+static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s2_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s2_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s2_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s2_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s2_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s2_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s2_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s2_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]);
+}
+
+#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \
+ FLD_VAL(xB, 31, 22))
+
+static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc,
+ u32 *regval)
+{
+ regval[0] = CVAL(csc->m[CSC_BB], csc->m[CSC_BG], csc->m[CSC_BR]);
+ regval[1] = CVAL(csc->m[CSC_GB], csc->m[CSC_GG], csc->m[CSC_GR]);
+ regval[2] = CVAL(csc->m[CSC_RB], csc->m[CSC_RG], csc->m[CSC_RR]);
+}
+
+#undef CVAL
+
+static void dispc_k2g_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_cpr_coef_reg[] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ /* K2G CPR is packed to three registers. */
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ dispc_k2g_vp_csc_cpr_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(dispc_vp_cpr_coef_reg); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_cpr_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 cprenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef cpr;
+
+ dispc_k2g_cpr_from_ctm(ctm, &cpr);
+ dispc_k2g_vp_write_csc(dispc, hw_videoport, &cpr);
+ cprenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ cprenable, 15, 15);
+}
+
+static s16 dispc_S31_32_to_s3_8(s64 coef)
+{
+ u64 sign_bit = 1ULL << 63;
+ u64 cbits = (u64)coef;
+ s16 ret;
+
+ if (cbits & sign_bit)
+ ret = -clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x400);
+ else
+ ret = clamp_val(((cbits & ~sign_bit) >> 24), 0, 0x3FF);
+
+ return ret;
+}
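+
+/*
+ * Informative: this is the same conversion as dispc_S31_32_to_s2_8()
+ * above, but clamped to the wider s3.8 range (-0x400..0x3FF) used by
+ * the K3 color conversion coefficients.
+ */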
+
+static void dispc_csc_from_ctm(const struct drm_color_ctm *ctm,
+ struct dispc_csc_coef *cpr)
+{
+ memset(cpr, 0, sizeof(*cpr));
+
+ cpr->to_regval = dispc_csc_cpr_regval;
+ cpr->m[CSC_RR] = dispc_S31_32_to_s3_8(ctm->matrix[0]);
+ cpr->m[CSC_RG] = dispc_S31_32_to_s3_8(ctm->matrix[1]);
+ cpr->m[CSC_RB] = dispc_S31_32_to_s3_8(ctm->matrix[2]);
+ cpr->m[CSC_GR] = dispc_S31_32_to_s3_8(ctm->matrix[3]);
+ cpr->m[CSC_GG] = dispc_S31_32_to_s3_8(ctm->matrix[4]);
+ cpr->m[CSC_GB] = dispc_S31_32_to_s3_8(ctm->matrix[5]);
+ cpr->m[CSC_BR] = dispc_S31_32_to_s3_8(ctm->matrix[6]);
+ cpr->m[CSC_BG] = dispc_S31_32_to_s3_8(ctm->matrix[7]);
+ cpr->m[CSC_BB] = dispc_S31_32_to_s3_8(ctm->matrix[8]);
+}
+
+static void dispc_k3_vp_write_csc(struct dispc_device *dispc, u32 hw_videoport,
+ const struct dispc_csc_coef *csc)
+{
+ static const u16 dispc_vp_csc_coef_reg[DISPC_CSC_REGVAL_LEN] = {
+ DISPC_VP_CSC_COEF0, DISPC_VP_CSC_COEF1, DISPC_VP_CSC_COEF2,
+ DISPC_VP_CSC_COEF3, DISPC_VP_CSC_COEF4, DISPC_VP_CSC_COEF5,
+ DISPC_VP_CSC_COEF6, DISPC_VP_CSC_COEF7,
+ };
+ u32 regval[DISPC_CSC_REGVAL_LEN];
+ unsigned int i;
+
+ csc->to_regval(csc, regval);
+
+ for (i = 0; i < ARRAY_SIZE(regval); i++)
+ dispc_vp_write(dispc, hw_videoport, dispc_vp_csc_coef_reg[i],
+ regval[i]);
+}
+
+static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport,
+ struct drm_color_ctm *ctm)
+{
+ u32 colorconvenable = 0;
+
+ if (ctm) {
+ struct dispc_csc_coef csc;
+
+ dispc_csc_from_ctm(ctm, &csc);
+ dispc_k3_vp_write_csc(dispc, hw_videoport, &csc);
+ colorconvenable = 1;
+ }
+
+ VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG,
+ colorconvenable, 24, 24);
+}
+
+static void dispc_vp_set_color_mgmt(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_crtc_state *state,
+ bool newmodeset)
+{
+ struct drm_color_lut *lut = NULL;
+ struct drm_color_ctm *ctm = NULL;
+ unsigned int length = 0;
+
+ if (!(state->color_mgmt_changed || newmodeset))
+ return;
+
+ if (state->gamma_lut) {
+ lut = (struct drm_color_lut *)state->gamma_lut->data;
+ length = state->gamma_lut->length / sizeof(*lut);
+ }
+
+ dispc_vp_set_gamma(dispc, hw_videoport, lut, length);
+
+ if (state->ctm)
+ ctm = (struct drm_color_ctm *)state->ctm->data;
+
+ if (dispc->feat->subrev == DISPC_K2G)
+ dispc_k2g_vp_set_ctm(dispc, hw_videoport, ctm);
+ else
+ dispc_k3_vp_set_ctm(dispc, hw_videoport, ctm);
+}
+
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset)
+{
+ dispc_vp_set_default_color(dispc, hw_videoport, 0);
+ dispc_vp_set_color_mgmt(dispc, hw_videoport, state, newmodeset);
+}
+
+int dispc_runtime_suspend(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "suspend\n");
+
+ dispc->is_enabled = false;
+
+ clk_disable_unprepare(dispc->fclk);
+
+ return 0;
+}
+
+int dispc_runtime_resume(struct dispc_device *dispc)
+{
+ dev_dbg(dispc->dev, "resume\n");
+
+ clk_prepare_enable(dispc->fclk);
+
+ if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0)
+ dev_warn(dispc->dev, "DSS FUNC RESET not done!\n");
+
+ dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n",
+ dispc_read(dispc, DSS_REVISION));
+
+ dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 1, 1),
+ REG_GET(dispc, DSS_SYSSTATUS, 2, 2),
+ REG_GET(dispc, DSS_SYSSTATUS, 3, 3));
+
+ if (dispc->feat->subrev == DISPC_AM65X)
+ dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 5, 5),
+ REG_GET(dispc, DSS_SYSSTATUS, 6, 6),
+ REG_GET(dispc, DSS_SYSSTATUS, 7, 7));
+
+ dev_dbg(dispc->dev, "DISPC IDLE %d\n",
+ REG_GET(dispc, DSS_SYSSTATUS, 9, 9));
+
+ dispc_initial_config(dispc);
+
+ dispc->is_enabled = true;
+
+ tidss_irq_resume(dispc->tidss);
+
+ return 0;
+}
+
+void dispc_remove(struct tidss_device *tidss)
+{
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ tidss->dispc = NULL;
+}
+
+static int dispc_iomap_resource(struct platform_device *pdev, const char *name,
+ void __iomem **base)
+{
+ struct resource *res;
+ void __iomem *b;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ dev_err(&pdev->dev, "cannot get mem resource '%s'\n", name);
+ return -EINVAL;
+ }
+
+ b = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(b)) {
+ dev_err(&pdev->dev, "cannot ioremap resource '%s'\n", name);
+ return PTR_ERR(b);
+ }
+
+ *base = b;
+
+ return 0;
+}
+
+static int dispc_init_am65x_oldi_io_ctrl(struct device *dev,
+ struct dispc_device *dispc)
+{
+ dispc->oldi_io_ctrl =
+ syscon_regmap_lookup_by_phandle(dev->of_node,
+ "ti,am65x-oldi-io-ctrl");
+ if (PTR_ERR(dispc->oldi_io_ctrl) == -ENODEV) {
+ dispc->oldi_io_ctrl = NULL;
+ } else if (IS_ERR(dispc->oldi_io_ctrl)) {
+ dev_err(dev, "%s: syscon_regmap_lookup_by_phandle failed %ld\n",
+ __func__, PTR_ERR(dispc->oldi_io_ctrl));
+ return PTR_ERR(dispc->oldi_io_ctrl);
+ }
+ return 0;
+}
+
+int dispc_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dispc_device *dispc;
+ const struct dispc_features *feat;
+ unsigned int i, num_fourccs;
+ int r = 0;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ feat = tidss->feat;
+
+ if (feat->subrev != DISPC_K2G) {
+ r = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (r)
+ dev_warn(dev, "cannot set DMA masks to 48-bit\n");
+ }
+
+ dispc = devm_kzalloc(dev, sizeof(*dispc), GFP_KERNEL);
+ if (!dispc)
+ return -ENOMEM;
+
+ dispc->fourccs = devm_kcalloc(dev, ARRAY_SIZE(dispc_color_formats),
+ sizeof(*dispc->fourccs), GFP_KERNEL);
+ if (!dispc->fourccs)
+ return -ENOMEM;
+
+ num_fourccs = 0;
+ for (i = 0; i < ARRAY_SIZE(dispc_color_formats); ++i) {
+ if (feat->errata.i2000 &&
+ dispc_fourcc_is_yuv(dispc_color_formats[i].fourcc))
+ continue;
+ dispc->fourccs[num_fourccs++] = dispc_color_formats[i].fourcc;
+ }
+ dispc->num_fourccs = num_fourccs;
+ dispc->tidss = tidss;
+ dispc->dev = dev;
+ dispc->feat = feat;
+
+ dispc_common_regmap = dispc->feat->common_regs;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->common,
+ &dispc->base_common);
+ if (r)
+ return r;
+
+ for (i = 0; i < dispc->feat->num_planes; i++) {
+ r = dispc_iomap_resource(pdev, dispc->feat->vid_name[i],
+ &dispc->base_vid[i]);
+ if (r)
+ return r;
+ }
+
+ for (i = 0; i < dispc->feat->num_vps; i++) {
+ u32 gamma_size = dispc->feat->vp_feat.color.gamma_size;
+ u32 *gamma_table;
+ struct clk *clk;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->ovr_name[i],
+ &dispc->base_ovr[i]);
+ if (r)
+ return r;
+
+ r = dispc_iomap_resource(pdev, dispc->feat->vp_name[i],
+ &dispc->base_vp[i]);
+ if (r)
+ return r;
+
+ clk = devm_clk_get(dev, dispc->feat->vpclk_name[i]);
+ if (IS_ERR(clk)) {
+ dev_err(dev, "%s: Failed to get clk %s:%ld\n", __func__,
+ dispc->feat->vpclk_name[i], PTR_ERR(clk));
+ return PTR_ERR(clk);
+ }
+ dispc->vp_clk[i] = clk;
+
+ gamma_table = devm_kmalloc_array(dev, gamma_size,
+ sizeof(*gamma_table),
+ GFP_KERNEL);
+ if (!gamma_table)
+ return -ENOMEM;
+ dispc->vp_data[i].gamma_table = gamma_table;
+ }
+
+ if (feat->subrev == DISPC_AM65X) {
+ r = dispc_init_am65x_oldi_io_ctrl(dev, dispc);
+ if (r)
+ return r;
+ }
+
+ dispc->fclk = devm_clk_get(dev, "fck");
+ if (IS_ERR(dispc->fclk)) {
+ dev_err(dev, "%s: Failed to get fclk: %ld\n",
+ __func__, PTR_ERR(dispc->fclk));
+ return PTR_ERR(dispc->fclk);
+ }
+ dev_dbg(dev, "DSS fclk %lu Hz\n", clk_get_rate(dispc->fclk));
+
+ of_property_read_u32(dispc->dev->of_node, "max-memory-bandwidth",
+ &dispc->memory_bandwidth_limit);
+
+ tidss->dispc = dispc;
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_dispc.h b/drivers/gpu/drm/tidss/tidss_dispc.h
new file mode 100644
index 000000000000..a4a68249e44b
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_DISPC_H__
+#define __TIDSS_DISPC_H__
+
+#include "tidss_drv.h"
+
+struct dispc_device;
+
+struct drm_crtc_state;
+
+enum tidss_gamma_type { TIDSS_GAMMA_8BIT, TIDSS_GAMMA_10BIT };
+
+struct tidss_vp_feat {
+ struct tidss_vp_color_feat {
+ u32 gamma_size;
+ enum tidss_gamma_type gamma_type;
+ bool has_ctm;
+ } color;
+};
+
+struct tidss_plane_feat {
+ struct tidss_plane_color_feat {
+ u32 encodings;
+ u32 ranges;
+ enum drm_color_encoding default_encoding;
+ enum drm_color_range default_range;
+ } color;
+ struct tidss_plane_blend_feat {
+ bool global_alpha;
+ } blend;
+};
+
+struct dispc_features_scaling {
+ u32 in_width_max_5tap_rgb;
+ u32 in_width_max_3tap_rgb;
+ u32 in_width_max_5tap_yuv;
+ u32 in_width_max_3tap_yuv;
+ u32 upscale_limit;
+ u32 downscale_limit_5tap;
+ u32 downscale_limit_3tap;
+ u32 xinc_max;
+};
+
+struct dispc_errata {
+ bool i2000; /* DSS Does Not Support YUV Pixel Data Formats */
+};
+
+enum dispc_vp_bus_type {
+ DISPC_VP_DPI, /* DPI output */
+ DISPC_VP_OLDI, /* OLDI (LVDS) output */
+ DISPC_VP_INTERNAL, /* SoC internal routing */
+ DISPC_VP_MAX_BUS_TYPE,
+};
+
+enum dispc_dss_subrevision {
+ DISPC_K2G,
+ DISPC_AM65X,
+ DISPC_J721E,
+};
+
+struct dispc_features {
+ int min_pclk_khz;
+ int max_pclk_khz[DISPC_VP_MAX_BUS_TYPE];
+
+ struct dispc_features_scaling scaling;
+
+ enum dispc_dss_subrevision subrev;
+
+ const char *common;
+ const u16 *common_regs;
+ u32 num_vps;
+ const char *vp_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *ovr_name[TIDSS_MAX_PORTS]; /* Should match dt reg names */
+ const char *vpclk_name[TIDSS_MAX_PORTS]; /* Should match dt clk names */
+ const enum dispc_vp_bus_type vp_bus_type[TIDSS_MAX_PORTS];
+ struct tidss_vp_feat vp_feat;
+ u32 num_planes;
+ const char *vid_name[TIDSS_MAX_PLANES]; /* Should match dt reg names */
+ bool vid_lite[TIDSS_MAX_PLANES];
+ u32 vid_order[TIDSS_MAX_PLANES];
+
+ struct dispc_errata errata;
+};
+
+extern const struct dispc_features dispc_k2g_feats;
+extern const struct dispc_features dispc_am65x_feats;
+extern const struct dispc_features dispc_j721e_feats;
+
+void dispc_set_irqenable(struct dispc_device *dispc, dispc_irq_t mask);
+dispc_irq_t dispc_read_and_clear_irqstatus(struct dispc_device *dispc);
+
+void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane,
+ u32 hw_videoport, u32 x, u32 y, u32 layer);
+void dispc_ovr_enable_layer(struct dispc_device *dispc,
+ u32 hw_videoport, u32 layer, bool enable);
+
+void dispc_vp_prepare(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport);
+bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_bus_check(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state);
+enum drm_mode_status dispc_vp_mode_valid(struct dispc_device *dispc,
+ u32 hw_videoport,
+ const struct drm_display_mode *mode);
+int dispc_vp_enable_clk(struct dispc_device *dispc, u32 hw_videoport);
+void dispc_vp_disable_clk(struct dispc_device *dispc, u32 hw_videoport);
+int dispc_vp_set_clk_rate(struct dispc_device *dispc, u32 hw_videoport,
+ unsigned long rate);
+void dispc_vp_setup(struct dispc_device *dispc, u32 hw_videoport,
+ const struct drm_crtc_state *state, bool newmodeset);
+
+int dispc_runtime_suspend(struct dispc_device *dispc);
+int dispc_runtime_resume(struct dispc_device *dispc);
+
+int dispc_plane_check(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane,
+ const struct drm_plane_state *state,
+ u32 hw_videoport);
+int dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable);
+const u32 *dispc_plane_formats(struct dispc_device *dispc, unsigned int *len);
+
+int dispc_init(struct tidss_device *tidss);
+void dispc_remove(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
new file mode 100644
index 000000000000..88a83a41b6e3
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2016-2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#ifndef __TIDSS_DISPC_REGS_H
+#define __TIDSS_DISPC_REGS_H
+
+enum dispc_common_regs {
+ NOT_APPLICABLE_OFF = 0,
+ DSS_REVISION_OFF,
+ DSS_SYSCONFIG_OFF,
+ DSS_SYSSTATUS_OFF,
+ DISPC_IRQ_EOI_OFF,
+ DISPC_IRQSTATUS_RAW_OFF,
+ DISPC_IRQSTATUS_OFF,
+ DISPC_IRQENABLE_SET_OFF,
+ DISPC_IRQENABLE_CLR_OFF,
+ DISPC_VID_IRQENABLE_OFF,
+ DISPC_VID_IRQSTATUS_OFF,
+ DISPC_VP_IRQENABLE_OFF,
+ DISPC_VP_IRQSTATUS_OFF,
+ WB_IRQENABLE_OFF,
+ WB_IRQSTATUS_OFF,
+ DISPC_GLOBAL_MFLAG_ATTRIBUTE_OFF,
+ DISPC_GLOBAL_OUTPUT_ENABLE_OFF,
+ DISPC_GLOBAL_BUFFER_OFF,
+ DSS_CBA_CFG_OFF,
+ DISPC_DBG_CONTROL_OFF,
+ DISPC_DBG_STATUS_OFF,
+ DISPC_CLKGATING_DISABLE_OFF,
+ DISPC_SECURE_DISABLE_OFF,
+ FBDC_REVISION_1_OFF,
+ FBDC_REVISION_2_OFF,
+ FBDC_REVISION_3_OFF,
+ FBDC_REVISION_4_OFF,
+ FBDC_REVISION_5_OFF,
+ FBDC_REVISION_6_OFF,
+ FBDC_COMMON_CONTROL_OFF,
+ FBDC_CONSTANT_COLOR_0_OFF,
+ FBDC_CONSTANT_COLOR_1_OFF,
+ DISPC_CONNECTIONS_OFF,
+ DISPC_MSS_VP1_OFF,
+ DISPC_MSS_VP3_OFF,
+ DISPC_COMMON_REG_TABLE_LEN,
+};
+
+/*
+ * dispc_common_regmap should be defined as a const u16 * pointing to a
+ * valid DSS common register map for the platform before the macros
+ * below can be used.
+ */
+
+#define REG(r) (dispc_common_regmap[r ## _OFF])
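+
+/*
+ * Informative example of the macro above: DSS_REVISION below expands to
+ * dispc_common_regmap[DSS_REVISION_OFF], i.e. each register name
+ * resolves to the per-platform offset at run time.
+ */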
+
+#define DSS_REVISION REG(DSS_REVISION)
+#define DSS_SYSCONFIG REG(DSS_SYSCONFIG)
+#define DSS_SYSSTATUS REG(DSS_SYSSTATUS)
+#define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI)
+#define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW)
+#define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS)
+#define DISPC_IRQENABLE_SET REG(DISPC_IRQENABLE_SET)
+#define DISPC_IRQENABLE_CLR REG(DISPC_IRQENABLE_CLR)
+#define DISPC_VID_IRQENABLE(n) (REG(DISPC_VID_IRQENABLE) + (n) * 4)
+#define DISPC_VID_IRQSTATUS(n) (REG(DISPC_VID_IRQSTATUS) + (n) * 4)
+#define DISPC_VP_IRQENABLE(n) (REG(DISPC_VP_IRQENABLE) + (n) * 4)
+#define DISPC_VP_IRQSTATUS(n) (REG(DISPC_VP_IRQSTATUS) + (n) * 4)
+#define WB_IRQENABLE REG(WB_IRQENABLE)
+#define WB_IRQSTATUS REG(WB_IRQSTATUS)
+
+#define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE)
+#define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE)
+#define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER)
+#define DSS_CBA_CFG REG(DSS_CBA_CFG)
+#define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL)
+#define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS)
+#define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE)
+#define DISPC_SECURE_DISABLE REG(DISPC_SECURE_DISABLE)
+
+#define FBDC_REVISION_1 REG(FBDC_REVISION_1)
+#define FBDC_REVISION_2 REG(FBDC_REVISION_2)
+#define FBDC_REVISION_3 REG(FBDC_REVISION_3)
+#define FBDC_REVISION_4 REG(FBDC_REVISION_4)
+#define FBDC_REVISION_5 REG(FBDC_REVISION_5)
+#define FBDC_REVISION_6 REG(FBDC_REVISION_6)
+#define FBDC_COMMON_CONTROL REG(FBDC_COMMON_CONTROL)
+#define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0)
+#define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1)
+#define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS)
+#define DISPC_MSS_VP1 REG(DISPC_MSS_VP1)
+#define DISPC_MSS_VP3 REG(DISPC_MSS_VP3)
+
+/* VID */
+
+#define DISPC_VID_ACCUH_0 0x0
+#define DISPC_VID_ACCUH_1 0x4
+#define DISPC_VID_ACCUH2_0 0x8
+#define DISPC_VID_ACCUH2_1 0xc
+#define DISPC_VID_ACCUV_0 0x10
+#define DISPC_VID_ACCUV_1 0x14
+#define DISPC_VID_ACCUV2_0 0x18
+#define DISPC_VID_ACCUV2_1 0x1c
+#define DISPC_VID_ATTRIBUTES 0x20
+#define DISPC_VID_ATTRIBUTES2 0x24
+#define DISPC_VID_BA_0 0x28
+#define DISPC_VID_BA_1 0x2c
+#define DISPC_VID_BA_UV_0 0x30
+#define DISPC_VID_BA_UV_1 0x34
+#define DISPC_VID_BUF_SIZE_STATUS 0x38
+#define DISPC_VID_BUF_THRESHOLD 0x3c
+#define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4)
+
+#define DISPC_VID_FIRH 0x5c
+#define DISPC_VID_FIRH2 0x60
+#define DISPC_VID_FIRV 0x64
+#define DISPC_VID_FIRV2 0x68
+
+#define DISPC_VID_FIR_COEFS_H0 0x6c
+#define DISPC_VID_FIR_COEF_H0(phase) (0x6c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H0_C 0x90
+#define DISPC_VID_FIR_COEF_H0_C(phase) (0x90 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_H12 0xb4
+#define DISPC_VID_FIR_COEF_H12(phase) (0xb4 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_H12_C 0xf4
+#define DISPC_VID_FIR_COEF_H12_C(phase) (0xf4 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V0 0x134
+#define DISPC_VID_FIR_COEF_V0(phase) (0x134 + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V0_C 0x158
+#define DISPC_VID_FIR_COEF_V0_C(phase) (0x158 + (phase) * 4)
+
+#define DISPC_VID_FIR_COEFS_V12 0x17c
+#define DISPC_VID_FIR_COEF_V12(phase) (0x17c + (phase) * 4)
+#define DISPC_VID_FIR_COEFS_V12_C 0x1bc
+#define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4)
+
+#define DISPC_VID_GLOBAL_ALPHA 0x1fc
+#define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */
+#define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */
+#define DISPC_VID_MFLAG_THRESHOLD 0x208
+#define DISPC_VID_PICTURE_SIZE 0x20c
+#define DISPC_VID_PIXEL_INC 0x210
+#define DISPC_VID_K2G_POSITION 0x214 /* K2G */
+#define DISPC_VID_PRELOAD 0x218
+#define DISPC_VID_ROW_INC 0x21c
+#define DISPC_VID_SIZE 0x220
+#define DISPC_VID_BA_EXT_0 0x22c
+#define DISPC_VID_BA_EXT_1 0x230
+#define DISPC_VID_BA_UV_EXT_0 0x234
+#define DISPC_VID_BA_UV_EXT_1 0x238
+#define DISPC_VID_CSC_COEF7 0x23c
+#define DISPC_VID_ROW_INC_UV 0x248
+#define DISPC_VID_CLUT 0x260
+#define DISPC_VID_SAFETY_ATTRIBUTES 0x2a0
+#define DISPC_VID_SAFETY_CAPT_SIGNATURE 0x2a4
+#define DISPC_VID_SAFETY_POSITION 0x2a8
+#define DISPC_VID_SAFETY_REF_SIGNATURE 0x2ac
+#define DISPC_VID_SAFETY_SIZE 0x2b0
+#define DISPC_VID_SAFETY_LFSR_SEED 0x2b4
+#define DISPC_VID_LUMAKEY 0x2b8
+#define DISPC_VID_DMA_BUFSIZE 0x2bc /* J721E */
+
+/* OVR */
+
+#define DISPC_OVR_CONFIG 0x0
+#define DISPC_OVR_VIRTVP 0x4 /* J721E */
+#define DISPC_OVR_DEFAULT_COLOR 0x8
+#define DISPC_OVR_DEFAULT_COLOR2 0xc
+#define DISPC_OVR_TRANS_COLOR_MAX 0x10
+#define DISPC_OVR_TRANS_COLOR_MAX2 0x14
+#define DISPC_OVR_TRANS_COLOR_MIN 0x18
+#define DISPC_OVR_TRANS_COLOR_MIN2 0x1c
+#define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4)
+#define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */
+
+/* VP */
+#define DISPC_VP_CONFIG 0x0
+#define DISPC_VP_CONTROL 0x4
+#define DISPC_VP_CSC_COEF0 0x8
+#define DISPC_VP_CSC_COEF1 0xc
+#define DISPC_VP_CSC_COEF2 0x10
+#define DISPC_VP_DATA_CYCLE_0 0x14
+#define DISPC_VP_DATA_CYCLE_1 0x18
+#define DISPC_VP_K2G_GAMMA_TABLE 0x20 /* K2G */
+#define DISPC_VP_K2G_IRQENABLE 0x3c /* K2G */
+#define DISPC_VP_K2G_IRQSTATUS 0x40 /* K2G */
+#define DISPC_VP_DATA_CYCLE_2 0x1c
+#define DISPC_VP_LINE_NUMBER 0x44
+#define DISPC_VP_POL_FREQ 0x4c
+#define DISPC_VP_SIZE_SCREEN 0x50
+#define DISPC_VP_TIMING_H 0x54
+#define DISPC_VP_TIMING_V 0x58
+#define DISPC_VP_CSC_COEF3 0x5c
+#define DISPC_VP_CSC_COEF4 0x60
+#define DISPC_VP_CSC_COEF5 0x64
+#define DISPC_VP_CSC_COEF6 0x68
+#define DISPC_VP_CSC_COEF7 0x6c
+#define DISPC_VP_SAFETY_ATTRIBUTES_0 0x70
+#define DISPC_VP_SAFETY_ATTRIBUTES_1 0x74
+#define DISPC_VP_SAFETY_ATTRIBUTES_2 0x78
+#define DISPC_VP_SAFETY_ATTRIBUTES_3 0x7c
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_0 0x90
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_1 0x94
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_2 0x98
+#define DISPC_VP_SAFETY_CAPT_SIGNATURE_3 0x9c
+#define DISPC_VP_SAFETY_POSITION_0 0xb0
+#define DISPC_VP_SAFETY_POSITION_1 0xb4
+#define DISPC_VP_SAFETY_POSITION_2 0xb8
+#define DISPC_VP_SAFETY_POSITION_3 0xbc
+#define DISPC_VP_SAFETY_REF_SIGNATURE_0 0xd0
+#define DISPC_VP_SAFETY_REF_SIGNATURE_1 0xd4
+#define DISPC_VP_SAFETY_REF_SIGNATURE_2 0xd8
+#define DISPC_VP_SAFETY_REF_SIGNATURE_3 0xdc
+#define DISPC_VP_SAFETY_SIZE_0 0xf0
+#define DISPC_VP_SAFETY_SIZE_1 0xf4
+#define DISPC_VP_SAFETY_SIZE_2 0xf8
+#define DISPC_VP_SAFETY_SIZE_3 0xfc
+#define DISPC_VP_SAFETY_LFSR_SEED 0x110
+#define DISPC_VP_GAMMA_TABLE 0x120
+#define DISPC_VP_DSS_OLDI_CFG 0x160
+#define DISPC_VP_DSS_OLDI_STATUS 0x164
+#define DISPC_VP_DSS_OLDI_LB 0x168
+#define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE 0x170 /* J721E */
+#define DISPC_VP_DSS_DMA_THREADSIZE_STATUS 0x174 /* J721E */
+
+/*
+ * OLDI IO_CTRL register offsets. On AM654 the registers are found in
+ * CTRL_MMR0, so the syscon regmap should map the 0x14-byte range from
+ * CTRLMMR0P1_OLDI_DAT0_IO_CTRL to CTRLMMR0P1_OLDI_CLK_IO_CTRL.
+ */
+#define OLDI_DAT0_IO_CTRL 0x00
+#define OLDI_DAT1_IO_CTRL 0x04
+#define OLDI_DAT2_IO_CTRL 0x08
+#define OLDI_DAT3_IO_CTRL 0x0C
+#define OLDI_CLK_IO_CTRL 0x10
+
+#define OLDI_PWRDN_TX BIT(8)
+
+#endif /* __TIDSS_DISPC_REGS_H */
diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c
new file mode 100644
index 000000000000..d95e4be2c7b9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.c
@@ -0,0 +1,285 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/console.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/pm_runtime.h>
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_irq.h>
+#include <drm/drm_probe_helper.h>
+
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_kms.h"
+#include "tidss_irq.h"
+
+/* Power management */
+
+int tidss_runtime_get(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_get_sync(tidss->dev);
+ WARN_ON(r < 0);
+ return r < 0 ? r : 0;
+}
+
+void tidss_runtime_put(struct tidss_device *tidss)
+{
+ int r;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ r = pm_runtime_put_sync(tidss->dev);
+ WARN_ON(r < 0);
+}
+
+static int __maybe_unused tidss_pm_runtime_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return dispc_runtime_suspend(tidss->dispc);
+}
+
+static int __maybe_unused tidss_pm_runtime_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+ int r;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ r = dispc_runtime_resume(tidss->dispc);
+ if (r)
+ return r;
+
+ return 0;
+}
+
+static int __maybe_unused tidss_suspend(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_suspend(&tidss->ddev);
+}
+
+static int __maybe_unused tidss_resume(struct device *dev)
+{
+ struct tidss_device *tidss = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ return drm_mode_config_helper_resume(&tidss->ddev);
+}
+
+#ifdef CONFIG_PM
+
+static const struct dev_pm_ops tidss_pm_ops = {
+ .runtime_suspend = tidss_pm_runtime_suspend,
+ .runtime_resume = tidss_pm_runtime_resume,
+ SET_SYSTEM_SLEEP_PM_OPS(tidss_suspend, tidss_resume)
+};
+
+#endif /* CONFIG_PM */
+
+/* DRM device Information */
+
+static void tidss_release(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ drm_kms_helper_poll_fini(ddev);
+
+ tidss_modeset_cleanup(tidss);
+
+ drm_dev_fini(ddev);
+
+ kfree(tidss);
+}
+
+DEFINE_DRM_GEM_CMA_FOPS(tidss_fops);
+
+static struct drm_driver tidss_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &tidss_fops,
+ .release = tidss_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .name = "tidss",
+ .desc = "TI Keystone DSS",
+ .date = "20180215",
+ .major = 1,
+ .minor = 0,
+
+ .irq_preinstall = tidss_irq_preinstall,
+ .irq_postinstall = tidss_irq_postinstall,
+ .irq_handler = tidss_irq_handler,
+ .irq_uninstall = tidss_irq_uninstall,
+};
+
+static int tidss_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss;
+ struct drm_device *ddev;
+ int ret;
+ int irq;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ /* Can't use devm_* since drm_device's lifetime may exceed dev's */
+ tidss = kzalloc(sizeof(*tidss), GFP_KERNEL);
+ if (!tidss)
+ return -ENOMEM;
+
+ ddev = &tidss->ddev;
+
+ ret = devm_drm_dev_init(&pdev->dev, ddev, &tidss_driver);
+ if (ret) {
+ kfree(tidss);
+ return ret;
+ }
+
+ tidss->dev = dev;
+ tidss->feat = of_device_get_match_data(dev);
+
+ platform_set_drvdata(pdev, tidss);
+
+ ddev->dev_private = tidss;
+
+ ret = dispc_init(tidss);
+ if (ret) {
+ dev_err(dev, "failed to initialize dispc: %d\n", ret);
+ return ret;
+ }
+
+ pm_runtime_enable(dev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call resume manually */
+ dispc_runtime_resume(tidss->dispc);
+#endif
+
+ ret = tidss_modeset_init(tidss);
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ dev_err(dev, "failed to init DRM/KMS (%d)\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ ret = irq;
+ goto err_runtime_suspend;
+ }
+
+ ret = drm_irq_install(ddev, irq);
+ if (ret) {
+ dev_err(dev, "drm_irq_install failed: %d\n", ret);
+ goto err_runtime_suspend;
+ }
+
+ drm_kms_helper_poll_init(ddev);
+
+ drm_mode_config_reset(ddev);
+
+ ret = drm_dev_register(ddev, 0);
+ if (ret) {
+ dev_err(dev, "failed to register DRM device\n");
+ goto err_irq_uninstall;
+ }
+
+ drm_fbdev_generic_setup(ddev, 32);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+
+err_irq_uninstall:
+ drm_irq_uninstall(ddev);
+
+err_runtime_suspend:
+#ifndef CONFIG_PM
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ return ret;
+}
+
+static int tidss_remove(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+ struct drm_device *ddev = &tidss->ddev;
+
+ dev_dbg(dev, "%s\n", __func__);
+
+ drm_dev_unregister(ddev);
+
+ drm_atomic_helper_shutdown(ddev);
+
+ drm_irq_uninstall(ddev);
+
+#ifndef CONFIG_PM
+ /* If we don't have PM, we need to call suspend manually */
+ dispc_runtime_suspend(tidss->dispc);
+#endif
+ pm_runtime_disable(dev);
+
+ /* devm allocated dispc goes away with the dev so mark it NULL */
+ dispc_remove(tidss);
+
+ dev_dbg(dev, "%s done\n", __func__);
+
+ return 0;
+}
+
+static void tidss_shutdown(struct platform_device *pdev)
+{
+ struct tidss_device *tidss = platform_get_drvdata(pdev);
+
+ drm_atomic_helper_shutdown(&tidss->ddev);
+}
+
+static const struct of_device_id tidss_of_table[] = {
+ { .compatible = "ti,k2g-dss", .data = &dispc_k2g_feats, },
+ { .compatible = "ti,am65x-dss", .data = &dispc_am65x_feats, },
+ { .compatible = "ti,j721e-dss", .data = &dispc_j721e_feats, },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, tidss_of_table);
+
+static struct platform_driver tidss_platform_driver = {
+ .probe = tidss_probe,
+ .remove = tidss_remove,
+ .shutdown = tidss_shutdown,
+ .driver = {
+ .name = "tidss",
+#ifdef CONFIG_PM
+ .pm = &tidss_pm_ops,
+#endif
+ .of_match_table = tidss_of_table,
+ .suppress_bind_attrs = true,
+ },
+};
+
+module_platform_driver(tidss_platform_driver);
+
+MODULE_AUTHOR("Tomi Valkeinen <[email protected]>");
+MODULE_DESCRIPTION("TI Keystone DSS Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/tidss/tidss_drv.h b/drivers/gpu/drm/tidss/tidss_drv.h
new file mode 100644
index 000000000000..e2aa6436ad18
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_drv.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_DRV_H__
+#define __TIDSS_DRV_H__
+
+#include <linux/spinlock.h>
+
+#define TIDSS_MAX_PORTS 4
+#define TIDSS_MAX_PLANES 4
+
+typedef u32 dispc_irq_t;
+
+struct tidss_device {
+ struct drm_device ddev; /* DRM device for DSS */
+ struct device *dev; /* Underlying DSS device */
+
+ const struct dispc_features *feat;
+ struct dispc_device *dispc;
+
+ unsigned int num_crtcs;
+ struct drm_crtc *crtcs[TIDSS_MAX_PORTS];
+
+ unsigned int num_planes;
+ struct drm_plane *planes[TIDSS_MAX_PLANES];
+
+ spinlock_t wait_lock; /* protects the irq masks */
+ dispc_irq_t irq_mask; /* enabled irqs in addition to wait_list */
+
+ struct drm_atomic_state *saved_state;
+};
+
+int tidss_runtime_get(struct tidss_device *tidss);
+void tidss_runtime_put(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.c b/drivers/gpu/drm/tidss/tidss_encoder.c
new file mode 100644
index 000000000000..83785b0a66a9
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <linux/export.h>
+
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include "tidss_crtc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+
+static int tidss_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct drm_device *ddev = encoder->dev;
+ struct tidss_crtc_state *tcrtc_state = to_tidss_crtc_state(crtc_state);
+ struct drm_display_info *di = &conn_state->connector->display_info;
+ struct drm_bridge *bridge;
+ bool bus_flags_set = false;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ /*
+ * Take the bus_flags from the first bridge that defines
+ * bridge timings, or from the connector's display_info if no
+ * bridge defines the timings.
+ */
+ drm_for_each_bridge_in_chain(encoder, bridge) {
+ if (!bridge->timings)
+ continue;
+
+ tcrtc_state->bus_flags = bridge->timings->input_bus_flags;
+ bus_flags_set = true;
+ break;
+ }
+
+ if (!di->bus_formats || di->num_bus_formats == 0) {
+ dev_err(ddev->dev, "%s: No bus_formats in connected display\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ // XXX any cleaner way to set bus format and flags?
+ tcrtc_state->bus_format = di->bus_formats[0];
+ if (!bus_flags_set)
+ tcrtc_state->bus_flags = di->bus_flags;
+
+ return 0;
+}
+
+static const struct drm_encoder_helper_funcs encoder_helper_funcs = {
+ .atomic_check = tidss_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs)
+{
+ struct drm_encoder *enc;
+ int ret;
+
+ enc = devm_kzalloc(tidss->dev, sizeof(*enc), GFP_KERNEL);
+ if (!enc)
+ return ERR_PTR(-ENOMEM);
+
+ enc->possible_crtcs = possible_crtcs;
+
+ ret = drm_encoder_init(&tidss->ddev, enc, &encoder_funcs,
+ encoder_type, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_encoder_helper_add(enc, &encoder_helper_funcs);
+
+ dev_dbg(tidss->dev, "Encoder create done\n");
+
+ return enc;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_encoder.h b/drivers/gpu/drm/tidss/tidss_encoder.h
new file mode 100644
index 000000000000..06854d66e7e6
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_encoder.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_ENCODER_H__
+#define __TIDSS_ENCODER_H__
+
+#include <drm/drm_encoder.h>
+
+struct tidss_device;
+
+struct drm_encoder *tidss_encoder_create(struct tidss_device *tidss,
+ u32 encoder_type, u32 possible_crtcs);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_irq.c b/drivers/gpu/drm/tidss/tidss_irq.c
new file mode 100644
index 000000000000..612c046738e5
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_print.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_irq.h"
+#include "tidss_plane.h"
+
+/* call with wait_lock and dispc runtime held */
+static void tidss_irq_update(struct tidss_device *tidss)
+{
+ assert_spin_locked(&tidss->wait_lock);
+
+ dispc_set_irqenable(tidss->dispc, tidss->irq_mask);
+}
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask |= DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_disable_vblank(struct drm_crtc *crtc)
+{
+ struct drm_device *ddev = crtc->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss->irq_mask &= ~(DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport));
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+irqreturn_t tidss_irq_handler(int irq, void *arg)
+{
+ struct drm_device *ddev = (struct drm_device *)arg;
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned int id;
+ dispc_irq_t irqstatus;
+
+ if (WARN_ON(!ddev->irq_enabled))
+ return IRQ_NONE;
+
+ irqstatus = dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ for (id = 0; id < tidss->num_crtcs; id++) {
+ struct drm_crtc *crtc = tidss->crtcs[id];
+ struct tidss_crtc *tcrtc = to_tidss_crtc(crtc);
+ u32 hw_videoport = tcrtc->hw_videoport;
+
+ if (irqstatus & (DSS_IRQ_VP_VSYNC_EVEN(hw_videoport) |
+ DSS_IRQ_VP_VSYNC_ODD(hw_videoport)))
+ tidss_crtc_vblank_irq(crtc);
+
+ if (irqstatus & (DSS_IRQ_VP_FRAME_DONE(hw_videoport)))
+ tidss_crtc_framedone_irq(crtc);
+
+ if (irqstatus & DSS_IRQ_VP_SYNC_LOST(hw_videoport))
+ tidss_crtc_error_irq(crtc, irqstatus);
+ }
+
+ if (irqstatus & DSS_IRQ_DEVICE_OCP_ERR)
+ dev_err_ratelimited(tidss->dev, "OCP error\n");
+
+ return IRQ_HANDLED;
+}
+
+void tidss_irq_resume(struct tidss_device *tidss)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+ tidss_irq_update(tidss);
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+}
+
+void tidss_irq_preinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ spin_lock_init(&tidss->wait_lock);
+
+ tidss_runtime_get(tidss);
+
+ dispc_set_irqenable(tidss->dispc, 0);
+ dispc_read_and_clear_irqstatus(tidss->dispc);
+
+ tidss_runtime_put(tidss);
+}
+
+int tidss_irq_postinstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+ unsigned long flags;
+ unsigned int i;
+
+ tidss_runtime_get(tidss);
+
+ spin_lock_irqsave(&tidss->wait_lock, flags);
+
+ tidss->irq_mask = DSS_IRQ_DEVICE_OCP_ERR;
+
+ for (i = 0; i < tidss->num_crtcs; ++i) {
+ struct tidss_crtc *tcrtc = to_tidss_crtc(tidss->crtcs[i]);
+
+ tidss->irq_mask |= DSS_IRQ_VP_SYNC_LOST(tcrtc->hw_videoport);
+
+ tidss->irq_mask |= DSS_IRQ_VP_FRAME_DONE(tcrtc->hw_videoport);
+ }
+
+ tidss_irq_update(tidss);
+
+ spin_unlock_irqrestore(&tidss->wait_lock, flags);
+
+ tidss_runtime_put(tidss);
+
+ return 0;
+}
+
+void tidss_irq_uninstall(struct drm_device *ddev)
+{
+ struct tidss_device *tidss = ddev->dev_private;
+
+ tidss_runtime_get(tidss);
+ dispc_set_irqenable(tidss->dispc, 0);
+ tidss_runtime_put(tidss);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_irq.h b/drivers/gpu/drm/tidss/tidss_irq.h
new file mode 100644
index 000000000000..aa92db403cca
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_irq.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_IRQ_H__
+#define __TIDSS_IRQ_H__
+
+#include <linux/types.h>
+
+#include "tidss_drv.h"
+
+/*
+ * The IRQ statuses from the various DISPC IRQ registers are packed into
+ * a single value, where the bits are defined as follows:
+ *
+ * bit group |dev|wb |mrg0|mrg1|mrg2 |mrg3 |plane0-3| <unused> |
+ * bit use   |D  |fou|FEOL|FEOL|FEOL |FEOL |  UUUU  |          |
+ * bit number|0  |1-3|4-7 |8-11|12-15|16-19| 20-23  |  24-31   |
+ *
+ * device bits: D = OCP error
+ * WB bits:     f = frame done wb, o = wb buffer overflow,
+ *              u = wb buffer incomplete
+ * vp bits:     F = frame done, E = vsync even, O = vsync odd, L = sync lost
+ * plane bits:  U = fifo underflow
+ */
+
+#define DSS_IRQ_DEVICE_OCP_ERR BIT(0)
+
+#define DSS_IRQ_DEVICE_FRAMEDONEWB BIT(1)
+#define DSS_IRQ_DEVICE_WBBUFFEROVERFLOW BIT(2)
+#define DSS_IRQ_DEVICE_WBUNCOMPLETEERROR BIT(3)
+#define DSS_IRQ_DEVICE_WB_MASK GENMASK(3, 1)
+
+#define DSS_IRQ_VP_BIT_N(ch, bit) (4 + 4 * (ch) + (bit))
+#define DSS_IRQ_PLANE_BIT_N(plane, bit) \
+ (DSS_IRQ_VP_BIT_N(TIDSS_MAX_PORTS, 0) + 1 * (plane) + (bit))
+
+#define DSS_IRQ_VP_BIT(ch, bit) BIT(DSS_IRQ_VP_BIT_N((ch), (bit)))
+#define DSS_IRQ_PLANE_BIT(plane, bit) \
+ BIT(DSS_IRQ_PLANE_BIT_N((plane), (bit)))
+
+static inline dispc_irq_t DSS_IRQ_VP_MASK(u32 ch)
+{
+ return GENMASK(DSS_IRQ_VP_BIT_N((ch), 3), DSS_IRQ_VP_BIT_N((ch), 0));
+}
+
+static inline dispc_irq_t DSS_IRQ_PLANE_MASK(u32 plane)
+{
+ return GENMASK(DSS_IRQ_PLANE_BIT_N((plane), 0),
+ DSS_IRQ_PLANE_BIT_N((plane), 0));
+}
+
+#define DSS_IRQ_VP_FRAME_DONE(ch) DSS_IRQ_VP_BIT((ch), 0)
+#define DSS_IRQ_VP_VSYNC_EVEN(ch) DSS_IRQ_VP_BIT((ch), 1)
+#define DSS_IRQ_VP_VSYNC_ODD(ch) DSS_IRQ_VP_BIT((ch), 2)
+#define DSS_IRQ_VP_SYNC_LOST(ch) DSS_IRQ_VP_BIT((ch), 3)
+
+#define DSS_IRQ_PLANE_FIFO_UNDERFLOW(plane) DSS_IRQ_PLANE_BIT((plane), 0)
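+
+/*
+ * Informative examples of the packing above, with TIDSS_MAX_PORTS = 4:
+ * DSS_IRQ_VP_FRAME_DONE(0) is bit 4, DSS_IRQ_VP_VSYNC_EVEN(1) is bit 9
+ * and DSS_IRQ_PLANE_FIFO_UNDERFLOW(2) is bit 22, matching the bit
+ * layout table at the top of this file.
+ */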
+
+struct drm_crtc;
+struct drm_device;
+
+struct tidss_device;
+
+void tidss_irq_enable_vblank(struct drm_crtc *crtc);
+void tidss_irq_disable_vblank(struct drm_crtc *crtc);
+
+void tidss_irq_preinstall(struct drm_device *ddev);
+int tidss_irq_postinstall(struct drm_device *ddev);
+void tidss_irq_uninstall(struct drm_device *ddev);
+irqreturn_t tidss_irq_handler(int irq, void *arg);
+
+void tidss_irq_resume(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_kms.c b/drivers/gpu/drm/tidss/tidss_kms.c
new file mode 100644
index 000000000000..7d419960b030
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_bridge.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_cma_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_vblank.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_encoder.h"
+#include "tidss_kms.h"
+#include "tidss_plane.h"
+
+static void tidss_atomic_commit_tail(struct drm_atomic_state *old_state)
+{
+ struct drm_device *ddev = old_state->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ tidss_runtime_get(tidss);
+
+ drm_atomic_helper_commit_modeset_disables(ddev, old_state);
+ drm_atomic_helper_commit_planes(ddev, old_state, 0);
+ drm_atomic_helper_commit_modeset_enables(ddev, old_state);
+
+ drm_atomic_helper_commit_hw_done(old_state);
+ drm_atomic_helper_wait_for_flip_done(ddev, old_state);
+
+ drm_atomic_helper_cleanup_planes(ddev, old_state);
+
+ tidss_runtime_put(tidss);
+}
+
+static const struct drm_mode_config_helper_funcs mode_config_helper_funcs = {
+ .atomic_commit_tail = tidss_atomic_commit_tail,
+};
+
+static int tidss_atomic_check(struct drm_device *ddev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane_state *opstate;
+ struct drm_plane_state *npstate;
+ struct drm_plane *plane;
+ struct drm_crtc_state *cstate;
+ struct drm_crtc *crtc;
+ int ret, i;
+
+ ret = drm_atomic_helper_check(ddev, state);
+ if (ret)
+ return ret;
+
+ /*
+ * Add all active planes on a CRTC to the atomic state, if
+ * x/y/z position or activity of any plane on that CRTC
+ * changes. This is needed for updating the plane positions in
+ * tidss_crtc_position_planes() which is called from
+ * crtc_atomic_enable() and crtc_atomic_flush(). We have an
+ * extra flag to mark x,y-position changes and together
+ * with zpos_changed the condition recognizes all the above
+ * cases.
+ */
+ for_each_oldnew_plane_in_state(state, plane, opstate, npstate, i) {
+ if (!npstate->crtc || !npstate->visible)
+ continue;
+
+ if (!opstate->crtc || opstate->crtc_x != npstate->crtc_x ||
+ opstate->crtc_y != npstate->crtc_y) {
+ cstate = drm_atomic_get_crtc_state(state,
+ npstate->crtc);
+ if (IS_ERR(cstate))
+ return PTR_ERR(cstate);
+ to_tidss_crtc_state(cstate)->plane_pos_changed = true;
+ }
+ }
+
+ for_each_new_crtc_in_state(state, crtc, cstate, i) {
+ if (to_tidss_crtc_state(cstate)->plane_pos_changed ||
+ cstate->zpos_changed) {
+ ret = drm_atomic_add_affected_planes(state, crtc);
+ if (ret)
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+static const struct drm_mode_config_funcs mode_config_funcs = {
+ .fb_create = drm_gem_fb_create,
+ .atomic_check = tidss_atomic_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
+static int tidss_dispc_modeset_init(struct tidss_device *tidss)
+{
+ struct device *dev = tidss->dev;
+ unsigned int fourccs_len;
+ const u32 *fourccs = dispc_plane_formats(tidss->dispc, &fourccs_len);
+ unsigned int i;
+
+ struct pipe {
+ u32 hw_videoport;
+ struct drm_bridge *bridge;
+ u32 enc_type;
+ };
+
+ const struct dispc_features *feat = tidss->feat;
+ u32 max_vps = feat->num_vps;
+ u32 max_planes = feat->num_planes;
+
+ struct pipe pipes[TIDSS_MAX_PORTS];
+ u32 num_pipes = 0;
+ u32 crtc_mask;
+
+ /* first find all the connected panels & bridges */
+
+ for (i = 0; i < max_vps; i++) {
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+ u32 enc_type = DRM_MODE_ENCODER_NONE;
+ int ret;
+
+ ret = drm_of_find_panel_or_bridge(dev->of_node, i, 0,
+ &panel, &bridge);
+ if (ret == -ENODEV) {
+ dev_dbg(dev, "no panel/bridge for port %d\n", i);
+ continue;
+ } else if (ret) {
+ dev_dbg(dev, "port %d probe returned %d\n", i, ret);
+ return ret;
+ }
+
+ if (panel) {
+ u32 conn_type;
+
+ dev_dbg(dev, "Setting up panel for port %d\n", i);
+
+ switch (feat->vp_bus_type[i]) {
+ case DISPC_VP_OLDI:
+ enc_type = DRM_MODE_ENCODER_LVDS;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ case DISPC_VP_DPI:
+ enc_type = DRM_MODE_ENCODER_DPI;
+ conn_type = DRM_MODE_CONNECTOR_LVDS;
+ break;
+ default:
+ WARN_ON(1);
+ return -EINVAL;
+ }
+
+ if (panel->connector_type != conn_type) {
+ dev_err(dev,
+ "%s: Panel %s has incompatible connector type for vp%d (%d != %d)\n",
+ __func__, dev_name(panel->dev), i,
+ panel->connector_type, conn_type);
+ return -EINVAL;
+ }
+
+ bridge = devm_drm_panel_bridge_add(dev, panel);
+ if (IS_ERR(bridge)) {
+ dev_err(dev,
+ "failed to set up panel bridge for port %d\n",
+ i);
+ return PTR_ERR(bridge);
+ }
+ }
+
+ pipes[num_pipes].hw_videoport = i;
+ pipes[num_pipes].bridge = bridge;
+ pipes[num_pipes].enc_type = enc_type;
+ num_pipes++;
+ }
+
+ /* all planes can be on any crtc */
+ crtc_mask = (1 << num_pipes) - 1;
+
+ /* then create a plane, a crtc and an encoder for each panel/bridge */
+
+ for (i = 0; i < num_pipes; ++i) {
+ struct tidss_plane *tplane;
+ struct tidss_crtc *tcrtc;
+ struct drm_encoder *enc;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+ int ret;
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_PRIMARY, crtc_mask,
+ fourccs, fourccs_len);
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+
+ tcrtc = tidss_crtc_create(tidss, pipes[i].hw_videoport,
+ &tplane->plane);
+ if (IS_ERR(tcrtc)) {
+ dev_err(tidss->dev, "crtc create failed\n");
+ return PTR_ERR(tcrtc);
+ }
+
+ tidss->crtcs[tidss->num_crtcs++] = &tcrtc->crtc;
+
+ enc = tidss_encoder_create(tidss, pipes[i].enc_type,
+ 1 << tcrtc->crtc.index);
+ if (IS_ERR(enc)) {
+ dev_err(tidss->dev, "encoder create failed\n");
+ return PTR_ERR(enc);
+ }
+
+ ret = drm_bridge_attach(enc, pipes[i].bridge, NULL, 0);
+ if (ret) {
+ dev_err(tidss->dev, "bridge attach failed: %d\n", ret);
+ return ret;
+ }
+ }
+
+ /* create overlay planes from the leftover hw planes */
+
+ while (tidss->num_planes < max_planes) {
+ struct tidss_plane *tplane;
+ u32 hw_plane_id = feat->vid_order[tidss->num_planes];
+
+ tplane = tidss_plane_create(tidss, hw_plane_id,
+ DRM_PLANE_TYPE_OVERLAY, crtc_mask,
+ fourccs, fourccs_len);
+
+ if (IS_ERR(tplane)) {
+ dev_err(tidss->dev, "plane create failed\n");
+ return PTR_ERR(tplane);
+ }
+
+ tidss->planes[tidss->num_planes++] = &tplane->plane;
+ }
+
+ return 0;
+}
+
+int tidss_modeset_init(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+ unsigned int i;
+ int ret;
+
+ dev_dbg(tidss->dev, "%s\n", __func__);
+
+ drm_mode_config_init(ddev);
+
+ ddev->mode_config.min_width = 8;
+ ddev->mode_config.min_height = 8;
+ ddev->mode_config.max_width = 8096;
+ ddev->mode_config.max_height = 8096;
+ ddev->mode_config.normalize_zpos = true;
+ ddev->mode_config.funcs = &mode_config_funcs;
+ ddev->mode_config.helper_private = &mode_config_helper_funcs;
+
+ ret = tidss_dispc_modeset_init(tidss);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ ret = drm_vblank_init(ddev, tidss->num_crtcs);
+ if (ret)
+ goto err_mode_config_cleanup;
+
+ /* Start with vertical blanking interrupt reporting disabled. */
+ for (i = 0; i < tidss->num_crtcs; ++i)
+ drm_crtc_vblank_reset(tidss->crtcs[i]);
+
+ drm_mode_config_reset(ddev);
+
+ dev_dbg(tidss->dev, "%s done\n", __func__);
+
+ return 0;
+
+err_mode_config_cleanup:
+ drm_mode_config_cleanup(ddev);
+ return ret;
+}
+
+void tidss_modeset_cleanup(struct tidss_device *tidss)
+{
+ struct drm_device *ddev = &tidss->ddev;
+
+ drm_mode_config_cleanup(ddev);
+}
diff --git a/drivers/gpu/drm/tidss/tidss_kms.h b/drivers/gpu/drm/tidss/tidss_kms.h
new file mode 100644
index 000000000000..dda5625d0128
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_kms.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_KMS_H__
+#define __TIDSS_KMS_H__
+
+struct tidss_device;
+
+int tidss_modeset_init(struct tidss_device *tidss);
+void tidss_modeset_cleanup(struct tidss_device *tidss);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_plane.c b/drivers/gpu/drm/tidss/tidss_plane.c
new file mode 100644
index 000000000000..ff99b2dd4a17
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.c
@@ -0,0 +1,217 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fourcc.h>
+#include <drm/drm_fb_cma_helper.h>
+
+#include "tidss_crtc.h"
+#include "tidss_dispc.h"
+#include "tidss_drv.h"
+#include "tidss_plane.h"
+
+/* drm_plane_helper_funcs */
+
+static int tidss_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ const struct drm_format_info *finfo;
+ struct drm_crtc_state *crtc_state;
+ u32 hw_plane = tplane->hw_plane_id;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->crtc) {
+ /*
+ * The visible field is not reset by the DRM core but only
+ * updated by drm_atomic_helper_check_plane_state(), so set it
+ * manually.
+ */
+ state->visible = false;
+ return 0;
+ }
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state, 0,
+ INT_MAX, true, true);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * The HW can only start drawing at a subpixel boundary (the first
+ * two checks below). At the end of a row the HW can only jump an
+ * integer number of subpixels forward to the beginning of the next
+ * row, so the picture must also have an integer subpixel width
+ * (the third check). However, after reaching the end of the drawn
+ * picture, drawing restarts at the absolute memory address of the
+ * picture's top-left corner, so there is no need to check for an
+ * odd height.
+ */
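+
+ /*
+ * Informative example: for NV12 drm_format_info() reports
+ * hsub = vsub = 2, so src_x, src_y and src_w must all be even,
+ * while single-plane RGB formats (hsub = vsub = 1) pass these
+ * checks unconditionally.
+ */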
+
+ finfo = drm_format_info(state->fb->format->format);
+
+ if ((state->src_x >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: x-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_x >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_y >> 16) % finfo->vsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: y-position %u not divisible subpixel size %u\n",
+ __func__, (state->src_y >> 16), finfo->vsub);
+ return -EINVAL;
+ }
+
+ if ((state->src_w >> 16) % finfo->hsub != 0) {
+ dev_dbg(ddev->dev,
+ "%s: src width %u not divisible by subpixel size %u\n",
+ __func__, (state->src_w >> 16), finfo->hsub);
+ return -EINVAL;
+ }
+
+ if (!state->visible)
+ return 0;
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_check(tidss->dispc, hw_plane, state, hw_videoport);
+ if (ret)
+ return ret;
+
+ return 0;
+}
+
+static void tidss_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+ struct drm_plane_state *state = plane->state;
+ u32 hw_videoport;
+ int ret;
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ if (!state->visible) {
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ hw_videoport = to_tidss_crtc(state->crtc)->hw_videoport;
+
+ ret = dispc_plane_setup(tidss->dispc, tplane->hw_plane_id,
+ state, hw_videoport);
+
+ if (ret) {
+ dev_err(plane->dev->dev, "%s: Failed to setup plane %d\n",
+ __func__, tplane->hw_plane_id);
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+ return;
+ }
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, true);
+}
+
+static void tidss_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_device *ddev = plane->dev;
+ struct tidss_device *tidss = ddev->dev_private;
+ struct tidss_plane *tplane = to_tidss_plane(plane);
+
+ dev_dbg(ddev->dev, "%s\n", __func__);
+
+ dispc_plane_enable(tidss->dispc, tplane->hw_plane_id, false);
+}
+
+static const struct drm_plane_helper_funcs tidss_plane_helper_funcs = {
+ .atomic_check = tidss_plane_atomic_check,
+ .atomic_update = tidss_plane_atomic_update,
+ .atomic_disable = tidss_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs tidss_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .reset = drm_atomic_helper_plane_reset,
+ .destroy = drm_plane_cleanup,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats)
+{
+ struct tidss_plane *tplane;
+ enum drm_plane_type type;
+ u32 possible_crtcs;
+ u32 num_planes = tidss->feat->num_planes;
+ u32 color_encodings = (BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709));
+ u32 color_ranges = (BIT(DRM_COLOR_YCBCR_FULL_RANGE) |
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE));
+ u32 default_encoding = DRM_COLOR_YCBCR_BT601;
+ u32 default_range = DRM_COLOR_YCBCR_FULL_RANGE;
+ u32 blend_modes = (BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE));
+ int ret;
+
+ tplane = devm_kzalloc(tidss->dev, sizeof(*tplane), GFP_KERNEL);
+ if (!tplane)
+ return ERR_PTR(-ENOMEM);
+
+ tplane->hw_plane_id = hw_plane_id;
+
+ possible_crtcs = crtc_mask;
+ type = plane_type;
+
+ ret = drm_universal_plane_init(&tidss->ddev, &tplane->plane,
+ possible_crtcs,
+ &tidss_plane_funcs,
+ formats, num_formats,
+ NULL, type, NULL);
+ if (ret < 0)
+ return ERR_PTR(ret);
+
+ drm_plane_helper_add(&tplane->plane, &tidss_plane_helper_funcs);
+
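+ /*
+ * The initial zpos matches the HW plane id, and userspace may move
+ * the plane anywhere in the [0, num_planes - 1] range.
+ */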
+ drm_plane_create_zpos_property(&tplane->plane, hw_plane_id, 0,
+ num_planes - 1);
+
+ ret = drm_plane_create_color_properties(&tplane->plane,
+ color_encodings,
+ color_ranges,
+ default_encoding,
+ default_range);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_alpha_property(&tplane->plane);
+ if (ret)
+ return ERR_PTR(ret);
+
+ ret = drm_plane_create_blend_mode_property(&tplane->plane, blend_modes);
+ if (ret)
+ return ERR_PTR(ret);
+
+ return tplane;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_plane.h b/drivers/gpu/drm/tidss/tidss_plane.h
new file mode 100644
index 000000000000..80ff1c5a2535
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_plane.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ */
+
+#ifndef __TIDSS_PLANE_H__
+#define __TIDSS_PLANE_H__
+
+#define to_tidss_plane(p) container_of((p), struct tidss_plane, plane)
+
+struct tidss_device;
+
+struct tidss_plane {
+ struct drm_plane plane;
+
+ u32 hw_plane_id;
+};
+
+struct tidss_plane *tidss_plane_create(struct tidss_device *tidss,
+ u32 hw_plane_id, u32 plane_type,
+ u32 crtc_mask, const u32 *formats,
+ u32 num_formats);
+
+#endif
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.c b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
new file mode 100644
index 000000000000..5ec68389cc68
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+
+#include "tidss_scale_coefs.h"
+
+/*
+ * These are interpolated with a custom Python script from the DSS5
+ * (drivers/gpu/drm/omapdrm/dss/dispc_coef.c) coefficients.
+ */
+static const struct tidss_scale_coefs coef5_m32 = {
+ .c2 = { 28, 34, 40, 46, 52, 58, 64, 70, 0, 2, 4, 8, 12, 16, 20, 24, },
+ .c1 = { 132, 138, 144, 150, 156, 162, 168, 174, 76, 84, 92, 98, 104, 110, 116, 124, },
+ .c0 = { 192, 192, 192, 190, 188, 186, 184, 182, 180, },
+};
+
+static const struct tidss_scale_coefs coef5_m26 = {
+ .c2 = { 24, 28, 32, 38, 44, 50, 56, 64, 0, 2, 4, 6, 8, 12, 16, 20, },
+ .c1 = { 132, 138, 144, 152, 160, 166, 172, 178, 72, 80, 88, 94, 100, 108, 116, 124, },
+ .c0 = { 200, 202, 204, 202, 200, 196, 192, 188, 184, },
+};
+
+static const struct tidss_scale_coefs coef5_m22 = {
+ .c2 = { 16, 20, 24, 30, 36, 42, 48, 56, 0, 0, 0, 2, 4, 8, 12, 14, },
+ .c1 = { 132, 140, 148, 156, 164, 172, 180, 186, 64, 72, 80, 88, 96, 104, 112, 122, },
+ .c0 = { 216, 216, 216, 214, 212, 208, 204, 198, 192, },
+};
+
+static const struct tidss_scale_coefs coef5_m19 = {
+ .c2 = { 12, 14, 16, 22, 28, 34, 40, 48, 0, 0, 0, 2, 4, 4, 4, 8, },
+ .c1 = { 128, 140, 152, 160, 168, 176, 184, 192, 56, 64, 72, 82, 92, 100, 108, 118, },
+ .c0 = { 232, 232, 232, 226, 220, 218, 216, 208, 200, },
+};
+
+static const struct tidss_scale_coefs coef5_m16 = {
+ .c2 = { 0, 2, 4, 8, 12, 18, 24, 32, 0, 0, 0, -2, -4, -4, -4, -2, },
+ .c1 = { 124, 138, 152, 164, 176, 186, 196, 206, 40, 48, 56, 68, 80, 90, 100, 112, },
+ .c0 = { 264, 262, 260, 254, 248, 242, 236, 226, 216, },
+};
+
+static const struct tidss_scale_coefs coef5_m14 = {
+ .c2 = { -8, -6, -4, -2, 0, 6, 12, 18, 0, -2, -4, -6, -8, -8, -8, -8, },
+ .c1 = { 120, 134, 148, 164, 180, 194, 208, 220, 24, 32, 40, 52, 64, 78, 92, 106, },
+ .c0 = { 288, 286, 284, 280, 276, 266, 256, 244, 232, },
+};
+
+static const struct tidss_scale_coefs coef5_m13 = {
+ .c2 = { -12, -12, -12, -10, -8, -4, 0, 6, 0, -2, -4, -6, -8, -10, -12, -12, },
+ .c1 = { 112, 130, 148, 164, 180, 196, 212, 228, 12, 22, 32, 44, 56, 70, 84, 98, },
+ .c0 = { 312, 308, 304, 298, 292, 282, 272, 258, 244, },
+};
+
+static const struct tidss_scale_coefs coef5_m12 = {
+ .c2 = { -16, -18, -20, -18, -16, -14, -12, -6, 0, -2, -4, -6, -8, -10, -12, -14, },
+ .c1 = { 104, 124, 144, 164, 184, 202, 220, 238, 0, 10, 20, 30, 40, 56, 72, 88, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 272, 256, },
+};
+
+static const struct tidss_scale_coefs coef5_m11 = {
+ .c2 = { -20, -22, -24, -24, -24, -24, -24, -20, 0, -2, -4, -6, -8, -10, -12, -16, },
+ .c1 = { 92, 114, 136, 158, 180, 204, 228, 250, -16, -8, 0, 12, 24, 38, 52, 72, },
+ .c0 = { 368, 364, 360, 350, 340, 326, 312, 292, 272, },
+};
+
+static const struct tidss_scale_coefs coef5_m10 = {
+ .c2 = { -16, -20, -24, -28, -32, -34, -36, -34, 0, 0, 0, -2, -4, -8, -12, -14, },
+ .c1 = { 72, 96, 120, 148, 176, 204, 232, 260, -32, -26, -20, -10, 0, 16, 32, 52, },
+ .c0 = { 400, 398, 396, 384, 372, 354, 336, 312, 288, },
+};
+
+static const struct tidss_scale_coefs coef5_m9 = {
+ .c2 = { -12, -18, -24, -28, -32, -38, -44, -46, 0, 2, 4, 2, 0, -2, -4, -8, },
+ .c1 = { 40, 68, 96, 128, 160, 196, 232, 268, -48, -46, -44, -36, -28, -14, 0, 20, },
+ .c0 = { 456, 450, 444, 428, 412, 388, 364, 334, 304, },
+};
+
+static const struct tidss_scale_coefs coef5_m8 = {
+ .c2 = { 0, -4, -8, -16, -24, -32, -40, -48, 0, 2, 4, 6, 8, 6, 4, 2, },
+ .c1 = { 0, 28, 56, 94, 132, 176, 220, 266, -56, -60, -64, -62, -60, -50, -40, -20, },
+ .c0 = { 512, 506, 500, 478, 456, 424, 392, 352, 312, },
+};
+
+static const struct tidss_scale_coefs coef3_m32 = {
+ .c1 = { 108, 92, 76, 62, 48, 36, 24, 140, 256, 236, 216, 198, 180, 162, 144, 126, },
+ .c0 = { 296, 294, 292, 288, 284, 278, 272, 136, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m26 = {
+ .c1 = { 104, 90, 76, 60, 44, 32, 20, 138, 256, 236, 216, 198, 180, 160, 140, 122, },
+ .c0 = { 304, 300, 296, 292, 288, 282, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m22 = {
+ .c1 = { 100, 84, 68, 54, 40, 30, 20, 138, 256, 236, 216, 196, 176, 156, 136, 118, },
+ .c0 = { 312, 310, 308, 302, 296, 286, 276, 138, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m19 = {
+ .c1 = { 96, 80, 64, 50, 36, 26, 16, 136, 256, 236, 216, 194, 172, 152, 132, 114, },
+ .c0 = { 320, 318, 316, 310, 304, 292, 280, 140, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m16 = {
+ .c1 = { 88, 72, 56, 44, 32, 22, 12, 134, 256, 234, 212, 190, 168, 148, 128, 108, },
+ .c0 = { 336, 332, 328, 320, 312, 300, 288, 144, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m14 = {
+ .c1 = { 80, 64, 48, 36, 24, 16, 8, 132, 256, 232, 208, 186, 164, 142, 120, 100, },
+ .c0 = { 352, 348, 344, 334, 324, 310, 296, 148, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m13 = {
+ .c1 = { 72, 56, 40, 30, 20, 12, 4, 130, 256, 232, 208, 184, 160, 136, 112, 92, },
+ .c0 = { 368, 364, 360, 346, 332, 316, 300, 150, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m12 = {
+ .c1 = { 64, 50, 36, 26, 16, 10, 4, 130, 256, 230, 204, 178, 152, 128, 104, 84, },
+ .c0 = { 384, 378, 372, 358, 344, 324, 304, 152, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m11 = {
+ .c1 = { 56, 40, 24, 16, 8, 4, 0, 128, 256, 228, 200, 172, 144, 120, 96, 76, },
+ .c0 = { 400, 396, 392, 376, 360, 336, 312, 156, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m10 = {
+ .c1 = { 40, 26, 12, 6, 0, -2, -4, 126, 256, 226, 196, 166, 136, 110, 84, 62, },
+ .c0 = { 432, 424, 416, 396, 376, 348, 320, 160, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m9 = {
+ .c1 = { 24, 12, 0, -4, -8, -8, -8, 124, 256, 222, 188, 154, 120, 92, 64, 44, },
+ .c0 = { 464, 456, 448, 424, 400, 366, 332, 166, 256, },
+};
+
+static const struct tidss_scale_coefs coef3_m8 = {
+ .c1 = { 0, -8, -16, -16, -16, -12, -8, 124, 256, 214, 172, 134, 96, 66, 36, 18, },
+ .c0 = { 512, 502, 492, 462, 432, 390, 348, 174, 256, },
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps)
+{
+ int i;
+ int inc;
+ static const struct {
+ int mmin;
+ int mmax;
+ const struct tidss_scale_coefs *coef3;
+ const struct tidss_scale_coefs *coef5;
+ const char *name;
+ } coefs[] = {
+ { 27, 32, &coef3_m32, &coef5_m32, "M32" },
+ { 23, 26, &coef3_m26, &coef5_m26, "M26" },
+ { 20, 22, &coef3_m22, &coef5_m22, "M22" },
+ { 17, 19, &coef3_m19, &coef5_m19, "M19" },
+ { 15, 16, &coef3_m16, &coef5_m16, "M16" },
+ { 14, 14, &coef3_m14, &coef5_m14, "M14" },
+ { 13, 13, &coef3_m13, &coef5_m13, "M13" },
+ { 12, 12, &coef3_m12, &coef5_m12, "M12" },
+ { 11, 11, &coef3_m11, &coef5_m11, "M11" },
+ { 10, 10, &coef3_m10, &coef5_m10, "M10" },
+ { 9, 9, &coef3_m9, &coef5_m9, "M9" },
+ { 4, 8, &coef3_m8, &coef5_m8, "M8" },
+ /*
+ * When upscaling more than two times, blockiness and outlines
+ * around the image are observed when M8 tables are used. M11,
+ * M16 and M19 tables are used to prevent this.
+ */
+ { 3, 3, &coef3_m11, &coef5_m11, "M11" },
+ { 2, 2, &coef3_m16, &coef5_m16, "M16" },
+ { 0, 1, &coef3_m19, &coef5_m19, "M19" },
+ };
+
+ /*
+ * firinc is the result of 0x200000 * in_size / out_size.
+ * Dividing it by 0x40000 scales it down to 8 * in_size / out_size,
+ * so after the division the actual scaling factor is 8 / inc.
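+ *
+ * For example, downscaling by two (in_size = 2 * out_size) gives
+ * firinc = 0x400000 and inc = 16, which selects the M16 tables
+ * and an actual scaling factor of 8 / 16 = 0.5.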
+ */
+ inc = firinc / 0x40000;
+ for (i = 0; i < ARRAY_SIZE(coefs); ++i) {
+ if (inc >= coefs[i].mmin && inc <= coefs[i].mmax) {
+ if (five_taps)
+ return coefs[i].coef5;
+ else
+ return coefs[i].coef3;
+ }
+ }
+
+ dev_err(dev, "%s: Coefficients not found for firinc 0x%08x, inc %d\n",
+ __func__, firinc, inc);
+
+ return NULL;
+}
diff --git a/drivers/gpu/drm/tidss/tidss_scale_coefs.h b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
new file mode 100644
index 000000000000..64b5af5b5361
--- /dev/null
+++ b/drivers/gpu/drm/tidss/tidss_scale_coefs.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Jyri Sarha <[email protected]>
+ */
+
+#ifndef __TIDSS_DISPC_COEF_H__
+#define __TIDSS_DISPC_COEF_H__
+
+#include <linux/types.h>
+
+struct tidss_scale_coefs {
+ s16 c2[16];
+ s16 c1[16];
+ u16 c0[9];
+};
+
+const struct tidss_scale_coefs *tidss_get_scale_coefs(struct device *dev,
+ u32 firinc,
+ bool five_taps);
+
+#endif
diff --git a/drivers/gpu/drm/tilcdc/tilcdc_external.c b/drivers/gpu/drm/tilcdc/tilcdc_external.c
index 51d034e095f4..28b7f703236e 100644
--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
+++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
@@ -95,7 +95,7 @@ int tilcdc_attach_bridge(struct drm_device *ddev, struct drm_bridge *bridge)
priv->external_encoder->possible_crtcs = BIT(0);
- ret = drm_bridge_attach(priv->external_encoder, bridge, NULL);
+ ret = drm_bridge_attach(priv->external_encoder, bridge, NULL, 0);
if (ret) {
dev_err(ddev->dev, "drm_bridge_attach() failed %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig
index a46ac284dd5e..4160e74e4751 100644
--- a/drivers/gpu/drm/tiny/Kconfig
+++ b/drivers/gpu/drm/tiny/Kconfig
@@ -47,6 +47,20 @@ config TINYDRM_ILI9341
If M is selected the module will be called ili9341.
+config TINYDRM_ILI9486
+ tristate "DRM support for ILI9486 display panels"
+ depends on DRM && SPI
+ select DRM_KMS_HELPER
+ select DRM_KMS_CMA_HELPER
+ select DRM_MIPI_DBI
+ select BACKLIGHT_CLASS_DEVICE
+ help
+ DRM driver for the following Ilitek ILI9486 panels:
+ * PISCREEN 3.5" 320x480 TFT (Ozzmaker 3.5")
+ * RPILCD 3.5" 320x480 TFT (Waveshare 3.5")
+
+ If M is selected the module will be called ili9486.
+
config TINYDRM_MI0283QT
tristate "DRM support for MI0283QT"
depends on DRM && SPI
@@ -85,14 +99,16 @@ config TINYDRM_ST7586
If M is selected the module will be called st7586.
config TINYDRM_ST7735R
- tristate "DRM support for Sitronix ST7735R display panels"
+ tristate "DRM support for Sitronix ST7715R/ST7735R display panels"
depends on DRM && SPI
select DRM_KMS_HELPER
select DRM_KMS_CMA_HELPER
select DRM_MIPI_DBI
select BACKLIGHT_CLASS_DEVICE
help
- DRM driver Sitronix ST7735R with one of the following LCDs:
- * JD-T18003-T01 1.8" 128x160 TFT
+ DRM driver for Sitronix ST7715R/ST7735R with one of the following
+ LCDs:
+ * Jianda JD-T18003-T01 1.8" 128x160 TFT
+ * Okaya RH128128T 1.44" 128x128 TFT
If M is selected the module will be called st7735r.
diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile
index 896cf31132d3..c96ceee71453 100644
--- a/drivers/gpu/drm/tiny/Makefile
+++ b/drivers/gpu/drm/tiny/Makefile
@@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_GM12U320) += gm12u320.o
obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o
obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o
obj-$(CONFIG_TINYDRM_ILI9341) += ili9341.o
+obj-$(CONFIG_TINYDRM_ILI9486) += ili9486.o
obj-$(CONFIG_TINYDRM_MI0283QT) += mi0283qt.o
obj-$(CONFIG_TINYDRM_REPAPER) += repaper.o
obj-$(CONFIG_TINYDRM_ST7586) += st7586.o
diff --git a/drivers/gpu/drm/tiny/gm12u320.c b/drivers/gpu/drm/tiny/gm12u320.c
index 94fb1f593564..a48173441ae0 100644
--- a/drivers/gpu/drm/tiny/gm12u320.c
+++ b/drivers/gpu/drm/tiny/gm12u320.c
@@ -22,7 +22,6 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
-#include <drm/drm_vblank.h>
static bool eco_mode;
module_param(eco_mode, bool, 0644);
@@ -610,18 +609,10 @@ static void gm12u320_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
gm12u320_fb_mark_dirty(pipe->plane.state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irq(&crtc->dev->event_lock);
- }
}
static const struct drm_simple_display_pipe_funcs gm12u320_pipe_funcs = {
diff --git a/drivers/gpu/drm/tiny/ili9225.c b/drivers/gpu/drm/tiny/ili9225.c
index c66acc566c2b..802fb8dde1b6 100644
--- a/drivers/gpu/drm/tiny/ili9225.c
+++ b/drivers/gpu/drm/tiny/ili9225.c
@@ -26,7 +26,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#define ILI9225_DRIVER_READ_CODE 0x00
#define ILI9225_DRIVER_OUTPUT_CONTROL 0x01
@@ -165,18 +164,10 @@ static void ili9225_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
ili9225_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void ili9225_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/ili9486.c b/drivers/gpu/drm/tiny/ili9486.c
new file mode 100644
index 000000000000..532560aebb1e
--- /dev/null
+++ b/drivers/gpu/drm/tiny/ili9486.c
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * DRM driver for Ilitek ILI9486 panels
+ *
+ * Copyright 2020 Kamlesh Gurudasani <[email protected]>
+ */
+
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/property.h>
+#include <linux/spi/spi.h>
+
+#include <video/mipi_display.h>
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_drv.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_gem_cma_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_mipi_dbi.h>
+#include <drm/drm_modeset_helper.h>
+
+#define ILI9486_ITFCTR1 0xb0
+#define ILI9486_PWCTRL1 0xc2
+#define ILI9486_VMCTRL1 0xc5
+#define ILI9486_PGAMCTRL 0xe0
+#define ILI9486_NGAMCTRL 0xe1
+#define ILI9486_DGAMCTRL 0xe2
+#define ILI9486_MADCTL_BGR BIT(3)
+#define ILI9486_MADCTL_MV BIT(5)
+#define ILI9486_MADCTL_MX BIT(6)
+#define ILI9486_MADCTL_MY BIT(7)
+
+/*
+ * The PiScreen/waveshare rpi-lcd-35 has an SPI to 16-bit parallel bus
+ * converter in front of the display controller. This means that 8-bit
+ * values have to be transferred as 16-bit values.
+ */
+static int waveshare_command(struct mipi_dbi *mipi, u8 *cmd, u8 *par,
+ size_t num)
+{
+ struct spi_device *spi = mipi->spi;
+ void *data = par;
+ u32 speed_hz;
+ int i, ret;
+ __be16 *buf;
+
+ buf = kmalloc(32 * sizeof(u16), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /*
+ * The displays are Raspberry Pi HATs connected to the 8-bit-only SPI
+ * controller, so 16-bit commands and parameters need byte swapping
+ * before being transferred as 8-bit values on the big-endian SPI bus.
+ * Pixel data bytes have already been swapped before this function is
+ * called.
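+ *
+ * For example, the one-byte MIPI_DCS_SET_PIXEL_FORMAT command (0x3a)
+ * goes out as the 16-bit word 0x003a, i.e. as the byte sequence
+ * 0x00 0x3a on the wire.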
+ */
+ buf[0] = cpu_to_be16(*cmd);
+ gpiod_set_value_cansleep(mipi->dc, 0);
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, 2);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, buf, 2);
+ if (ret || !num)
+ goto free;
+
+ /* 8-bit configuration data, not 16-bit pixel data */
+ if (num <= 32) {
+ for (i = 0; i < num; i++)
+ buf[i] = cpu_to_be16(par[i]);
+ num *= 2;
+ speed_hz = mipi_dbi_spi_cmd_max_speed(spi, num);
+ data = buf;
+ }
+
+ gpiod_set_value_cansleep(mipi->dc, 1);
+ ret = mipi_dbi_spi_transfer(spi, speed_hz, 8, data, num);
+ free:
+ kfree(buf);
+
+ return ret;
+}
+
+static void waveshare_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
+{
+ struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct mipi_dbi *dbi = &dbidev->dbi;
+ u8 addr_mode;
+ int ret, idx;
+
+ if (!drm_dev_enter(pipe->crtc.dev, &idx))
+ return;
+
+ DRM_DEBUG_KMS("\n");
+
+ ret = mipi_dbi_poweron_conditional_reset(dbidev);
+ if (ret < 0)
+ goto out_exit;
+ if (ret == 1)
+ goto out_enable;
+
+ mipi_dbi_command(dbi, ILI9486_ITFCTR1);
+ mipi_dbi_command(dbi, MIPI_DCS_EXIT_SLEEP_MODE);
+ msleep(250);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT, 0x55);
+
+ mipi_dbi_command(dbi, ILI9486_PWCTRL1, 0x44);
+
+ mipi_dbi_command(dbi, ILI9486_VMCTRL1, 0x00, 0x00, 0x00, 0x00);
+
+ mipi_dbi_command(dbi, ILI9486_PGAMCTRL,
+ 0x0F, 0x1F, 0x1C, 0x0C, 0x0F, 0x08, 0x48, 0x98,
+ 0x37, 0x0A, 0x13, 0x04, 0x11, 0x0D, 0x0);
+ mipi_dbi_command(dbi, ILI9486_NGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+ mipi_dbi_command(dbi, ILI9486_DGAMCTRL,
+ 0x0F, 0x32, 0x2E, 0x0B, 0x0D, 0x05, 0x47, 0x75,
+ 0x37, 0x06, 0x10, 0x03, 0x24, 0x20, 0x00);
+
+ mipi_dbi_command(dbi, MIPI_DCS_SET_DISPLAY_ON);
+ msleep(100);
+
+ out_enable:
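+ /*
+ * MADCTL MX/MY mirror the column/row order and MV exchanges rows
+ * and columns; the combinations below implement 0/90/180/270
+ * degree rotation for this panel wiring.
+ */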
+ switch (dbidev->rotation) {
+ case 90:
+ addr_mode = ILI9486_MADCTL_MY;
+ break;
+ case 180:
+ addr_mode = ILI9486_MADCTL_MV;
+ break;
+ case 270:
+ addr_mode = ILI9486_MADCTL_MX;
+ break;
+ default:
+ addr_mode = ILI9486_MADCTL_MV | ILI9486_MADCTL_MY |
+ ILI9486_MADCTL_MX;
+ break;
+ }
+ addr_mode |= ILI9486_MADCTL_BGR;
+ mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
+ mipi_dbi_enable_flush(dbidev, crtc_state, plane_state);
+ out_exit:
+ drm_dev_exit(idx);
+}
+
+static const struct drm_simple_display_pipe_funcs waveshare_pipe_funcs = {
+ .enable = waveshare_enable,
+ .disable = mipi_dbi_pipe_disable,
+ .update = mipi_dbi_pipe_update,
+ .prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+};
+
+static const struct drm_display_mode waveshare_mode = {
+ DRM_SIMPLE_MODE(480, 320, 73, 49),
+};
+
+DEFINE_DRM_GEM_CMA_FOPS(ili9486_fops);
+
+static struct drm_driver ili9486_driver = {
+ .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
+ .fops = &ili9486_fops,
+ .release = mipi_dbi_release,
+ DRM_GEM_CMA_VMAP_DRIVER_OPS,
+ .debugfs_init = mipi_dbi_debugfs_init,
+ .name = "ili9486",
+ .desc = "Ilitek ILI9486",
+ .date = "20200118",
+ .major = 1,
+ .minor = 0,
+};
+
+static const struct of_device_id ili9486_of_match[] = {
+ { .compatible = "waveshare,rpi-lcd-35" },
+ { .compatible = "ozzmaker,piscreen" },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ili9486_of_match);
+
+static const struct spi_device_id ili9486_id[] = {
+ { "ili9486", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(spi, ili9486_id);
+
+static int ili9486_probe(struct spi_device *spi)
+{
+ struct device *dev = &spi->dev;
+ struct mipi_dbi_dev *dbidev;
+ struct drm_device *drm;
+ struct mipi_dbi *dbi;
+ struct gpio_desc *dc;
+ u32 rotation = 0;
+ int ret;
+
+ dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
+ if (!dbidev)
+ return -ENOMEM;
+
+ dbi = &dbidev->dbi;
+ drm = &dbidev->drm;
+ ret = devm_drm_dev_init(dev, drm, &ili9486_driver);
+ if (ret) {
+ kfree(dbidev);
+ return ret;
+ }
+
+ drm_mode_config_init(drm);
+
+ dbi->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(dbi->reset)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'reset'\n");
+ return PTR_ERR(dbi->reset);
+ }
+
+ dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_LOW);
+ if (IS_ERR(dc)) {
+ DRM_DEV_ERROR(dev, "Failed to get gpio 'dc'\n");
+ return PTR_ERR(dc);
+ }
+
+ dbidev->backlight = devm_of_find_backlight(dev);
+ if (IS_ERR(dbidev->backlight))
+ return PTR_ERR(dbidev->backlight);
+
+ device_property_read_u32(dev, "rotation", &rotation);
+
+ ret = mipi_dbi_spi_init(spi, dbi, dc);
+ if (ret)
+ return ret;
+
+ dbi->command = waveshare_command;
+ dbi->read_commands = NULL;
+
+ ret = mipi_dbi_dev_init(dbidev, &waveshare_pipe_funcs,
+ &waveshare_mode, rotation);
+ if (ret)
+ return ret;
+
+ drm_mode_config_reset(drm);
+
+ ret = drm_dev_register(drm, 0);
+ if (ret)
+ return ret;
+
+ spi_set_drvdata(spi, drm);
+
+ drm_fbdev_generic_setup(drm, 0);
+
+ return 0;
+}
+
+static int ili9486_remove(struct spi_device *spi)
+{
+ struct drm_device *drm = spi_get_drvdata(spi);
+
+ drm_dev_unplug(drm);
+ drm_atomic_helper_shutdown(drm);
+
+ return 0;
+}
+
+static void ili9486_shutdown(struct spi_device *spi)
+{
+ drm_atomic_helper_shutdown(spi_get_drvdata(spi));
+}
+
+static struct spi_driver ili9486_spi_driver = {
+ .driver = {
+ .name = "ili9486",
+ .of_match_table = ili9486_of_match,
+ },
+ .id_table = ili9486_id,
+ .probe = ili9486_probe,
+ .remove = ili9486_remove,
+ .shutdown = ili9486_shutdown,
+};
+module_spi_driver(ili9486_spi_driver);
+
+MODULE_DESCRIPTION("Ilitek ILI9486 DRM driver");
+MODULE_AUTHOR("Kamlesh Gurudasani <[email protected]>");
+MODULE_LICENSE("GPL");
diff --git a/drivers/gpu/drm/tiny/repaper.c b/drivers/gpu/drm/tiny/repaper.c
index 76d179200775..f5ebcaf7ee3a 100644
--- a/drivers/gpu/drm/tiny/repaper.c
+++ b/drivers/gpu/drm/tiny/repaper.c
@@ -17,7 +17,7 @@
#include <linux/dma-buf.h>
#include <linux/gpio/consumer.h>
#include <linux/module.h>
-#include <linux/of_device.h>
+#include <linux/property.h>
#include <linux/sched/clock.h>
#include <linux/spi/spi.h>
#include <linux/thermal.h>
@@ -33,13 +33,13 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modes.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_simple_kms_helper.h>
#define REPAPER_RID_G2_COG_ID 0x12
enum repaper_model {
+ /* 0 is reserved to avoid clashing with NULL */
E1144CS021 = 1,
E1190CS021,
E2200CS021,
@@ -856,18 +856,10 @@ static void repaper_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
repaper_fb_dirty(state->fb);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static const struct drm_simple_display_pipe_funcs repaper_pipe_funcs = {
@@ -995,21 +987,21 @@ static int repaper_probe(struct spi_device *spi)
{
const struct drm_display_mode *mode;
const struct spi_device_id *spi_id;
- const struct of_device_id *match;
struct device *dev = &spi->dev;
enum repaper_model model;
const char *thermal_zone;
struct repaper_epd *epd;
size_t line_buffer_size;
struct drm_device *drm;
+ const void *match;
int ret;
- match = of_match_device(repaper_of_match, dev);
+ match = device_get_match_data(dev);
if (match) {
- model = (enum repaper_model)match->data;
+ model = (enum repaper_model)match;
} else {
spi_id = spi_get_device_id(spi);
- model = spi_id->driver_data;
+ model = (enum repaper_model)spi_id->driver_data;
}
/* The SPI device is used to allocate dma memory */
@@ -1197,7 +1189,6 @@ static void repaper_shutdown(struct spi_device *spi)
static struct spi_driver repaper_spi_driver = {
.driver = {
.name = "repaper",
- .owner = THIS_MODULE,
.of_match_table = repaper_of_match,
},
.id_table = repaper_id,
diff --git a/drivers/gpu/drm/tiny/st7586.c b/drivers/gpu/drm/tiny/st7586.c
index 060cc756194f..9ef559dd3191 100644
--- a/drivers/gpu/drm/tiny/st7586.c
+++ b/drivers/gpu/drm/tiny/st7586.c
@@ -23,7 +23,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_mipi_dbi.h>
#include <drm/drm_rect.h>
-#include <drm/drm_vblank.h>
/* controller-specific commands */
#define ST7586_DISP_MODE_GRAY 0x38
@@ -159,18 +158,10 @@ static void st7586_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_state)
{
struct drm_plane_state *state = pipe->plane.state;
- struct drm_crtc *crtc = &pipe->crtc;
struct drm_rect rect;
if (drm_atomic_helper_damage_merged(old_state, state, &rect))
st7586_fb_dirty(state->fb, &rect);
-
- if (crtc->state->event) {
- spin_lock_irq(&crtc->dev->event_lock);
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- spin_unlock_irq(&crtc->dev->event_lock);
- crtc->state->event = NULL;
- }
}
static void st7586_pipe_enable(struct drm_simple_display_pipe *pipe,
diff --git a/drivers/gpu/drm/tiny/st7735r.c b/drivers/gpu/drm/tiny/st7735r.c
index 3f4487c71684..3cd9b8d9888d 100644
--- a/drivers/gpu/drm/tiny/st7735r.c
+++ b/drivers/gpu/drm/tiny/st7735r.c
@@ -1,8 +1,10 @@
// SPDX-License-Identifier: GPL-2.0+
/*
- * DRM driver for Sitronix ST7735R panels
+ * DRM driver for display panels connected to a Sitronix ST7715R or ST7735R
+ * display controller in SPI mode.
*
* Copyright 2017 David Lechner <[email protected]>
+ * Copyright (C) 2019 Glider bvba
*/
#include <linux/backlight.h>
@@ -37,12 +39,28 @@
#define ST7735R_MY BIT(7)
#define ST7735R_MX BIT(6)
#define ST7735R_MV BIT(5)
+#define ST7735R_RGB BIT(3)
+
+struct st7735r_cfg {
+ const struct drm_display_mode mode;
+ unsigned int left_offset;
+ unsigned int top_offset;
+ unsigned int write_only:1;
+ unsigned int rgb:1; /* RGB (vs. BGR) */
+};
+
+struct st7735r_priv {
+ struct mipi_dbi_dev dbidev; /* Must be first for .release() */
+ const struct st7735r_cfg *cfg;
+};
-static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
- struct drm_crtc_state *crtc_state,
- struct drm_plane_state *plane_state)
+static void st7735r_pipe_enable(struct drm_simple_display_pipe *pipe,
+ struct drm_crtc_state *crtc_state,
+ struct drm_plane_state *plane_state)
{
struct mipi_dbi_dev *dbidev = drm_to_mipi_dbi_dev(pipe->crtc.dev);
+ struct st7735r_priv *priv = container_of(dbidev, struct st7735r_priv,
+ dbidev);
struct mipi_dbi *dbi = &dbidev->dbi;
int ret, idx;
u8 addr_mode;
@@ -87,6 +105,10 @@ static void jd_t18003_t01_pipe_enable(struct drm_simple_display_pipe *pipe,
addr_mode = ST7735R_MY | ST7735R_MV;
break;
}
+
+ if (priv->cfg->rgb)
+ addr_mode |= ST7735R_RGB;
+
mipi_dbi_command(dbi, MIPI_DCS_SET_ADDRESS_MODE, addr_mode);
mipi_dbi_command(dbi, MIPI_DCS_SET_PIXEL_FORMAT,
MIPI_DCS_PIXEL_FMT_16BIT);
@@ -109,15 +131,24 @@ out_exit:
drm_dev_exit(idx);
}
-static const struct drm_simple_display_pipe_funcs jd_t18003_t01_pipe_funcs = {
- .enable = jd_t18003_t01_pipe_enable,
+static const struct drm_simple_display_pipe_funcs st7735r_pipe_funcs = {
+ .enable = st7735r_pipe_enable,
.disable = mipi_dbi_pipe_disable,
.update = mipi_dbi_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
-static const struct drm_display_mode jd_t18003_t01_mode = {
- DRM_SIMPLE_MODE(128, 160, 28, 35),
+static const struct st7735r_cfg jd_t18003_t01_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 160, 28, 35) },
+ /* Cannot read from Adafruit 1.8" display via SPI */
+ .write_only = true,
+};
+
+static const struct st7735r_cfg rh128128t_cfg = {
+ .mode = { DRM_SIMPLE_MODE(128, 128, 25, 26) },
+ .left_offset = 2,
+ .top_offset = 3,
+ .rgb = true,
};
DEFINE_DRM_GEM_CMA_FOPS(st7735r_fops);
@@ -136,13 +167,14 @@ static struct drm_driver st7735r_driver = {
};
static const struct of_device_id st7735r_of_match[] = {
- { .compatible = "jianda,jd-t18003-t01" },
+ { .compatible = "jianda,jd-t18003-t01", .data = &jd_t18003_t01_cfg },
+ { .compatible = "okaya,rh128128t", .data = &rh128128t_cfg },
{ },
};
MODULE_DEVICE_TABLE(of, st7735r_of_match);
static const struct spi_device_id st7735r_id[] = {
- { "jd-t18003-t01", 0 },
+ { "jd-t18003-t01", (uintptr_t)&jd_t18003_t01_cfg },
{ },
};
MODULE_DEVICE_TABLE(spi, st7735r_id);
@@ -150,17 +182,26 @@ MODULE_DEVICE_TABLE(spi, st7735r_id);
static int st7735r_probe(struct spi_device *spi)
{
struct device *dev = &spi->dev;
+ const struct st7735r_cfg *cfg;
struct mipi_dbi_dev *dbidev;
+ struct st7735r_priv *priv;
struct drm_device *drm;
struct mipi_dbi *dbi;
struct gpio_desc *dc;
u32 rotation = 0;
int ret;
- dbidev = kzalloc(sizeof(*dbidev), GFP_KERNEL);
- if (!dbidev)
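+ /*
+ * OF probing supplies the panel config via match data; plain SPI
+ * probing falls back to the spi_device_id driver_data.
+ */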
+ cfg = device_get_match_data(&spi->dev);
+ if (!cfg)
+ cfg = (void *)spi_get_device_id(spi)->driver_data;
+
+ priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
return -ENOMEM;
+ dbidev = &priv->dbidev;
+ priv->cfg = cfg;
+
dbi = &dbidev->dbi;
drm = &dbidev->drm;
ret = devm_drm_dev_init(dev, drm, &st7735r_driver);
@@ -193,10 +234,14 @@ static int st7735r_probe(struct spi_device *spi)
if (ret)
return ret;
- /* Cannot read from Adafruit 1.8" display via SPI */
- dbi->read_commands = NULL;
+ if (cfg->write_only)
+ dbi->read_commands = NULL;
+
+ dbidev->left_offset = cfg->left_offset;
+ dbidev->top_offset = cfg->top_offset;
- ret = mipi_dbi_dev_init(dbidev, &jd_t18003_t01_pipe_funcs, &jd_t18003_t01_mode, rotation);
+ ret = mipi_dbi_dev_init(dbidev, &st7735r_pipe_funcs, &cfg->mode,
+ rotation);
if (ret)
return ret;
@@ -231,7 +276,6 @@ static void st7735r_shutdown(struct spi_device *spi)
static struct spi_driver st7735r_spi_driver = {
.driver = {
.name = "st7735r",
- .owner = THIS_MODULE,
.of_match_table = st7735r_of_match,
},
.id_table = st7735r_id,
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 5df596fb0280..9e07c3f75156 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -145,34 +145,12 @@ static inline uint32_t ttm_bo_type_flags(unsigned type)
return 1 << (type);
}
-static void ttm_bo_release_list(struct kref *list_kref)
-{
- struct ttm_buffer_object *bo =
- container_of(list_kref, struct ttm_buffer_object, list_kref);
- size_t acc_size = bo->acc_size;
-
- BUG_ON(kref_read(&bo->list_kref));
- BUG_ON(kref_read(&bo->kref));
- BUG_ON(bo->mem.mm_node != NULL);
- BUG_ON(!list_empty(&bo->lru));
- BUG_ON(!list_empty(&bo->ddestroy));
- ttm_tt_destroy(bo->ttm);
- atomic_dec(&ttm_bo_glob.bo_count);
- dma_fence_put(bo->moving);
- if (!ttm_bo_uses_embedded_gem_object(bo))
- dma_resv_fini(&bo->base._resv);
- bo->destroy(bo);
- ttm_mem_global_free(&ttm_mem_glob, acc_size);
-}
-
static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
struct ttm_mem_reg *mem)
{
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man;
- dma_resv_assert_held(bo->base.resv);
-
if (!list_empty(&bo->lru))
return;
@@ -181,21 +159,14 @@ static void ttm_bo_add_mem_to_lru(struct ttm_buffer_object *bo,
man = &bdev->man[mem->mem_type];
list_add_tail(&bo->lru, &man->lru[bo->priority]);
- kref_get(&bo->list_kref);
if (!(man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm &&
!(bo->ttm->page_flags & (TTM_PAGE_FLAG_SG |
TTM_PAGE_FLAG_SWAPPED))) {
list_add_tail(&bo->swap, &ttm_bo_glob.swap_lru[bo->priority]);
- kref_get(&bo->list_kref);
}
}
-static void ttm_bo_ref_bug(struct kref *list_kref)
-{
- BUG();
-}
-
static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
{
struct ttm_bo_device *bdev = bo->bdev;
@@ -203,12 +174,10 @@ static void ttm_bo_del_from_lru(struct ttm_buffer_object *bo)
if (!list_empty(&bo->swap)) {
list_del_init(&bo->swap);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
if (!list_empty(&bo->lru)) {
list_del_init(&bo->lru);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
notify = true;
}
@@ -372,14 +341,7 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
}
moved:
- if (bo->evicted) {
- if (bdev->driver->invalidate_caches) {
- ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
- if (ret)
- pr_err("Can not flush read caches\n");
- }
- bo->evicted = false;
- }
+ bo->evicted = false;
if (bo->mem.mm_node)
bo->offset = (bo->mem.start << PAGE_SHIFT) +
@@ -428,92 +390,49 @@ static int ttm_bo_individualize_resv(struct ttm_buffer_object *bo)
BUG_ON(!dma_resv_trylock(&bo->base._resv));
r = dma_resv_copy_fences(&bo->base._resv, bo->base.resv);
+ dma_resv_unlock(&bo->base._resv);
if (r)
- dma_resv_unlock(&bo->base._resv);
+ return r;
+
+ if (bo->type != ttm_bo_type_sg) {
+ /* This works because the BO is about to be destroyed and nobody
+ * references it any more. The only tricky case is the trylock on
+ * the resv object while holding the lru_lock.
+ */
+ spin_lock(&ttm_bo_glob.lru_lock);
+ bo->base.resv = &bo->base._resv;
+ spin_unlock(&ttm_bo_glob.lru_lock);
+ }
return r;
}
static void ttm_bo_flush_all_fences(struct ttm_buffer_object *bo)
{
+ struct dma_resv *resv = &bo->base._resv;
struct dma_resv_list *fobj;
struct dma_fence *fence;
int i;
- fobj = dma_resv_get_list(&bo->base._resv);
- fence = dma_resv_get_excl(&bo->base._resv);
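+ /*
+ * No reservation is held at this point, so an RCU read lock is
+ * enough to walk the fence lists safely.
+ */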
+ rcu_read_lock();
+ fobj = rcu_dereference(resv->fence);
+ fence = rcu_dereference(resv->fence_excl);
if (fence && !fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
for (i = 0; fobj && i < fobj->shared_count; ++i) {
- fence = rcu_dereference_protected(fobj->shared[i],
- dma_resv_held(bo->base.resv));
+ fence = rcu_dereference(fobj->shared[i]);
if (!fence->ops->signaled)
dma_fence_enable_sw_signaling(fence);
}
-}
-
-static void ttm_bo_cleanup_refs_or_queue(struct ttm_buffer_object *bo)
-{
- struct ttm_bo_device *bdev = bo->bdev;
- int ret;
-
- ret = ttm_bo_individualize_resv(bo);
- if (ret) {
- /* Last resort, if we fail to allocate memory for the
- * fences block for the BO to become idle
- */
- dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
- 30 * HZ);
- spin_lock(&ttm_bo_glob.lru_lock);
- goto error;
- }
-
- spin_lock(&ttm_bo_glob.lru_lock);
- ret = dma_resv_trylock(bo->base.resv) ? 0 : -EBUSY;
- if (!ret) {
- if (dma_resv_test_signaled_rcu(&bo->base._resv, true)) {
- ttm_bo_del_from_lru(bo);
- spin_unlock(&ttm_bo_glob.lru_lock);
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
- ttm_bo_cleanup_memtype_use(bo);
- dma_resv_unlock(bo->base.resv);
- return;
- }
-
- ttm_bo_flush_all_fences(bo);
-
- /*
- * Make NO_EVICT bos immediately available to
- * shrinkers, now that they are queued for
- * destruction.
- */
- if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
- bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
- ttm_bo_move_to_lru_tail(bo, NULL);
- }
-
- dma_resv_unlock(bo->base.resv);
- }
- if (bo->base.resv != &bo->base._resv)
- dma_resv_unlock(&bo->base._resv);
-
-error:
- kref_get(&bo->list_kref);
- list_add_tail(&bo->ddestroy, &bdev->ddestroy);
- spin_unlock(&ttm_bo_glob.lru_lock);
-
- schedule_delayed_work(&bdev->wq,
- ((HZ / 100) < 1) ? 1 : HZ / 100);
+ rcu_read_unlock();
}
/**
* function ttm_bo_cleanup_refs
- * If bo idle, remove from delayed- and lru lists, and unref.
- * If not idle, do nothing.
+ * If bo idle, remove from lru lists, and unref.
+ * If not idle, block if possible.
*
* Must be called with lru_lock and reservation held, this function
* will drop the lru lock and optionally the reservation lock before returning.
@@ -527,14 +446,9 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait_gpu,
bool unlock_resv)
{
- struct dma_resv *resv;
+ struct dma_resv *resv = &bo->base._resv;
int ret;
- if (unlikely(list_empty(&bo->ddestroy)))
- resv = bo->base.resv;
- else
- resv = &bo->base._resv;
-
if (dma_resv_test_signaled_rcu(resv, true))
ret = 0;
else
@@ -547,9 +461,8 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
dma_resv_unlock(bo->base.resv);
spin_unlock(&ttm_bo_glob.lru_lock);
- lret = dma_resv_wait_timeout_rcu(resv, true,
- interruptible,
- 30 * HZ);
+ lret = dma_resv_wait_timeout_rcu(resv, true, interruptible,
+ 30 * HZ);
if (lret < 0)
return lret;
@@ -581,14 +494,14 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
ttm_bo_del_from_lru(bo);
list_del_init(&bo->ddestroy);
- kref_put(&bo->list_kref, ttm_bo_ref_bug);
-
spin_unlock(&ttm_bo_glob.lru_lock);
ttm_bo_cleanup_memtype_use(bo);
if (unlock_resv)
dma_resv_unlock(bo->base.resv);
+ ttm_bo_put(bo);
+
return 0;
}
@@ -610,8 +523,9 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
bo = list_first_entry(&bdev->ddestroy, struct ttm_buffer_object,
ddestroy);
- kref_get(&bo->list_kref);
list_move_tail(&bo->ddestroy, &removed);
+ if (!ttm_bo_get_unless_zero(bo))
+ continue;
if (remove_all || bo->base.resv != &bo->base._resv) {
spin_unlock(&glob->lru_lock);
@@ -626,7 +540,7 @@ static bool ttm_bo_delayed_delete(struct ttm_bo_device *bdev, bool remove_all)
spin_unlock(&glob->lru_lock);
}
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
spin_lock(&glob->lru_lock);
}
list_splice_tail(&removed, &bdev->ddestroy);
@@ -652,16 +566,69 @@ static void ttm_bo_release(struct kref *kref)
container_of(kref, struct ttm_buffer_object, kref);
struct ttm_bo_device *bdev = bo->bdev;
struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];
+ size_t acc_size = bo->acc_size;
+ int ret;
- if (bo->bdev->driver->release_notify)
- bo->bdev->driver->release_notify(bo);
+ if (!bo->deleted) {
+ ret = ttm_bo_individualize_resv(bo);
+ if (ret) {
+ /* Last resort: if we fail to allocate memory for the
+ * fences, block until the BO becomes idle.
+ */
+ dma_resv_wait_timeout_rcu(bo->base.resv, true, false,
+ 30 * HZ);
+ }
- drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
- ttm_mem_io_lock(man, false);
- ttm_mem_io_free_vm(bo);
- ttm_mem_io_unlock(man);
- ttm_bo_cleanup_refs_or_queue(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ if (bo->bdev->driver->release_notify)
+ bo->bdev->driver->release_notify(bo);
+
+ drm_vma_offset_remove(bdev->vma_manager, &bo->base.vma_node);
+ ttm_mem_io_lock(man, false);
+ ttm_mem_io_free_vm(bo);
+ ttm_mem_io_unlock(man);
+ }
+
+ if (!dma_resv_test_signaled_rcu(bo->base.resv, true)) {
+ /* The BO is not idle, resurrect it for delayed destroy */
+ ttm_bo_flush_all_fences(bo);
+ bo->deleted = true;
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+
+ /*
+ * Make NO_EVICT bos immediately available to
+ * shrinkers, now that they are queued for
+ * destruction.
+ */
+ if (bo->mem.placement & TTM_PL_FLAG_NO_EVICT) {
+ bo->mem.placement &= ~TTM_PL_FLAG_NO_EVICT;
+ ttm_bo_del_from_lru(bo);
+ ttm_bo_add_mem_to_lru(bo, &bo->mem);
+ }
+
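+ /*
+ * The last regular reference is gone, so re-init the kref to
+ * hand one reference to the ddestroy list; the delayed destroy
+ * worker drops it again via ttm_bo_cleanup_refs().
+ */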
+ kref_init(&bo->kref);
+ list_add_tail(&bo->ddestroy, &bdev->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ schedule_delayed_work(&bdev->wq,
+ ((HZ / 100) < 1) ? 1 : HZ / 100);
+ return;
+ }
+
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_del_from_lru(bo);
+ list_del(&bo->ddestroy);
+ spin_unlock(&ttm_bo_glob.lru_lock);
+
+ ttm_bo_cleanup_memtype_use(bo);
+
+ BUG_ON(bo->mem.mm_node != NULL);
+ atomic_dec(&ttm_bo_glob.bo_count);
+ dma_fence_put(bo->moving);
+ if (!ttm_bo_uses_embedded_gem_object(bo))
+ dma_resv_fini(&bo->base._resv);
+ bo->destroy(bo);
+ ttm_mem_global_free(&ttm_mem_glob, acc_size);
}
void ttm_bo_put(struct ttm_buffer_object *bo)
@@ -764,8 +731,7 @@ static bool ttm_bo_evict_swapout_allowable(struct ttm_buffer_object *bo,
if (bo->base.resv == ctx->resv) {
dma_resv_assert_held(bo->base.resv);
- if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT
- || !list_empty(&bo->ddestroy))
+ if (ctx->flags & TTM_OPT_FLAG_ALLOW_RES_EVICT)
ret = true;
*locked = false;
if (busy)
@@ -846,6 +812,11 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
dma_resv_unlock(bo->base.resv);
continue;
}
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
+ }
break;
}
@@ -857,21 +828,19 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
}
if (!bo) {
- if (busy_bo)
- kref_get(&busy_bo->list_kref);
+ if (busy_bo && !ttm_bo_get_unless_zero(busy_bo))
+ busy_bo = NULL;
spin_unlock(&ttm_bo_glob.lru_lock);
ret = ttm_mem_evict_wait_busy(busy_bo, ctx, ticket);
if (busy_bo)
- kref_put(&busy_bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(busy_bo);
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, ctx->interruptible,
ctx->no_wait_gpu, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -881,7 +850,7 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
if (locked)
ttm_bo_unreserve(bo);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1226,6 +1195,18 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
uint32_t new_flags;
dma_resv_assert_held(bo->base.resv);
+
+ /*
+ * Remove the backing store if no placement is given.
+ */
+ if (!placement->num_placement && !placement->num_busy_placement) {
+ ret = ttm_bo_pipeline_gutting(bo);
+ if (ret)
+ return ret;
+
+ return ttm_tt_create(bo, false);
+ }
+
/*
* Check whether we need to move buffer.
*/
@@ -1293,7 +1274,6 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
bo->destroy = destroy ? destroy : ttm_bo_default_destroy;
kref_init(&bo->kref);
- kref_init(&bo->list_kref);
INIT_LIST_HEAD(&bo->lru);
INIT_LIST_HEAD(&bo->ddestroy);
INIT_LIST_HEAD(&bo->swap);
@@ -1813,11 +1793,18 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
spin_lock(&glob->lru_lock);
for (i = 0; i < TTM_MAX_BO_PRIORITY; ++i) {
list_for_each_entry(bo, &glob->swap_lru[i], swap) {
- if (ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
- NULL)) {
- ret = 0;
- break;
+ if (!ttm_bo_evict_swapout_allowable(bo, ctx, &locked,
+ NULL))
+ continue;
+
+ if (!ttm_bo_get_unless_zero(bo)) {
+ if (locked)
+ dma_resv_unlock(bo->base.resv);
+ continue;
}
+
+ ret = 0;
+ break;
}
if (!ret)
break;
@@ -1828,11 +1815,9 @@ int ttm_bo_swapout(struct ttm_bo_global *glob, struct ttm_operation_ctx *ctx)
return ret;
}
- kref_get(&bo->list_kref);
-
- if (!list_empty(&bo->ddestroy)) {
+ if (bo->deleted) {
ret = ttm_bo_cleanup_refs(bo, false, false, locked);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
@@ -1886,7 +1871,7 @@ out:
*/
if (locked)
dma_resv_unlock(bo->base.resv);
- kref_put(&bo->list_kref, ttm_bo_release_list);
+ ttm_bo_put(bo);
return ret;
}
EXPORT_SYMBOL(ttm_bo_swapout);
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 49ed55779128..52d2b71f1588 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -507,14 +507,14 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
fbo->base.moving = NULL;
drm_vma_node_reset(&fbo->base.base.vma_node);
- kref_init(&fbo->base.list_kref);
kref_init(&fbo->base.kref);
fbo->base.destroy = &ttm_transfered_destroy;
fbo->base.acc_size = 0;
- if (bo->base.resv == &bo->base._resv)
+ if (bo->type != ttm_bo_type_sg)
fbo->base.base.resv = &fbo->base.base._resv;
dma_resv_init(&fbo->base.base._resv);
+ fbo->base.base.dev = NULL;
ret = dma_resv_trylock(&fbo->base.base._resv);
WARN_ON(!ret);
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index bf876faea592..faefaaef7909 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -604,7 +604,7 @@ static struct dma_pool *ttm_dma_pool_init(struct device *dev, gfp_t flags,
p = pool->name;
for (i = 0; i < ARRAY_SIZE(t); i++) {
if (type & t[i]) {
- p += snprintf(p, sizeof(pool->name) - (p - pool->name),
+ p += scnprintf(p, sizeof(pool->name) - (p - pool->name),
"%s", n[i]);
}
}
diff --git a/drivers/gpu/drm/udl/udl_connector.c b/drivers/gpu/drm/udl/udl_connector.c
index e9671d38b4a0..0afdfb0d1fe1 100644
--- a/drivers/gpu/drm/udl/udl_connector.c
+++ b/drivers/gpu/drm/udl/udl_connector.c
@@ -109,7 +109,6 @@ static const struct drm_connector_helper_funcs udl_connector_helper_funcs = {
};
static const struct drm_connector_funcs udl_connector_funcs = {
- .dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
.detect = udl_detect,
.fill_modes = drm_helper_probe_single_connector_modes,
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 22af17959053..d59ebac70b15 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -375,8 +375,6 @@ udl_simple_display_pipe_enable(struct drm_simple_display_pipe *pipe,
char *wrptr;
int color_depth = UDL_COLOR_DEPTH_16BPP;
- crtc_state->no_vblank = true;
-
buf = (char *)udl->mode_buf;
/* This first section has to do with setting the base address on the
@@ -428,14 +426,6 @@ udl_simple_display_pipe_disable(struct drm_simple_display_pipe *pipe)
udl_submit_urb(dev, urb, buf - (char *)urb->transfer_buffer);
}
-static int
-udl_simple_display_pipe_check(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state,
- struct drm_crtc_state *crtc_state)
-{
- return 0;
-}
-
static void
udl_simple_display_pipe_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
@@ -457,7 +447,6 @@ struct drm_simple_display_pipe_funcs udl_simple_display_pipe_funcs = {
.mode_valid = udl_simple_display_pipe_mode_valid,
.enable = udl_simple_display_pipe_enable,
.disable = udl_simple_display_pipe_disable,
- .check = udl_simple_display_pipe_check,
.update = udl_simple_display_pipe_update,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
};
diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h
index 9a35c555ec52..ac2603334587 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.h
+++ b/drivers/gpu/drm/v3d/v3d_drv.h
@@ -254,27 +254,42 @@ struct v3d_csd_job {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define wait_for(COND, MS) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- msleep(1); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
+ } \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
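+
+/*
+ * A minimal usage sketch (the register expression is illustrative):
+ *
+ *	err = wait_for(readl(reg) & BIT(0), 10);
+ *
+ * polls with an exponential backoff from 10 us to 1 ms and returns
+ * -ETIMEDOUT if the bit is still clear after 10 ms.
+ */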
+
static inline unsigned long nsecs_to_jiffies_timeout(const u64 n)
{
/* nsecs_to_jiffies64() does not guard against overflow */
diff --git a/drivers/gpu/drm/vboxvideo/vbox_mode.c b/drivers/gpu/drm/vboxvideo/vbox_mode.c
index 19612132c8a3..0883a435e62b 100644
--- a/drivers/gpu/drm/vboxvideo/vbox_mode.c
+++ b/drivers/gpu/drm/vboxvideo/vbox_mode.c
@@ -18,7 +18,6 @@
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "hgsmi_channels.h"
#include "vbox_drv.h"
@@ -226,17 +225,6 @@ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
- struct drm_pending_vblank_event *event;
- unsigned long flags;
-
- if (crtc->state && crtc->state->event) {
- event = crtc->state->event;
- crtc->state->event = NULL;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
- }
}
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
@@ -838,6 +826,7 @@ static int vbox_connector_init(struct drm_device *dev,
static const struct drm_mode_config_funcs vbox_mode_funcs = {
.fb_create = drm_gem_fb_create_with_dirty,
+ .mode_valid = drm_vram_helper_mode_valid,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
diff --git a/drivers/gpu/drm/vboxvideo/vboxvideo.h b/drivers/gpu/drm/vboxvideo/vboxvideo.h
index 0592004f71aa..a5de40fe1a76 100644
--- a/drivers/gpu/drm/vboxvideo/vboxvideo.h
+++ b/drivers/gpu/drm/vboxvideo/vboxvideo.h
@@ -138,7 +138,7 @@ struct vbva_buffer {
u32 data_len;
/* variable size for the rest of the vbva_buffer area in VRAM. */
- u8 data[0];
+ u8 data[];
} __packed;
#define VBVA_MAX_RECORD_SIZE (128 * 1024 * 1024)
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index b00e20f5ce05..1208258ad3b2 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -84,13 +84,14 @@ static const struct debugfs_reg32 crtc_regs[] = {
VC4_REG32(PV_HACT_ACT),
};
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode)
+static bool vc4_crtc_get_scanout_position(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode)
{
+ struct drm_device *dev = crtc->dev;
struct vc4_dev *vc4 = to_vc4_dev(dev);
- struct drm_crtc *crtc = drm_crtc_from_index(dev, crtc_id);
struct vc4_crtc *vc4_crtc = to_vc4_crtc(crtc);
u32 val;
int fifo_lines;
@@ -1030,6 +1031,7 @@ static const struct drm_crtc_funcs vc4_crtc_funcs = {
.gamma_set = drm_atomic_helper_legacy_gamma_set,
.enable_vblank = vc4_enable_vblank,
.disable_vblank = vc4_disable_vblank,
+ .get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};
static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
@@ -1039,6 +1041,7 @@ static const struct drm_crtc_helper_funcs vc4_crtc_helper_funcs = {
.atomic_flush = vc4_crtc_atomic_flush,
.atomic_enable = vc4_crtc_atomic_enable,
.atomic_disable = vc4_crtc_atomic_disable,
+ .get_scanout_position = vc4_crtc_get_scanout_position,
};
static const struct vc4_crtc_data pv0_data = {
diff --git a/drivers/gpu/drm/vc4/vc4_dpi.c b/drivers/gpu/drm/vc4/vc4_dpi.c
index c586325de2a5..6dfede03396e 100644
--- a/drivers/gpu/drm/vc4/vc4_dpi.c
+++ b/drivers/gpu/drm/vc4/vc4_dpi.c
@@ -252,7 +252,7 @@ static int vc4_dpi_init_bridge(struct vc4_dpi *dpi)
bridge = drm_panel_bridge_add_typed(panel,
DRM_MODE_CONNECTOR_DPI);
- return drm_bridge_attach(dpi->encoder, bridge, NULL);
+ return drm_bridge_attach(dpi->encoder, bridge, NULL, 0);
}
static int vc4_dpi_bind(struct device *dev, struct device *master, void *data)
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 5e6fb6c2307f..76f93b662766 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -190,9 +190,6 @@ static struct drm_driver vc4_drm_driver = {
.irq_postinstall = vc4_irq_postinstall,
.irq_uninstall = vc4_irq_uninstall,
- .get_scanout_position = vc4_crtc_get_scanoutpos,
- .get_vblank_timestamp = drm_calc_vbltimestamp_from_scanoutpos,
-
#if defined(CONFIG_DEBUG_FS)
.debugfs_init = vc4_debugfs_init,
#endif
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 6627b20c99e9..139d25a8328e 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -65,7 +65,7 @@ struct vc4_perfmon {
* Note that counter values can't be reset, but you can fake a reset by
* destroying the perfmon and creating a new one.
*/
- u64 counters[0];
+ u64 counters[];
};
struct vc4_dev {
@@ -677,32 +677,41 @@ struct vc4_validated_shader_info {
};
/**
- * _wait_for - magic (register) wait macro
+ * __wait_for - magic wait macro
*
- * Does the right thing for modeset paths when run under kdgb or similar atomic
- * contexts. Note that it's important that we check the condition again after
- * having timed out, since the timeout could be due to preemption or similar and
- * we've never had a chance to check the condition before the timeout.
+ * Macro to help avoid open coding check/wait/timeout patterns. Note that it's
+ * important that we check the condition again after having timed out, since the
+ * timeout could be due to preemption or similar and we've never had a chance to
+ * check the condition before the timeout.
*/
-#define _wait_for(COND, MS, W) ({ \
- unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1; \
- int ret__ = 0; \
- while (!(COND)) { \
- if (time_after(jiffies, timeout__)) { \
- if (!(COND)) \
- ret__ = -ETIMEDOUT; \
+#define __wait_for(OP, COND, US, Wmin, Wmax) ({ \
+ const ktime_t end__ = ktime_add_ns(ktime_get_raw(), 1000ll * (US)); \
+ long wait__ = (Wmin); /* recommended min for usleep is 10 us */ \
+ int ret__; \
+ might_sleep(); \
+ for (;;) { \
+ const bool expired__ = ktime_after(ktime_get_raw(), end__); \
+ OP; \
+ /* Guarantee COND check prior to timeout */ \
+ barrier(); \
+ if (COND) { \
+ ret__ = 0; \
break; \
} \
- if (W && drm_can_sleep()) { \
- msleep(W); \
- } else { \
- cpu_relax(); \
+ if (expired__) { \
+ ret__ = -ETIMEDOUT; \
+ break; \
} \
+ usleep_range(wait__, wait__ * 2); \
+ if (wait__ < (Wmax)) \
+ wait__ <<= 1; \
} \
ret__; \
})
-#define wait_for(COND, MS) _wait_for(COND, MS, 1)
+#define _wait_for(COND, US, Wmin, Wmax) __wait_for(, (COND), (US), (Wmin), \
+ (Wmax))
+#define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000)
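For context, the reworked helpers take a condition plus a timeout in milliseconds and sleep with exponential backoff between polls. A minimal sketch of a caller, with a hypothetical register and status bit:

	/* Poll a (hypothetical) status bit for up to 10 ms, backing off
	 * from 10 us to 1 ms between reads; returns -ETIMEDOUT on expiry. */
	int ret = wait_for(readl(status_reg) & STATUS_READY, 10);
	if (ret)
		return ret;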
/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
@@ -743,10 +752,6 @@ void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);
/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
-bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
void vc4_crtc_handle_vblank(struct vc4_crtc *crtc);
void vc4_crtc_txp_armed(struct drm_crtc_state *state);
void vc4_crtc_get_margins(struct drm_crtc_state *state,
diff --git a/drivers/gpu/drm/vc4/vc4_dsi.c b/drivers/gpu/drm/vc4/vc4_dsi.c
index fd8a2eb60505..d99b1d526651 100644
--- a/drivers/gpu/drm/vc4/vc4_dsi.c
+++ b/drivers/gpu/drm/vc4/vc4_dsi.c
@@ -1619,7 +1619,7 @@ static int vc4_dsi_bind(struct device *dev, struct device *master, void *data)
DRM_MODE_ENCODER_DSI, NULL);
drm_encoder_helper_add(dsi->encoder, &vc4_dsi_encoder_helper_funcs);
- ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL);
+ ret = drm_bridge_attach(dsi->encoder, dsi->bridge, NULL, 0);
if (ret) {
dev_err(dev, "bridge attach failed: %d\n", ret);
return ret;
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 4934127f0d76..91e408f7a56e 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -139,7 +139,7 @@ static enum vc4_scaling_mode vc4_get_scaling_mode(u32 src, u32 dst)
static bool plane_enabled(struct drm_plane_state *state)
{
- return state->fb && state->crtc;
+ return state->fb && !WARN_ON(!state->crtc);
}
static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 5bd60ded3d81..909eba43664a 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -196,9 +196,10 @@ static struct drm_gem_object *vgem_gem_create(struct drm_device *dev,
return ERR_CAST(obj);
ret = drm_gem_handle_create(file, &obj->base, handle);
- drm_gem_object_put_unlocked(&obj->base);
- if (ret)
+ if (ret) {
+ drm_gem_object_put_unlocked(&obj->base);
return ERR_PTR(ret);
+ }
return &obj->base;
}
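The reordering matters because drm_gem_handle_create() takes its own reference for the new handle; the creation reference must therefore outlive the return of the object pointer. A sketch of the reference flow after the fix:

	/* create:                 refcount = 1 (creation ref)
	 * drm_gem_handle_create:  +1, owned by the new handle
	 * error path:             drop the creation ref, return ERR_PTR
	 * success path:           return obj; the caller (see the
	 *                         dumb_create hunk below) drops the
	 *                         creation ref once finished with it. */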
@@ -221,7 +222,9 @@ static int vgem_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
args->size = gem_object->size;
args->pitch = pitch;
- DRM_DEBUG("Created object of size %lld\n", size);
+ drm_gem_object_put_unlocked(gem_object);
+
+ DRM_DEBUG("Created object of size %llu\n", args->size);
return 0;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_debugfs.c b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
index 5156e6b279db..e27120d512b0 100644
--- a/drivers/gpu/drm/virtio/virtgpu_debugfs.c
+++ b/drivers/gpu/drm/virtio/virtgpu_debugfs.c
@@ -47,6 +47,7 @@ static int virtio_gpu_features(struct seq_file *m, void *data)
virtio_add_bool(m, "virgl", vgdev->has_virgl_3d);
virtio_add_bool(m, "edid", vgdev->has_edid);
+ virtio_add_bool(m, "indirect", vgdev->has_indirect);
virtio_add_int(m, "cap sets", vgdev->num_capsets);
virtio_add_int(m, "scanouts", vgdev->num_scanouts);
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 0966208ec30d..2b7e6ae65546 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -30,7 +30,6 @@
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_probe_helper.h>
-#include <drm/drm_vblank.h>
#include "virtgpu_drv.h"
@@ -91,6 +90,7 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0,
crtc->mode.hdisplay,
crtc->mode.vdisplay, 0, 0);
+ virtio_gpu_notify(vgdev);
}
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -109,6 +109,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+ virtio_gpu_notify(vgdev);
output->enabled = false;
}
@@ -121,13 +122,6 @@ static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
static void virtio_gpu_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
- unsigned long flags;
-
- spin_lock_irqsave(&crtc->dev->event_lock, flags);
- if (crtc->state->event)
- drm_crtc_send_vblank_event(crtc, crtc->state->event);
- crtc->state->event = NULL;
- spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
}
static const struct drm_crtc_helper_funcs virtio_gpu_crtc_helper_funcs = {
@@ -332,6 +326,7 @@ static void vgdev_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
drm_atomic_helper_commit_planes(dev, state, 0);
+ drm_atomic_helper_fake_vblank(state);
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_wait_for_vblanks(dev, state);
@@ -375,6 +370,5 @@ void virtio_gpu_modeset_fini(struct virtio_gpu_device *vgdev)
for (i = 0 ; i < vgdev->num_scanouts; ++i)
kfree(vgdev->outputs[i].edid);
- drm_atomic_helper_shutdown(vgdev->ddev);
drm_mode_config_cleanup(vgdev->ddev);
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.c b/drivers/gpu/drm/virtio/virtgpu_drv.c
index 8cf27af3ad53..ab4bed78e656 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.c
@@ -31,6 +31,7 @@
#include <linux/pci.h>
#include <drm/drm.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
@@ -135,7 +136,8 @@ static void virtio_gpu_remove(struct virtio_device *vdev)
{
struct drm_device *dev = vdev->priv;
- drm_dev_unregister(dev);
+ drm_dev_unplug(dev);
+ drm_atomic_helper_shutdown(dev);
virtio_gpu_deinit(dev);
drm_dev_put(dev);
}
@@ -214,4 +216,6 @@ static struct drm_driver driver = {
.major = DRIVER_MAJOR,
.minor = DRIVER_MINOR,
.patchlevel = DRIVER_PATCHLEVEL,
+
+ .release = virtio_gpu_release,
};
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 7e69c06e168e..c1824bdf2418 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -32,6 +32,7 @@
#include <linux/virtio_gpu.h>
#include <drm/drm_atomic.h>
+#include <drm/drm_drv.h>
#include <drm/drm_encoder.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
@@ -68,15 +69,21 @@ struct virtio_gpu_object_params {
struct virtio_gpu_object {
struct drm_gem_shmem_object base;
uint32_t hw_res_handle;
-
- struct sg_table *pages;
- uint32_t mapped;
bool dumb;
bool created;
};
#define gem_to_virtio_gpu_obj(gobj) \
container_of((gobj), struct virtio_gpu_object, base.base)
+struct virtio_gpu_object_shmem {
+ struct virtio_gpu_object base;
+ struct sg_table *pages;
+ uint32_t mapped;
+};
+
+#define to_virtio_gpu_shmem(virtio_gpu_object) \
+ container_of((virtio_gpu_object), struct virtio_gpu_object_shmem, base)
+
struct virtio_gpu_object_array {
struct ww_acquire_ctx ticket;
struct list_head next;
@@ -114,6 +121,7 @@ struct virtio_gpu_vbuffer {
char *resp_buf;
int resp_size;
virtio_gpu_resp_cb resp_cb;
+ void *resp_cb_data;
struct virtio_gpu_object_array *objs;
struct list_head list;
@@ -175,10 +183,8 @@ struct virtio_gpu_device {
struct virtio_gpu_queue ctrlq;
struct virtio_gpu_queue cursorq;
struct kmem_cache *vbufs;
- bool vqs_ready;
- bool disable_notify;
- bool pending_notify;
+ atomic_t pending_commands;
struct ida resource_ida;
@@ -193,6 +199,7 @@ struct virtio_gpu_device {
bool has_virgl_3d;
bool has_edid;
+ bool has_indirect;
struct work_struct config_changed_work;
@@ -207,6 +214,8 @@ struct virtio_gpu_device {
struct virtio_gpu_fpriv {
uint32_t ctx_id;
+ bool context_created;
+ struct mutex context_lock;
};
/* virtio_ioctl.c */
@@ -216,6 +225,7 @@ extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
/* virtio_kms.c */
int virtio_gpu_init(struct drm_device *dev);
void virtio_gpu_deinit(struct drm_device *dev);
+void virtio_gpu_release(struct drm_device *dev);
int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file);
void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file);
@@ -262,7 +272,7 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs,
struct virtio_gpu_fence *fence);
void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id);
+ struct virtio_gpu_object *bo);
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
uint64_t offset,
uint32_t width, uint32_t height,
@@ -279,9 +289,8 @@ void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
uint32_t x, uint32_t y);
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence);
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj);
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
@@ -332,8 +341,7 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work);
void virtio_gpu_dequeue_cursor_func(struct work_struct *work);
void virtio_gpu_dequeue_fence_func(struct work_struct *work);
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev);
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev);
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev);
/* virtio_gpu_display.c */
void virtio_gpu_modeset_init(struct virtio_gpu_device *vgdev);
@@ -355,12 +363,16 @@ void virtio_gpu_fence_event_process(struct virtio_gpu_device *vdev,
u64 last_seq);
/* virtio_gpu_object */
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo);
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size);
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
struct virtio_gpu_fence *fence);
+
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo);
+
/* virtgpu_prime.c */
struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
diff --git a/drivers/gpu/drm/virtio/virtgpu_gem.c b/drivers/gpu/drm/virtio/virtgpu_gem.c
index 0a2b62279647..0d6152c99a27 100644
--- a/drivers/gpu/drm/virtio/virtgpu_gem.c
+++ b/drivers/gpu/drm/virtio/virtgpu_gem.c
@@ -123,6 +123,7 @@ int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
virtio_gpu_cmd_context_attach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
return 0;
}
@@ -143,6 +144,7 @@ void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
virtio_gpu_cmd_context_detach_resource(vgdev, vfpriv->ctx_id,
objs);
+ virtio_gpu_notify(vgdev);
}
struct virtio_gpu_object_array *virtio_gpu_array_alloc(u32 nents)
diff --git a/drivers/gpu/drm/virtio/virtgpu_ioctl.c b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
index 205ec4abae2b..336cc9143205 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ioctl.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ioctl.c
@@ -33,13 +33,34 @@
#include "virtgpu_drv.h"
+static void virtio_gpu_create_context(struct drm_device *dev,
+ struct drm_file *file)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
+ char dbgname[TASK_COMM_LEN];
+
+ mutex_lock(&vfpriv->context_lock);
+ if (vfpriv->context_created)
+ goto out_unlock;
+
+ get_task_comm(dbgname, current);
+ virtio_gpu_cmd_context_create(vgdev, vfpriv->ctx_id,
+ strlen(dbgname), dbgname);
+ virtio_gpu_notify(vgdev);
+ vfpriv->context_created = true;
+
+out_unlock:
+ mutex_unlock(&vfpriv->context_lock);
+}
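Context creation thus moves out of open() and into the first 3D ioctl; the mutex-plus-flag above is the standard once-only init shape, roughly (names hypothetical):

	/* mutex_lock(&priv->lock);
	 * if (!priv->ready) {
	 *         expensive_setup(priv);
	 *         priv->ready = true;
	 * }
	 * mutex_unlock(&priv->lock);
	 */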
+
static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_map *virtio_gpu_map = data;
- return virtio_gpu_mode_dumb_mmap(file_priv, vgdev->ddev,
+ return virtio_gpu_mode_dumb_mmap(file, vgdev->ddev,
virtio_gpu_map->handle,
&virtio_gpu_map->offset);
}
@@ -51,11 +72,11 @@ static int virtio_gpu_map_ioctl(struct drm_device *dev, void *data,
* VIRTIO_GPUReleaseInfo struct (first XXX bytes)
*/
static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
- struct drm_file *drm_file)
+ struct drm_file *file)
{
struct drm_virtgpu_execbuffer *exbuf = data;
struct virtio_gpu_device *vgdev = dev->dev_private;
- struct virtio_gpu_fpriv *vfpriv = drm_file->driver_priv;
+ struct virtio_gpu_fpriv *vfpriv = file->driver_priv;
struct virtio_gpu_fence *out_fence;
int ret;
uint32_t *bo_handles = NULL;
@@ -74,6 +95,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
exbuf->fence_fd = -1;
+ virtio_gpu_create_context(dev, file);
if (exbuf->flags & VIRTGPU_EXECBUF_FENCE_FD_IN) {
struct dma_fence *in_fence;
@@ -116,7 +138,7 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
goto out_unused_fd;
}
- buflist = virtio_gpu_array_from_handles(drm_file, bo_handles,
+ buflist = virtio_gpu_array_from_handles(file, bo_handles,
exbuf->num_bo_handles);
if (!buflist) {
ret = -ENOENT;
@@ -126,22 +148,22 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
bo_handles = NULL;
}
- if (buflist) {
- ret = virtio_gpu_array_lock_resv(buflist);
- if (ret)
- goto out_unused_fd;
- }
-
buf = vmemdup_user(u64_to_user_ptr(exbuf->command), exbuf->size);
if (IS_ERR(buf)) {
ret = PTR_ERR(buf);
- goto out_unresv;
+ goto out_unused_fd;
+ }
+
+ if (buflist) {
+ ret = virtio_gpu_array_lock_resv(buflist);
+ if (ret)
+ goto out_memdup;
}
out_fence = virtio_gpu_fence_alloc(vgdev);
if(!out_fence) {
ret = -ENOMEM;
- goto out_memdup;
+ goto out_unresv;
}
if (out_fence_fd >= 0) {
@@ -158,13 +180,14 @@ static int virtio_gpu_execbuffer_ioctl(struct drm_device *dev, void *data,
virtio_gpu_cmd_submit(vgdev, buf, exbuf->size,
vfpriv->ctx_id, buflist, out_fence);
+ virtio_gpu_notify(vgdev);
return 0;
-out_memdup:
- kvfree(buf);
out_unresv:
if (buflist)
virtio_gpu_array_unlock_resv(buflist);
+out_memdup:
+ kvfree(buf);
out_unused_fd:
kvfree(bo_handles);
if (buflist)
@@ -177,7 +200,7 @@ out_unused_fd:
}
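The goto targets were swapped so that unwinding mirrors the new acquisition order (vmemdup_user() before lock_resv()), the usual kernel error-path idiom:

	/* acquisition              unwind label
	 * vmemdup_user(buf)    ->  out_memdup:  kvfree(buf)
	 * lock_resv(buflist)   ->  out_unresv:  unlock_resv(buflist)
	 *
	 * A failure at any step jumps to the label that releases only
	 * what was already acquired, later acquisitions first. */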
static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_getparam *param = data;
@@ -200,7 +223,7 @@ static int virtio_gpu_getparam_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct drm_virtgpu_resource_create *rc = data;
@@ -211,7 +234,17 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
uint32_t handle = 0;
struct virtio_gpu_object_params params = { 0 };
- if (vgdev->has_virgl_3d == false) {
+ if (vgdev->has_virgl_3d) {
+ virtio_gpu_create_context(dev, file);
+ params.virgl = true;
+ params.target = rc->target;
+ params.bind = rc->bind;
+ params.depth = rc->depth;
+ params.array_size = rc->array_size;
+ params.last_level = rc->last_level;
+ params.nr_samples = rc->nr_samples;
+ params.flags = rc->flags;
+ } else {
if (rc->depth > 1)
return -EINVAL;
if (rc->nr_samples > 1)
@@ -228,16 +261,6 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
params.width = rc->width;
params.height = rc->height;
params.size = rc->size;
- if (vgdev->has_virgl_3d) {
- params.virgl = true;
- params.target = rc->target;
- params.bind = rc->bind;
- params.depth = rc->depth;
- params.array_size = rc->array_size;
- params.last_level = rc->last_level;
- params.nr_samples = rc->nr_samples;
- params.flags = rc->flags;
- }
/* allocate a single page size object */
if (params.size == 0)
params.size = PAGE_SIZE;
@@ -251,7 +274,7 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
return ret;
obj = &qobj->base.base;
- ret = drm_gem_handle_create(file_priv, obj, &handle);
+ ret = drm_gem_handle_create(file, obj, &handle);
if (ret) {
drm_gem_object_release(obj);
return ret;
@@ -264,13 +287,13 @@ static int virtio_gpu_resource_create_ioctl(struct drm_device *dev, void *data,
}
static int virtio_gpu_resource_info_ioctl(struct drm_device *dev, void *data,
- struct drm_file *file_priv)
+ struct drm_file *file)
{
struct drm_virtgpu_resource_info *ri = data;
struct drm_gem_object *gobj = NULL;
struct virtio_gpu_object *qobj = NULL;
- gobj = drm_gem_object_lookup(file_priv, ri->bo_handle);
+ gobj = drm_gem_object_lookup(file, ri->bo_handle);
if (gobj == NULL)
return -ENOENT;
@@ -297,6 +320,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
if (vgdev->has_virgl_3d == false)
return -ENOSYS;
+ virtio_gpu_create_context(dev, file);
objs = virtio_gpu_array_from_handles(file, &args->bo_handle, 1);
if (objs == NULL)
return -ENOENT;
@@ -314,6 +338,7 @@ static int virtio_gpu_transfer_from_host_ioctl(struct drm_device *dev,
(vgdev, vfpriv->ctx_id, offset, args->level,
&args->box, objs, fence);
dma_fence_put(&fence->f);
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -344,6 +369,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->box.w, args->box.h, args->box.x, args->box.y,
objs, NULL);
} else {
+ virtio_gpu_create_context(dev, file);
ret = virtio_gpu_array_lock_resv(objs);
if (ret != 0)
goto err_put_free;
@@ -359,6 +385,7 @@ static int virtio_gpu_transfer_to_host_ioctl(struct drm_device *dev, void *data,
args->level, &args->box, objs, fence);
dma_fence_put(&fence->f);
}
+ virtio_gpu_notify(vgdev);
return 0;
err_unlock:
@@ -445,6 +472,7 @@ static int virtio_gpu_get_caps_ioctl(struct drm_device *dev,
/* not in cache - need to talk to hw */
virtio_gpu_cmd_get_capset(vgdev, found_valid, args->cap_set_ver,
&cache_ent);
+ virtio_gpu_notify(vgdev);
copy_exit:
ret = wait_event_timeout(vgdev->resp_wq,
diff --git a/drivers/gpu/drm/virtio/virtgpu_kms.c b/drivers/gpu/drm/virtio/virtgpu_kms.c
index 2f5773e43557..023a030ca7b9 100644
--- a/drivers/gpu/drm/virtio/virtgpu_kms.c
+++ b/drivers/gpu/drm/virtio/virtgpu_kms.c
@@ -44,6 +44,7 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
drm_helper_hpd_irq_event(vgdev->ddev);
events_clear |= VIRTIO_GPU_EVENT_DISPLAY;
}
@@ -51,22 +52,11 @@ static void virtio_gpu_config_changed_work_func(struct work_struct *work)
events_clear, &events_clear);
}
-static int virtio_gpu_context_create(struct virtio_gpu_device *vgdev,
- uint32_t nlen, const char *name)
-{
- int handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
-
- if (handle < 0)
- return handle;
- handle += 1;
- virtio_gpu_cmd_context_create(vgdev, handle, nlen, name);
- return handle;
-}
-
static void virtio_gpu_context_destroy(struct virtio_gpu_device *vgdev,
uint32_t ctx_id)
{
virtio_gpu_cmd_context_destroy(vgdev, ctx_id);
+ virtio_gpu_notify(vgdev);
ida_free(&vgdev->ctx_id_ida, ctx_id - 1);
}
@@ -92,6 +82,7 @@ static void virtio_gpu_get_capsets(struct virtio_gpu_device *vgdev,
}
for (i = 0; i < num_capsets; i++) {
virtio_gpu_cmd_get_capset_info(vgdev, i);
+ virtio_gpu_notify(vgdev);
ret = wait_event_timeout(vgdev->resp_wq,
vgdev->capsets[i].id > 0, 5 * HZ);
if (ret == 0) {
@@ -159,6 +150,9 @@ int virtio_gpu_init(struct drm_device *dev)
if (virtio_has_feature(vgdev->vdev, VIRTIO_GPU_F_EDID)) {
vgdev->has_edid = true;
}
+ if (virtio_has_feature(vgdev->vdev, VIRTIO_RING_F_INDIRECT_DESC)) {
+ vgdev->has_indirect = true;
+ }
DRM_INFO("features: %cvirgl %cedid\n",
vgdev->has_virgl_3d ? '+' : '-',
@@ -196,13 +190,13 @@ int virtio_gpu_init(struct drm_device *dev)
virtio_gpu_modeset_init(vgdev);
virtio_device_ready(vgdev->vdev);
- vgdev->vqs_ready = true;
if (num_capsets)
virtio_gpu_get_capsets(vgdev, num_capsets);
if (vgdev->has_edid)
virtio_gpu_cmd_get_edids(vgdev);
virtio_gpu_cmd_get_display_info(vgdev);
+ virtio_gpu_notify(vgdev);
wait_event_timeout(vgdev->resp_wq, !vgdev->display_info_pending,
5 * HZ);
return 0;
@@ -231,12 +225,16 @@ void virtio_gpu_deinit(struct drm_device *dev)
struct virtio_gpu_device *vgdev = dev->dev_private;
flush_work(&vgdev->obj_free_work);
- vgdev->vqs_ready = false;
flush_work(&vgdev->ctrlq.dequeue_work);
flush_work(&vgdev->cursorq.dequeue_work);
flush_work(&vgdev->config_changed_work);
vgdev->vdev->config->reset(vgdev->vdev);
vgdev->vdev->config->del_vqs(vgdev->vdev);
+}
+
+void virtio_gpu_release(struct drm_device *dev)
+{
+ struct virtio_gpu_device *vgdev = dev->dev_private;
virtio_gpu_modeset_fini(vgdev);
virtio_gpu_free_vbufs(vgdev);
@@ -249,8 +247,7 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
{
struct virtio_gpu_device *vgdev = dev->dev_private;
struct virtio_gpu_fpriv *vfpriv;
- int id;
- char dbgname[TASK_COMM_LEN];
+ int handle;
/* can't create contexts without 3d renderer */
if (!vgdev->has_virgl_3d)
@@ -261,14 +258,15 @@ int virtio_gpu_driver_open(struct drm_device *dev, struct drm_file *file)
if (!vfpriv)
return -ENOMEM;
- get_task_comm(dbgname, current);
- id = virtio_gpu_context_create(vgdev, strlen(dbgname), dbgname);
- if (id < 0) {
+ mutex_init(&vfpriv->context_lock);
+
+ handle = ida_alloc(&vgdev->ctx_id_ida, GFP_KERNEL);
+ if (handle < 0) {
kfree(vfpriv);
- return id;
+ return handle;
}
- vfpriv->ctx_id = id;
+ vfpriv->ctx_id = handle + 1;
file->driver_priv = vfpriv;
return 0;
}
@@ -284,6 +282,7 @@ void virtio_gpu_driver_postclose(struct drm_device *dev, struct drm_file *file)
vfpriv = file->driver_priv;
virtio_gpu_context_destroy(vgdev, vfpriv->ctx_id);
+ mutex_destroy(&vfpriv->context_lock);
kfree(vfpriv);
file->driver_priv = NULL;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_object.c b/drivers/gpu/drm/virtio/virtgpu_object.c
index 017a9e0fc3bb..2bfb13d1932e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_object.c
+++ b/drivers/gpu/drm/virtio/virtgpu_object.c
@@ -23,6 +23,7 @@
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/dma-mapping.h>
#include <linux/moduleparam.h>
#include "virtgpu_drv.h"
@@ -42,8 +43,8 @@ static int virtio_gpu_resource_id_get(struct virtio_gpu_device *vgdev,
* "f91a9dd35715 Fix unlinking resources from hash
* table." (Feb 2019) fixes the bug.
*/
- static int handle;
- handle++;
+ static atomic_t seqno = ATOMIC_INIT(0);
+ int handle = atomic_inc_return(&seqno);
*resid = handle + 1;
} else {
int handle = ida_alloc(&vgdev->resource_ida, GFP_KERNEL);
@@ -61,21 +62,46 @@ static void virtio_gpu_resource_id_put(struct virtio_gpu_device *vgdev, uint32_t
}
}
-static void virtio_gpu_free_object(struct drm_gem_object *obj)
+void virtio_gpu_cleanup_object(struct virtio_gpu_object *bo)
{
- struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- if (bo->pages)
- virtio_gpu_object_detach(vgdev, bo);
- if (bo->created)
- virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
+ if (virtio_gpu_is_shmem(bo)) {
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
+ if (shmem->pages) {
+ if (shmem->mapped) {
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl, shmem->mapped,
+ DMA_TO_DEVICE);
+ shmem->mapped = 0;
+ }
+
+ sg_free_table(shmem->pages);
+ shmem->pages = NULL;
+ drm_gem_shmem_unpin(&bo->base.base);
+ }
+
+ drm_gem_shmem_free_object(&bo->base.base);
+ }
+}
+
+static void virtio_gpu_free_object(struct drm_gem_object *obj)
+{
+ struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+ struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
- drm_gem_shmem_free_object(obj);
+ if (bo->created) {
+ virtio_gpu_cmd_unref_resource(vgdev, bo);
+ virtio_gpu_notify(vgdev);
+ /* completion handler calls virtio_gpu_cleanup_object() */
+ return;
+ }
+ virtio_gpu_cleanup_object(bo);
}
-static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
+static const struct drm_gem_object_funcs virtio_gpu_shmem_funcs = {
.free = virtio_gpu_free_object,
.open = virtio_gpu_gem_object_open,
.close = virtio_gpu_gem_object_close,
@@ -86,9 +112,14 @@ static const struct drm_gem_object_funcs virtio_gpu_gem_funcs = {
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
- .mmap = &drm_gem_shmem_mmap,
+ .mmap = drm_gem_shmem_mmap,
};
+bool virtio_gpu_is_shmem(struct virtio_gpu_object *bo)
+{
+ return bo->base.base.funcs == &virtio_gpu_shmem_funcs;
+}
+
struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
size_t size)
{
@@ -98,10 +129,58 @@ struct drm_gem_object *virtio_gpu_create_object(struct drm_device *dev,
if (!bo)
return NULL;
- bo->base.base.funcs = &virtio_gpu_gem_funcs;
+ bo->base.base.funcs = &virtio_gpu_shmem_funcs;
+ bo->base.map_cached = true;
return &bo->base.base;
}
+static int virtio_gpu_object_shmem_init(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo,
+ struct virtio_gpu_mem_entry **ents,
+ unsigned int *nents)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+ struct scatterlist *sg;
+ int si, ret;
+
+ ret = drm_gem_shmem_pin(&bo->base.base);
+ if (ret < 0)
+ return -EINVAL;
+
+ shmem->pages = drm_gem_shmem_get_sg_table(&bo->base.base);
+ if (!shmem->pages) {
+ drm_gem_shmem_unpin(&bo->base.base);
+ return -EINVAL;
+ }
+
+ if (use_dma_api) {
+ shmem->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ shmem->pages->sgl,
+ shmem->pages->nents,
+ DMA_TO_DEVICE);
+ *nents = shmem->mapped;
+ } else {
+ *nents = shmem->pages->nents;
+ }
+
+ *ents = kmalloc_array(*nents, sizeof(struct virtio_gpu_mem_entry),
+ GFP_KERNEL);
+ if (!(*ents)) {
+ DRM_ERROR("failed to allocate ent list\n");
+ return -ENOMEM;
+ }
+
+ for_each_sg(shmem->pages->sgl, sg, *nents, si) {
+ (*ents)[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
+ (*ents)[si].length = cpu_to_le32(sg->length);
+ (*ents)[si].padding = 0;
+ }
+ return 0;
+}
+
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_params *params,
struct virtio_gpu_object **bo_ptr,
@@ -110,6 +189,8 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object_array *objs = NULL;
struct drm_gem_shmem_object *shmem_obj;
struct virtio_gpu_object *bo;
+ struct virtio_gpu_mem_entry *ents;
+ unsigned int nents;
int ret;
*bo_ptr = NULL;
@@ -146,12 +227,19 @@ int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
objs, fence);
}
- ret = virtio_gpu_object_attach(vgdev, bo, NULL);
+ ret = virtio_gpu_object_shmem_init(vgdev, bo, &ents, &nents);
+ if (ret != 0) {
+ virtio_gpu_free_object(&shmem_obj->base);
+ return ret;
+ }
+
+ ret = virtio_gpu_object_attach(vgdev, bo, ents, nents);
if (ret != 0) {
virtio_gpu_free_object(&shmem_obj->base);
return ret;
}
+ virtio_gpu_notify(vgdev);
*bo_ptr = bo;
return 0;
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index d1c3f5fbfee4..52d24179bcec 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -148,14 +148,13 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
plane->state->src_w >> 16,
plane->state->src_h >> 16,
0, 0);
+ virtio_gpu_notify(vgdev);
return;
}
if (!drm_atomic_helper_damage_merged(old_state, plane->state, &rect))
return;
- virtio_gpu_disable_notify(vgdev);
-
bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
if (bo->dumb)
virtio_gpu_update_dumb_bo(vgdev, plane->state, &rect);
@@ -186,8 +185,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
rect.y1,
rect.x2 - rect.x1,
rect.y2 - rect.y1);
-
- virtio_gpu_enable_notify(vgdev);
+ virtio_gpu_notify(vgdev);
}
static int virtio_gpu_cursor_prepare_fb(struct drm_plane *plane,
@@ -266,6 +264,7 @@ static void virtio_gpu_cursor_plane_update(struct drm_plane *plane,
plane->state->crtc_w,
plane->state->crtc_h,
0, 0, objs, vgfb->fence);
+ virtio_gpu_notify(vgdev);
dma_fence_wait(&vgfb->fence->f, true);
dma_fence_put(&vgfb->fence->f);
vgfb->fence = NULL;
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 5914e79d3429..73854915ec34 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -95,7 +95,8 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
if (!vbuf)
return ERR_PTR(-ENOMEM);
- BUG_ON(size > MAX_INLINE_CMD_SIZE);
+ BUG_ON(size > MAX_INLINE_CMD_SIZE ||
+ size < sizeof(struct virtio_gpu_ctrl_hdr));
vbuf->buf = (void *)vbuf + sizeof(*vbuf);
vbuf->size = size;
@@ -109,21 +110,14 @@ virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
return vbuf;
}
-static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer **vbuffer_p,
- int size)
+static struct virtio_gpu_ctrl_hdr *
+virtio_gpu_vbuf_ctrl_hdr(struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_vbuffer *vbuf;
-
- vbuf = virtio_gpu_get_vbuf(vgdev, size,
- sizeof(struct virtio_gpu_ctrl_hdr),
- NULL, NULL);
- if (IS_ERR(vbuf)) {
- *vbuffer_p = NULL;
- return ERR_CAST(vbuf);
- }
- *vbuffer_p = vbuf;
- return vbuf->buf;
+ /* this assumes a vbuf contains a command that starts with a
+ * virtio_gpu_ctrl_hdr, which is true for both ctrl and cursor
+ * virtqueues.
+ */
+ return (struct virtio_gpu_ctrl_hdr *)vbuf->buf;
}
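The stated assumption holds because every command layout in the virtio-gpu uapi begins with the common header; for example (from include/uapi/linux/virtio_gpu.h):

	struct virtio_gpu_resource_unref {
		struct virtio_gpu_ctrl_hdr hdr;
		__le32 resource_id;
		__le32 padding;
	};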
static struct virtio_gpu_update_cursor*
@@ -161,6 +155,25 @@ static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
return (struct virtio_gpu_command *)vbuf->buf;
}
+static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, NULL, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
+static void *virtio_gpu_alloc_cmd_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer **vbuffer_p,
+ int size,
+ virtio_gpu_resp_cb cb)
+{
+ return virtio_gpu_alloc_cmd_resp(vgdev, cb, vbuffer_p, size,
+ sizeof(struct virtio_gpu_ctrl_hdr),
+ NULL);
+}
+
static void free_vbuf(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
@@ -209,12 +222,12 @@ void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
trace_virtio_gpu_cmd_response(vgdev->ctrlq.vq, resp);
if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
- if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
+ if (le32_to_cpu(resp->type) >= VIRTIO_GPU_RESP_ERR_UNSPEC) {
struct virtio_gpu_ctrl_hdr *cmd;
- cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
- DRM_ERROR("response 0x%x (command 0x%x)\n",
- le32_to_cpu(resp->type),
- le32_to_cpu(cmd->type));
+ cmd = virtio_gpu_vbuf_ctrl_hdr(entry);
+ DRM_ERROR_RATELIMITED("response 0x%x (command 0x%x)\n",
+ le32_to_cpu(resp->type),
+ le32_to_cpu(cmd->type));
} else
DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
}
@@ -307,109 +320,107 @@ static struct sg_table *vmalloc_to_sgt(char *data, uint32_t size, int *sg_ents)
return sgt;
}
-static bool virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_vbuffer *vbuf,
- struct scatterlist *vout)
- __releases(&vgdev->ctrlq.qlock)
- __acquires(&vgdev->ctrlq.qlock)
+static void virtio_gpu_queue_ctrl_sgs(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf,
+ struct virtio_gpu_fence *fence,
+ int elemcnt,
+ struct scatterlist **sgs,
+ int outcnt,
+ int incnt)
{
struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *sgs[3], vcmd, vresp;
- int outcnt = 0, incnt = 0;
- bool notify = false;
- int ret;
+ int ret, idx;
- if (!vgdev->vqs_ready)
- return notify;
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ free_vbuf(vgdev, vbuf);
+ return;
+ }
- sg_init_one(&vcmd, vbuf->buf, vbuf->size);
- sgs[outcnt + incnt] = &vcmd;
- outcnt++;
+ if (vgdev->has_indirect)
+ elemcnt = 1;
- if (vout) {
- sgs[outcnt + incnt] = vout;
- outcnt++;
+again:
+ spin_lock(&vgdev->ctrlq.qlock);
+
+ if (vq->num_free < elemcnt) {
+ spin_unlock(&vgdev->ctrlq.qlock);
+ virtio_gpu_notify(vgdev);
+ wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= elemcnt);
+ goto again;
}
- if (vbuf->resp_size) {
- sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
- sgs[outcnt + incnt] = &vresp;
- incnt++;
+ /* now that the position of the vbuf in the virtqueue is known, we can
+ * finally set the fence id
+ */
+ if (fence) {
+ virtio_gpu_fence_emit(vgdev, virtio_gpu_vbuf_ctrl_hdr(vbuf),
+ fence);
+ if (vbuf->objs) {
+ virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
+ virtio_gpu_array_unlock_resv(vbuf->objs);
+ }
}
-retry:
ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
- if (ret == -ENOSPC) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
- spin_lock(&vgdev->ctrlq.qlock);
- goto retry;
- } else {
- trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ WARN_ON(ret);
- notify = virtqueue_kick_prepare(vq);
- }
- return notify;
+ trace_virtio_gpu_cmd_queue(vq, virtio_gpu_vbuf_ctrl_hdr(vbuf));
+
+ atomic_inc(&vgdev->pending_commands);
+
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ drm_dev_exit(idx);
}
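Why elemcnt can be capped at 1 above: with VIRTIO_RING_F_INDIRECT_DESC negotiated, virtqueue_add_sgs() packs all of a request's scatterlist entries into a single indirect descriptor table, so each request consumes one ring slot regardless of how many sgs it carries.

	/* ring usage per request:
	 *   direct descriptors:   elemcnt slots (vcmd + vout + vresp)
	 *   indirect descriptors: 1 slot pointing at a side table */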
static void virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf,
- struct virtio_gpu_ctrl_hdr *hdr,
struct virtio_gpu_fence *fence)
{
- struct virtqueue *vq = vgdev->ctrlq.vq;
- struct scatterlist *vout = NULL, sg;
+ struct scatterlist *sgs[3], vcmd, vout, vresp;
struct sg_table *sgt = NULL;
- bool notify;
- int outcnt = 0;
+ int elemcnt = 0, outcnt = 0, incnt = 0;
+ /* set up vcmd */
+ sg_init_one(&vcmd, vbuf->buf, vbuf->size);
+ elemcnt++;
+ sgs[outcnt] = &vcmd;
+ outcnt++;
+
+ /* set up vout */
if (vbuf->data_size) {
if (is_vmalloc_addr(vbuf->data_buf)) {
+ int sg_ents;
sgt = vmalloc_to_sgt(vbuf->data_buf, vbuf->data_size,
- &outcnt);
- if (!sgt)
+ &sg_ents);
+ if (!sgt) {
+ if (fence && vbuf->objs)
+ virtio_gpu_array_unlock_resv(vbuf->objs);
return;
- vout = sgt->sgl;
+ }
+
+ elemcnt += sg_ents;
+ sgs[outcnt] = sgt->sgl;
} else {
- sg_init_one(&sg, vbuf->data_buf, vbuf->data_size);
- vout = &sg;
- outcnt = 1;
+ sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
+ elemcnt++;
+ sgs[outcnt] = &vout;
}
+ outcnt++;
}
-again:
- spin_lock(&vgdev->ctrlq.qlock);
-
- /*
- * Make sure we have enouth space in the virtqueue. If not
- * wait here until we have.
- *
- * Without that virtio_gpu_queue_ctrl_buffer_nolock might have
- * to wait for free space, which can result in fence ids being
- * submitted out-of-order.
- */
- if (vq->num_free < 2 + outcnt) {
- spin_unlock(&vgdev->ctrlq.qlock);
- wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
- goto again;
+ /* set up vresp */
+ if (vbuf->resp_size) {
+ sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
+ elemcnt++;
+ sgs[outcnt + incnt] = &vresp;
+ incnt++;
}
- if (hdr && fence) {
- virtio_gpu_fence_emit(vgdev, hdr, fence);
- if (vbuf->objs) {
- virtio_gpu_array_add_fence(vbuf->objs, &fence->f);
- virtio_gpu_array_unlock_resv(vbuf->objs);
- }
- }
- notify = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf, vout);
- spin_unlock(&vgdev->ctrlq.qlock);
- if (notify) {
- if (vgdev->disable_notify)
- vgdev->pending_notify = true;
- else
- virtqueue_notify(vgdev->ctrlq.vq);
- }
+ virtio_gpu_queue_ctrl_sgs(vgdev, vbuf, fence, elemcnt, sgs, outcnt,
+ incnt);
if (sgt) {
sg_free_table(sgt);
@@ -417,25 +428,26 @@ again:
}
}
-void virtio_gpu_disable_notify(struct virtio_gpu_device *vgdev)
+void virtio_gpu_notify(struct virtio_gpu_device *vgdev)
{
- vgdev->disable_notify = true;
-}
-
-void virtio_gpu_enable_notify(struct virtio_gpu_device *vgdev)
-{
- vgdev->disable_notify = false;
+ bool notify;
- if (!vgdev->pending_notify)
+ if (!atomic_read(&vgdev->pending_commands))
return;
- vgdev->pending_notify = false;
- virtqueue_notify(vgdev->ctrlq.vq);
+
+ spin_lock(&vgdev->ctrlq.qlock);
+ atomic_set(&vgdev->pending_commands, 0);
+ notify = virtqueue_kick_prepare(vgdev->ctrlq.vq);
+ spin_unlock(&vgdev->ctrlq.qlock);
+
+ if (notify)
+ virtqueue_notify(vgdev->ctrlq.vq);
}
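With notification decoupled from queuing, callers can batch several commands and kick the host once. A sketch of the pattern, using calls that appear elsewhere in this series (argument values illustrative):

	/* queue-then-kick pattern enabled by virtio_gpu_notify() */
	virtio_gpu_cmd_set_scanout(vgdev, output->index, resource_id,
				   mode->hdisplay, mode->vdisplay, 0, 0);
	/* further commands may be queued here without extra host exits */
	virtio_gpu_notify(vgdev);	/* one virtqueue kick for the batch */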
static void virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
struct virtio_gpu_vbuffer *vbuf)
{
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL, NULL);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, NULL);
}
static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
@@ -443,12 +455,13 @@ static void virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
{
struct virtqueue *vq = vgdev->cursorq.vq;
struct scatterlist *sgs[1], ccmd;
+ int idx, ret, outcnt;
bool notify;
- int ret;
- int outcnt;
- if (!vgdev->vqs_ready)
+ if (!drm_dev_enter(vgdev->ddev, &idx)) {
+ free_vbuf(vgdev, vbuf);
return;
+ }
sg_init_one(&ccmd, vbuf->buf, vbuf->size);
sgs[0] = &ccmd;
@@ -464,7 +477,7 @@ retry:
goto retry;
} else {
trace_virtio_gpu_cmd_queue(vq,
- (struct virtio_gpu_ctrl_hdr *)vbuf->buf);
+ virtio_gpu_vbuf_ctrl_hdr(vbuf));
notify = virtqueue_kick_prepare(vq);
}
@@ -473,6 +486,8 @@ retry:
if (notify)
virtqueue_notify(vq);
+
+ drm_dev_exit(idx);
}
/* just create gem objects for userspace and long lived objects,
@@ -499,39 +514,36 @@ void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
cmd_p->width = cpu_to_le32(params->width);
cmd_p->height = cpu_to_le32(params->height);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
bo->created = true;
}
-void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_unref_cb(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_vbuffer *vbuf)
{
- struct virtio_gpu_resource_unref *cmd_p;
- struct virtio_gpu_vbuffer *vbuf;
+ struct virtio_gpu_object *bo;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
- memset(cmd_p, 0, sizeof(*cmd_p));
+ bo = vbuf->resp_cb_data;
+ vbuf->resp_cb_data = NULL;
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
- cmd_p->resource_id = cpu_to_le32(resource_id);
-
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_cleanup_object(bo);
}
-static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id,
- struct virtio_gpu_fence *fence)
+void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *bo)
{
- struct virtio_gpu_resource_detach_backing *cmd_p;
+ struct virtio_gpu_resource_unref *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+ cmd_p = virtio_gpu_alloc_cmd_cb(vgdev, &vbuf, sizeof(*cmd_p),
+ virtio_gpu_cmd_unref_cb);
memset(cmd_p, 0, sizeof(*cmd_p));
- cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
- cmd_p->resource_id = cpu_to_le32(resource_id);
+ cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
+ cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ vbuf->resp_cb_data = bo;
+ virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -588,10 +600,11 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_to_host_2d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -606,7 +619,7 @@ void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
cmd_p->r.x = cpu_to_le32(x);
cmd_p->r.y = cpu_to_le32(y);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void
@@ -629,7 +642,7 @@ virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
vbuf->data_buf = ents;
vbuf->data_size = sizeof(*ents) * nents;
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
@@ -939,7 +952,6 @@ void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
-
}
void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
@@ -988,7 +1000,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
cmd_p->flags = cpu_to_le32(params->flags);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
+
bo->created = true;
}
@@ -1003,10 +1016,11 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
struct virtio_gpu_transfer_host_3d *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
if (use_dma_api)
dma_sync_sg_for_device(vgdev->vdev->dev.parent,
- bo->pages->sgl, bo->pages->nents,
+ shmem->pages->sgl, shmem->pages->nents,
DMA_TO_DEVICE);
cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
@@ -1021,7 +1035,7 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
@@ -1047,7 +1061,7 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
cmd_p->offset = cpu_to_le64(offset);
cmd_p->level = cpu_to_le32(level);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
@@ -1070,94 +1084,19 @@ void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
cmd_p->size = cpu_to_le32(data_size);
- virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
}
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
- struct virtio_gpu_fence *fence)
+ struct virtio_gpu_mem_entry *ents,
+ unsigned int nents)
{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
- struct virtio_gpu_mem_entry *ents;
- struct scatterlist *sg;
- int si, nents, ret;
-
- if (WARN_ON_ONCE(!obj->created))
- return -EINVAL;
- if (WARN_ON_ONCE(obj->pages))
- return -EINVAL;
-
- ret = drm_gem_shmem_pin(&obj->base.base);
- if (ret < 0)
- return -EINVAL;
-
- obj->pages = drm_gem_shmem_get_sg_table(&obj->base.base);
- if (obj->pages == NULL) {
- drm_gem_shmem_unpin(&obj->base.base);
- return -EINVAL;
- }
-
- if (use_dma_api) {
- obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->pages->nents,
- DMA_TO_DEVICE);
- nents = obj->mapped;
- } else {
- nents = obj->pages->nents;
- }
-
- /* gets freed when the ring has consumed it */
- ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
- GFP_KERNEL);
- if (!ents) {
- DRM_ERROR("failed to allocate ent list\n");
- return -ENOMEM;
- }
-
- for_each_sg(obj->pages->sgl, sg, nents, si) {
- ents[si].addr = cpu_to_le64(use_dma_api
- ? sg_dma_address(sg)
- : sg_phys(sg));
- ents[si].length = cpu_to_le32(sg->length);
- ents[si].padding = 0;
- }
-
virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
- ents, nents,
- fence);
+ ents, nents, NULL);
return 0;
}
-void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
- struct virtio_gpu_object *obj)
-{
- bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
-
- if (WARN_ON_ONCE(!obj->pages))
- return;
-
- if (use_dma_api && obj->mapped) {
- struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);
- /* detach backing and wait for the host process it ... */
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
- dma_fence_wait(&fence->f, true);
- dma_fence_put(&fence->f);
-
- /* ... then tear down iommu mappings */
- dma_unmap_sg(vgdev->vdev->dev.parent,
- obj->pages->sgl, obj->mapped,
- DMA_TO_DEVICE);
- obj->mapped = 0;
- } else {
- virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
- }
-
- sg_free_table(obj->pages);
- obj->pages = NULL;
-
- drm_gem_shmem_unpin(&obj->base.base);
-}
-
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 74f703b8d22a..ac85e17428f8 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -76,10 +76,12 @@ static void vkms_disable_vblank(struct drm_crtc *crtc)
hrtimer_cancel(&out->vblank_hrtimer);
}
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq)
+static bool vkms_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error, ktime_t *vblank_time,
+ bool in_vblank_irq)
{
+ struct drm_device *dev = crtc->dev;
+ unsigned int pipe = crtc->index;
struct vkms_device *vkmsdev = drm_device_to_vkms_device(dev);
struct vkms_output *output = &vkmsdev->output;
struct drm_vblank_crtc *vblank = &dev->vblank[pipe];
@@ -154,6 +156,7 @@ static const struct drm_crtc_funcs vkms_crtc_funcs = {
.atomic_destroy_state = vkms_atomic_crtc_destroy_state,
.enable_vblank = vkms_enable_vblank,
.disable_vblank = vkms_disable_vblank,
+ .get_vblank_timestamp = vkms_get_vblank_timestamp,
.get_crc_sources = vkms_get_crc_sources,
.set_crc_source = vkms_set_crc_source,
.verify_crc_source = vkms_verify_crc_source,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 25bd7519295f..860de052e820 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -103,7 +103,6 @@ static struct drm_driver vkms_driver = {
.dumb_create = vkms_dumb_create,
.gem_vm_ops = &vkms_gem_vm_ops,
.gem_free_object_unlocked = vkms_gem_free_object,
- .get_vblank_timestamp = vkms_get_vblank_timestamp,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = vkms_prime_import_sg_table,
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 7d52e24564db..eda04ffba7b1 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -111,10 +111,6 @@ struct vkms_gem_object {
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
-bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
- int *max_error, ktime_t *vblank_time,
- bool in_vblank_irq);
-
int vkms_output_init(struct vkms_device *vkmsdev, int index);
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev,
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 5fc8f85aaf3d..6d31265a2ab7 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -117,7 +117,7 @@ static int vkms_plane_atomic_check(struct drm_plane *plane,
bool can_position = false;
int ret;
- if (!state->fb | !state->crtc)
+ if (!state->fb || WARN_ON(!state->crtc))
return 0;
crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
index 065015d2a8f6..3b41cf63110a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf.c
@@ -1241,7 +1241,8 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
* actually call into the already enabled manager, when
* binding the MOB.
*/
- if (!(dev_priv->capabilities & SVGA_CAP_DX))
+ if (!(dev_priv->capabilities & SVGA_CAP_DX) ||
+ !dev_priv->has_mob)
return -ENOMEM;
ret = ttm_bo_create(&dev_priv->bdev, size, ttm_bo_type_device,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 827458f49112..4f58364421ce 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -29,6 +29,7 @@
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/pci.h>
+#include <linux/mem_encrypt.h>
#include <drm/drm_drv.h>
#include <drm/drm_ioctl.h>
@@ -575,6 +576,10 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
[vmw_dma_map_populate] = "Caching DMA mappings.",
[vmw_dma_map_bind] = "Giving up DMA mappings early."};
+ /* TTM currently doesn't fully support SEV encryption. */
+ if (mem_encrypt_active())
+ return -EINVAL;
+
if (vmw_force_coherent)
dev_priv->map_mode = vmw_dma_alloc_coherent;
else if (vmw_restrict_iommu)
@@ -682,8 +687,10 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
ret = vmw_dma_select_mode(dev_priv);
if (unlikely(ret != 0)) {
- DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
+ DRM_INFO("Restricting capabilities since DMA not available.\n");
refuse_dma = true;
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS)
+ DRM_INFO("Disabling 3D acceleration.\n");
}
dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
@@ -866,7 +873,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
dev_priv->has_gmr = false;
}
- if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
+ if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS && !refuse_dma) {
dev_priv->has_mob = true;
if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
VMW_PL_MOB) != 0) {
@@ -1399,9 +1406,6 @@ static const struct file_operations vmwgfx_driver_fops = {
static struct drm_driver driver = {
.driver_features =
DRIVER_MODESET | DRIVER_RENDER | DRIVER_ATOMIC,
- .get_vblank_counter = vmw_get_vblank_counter,
- .enable_vblank = vmw_enable_vblank,
- .disable_vblank = vmw_disable_vblank,
.ioctls = vmw_ioctls,
.num_ioctls = ARRAY_SIZE(vmw_ioctls),
.master_set = vmw_master_set,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index 86b69397d166..b70d73225707 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
@@ -480,19 +480,6 @@ struct vmw_private {
bool has_sm4_1;
/*
- * VGA registers.
- */
-
- struct vmw_vga_topology_state vga_save[VMWGFX_MAX_DISPLAYS];
- uint32_t vga_width;
- uint32_t vga_height;
- uint32_t vga_bpp;
- uint32_t vga_bpl;
- uint32_t vga_pitchlock;
-
- uint32_t num_displays;
-
- /*
* Framebuffer info.
*/
@@ -900,7 +887,6 @@ extern void vmw_fifo_commit(struct vmw_private *dev_priv, uint32_t bytes);
extern void vmw_fifo_commit_flush(struct vmw_private *dev_priv, uint32_t bytes);
extern int vmw_fifo_send_fence(struct vmw_private *dev_priv,
uint32_t *seqno);
-extern void vmw_fifo_ping_host_locked(struct vmw_private *, uint32_t reason);
extern void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason);
extern bool vmw_fifo_have_3d(struct vmw_private *dev_priv);
extern bool vmw_fifo_have_pitchlock(struct vmw_private *dev_priv);
@@ -947,7 +933,6 @@ extern struct ttm_placement vmw_mob_placement;
extern struct ttm_placement vmw_mob_ne_placement;
extern struct ttm_placement vmw_nonfixed_placement;
extern struct ttm_bo_driver vmw_bo_driver;
-extern int vmw_dma_quiescent(struct drm_device *dev);
extern int vmw_bo_map_dma(struct ttm_buffer_object *bo);
extern void vmw_bo_unmap_dma(struct ttm_buffer_object *bo);
extern const struct vmw_sg_table *
@@ -1085,8 +1070,6 @@ int vmw_fb_on(struct vmw_private *vmw_priv);
int vmw_kms_init(struct vmw_private *dev_priv);
int vmw_kms_close(struct vmw_private *dev_priv);
-int vmw_kms_save_vga(struct vmw_private *vmw_priv);
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv);
int vmw_kms_cursor_bypass_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
void vmw_kms_cursor_post_execbuf(struct vmw_private *dev_priv);
@@ -1100,9 +1083,9 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height);
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe);
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe);
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe);
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc);
+int vmw_enable_vblank(struct drm_crtc *crtc);
+void vmw_disable_vblank(struct drm_crtc *crtc);
int vmw_kms_present(struct vmw_private *dev_priv,
struct drm_file *file_priv,
struct vmw_framebuffer *vfb,
@@ -1139,7 +1122,6 @@ int vmw_overlay_init(struct vmw_private *dev_priv);
int vmw_overlay_close(struct vmw_private *dev_priv);
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-int vmw_overlay_stop_all(struct vmw_private *dev_priv);
int vmw_overlay_resume_all(struct vmw_private *dev_priv);
int vmw_overlay_pause_all(struct vmw_private *dev_priv);
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out);
@@ -1186,10 +1168,6 @@ extern void vmw_otables_takedown(struct vmw_private *dev_priv);
extern const struct vmw_user_resource_conv *user_context_converter;
-extern int vmw_context_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- int id,
- struct vmw_resource **p_res);
extern int vmw_context_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_extended_context_define_ioctl(struct drm_device *dev, void *data,
@@ -1219,7 +1197,6 @@ vmw_context_get_dx_query_mob(struct vmw_resource *ctx_res);
extern const struct vmw_user_resource_conv *user_surface_converter;
-extern void vmw_surface_res_free(struct vmw_resource *res);
extern int vmw_surface_destroy_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
@@ -1230,11 +1207,6 @@ extern int vmw_gb_surface_define_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int vmw_gb_surface_reference_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-extern int vmw_surface_check(struct vmw_private *dev_priv,
- struct ttm_object_file *tfile,
- uint32_t handle, int *id);
-extern int vmw_surface_validate(struct vmw_private *dev_priv,
- struct vmw_surface *srf);
int vmw_surface_gb_priv_define(struct drm_device *dev,
uint32_t user_accounting_size,
SVGA3dSurfaceAllFlags svga3d_flags,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
index e5252ef3812f..6941689085ed 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
@@ -169,10 +169,8 @@ void vmw_fifo_ping_host(struct vmw_private *dev_priv, uint32_t reason)
{
u32 *fifo_mem = dev_priv->mmio_virt;
- preempt_disable();
if (cmpxchg(fifo_mem + SVGA_FIFO_BUSY, 0, 1) == 0)
vmw_write(dev_priv, SVGA_REG_SYNC, reason);
- preempt_enable();
}
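The dropped preempt_disable()/preempt_enable() pair was unnecessary: cmpxchg() is a full atomic read-modify-write, so the test-and-set stands on its own even if the caller is preempted around it. Minimal sketch of the idiom (names hypothetical):

	/* Only the first caller to flip 0 -> 1 pings the device. */
	if (cmpxchg(&fifo_busy, 0, 1) == 0)
		ring_doorbell();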
void vmw_fifo_release(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index f47d5710cc95..52e086a5691e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -1897,87 +1897,6 @@ int vmw_kms_write_svga(struct vmw_private *vmw_priv,
return 0;
}
-int vmw_kms_save_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_priv->vga_width = vmw_read(vmw_priv, SVGA_REG_WIDTH);
- vmw_priv->vga_height = vmw_read(vmw_priv, SVGA_REG_HEIGHT);
- vmw_priv->vga_bpp = vmw_read(vmw_priv, SVGA_REG_BITS_PER_PIXEL);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_priv->vga_pitchlock =
- vmw_read(vmw_priv, SVGA_REG_PITCHLOCK);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_priv->vga_pitchlock = vmw_mmio_read(vmw_priv->mmio_virt +
- SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- vmw_priv->num_displays = vmw_read(vmw_priv,
- SVGA_REG_NUM_GUEST_DISPLAYS);
-
- if (vmw_priv->num_displays == 0)
- vmw_priv->num_displays = 1;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- save->primary = vmw_read(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY);
- save->pos_x = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_X);
- save->pos_y = vmw_read(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y);
- save->width = vmw_read(vmw_priv, SVGA_REG_DISPLAY_WIDTH);
- save->height = vmw_read(vmw_priv, SVGA_REG_DISPLAY_HEIGHT);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- if (i == 0 && vmw_priv->num_displays == 1 &&
- save->width == 0 && save->height == 0) {
-
- /*
- * It should be fairly safe to assume that these
- * values are uninitialized.
- */
-
- save->width = vmw_priv->vga_width - save->pos_x;
- save->height = vmw_priv->vga_height - save->pos_y;
- }
- }
-
- return 0;
-}
-
-int vmw_kms_restore_vga(struct vmw_private *vmw_priv)
-{
- struct vmw_vga_topology_state *save;
- uint32_t i;
-
- vmw_write(vmw_priv, SVGA_REG_WIDTH, vmw_priv->vga_width);
- vmw_write(vmw_priv, SVGA_REG_HEIGHT, vmw_priv->vga_height);
- vmw_write(vmw_priv, SVGA_REG_BITS_PER_PIXEL, vmw_priv->vga_bpp);
- if (vmw_priv->capabilities & SVGA_CAP_PITCHLOCK)
- vmw_write(vmw_priv, SVGA_REG_PITCHLOCK,
- vmw_priv->vga_pitchlock);
- else if (vmw_fifo_have_pitchlock(vmw_priv))
- vmw_mmio_write(vmw_priv->vga_pitchlock,
- vmw_priv->mmio_virt + SVGA_FIFO_PITCHLOCK);
-
- if (!(vmw_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY))
- return 0;
-
- for (i = 0; i < vmw_priv->num_displays; ++i) {
- save = &vmw_priv->vga_save[i];
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, i);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_IS_PRIMARY, save->primary);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_X, save->pos_x);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, save->pos_y);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, save->width);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, save->height);
- vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
- }
-
- return 0;
-}
-
bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
uint32_t pitch,
uint32_t height)
@@ -1991,7 +1910,7 @@ bool vmw_kms_validate_mode_vram(struct vmw_private *dev_priv,
/**
* Function called by DRM code called with vbl_lock held.
*/
-u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
+u32 vmw_get_vblank_counter(struct drm_crtc *crtc)
{
return 0;
}
@@ -1999,7 +1918,7 @@ u32 vmw_get_vblank_counter(struct drm_device *dev, unsigned int pipe)
/**
* Function called by DRM code called with vbl_lock held.
*/
-int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
+int vmw_enable_vblank(struct drm_crtc *crtc)
{
return -EINVAL;
}
@@ -2007,7 +1926,7 @@ int vmw_enable_vblank(struct drm_device *dev, unsigned int pipe)
/**
* Function called by DRM code called with vbl_lock held.
*/
-void vmw_disable_vblank(struct drm_device *dev, unsigned int pipe)
+void vmw_disable_vblank(struct drm_crtc *crtc)
{
}
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
index 5702219ec38f..16dafff5cab1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ldu.c
@@ -236,6 +236,9 @@ static const struct drm_crtc_funcs vmw_legacy_crtc_funcs = {
.atomic_duplicate_state = vmw_du_crtc_duplicate_state,
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
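
The vblank hooks above migrate from device-wide callbacks taking (dev, pipe) to per-CRTC entries in drm_crtc_funcs, so the DRM core dispatches through the object it already holds. A hedged sketch of the general shape, a per-object ops table instead of a global one (all names invented for illustration):

#include <stdio.h>

struct crtc;

struct crtc_funcs {
        unsigned int (*get_vblank_counter)(struct crtc *crtc);
};

struct crtc {
        const char *name;
        const struct crtc_funcs *funcs;
};

static unsigned int dummy_counter(struct crtc *crtc)
{
        (void)crtc;
        return 0;       /* virtual hardware: no real counter */
}

static const struct crtc_funcs funcs = {
        .get_vblank_counter = dummy_counter,
};

int main(void)
{
        struct crtc c = { .name = "crtc-0", .funcs = &funcs };

        /* the core dispatches through the object, not a global table */
        printf("%s: %u\n", c.name, c.funcs->get_vblank_counter(&c));
        return 0;
}
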
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
index fdb52f6d29fb..cd7ed1650d60 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
@@ -354,37 +354,6 @@ static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
}
/**
- * Stop all streams.
- *
- * Used by the fb code when starting.
- *
- * Takes the overlay lock.
- */
-int vmw_overlay_stop_all(struct vmw_private *dev_priv)
-{
- struct vmw_overlay *overlay = dev_priv->overlay_priv;
- int i, ret;
-
- if (!overlay)
- return 0;
-
- mutex_lock(&overlay->mutex);
-
- for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
- struct vmw_stream *stream = &overlay->stream[i];
- if (!stream->buf)
- continue;
-
- ret = vmw_overlay_stop(dev_priv, i, false, false);
- WARN_ON(ret != 0);
- }
-
- mutex_unlock(&overlay->mutex);
-
- return 0;
-}
-
-/**
* Try to resume all paused streams.
*
* Used by the kms code after moving a new scanout buffer to vram.
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
index f07aa857587c..60cfbfadd3f2 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c
@@ -69,7 +69,7 @@ struct vmw_bo_dirty {
unsigned int ref_count;
unsigned long bitmap_size;
size_t size;
- unsigned long bitmap[0];
+ unsigned long bitmap[];
};
/**
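
Replacing bitmap[0] with bitmap[] swaps the old GNU zero-length-array idiom for a C99 flexible array member; the allocation size math is unchanged, but compilers and sanitizers can now reason about the array's real bounds. A minimal sketch of sizing and allocating such a struct:

#include <stdio.h>
#include <stdlib.h>

struct dirty_tracker {
        size_t bitmap_size;          /* number of elements in bitmap[] */
        unsigned long bitmap[];      /* C99 flexible array member */
};

int main(void)
{
        size_t n = 4;
        /* sizeof(struct dirty_tracker) does not include bitmap[] */
        struct dirty_tracker *t =
                malloc(sizeof(*t) + n * sizeof(t->bitmap[0]));

        if (!t)
                return 1;
        t->bitmap_size = n;
        t->bitmap[n - 1] = ~0UL;     /* last element is valid storage */
        printf("allocated %zu trailing words\n", t->bitmap_size);
        free(t);
        return 0;
}
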
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
index e5a283263211..32a22e4eddb1 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_scrn.c
@@ -319,6 +319,9 @@ static const struct drm_crtc_funcs vmw_screen_object_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
/*
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
index 41a96fb49835..68aecb6d9f87 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_stdu.c
@@ -916,6 +916,9 @@ static const struct drm_crtc_funcs vmw_stdu_crtc_funcs = {
.atomic_destroy_state = vmw_du_crtc_destroy_state,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
+ .get_vblank_counter = vmw_get_vblank_counter,
+ .enable_vblank = vmw_enable_vblank,
+ .disable_vblank = vmw_disable_vblank,
};
@@ -1885,7 +1888,7 @@ int vmw_kms_stdu_init_display(struct vmw_private *dev_priv)
/* Do nothing if Screen Target support is turned off */
- if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE)
+ if (!VMWGFX_ENABLE_SCREEN_TARGET_OTABLE || !dev_priv->has_mob)
return -ENOSYS;
if (!(dev_priv->capabilities & SVGA_CAP_GBOBJECTS))
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 3ce630aa4fde..ec893cd17b50 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
@@ -79,7 +79,7 @@ struct vmw_surface_dirty {
struct svga3dsurface_cache cache;
size_t size;
u32 num_subres;
- SVGA3dBox boxes[0];
+ SVGA3dBox boxes[];
};
static void vmw_user_surface_free(struct vmw_resource *res);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
index d8ea3dd10af0..3f3b2c7a208a 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c
@@ -736,11 +736,6 @@ out_no_init:
return NULL;
}
-static int vmw_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
-{
- return 0;
-}
-
static int vmw_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man)
{
@@ -866,7 +861,6 @@ struct ttm_bo_driver vmw_bo_driver = {
.ttm_tt_create = &vmw_ttm_tt_create,
.ttm_tt_populate = &vmw_ttm_populate,
.ttm_tt_unpopulate = &vmw_ttm_unpopulate,
- .invalidate_caches = vmw_invalidate_caches,
.init_mem_type = vmw_init_mem_type,
.eviction_valuable = ttm_bo_eviction_valuable,
.evict_flags = vmw_evict_flags,
diff --git a/drivers/gpu/drm/xen/xen_drm_front_kms.c b/drivers/gpu/drm/xen/xen_drm_front_kms.c
index 4f34c5208180..78096bbcd226 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_kms.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_kms.c
@@ -220,6 +220,24 @@ static bool display_send_page_flip(struct drm_simple_display_pipe *pipe,
return false;
}
+static int display_check(struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *plane_state,
+ struct drm_crtc_state *crtc_state)
+{
+ /*
+ * Xen doesn't initialize vblanking via drm_vblank_init(), so
+ * DRM helpers assume that it doesn't handle vblanking and start
+ * sending out fake VBLANK events automatically.
+ *
+ * As Xen contains its own logic for sending out VBLANK events
+ * in send_pending_event(), disable no_vblank (i.e., the xen
+ * driver has vblanking support).
+ */
+ crtc_state->no_vblank = false;
+
+ return 0;
+}
+
static void display_update(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state)
{
@@ -284,6 +302,7 @@ static const struct drm_simple_display_pipe_funcs display_funcs = {
.enable = display_enable,
.disable = display_disable,
.prepare_fb = drm_gem_fb_simple_display_pipe_prepare_fb,
+ .check = display_check,
.update = display_update,
};
diff --git a/drivers/gpu/drm/zte/zx_plane.c b/drivers/gpu/drm/zte/zx_plane.c
index 086c50fac689..c8f7b21fa09e 100644
--- a/drivers/gpu/drm/zte/zx_plane.c
+++ b/drivers/gpu/drm/zte/zx_plane.c
@@ -54,7 +54,7 @@ static int zx_vl_plane_atomic_check(struct drm_plane *plane,
int min_scale = FRAC_16_16(1, 8);
int max_scale = FRAC_16_16(8, 1);
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
@@ -281,7 +281,7 @@ static int zx_gl_plane_atomic_check(struct drm_plane *plane,
struct drm_crtc *crtc = plane_state->crtc;
struct drm_crtc_state *crtc_state;
- if (!crtc || !fb)
+ if (!crtc || WARN_ON(!fb))
return 0;
crtc_state = drm_atomic_get_existing_crtc_state(plane_state->state,
diff --git a/drivers/hid/hid-alps.c b/drivers/hid/hid-alps.c
index ae79a7c66737..fa704153cb00 100644
--- a/drivers/hid/hid-alps.c
+++ b/drivers/hid/hid-alps.c
@@ -730,7 +730,7 @@ static int alps_input_configured(struct hid_device *hdev, struct hid_input *hi)
if (data->has_sp) {
input2 = input_allocate_device();
if (!input2) {
- input_free_device(input2);
+ ret = -ENOMEM;
goto exit;
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index 6ac8becc2372..d732d1d10caf 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -340,7 +340,8 @@ static int apple_input_mapping(struct hid_device *hdev, struct hid_input *hi,
unsigned long **bit, int *max)
{
if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
- usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
+ usage->hid == (HID_UP_MSVENDOR | 0x0003) ||
+ usage->hid == (HID_UP_HPVENDOR2 | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
diff --git a/drivers/hid/hid-bigbenff.c b/drivers/hid/hid-bigbenff.c
index 3f6abd190df4..db6da21ade06 100644
--- a/drivers/hid/hid-bigbenff.c
+++ b/drivers/hid/hid-bigbenff.c
@@ -174,6 +174,7 @@ static __u8 pid0902_rdesc_fixed[] = {
struct bigben_device {
struct hid_device *hid;
struct hid_report *report;
+ bool removed;
u8 led_state; /* LED1 = 1 .. LED4 = 8 */
u8 right_motor_on; /* right motor off/on 0/1 */
u8 left_motor_force; /* left motor force 0-255 */
@@ -190,6 +191,9 @@ static void bigben_worker(struct work_struct *work)
struct bigben_device, worker);
struct hid_field *report_field = bigben->report->field[0];
+ if (bigben->removed)
+ return;
+
if (bigben->work_led) {
bigben->work_led = false;
report_field->value[0] = 0x01; /* 1 = led message */
@@ -220,10 +224,16 @@ static void bigben_worker(struct work_struct *work)
static int hid_bigben_play_effect(struct input_dev *dev, void *data,
struct ff_effect *effect)
{
- struct bigben_device *bigben = data;
+ struct hid_device *hid = input_get_drvdata(dev);
+ struct bigben_device *bigben = hid_get_drvdata(hid);
u8 right_motor_on;
u8 left_motor_force;
+ if (!bigben) {
+ hid_err(hid, "no device data\n");
+ return 0;
+ }
+
if (effect->type != FF_RUMBLE)
return 0;
@@ -298,8 +308,8 @@ static void bigben_remove(struct hid_device *hid)
{
struct bigben_device *bigben = hid_get_drvdata(hid);
+ bigben->removed = true;
cancel_work_sync(&bigben->worker);
- hid_hw_close(hid);
hid_hw_stop(hid);
}
@@ -319,6 +329,7 @@ static int bigben_probe(struct hid_device *hid,
return -ENOMEM;
hid_set_drvdata(hid, bigben);
bigben->hid = hid;
+ bigben->removed = false;
error = hid_parse(hid);
if (error) {
@@ -341,10 +352,10 @@ static int bigben_probe(struct hid_device *hid,
INIT_WORK(&bigben->worker, bigben_worker);
- error = input_ff_create_memless(hidinput->input, bigben,
+ error = input_ff_create_memless(hidinput->input, NULL,
hid_bigben_play_effect);
if (error)
- return error;
+ goto error_hw_stop;
name_sz = strlen(dev_name(&hid->dev)) + strlen(":red:bigben#") + 1;
@@ -354,8 +365,10 @@ static int bigben_probe(struct hid_device *hid,
sizeof(struct led_classdev) + name_sz,
GFP_KERNEL
);
- if (!led)
- return -ENOMEM;
+ if (!led) {
+ error = -ENOMEM;
+ goto error_hw_stop;
+ }
name = (void *)(&led[1]);
snprintf(name, name_sz,
"%s:red:bigben%d",
@@ -369,7 +382,7 @@ static int bigben_probe(struct hid_device *hid,
bigben->leds[n] = led;
error = devm_led_classdev_register(&hid->dev, led);
if (error)
- return error;
+ goto error_hw_stop;
}
/* initial state: LED1 is on, no rumble effect */
@@ -383,6 +396,10 @@ static int bigben_probe(struct hid_device *hid,
hid_info(hid, "LED and force feedback support for BigBen gamepad\n");
return 0;
+
+error_hw_stop:
+ hid_hw_stop(hid);
+ return error;
}
static __u8 *bigben_report_fixup(struct hid_device *hid, __u8 *rdesc,
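
The probe path above gains an error_hw_stop label so every failure after hid_hw_start() unwinds the started hardware instead of leaking it. The canonical goto-unwind shape, sketched in plain C (resource names are illustrative; a real driver keeps its resources on success):

#include <stdlib.h>

static int probe(void)
{
        int err;
        char *a, *b;

        a = malloc(16);
        if (!a)
                return -1;              /* nothing acquired yet */

        b = malloc(16);
        if (!b) {
                err = -1;
                goto err_free_a;        /* unwind in reverse order */
        }

        /* success: freed here only to keep the demo leak-clean */
        free(b);
        free(a);
        return 0;

err_free_a:
        free(a);
        return err;
}

int main(void)
{
        return probe() ? 1 : 0;
}
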
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 851fe54ea59e..359616e3efbb 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1741,7 +1741,9 @@ int hid_report_raw_event(struct hid_device *hid, int type, u8 *data, u32 size,
rsize = ((report->size - 1) >> 3) + 1;
- if (rsize > HID_MAX_BUFFER_SIZE)
+ if (report_enum->numbered && rsize >= HID_MAX_BUFFER_SIZE)
+ rsize = HID_MAX_BUFFER_SIZE - 1;
+ else if (rsize > HID_MAX_BUFFER_SIZE)
rsize = HID_MAX_BUFFER_SIZE;
if (csize < rsize) {
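
For numbered reports the transfer buffer carries a one-byte report ID ahead of the payload, so a maximal report must be clamped one byte short of the buffer; that is what the new first branch does. The clamping rule in isolation (MAX_BUFFER_SIZE stands in for HID_MAX_BUFFER_SIZE):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_SIZE 64

/* bits -> bytes, then clamp; numbered reports need 1 byte for the ID */
static unsigned int clamp_report_size(unsigned int size_bits, bool numbered)
{
        unsigned int rsize = ((size_bits - 1) >> 3) + 1;

        if (numbered && rsize >= MAX_BUFFER_SIZE)
                rsize = MAX_BUFFER_SIZE - 1;
        else if (rsize > MAX_BUFFER_SIZE)
                rsize = MAX_BUFFER_SIZE;
        return rsize;
}

int main(void)
{
        printf("%u\n", clamp_report_size(8 * 200, true));   /* -> 63 */
        printf("%u\n", clamp_report_size(8 * 200, false));  /* -> 64 */
        return 0;
}
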
diff --git a/drivers/hid/hid-hyperv.c b/drivers/hid/hid-hyperv.c
index dddfca555df9..0b6ee1dee625 100644
--- a/drivers/hid/hid-hyperv.c
+++ b/drivers/hid/hid-hyperv.c
@@ -193,8 +193,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
goto cleanup;
/* The pointer is not NULL when we resume from hibernation */
- if (input_device->hid_desc != NULL)
- kfree(input_device->hid_desc);
+ kfree(input_device->hid_desc);
input_device->hid_desc = kmemdup(desc, desc->bLength, GFP_ATOMIC);
if (!input_device->hid_desc)
@@ -207,8 +206,7 @@ static void mousevsc_on_receive_device_info(struct mousevsc_dev *input_device,
}
/* The pointer is not NULL when we resume from hibernation */
- if (input_device->report_desc != NULL)
- kfree(input_device->report_desc);
+ kfree(input_device->report_desc);
input_device->report_desc = kzalloc(input_device->report_desc_size,
GFP_ATOMIC);
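
kfree(NULL) is defined to be a no-op, exactly like free(NULL) in standard C, so the removed if (ptr != NULL) guards were redundant. The userspace equivalent of the free-then-refresh pattern used here:

#include <stdlib.h>
#include <string.h>

int main(void)
{
        char *desc = NULL;

        /* safe on first use (desc == NULL) and on every refresh after */
        free(desc);
        desc = strdup("fresh descriptor");
        free(desc);
        return 0;
}
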
diff --git a/drivers/hid/hid-ite.c b/drivers/hid/hid-ite.c
index c436e12feb23..6c55682c5974 100644
--- a/drivers/hid/hid-ite.c
+++ b/drivers/hid/hid-ite.c
@@ -41,8 +41,9 @@ static const struct hid_device_id ite_devices[] = {
{ HID_USB_DEVICE(USB_VENDOR_ID_ITE, USB_DEVICE_ID_ITE8595) },
{ HID_USB_DEVICE(USB_VENDOR_ID_258A, USB_DEVICE_ID_258A_6A88) },
/* ITE8595 USB kbd ctlr, with Synaptics touchpad connected to it. */
- { HID_USB_DEVICE(USB_VENDOR_ID_SYNAPTICS,
- USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
+ { HID_DEVICE(BUS_USB, HID_GROUP_GENERIC,
+ USB_VENDOR_ID_SYNAPTICS,
+ USB_DEVICE_ID_SYNAPTICS_ACER_SWITCH5_012) },
{ }
};
MODULE_DEVICE_TABLE(hid, ite_devices);
diff --git a/drivers/hid/hid-logitech-hidpp.c b/drivers/hid/hid-logitech-hidpp.c
index 70e1cb928bf0..094f4f1b6555 100644
--- a/drivers/hid/hid-logitech-hidpp.c
+++ b/drivers/hid/hid-logitech-hidpp.c
@@ -1256,36 +1256,35 @@ static int hidpp20_battery_map_status_voltage(u8 data[3], int *voltage,
{
int status;
- long charge_sts = (long)data[2];
+ long flags = (long) data[2];
- *level = POWER_SUPPLY_CAPACITY_LEVEL_UNKNOWN;
- switch (data[2] & 0xe0) {
- case 0x00:
- status = POWER_SUPPLY_STATUS_CHARGING;
- break;
- case 0x20:
- status = POWER_SUPPLY_STATUS_FULL;
- *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
- break;
- case 0x40:
+ if (flags & 0x80)
+ switch (flags & 0x07) {
+ case 0:
+ status = POWER_SUPPLY_STATUS_CHARGING;
+ break;
+ case 1:
+ status = POWER_SUPPLY_STATUS_FULL;
+ *level = POWER_SUPPLY_CAPACITY_LEVEL_FULL;
+ break;
+ case 2:
+ status = POWER_SUPPLY_STATUS_NOT_CHARGING;
+ break;
+ default:
+ status = POWER_SUPPLY_STATUS_UNKNOWN;
+ break;
+ }
+ else
status = POWER_SUPPLY_STATUS_DISCHARGING;
- break;
- case 0xe0:
- status = POWER_SUPPLY_STATUS_NOT_CHARGING;
- break;
- default:
- status = POWER_SUPPLY_STATUS_UNKNOWN;
- }
*charge_type = POWER_SUPPLY_CHARGE_TYPE_STANDARD;
- if (test_bit(3, &charge_sts)) {
+ if (test_bit(3, &flags)) {
*charge_type = POWER_SUPPLY_CHARGE_TYPE_FAST;
}
- if (test_bit(4, &charge_sts)) {
+ if (test_bit(4, &flags)) {
*charge_type = POWER_SUPPLY_CHARGE_TYPE_TRICKLE;
}
-
- if (test_bit(5, &charge_sts)) {
+ if (test_bit(5, &flags)) {
*level = POWER_SUPPLY_CAPACITY_LEVEL_CRITICAL;
}
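
The rewritten mapping reads data[2] as a flags byte: bit 7 reports external power, and only when it is set do the low three bits select charging/full/not-charging; bits 3 to 5 separately refine charge type and capacity level. A hedged decode of that layout (the interpretation follows this patch, not a published spec):

#include <stdint.h>
#include <stdio.h>

static const char *battery_status(uint8_t flags)
{
        if (!(flags & 0x80))
                return "discharging";           /* no external power */

        switch (flags & 0x07) {
        case 0:  return "charging";
        case 1:  return "full";
        case 2:  return "not charging";
        default: return "unknown";
        }
}

int main(void)
{
        printf("%s\n", battery_status(0x80));   /* charging */
        printf("%s\n", battery_status(0x81));   /* full */
        printf("%s\n", battery_status(0x00));   /* discharging */
        return 0;
}
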
diff --git a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
index d31ea82b84c1..a66f08041a1a 100644
--- a/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
+++ b/drivers/hid/i2c-hid/i2c-hid-dmi-quirks.c
@@ -342,6 +342,14 @@ static const struct dmi_system_id i2c_hid_dmi_desc_override_table[] = {
.driver_data = (void *)&sipodev_desc
},
{
+ .ident = "Trekstor SURFBOOK E11B",
+ .matches = {
+ DMI_EXACT_MATCH(DMI_SYS_VENDOR, "TREKSTOR"),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "SURFBOOK E11B"),
+ },
+ .driver_data = (void *)&sipodev_desc
+ },
+ {
.ident = "Direkt-Tek DTLAPY116-2",
.matches = {
DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Direkt-Tek"),
diff --git a/drivers/hid/usbhid/hiddev.c b/drivers/hid/usbhid/hiddev.c
index a970b809d778..4140dea693e9 100644
--- a/drivers/hid/usbhid/hiddev.c
+++ b/drivers/hid/usbhid/hiddev.c
@@ -932,9 +932,9 @@ void hiddev_disconnect(struct hid_device *hid)
hiddev->exist = 0;
if (hiddev->open) {
- mutex_unlock(&hiddev->existancelock);
hid_hw_close(hiddev->hid);
wake_up_interruptible(&hiddev->wait);
+ mutex_unlock(&hiddev->existancelock);
} else {
mutex_unlock(&hiddev->existancelock);
kfree(hiddev);
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 4cf25458f0b9..0db8ef4fd6e1 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -355,7 +355,9 @@ static ssize_t show_str(struct device *dev,
struct acpi_device *acpi_dev = to_acpi_device(dev);
struct acpi_power_meter_resource *resource = acpi_dev->driver_data;
acpi_string val;
+ int ret;
+ mutex_lock(&resource->lock);
switch (attr->index) {
case 0:
val = resource->model_number;
@@ -372,8 +374,9 @@ static ssize_t show_str(struct device *dev,
val = "";
break;
}
-
- return sprintf(buf, "%s\n", val);
+ ret = sprintf(buf, "%s\n", val);
+ mutex_unlock(&resource->lock);
+ return ret;
}
static ssize_t show_val(struct device *dev,
@@ -817,11 +820,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
resource = acpi_driver_data(device);
- mutex_lock(&resource->lock);
switch (event) {
case METER_NOTIFY_CONFIG:
+ mutex_lock(&resource->lock);
free_capabilities(resource);
res = read_capabilities(resource);
+ mutex_unlock(&resource->lock);
if (res)
break;
@@ -830,15 +834,12 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
break;
case METER_NOTIFY_TRIP:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME);
- update_meter(resource);
break;
case METER_NOTIFY_CAP:
sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME);
- update_cap(resource);
break;
case METER_NOTIFY_INTERVAL:
sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME);
- update_avg_interval(resource);
break;
case METER_NOTIFY_CAPPING:
sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME);
@@ -848,7 +849,6 @@ static void acpi_power_meter_notify(struct acpi_device *device, u32 event)
WARN(1, "Unexpected event %d\n", event);
break;
}
- mutex_unlock(&resource->lock);
acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS,
dev_name(&device->dev), event, 0);
@@ -912,8 +912,8 @@ static int acpi_power_meter_remove(struct acpi_device *device)
resource = acpi_driver_data(device);
hwmon_device_unregister(resource->hwmon_dev);
- free_capabilities(resource);
remove_attrs(resource);
+ free_capabilities(resource);
kfree(resource);
return 0;
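
show_str() now holds resource->lock while formatting, so a concurrent capability refresh cannot free the string mid-sprintf; the length is computed under the lock and returned afterwards. The same shape with pthreads (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static char model[32] = "power-meter-1";

/* copy out under the lock; never return a pointer into shared state */
static int read_model(char *buf, size_t len)
{
        int ret;

        pthread_mutex_lock(&lock);
        ret = snprintf(buf, len, "%s\n", model);
        pthread_mutex_unlock(&lock);
        return ret;
}

int main(void)
{
        char buf[64];

        read_model(buf, sizeof(buf));
        fputs(buf, stdout);
        return 0;
}
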
diff --git a/drivers/hwmon/adt7462.c b/drivers/hwmon/adt7462.c
index 9632e2e3c4bb..319a0519ebdb 100644
--- a/drivers/hwmon/adt7462.c
+++ b/drivers/hwmon/adt7462.c
@@ -413,7 +413,7 @@ static int ADT7462_REG_VOLT(struct adt7462_data *data, int which)
return 0x95;
break;
}
- return -ENODEV;
+ return 0;
}
/* Provide labels for sysfs */
diff --git a/drivers/hwmon/pmbus/ltc2978.c b/drivers/hwmon/pmbus/ltc2978.c
index f01f4887fb2e..a91ed01abb68 100644
--- a/drivers/hwmon/pmbus/ltc2978.c
+++ b/drivers/hwmon/pmbus/ltc2978.c
@@ -82,8 +82,8 @@ enum chips { ltc2974, ltc2975, ltc2977, ltc2978, ltc2980, ltc3880, ltc3882,
#define LTC_POLL_TIMEOUT 100 /* in milliseconds */
-#define LTC_NOT_BUSY BIT(5)
-#define LTC_NOT_PENDING BIT(4)
+#define LTC_NOT_BUSY BIT(6)
+#define LTC_NOT_PENDING BIT(5)
/*
* LTC2978 clears peak data whenever the CLEAR_FAULTS command is executed, which
diff --git a/drivers/hwmon/pmbus/xdpe12284.c b/drivers/hwmon/pmbus/xdpe12284.c
index 3d47806ff4d3..660556b89e9f 100644
--- a/drivers/hwmon/pmbus/xdpe12284.c
+++ b/drivers/hwmon/pmbus/xdpe12284.c
@@ -18,6 +18,59 @@
#define XDPE122_AMD_625MV 0x10 /* AMD mode 6.25mV */
#define XDPE122_PAGE_NUM 2
+static int xdpe122_read_word_data(struct i2c_client *client, int page, int reg)
+{
+ const struct pmbus_driver_info *info = pmbus_get_driver_info(client);
+ long val;
+ s16 exponent;
+ s32 mantissa;
+ int ret;
+
+ switch (reg) {
+ case PMBUS_VOUT_OV_FAULT_LIMIT:
+ case PMBUS_VOUT_UV_FAULT_LIMIT:
+ ret = pmbus_read_word_data(client, page, reg);
+ if (ret < 0)
+ return ret;
+
+ /* Convert register value to LINEAR11 data. */
+ exponent = ((s16)ret) >> 11;
+ mantissa = ((s16)((ret & GENMASK(10, 0)) << 5)) >> 5;
+ val = mantissa * 1000L;
+ if (exponent >= 0)
+ val <<= exponent;
+ else
+ val >>= -exponent;
+
+ /* Convert data to VID register. */
+ switch (info->vrm_version[page]) {
+ case vr13:
+ if (val >= 500)
+ return 1 + DIV_ROUND_CLOSEST(val - 500, 10);
+ return 0;
+ case vr12:
+ if (val >= 250)
+ return 1 + DIV_ROUND_CLOSEST(val - 250, 5);
+ return 0;
+ case imvp9:
+ if (val >= 200)
+ return 1 + DIV_ROUND_CLOSEST(val - 200, 10);
+ return 0;
+ case amd625mv:
+ if (val >= 200 && val <= 1550)
+ return DIV_ROUND_CLOSEST((1550 - val) * 100,
+ 625);
+ return 0;
+ default:
+ return -EINVAL;
+ }
+ default:
+ return -ENODATA;
+ }
+
+ return 0;
+}
+
static int xdpe122_identify(struct i2c_client *client,
struct pmbus_driver_info *info)
{
@@ -70,6 +123,7 @@ static struct pmbus_driver_info xdpe122_info = {
PMBUS_HAVE_TEMP | PMBUS_HAVE_STATUS_TEMP |
PMBUS_HAVE_POUT | PMBUS_HAVE_PIN | PMBUS_HAVE_STATUS_INPUT,
.identify = xdpe122_identify,
+ .read_word_data = xdpe122_read_word_data,
};
static int xdpe122_probe(struct i2c_client *client,
@@ -94,8 +148,8 @@ static const struct i2c_device_id xdpe122_id[] = {
MODULE_DEVICE_TABLE(i2c, xdpe122_id);
static const struct of_device_id __maybe_unused xdpe122_of_match[] = {
- {.compatible = "infineon, xdpe12254"},
- {.compatible = "infineon, xdpe12284"},
+ {.compatible = "infineon,xdpe12254"},
+ {.compatible = "infineon,xdpe12284"},
{}
};
MODULE_DEVICE_TABLE(of, xdpe122_of_match);
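
The new xdpe122_read_word_data() first decodes a PMBus LINEAR11 word (a 5-bit signed exponent in bits 15:11, an 11-bit signed mantissa in bits 10:0) into milli-units, then re-encodes the result on the VR's VID scale. The sign-extension trick it uses, in isolation:

#include <stdint.h>
#include <stdio.h>

/* Decode a PMBus LINEAR11 word into milli-units. */
static long linear11_to_milli(uint16_t word)
{
        /* top 5 bits, sign-extended by the arithmetic right shift */
        int16_t exponent = (int16_t)word >> 11;
        /* shift the 11-bit field to the top of 16 bits, then back down */
        int32_t mantissa = ((int16_t)((word & 0x7ff) << 5)) >> 5;
        long val = mantissa * 1000L;

        return exponent >= 0 ? val << exponent : val >> -exponent;
}

int main(void)
{
        /* exponent = -2 (0b11110), mantissa = 2: 2 * 2^-2 = 0.5 units */
        uint16_t w = (uint16_t)(0x1e << 11) | 2;

        printf("%ld\n", linear11_to_milli(w));   /* prints 500 */
        return 0;
}
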
diff --git a/drivers/hwmon/w83627ehf.c b/drivers/hwmon/w83627ehf.c
index 7ffadc2da57b..5a5120121e50 100644
--- a/drivers/hwmon/w83627ehf.c
+++ b/drivers/hwmon/w83627ehf.c
@@ -1346,8 +1346,13 @@ w83627ehf_is_visible(const void *drvdata, enum hwmon_sensor_types type,
/* channel 0.., name 1.. */
if (!(data->have_temp & (1 << channel)))
return 0;
- if (attr == hwmon_temp_input || attr == hwmon_temp_label)
+ if (attr == hwmon_temp_input)
return 0444;
+ if (attr == hwmon_temp_label) {
+ if (data->temp_label)
+ return 0444;
+ return 0;
+ }
if (channel == 2 && data->temp3_val_only)
return 0;
if (attr == hwmon_temp_max) {
diff --git a/drivers/i2c/busses/i2c-altera.c b/drivers/i2c/busses/i2c-altera.c
index 5255d3755411..1de23b4f3809 100644
--- a/drivers/i2c/busses/i2c-altera.c
+++ b/drivers/i2c/busses/i2c-altera.c
@@ -171,7 +171,7 @@ static void altr_i2c_init(struct altr_i2c_dev *idev)
/* SCL Low Time */
writel(t_low, idev->base + ALTR_I2C_SCL_LOW);
/* SDA Hold Time, 300ns */
- writel(div_u64(300 * clk_mhz, 1000), idev->base + ALTR_I2C_SDA_HOLD);
+ writel(3 * clk_mhz / 10, idev->base + ALTR_I2C_SDA_HOLD);
/* Mask all master interrupt bits */
altr_i2c_int_enable(idev, ALTR_I2C_ALL_IRQ, false);
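
The SDA-hold write replaces div_u64(300 * clk_mhz, 1000) with the pre-reduced 3 * clk_mhz / 10: the same 300 ns in clock cycles, without pulling in a 64-bit division helper. The arithmetic, spelled out (overflow is not a concern at realistic clock rates):

#include <stdio.h>

/* cycles needed to hold SDA for `ns` nanoseconds at `clk_mhz` MHz */
static unsigned int ns_to_cycles(unsigned int ns, unsigned int clk_mhz)
{
        /* cycles = ns * clk_mhz / 1000; for ns = 300 this is 3*clk/10 */
        return ns * clk_mhz / 1000;
}

int main(void)
{
        unsigned int clk_mhz = 100;

        printf("%u == %u\n", ns_to_cycles(300, clk_mhz), 3 * clk_mhz / 10);
        return 0;
}
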
diff --git a/drivers/i2c/busses/i2c-jz4780.c b/drivers/i2c/busses/i2c-jz4780.c
index 16a67a64284a..b426fc956938 100644
--- a/drivers/i2c/busses/i2c-jz4780.c
+++ b/drivers/i2c/busses/i2c-jz4780.c
@@ -78,25 +78,6 @@
#define X1000_I2C_DC_STOP BIT(9)
-static const char * const jz4780_i2c_abrt_src[] = {
- "ABRT_7B_ADDR_NOACK",
- "ABRT_10ADDR1_NOACK",
- "ABRT_10ADDR2_NOACK",
- "ABRT_XDATA_NOACK",
- "ABRT_GCALL_NOACK",
- "ABRT_GCALL_READ",
- "ABRT_HS_ACKD",
- "SBYTE_ACKDET",
- "ABRT_HS_NORSTRT",
- "SBYTE_NORSTRT",
- "ABRT_10B_RD_NORSTRT",
- "ABRT_MASTER_DIS",
- "ARB_LOST",
- "SLVFLUSH_TXFIFO",
- "SLV_ARBLOST",
- "SLVRD_INTX",
-};
-
#define JZ4780_I2C_INTST_IGC BIT(11)
#define JZ4780_I2C_INTST_ISTT BIT(10)
#define JZ4780_I2C_INTST_ISTP BIT(9)
@@ -576,21 +557,8 @@ done:
static void jz4780_i2c_txabrt(struct jz4780_i2c *i2c, int src)
{
- int i;
-
- dev_err(&i2c->adap.dev, "txabrt: 0x%08x\n", src);
- dev_err(&i2c->adap.dev, "device addr=%x\n",
- jz4780_i2c_readw(i2c, JZ4780_I2C_TAR));
- dev_err(&i2c->adap.dev, "send cmd count:%d %d\n",
- i2c->cmd, i2c->cmd_buf[i2c->cmd]);
- dev_err(&i2c->adap.dev, "receive data count:%d %d\n",
- i2c->cmd, i2c->data_buf[i2c->cmd]);
-
- for (i = 0; i < 16; i++) {
- if (src & BIT(i))
- dev_dbg(&i2c->adap.dev, "I2C TXABRT[%d]=%s\n",
- i, jz4780_i2c_abrt_src[i]);
- }
+ dev_dbg(&i2c->adap.dev, "txabrt: 0x%08x, cmd: %d, send: %d, recv: %d\n",
+ src, i2c->cmd, i2c->cmd_buf[i2c->cmd], i2c->data_buf[i2c->cmd]);
}
static inline int jz4780_i2c_xfer_read(struct jz4780_i2c *i2c,
diff --git a/drivers/ide/ide-gd.c b/drivers/ide/ide-gd.c
index 1bb99b556393..05c26986637b 100644
--- a/drivers/ide/ide-gd.c
+++ b/drivers/ide/ide-gd.c
@@ -361,7 +361,7 @@ static const struct block_device_operations ide_gd_ops = {
.release = ide_gd_release,
.ioctl = ide_gd_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = ide_gd_compat_ioctl,
+ .compat_ioctl = ide_gd_compat_ioctl,
#endif
.getgeo = ide_gd_getgeo,
.check_events = ide_gd_check_events,
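
The bug fixed above is easy to write and easy to miss: C permits the same field to appear twice in a designated initializer and the later entry silently wins, so .compat_ioctl was never set while .ioctl was overwritten. A standalone demonstration (compile with -Woverride-init to surface it):

#include <stdio.h>

struct ops {
        int (*ioctl)(void);
        int (*compat_ioctl)(void);
};

static int a(void) { return 1; }
static int b(void) { return 2; }

/* Typo: .ioctl named twice; b() overrides a(), .compat_ioctl stays NULL */
static const struct ops buggy = {
        .ioctl = a,
        .ioctl = b,
};

int main(void)
{
        printf("ioctl=%d compat_ioctl %s\n",
               buggy.ioctl(), buggy.compat_ioctl ? "set" : "NULL");
        return 0;
}
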
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
index 68cc1b2d6824..15e99a888427 100644
--- a/drivers/infiniband/core/cm.c
+++ b/drivers/infiniband/core/cm.c
@@ -1191,6 +1191,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
/* Sharing an ib_cm_id with different handlers is not
* supported */
spin_unlock_irqrestore(&cm.lock, flags);
+ ib_destroy_cm_id(cm_id);
return ERR_PTR(-EINVAL);
}
refcount_inc(&cm_id_priv->refcount);
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 72f032160c4b..2dec3a02ab9f 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -3212,19 +3212,26 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
int ret;
id_priv = container_of(id, struct rdma_id_private, id);
+ memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (id_priv->state == RDMA_CM_IDLE) {
ret = cma_bind_addr(id, src_addr, dst_addr);
- if (ret)
+ if (ret) {
+ memset(cma_dst_addr(id_priv), 0,
+ rdma_addr_size(dst_addr));
return ret;
+ }
}
- if (cma_family(id_priv) != dst_addr->sa_family)
+ if (cma_family(id_priv) != dst_addr->sa_family) {
+ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
return -EINVAL;
+ }
- if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY))
+ if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) {
+ memset(cma_dst_addr(id_priv), 0, rdma_addr_size(dst_addr));
return -EINVAL;
+ }
- memcpy(cma_dst_addr(id_priv), dst_addr, rdma_addr_size(dst_addr));
if (cma_any_addr(dst_addr)) {
ret = cma_resolve_loopback(id_priv);
} else {
diff --git a/drivers/infiniband/core/core_priv.h b/drivers/infiniband/core/core_priv.h
index b1457b3464d3..cf42acca4a3a 100644
--- a/drivers/infiniband/core/core_priv.h
+++ b/drivers/infiniband/core/core_priv.h
@@ -338,6 +338,20 @@ static inline struct ib_qp *_ib_create_qp(struct ib_device *dev,
qp->pd = pd;
qp->uobject = uobj;
qp->real_qp = qp;
+
+ qp->qp_type = attr->qp_type;
+ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+ qp->send_cq = attr->send_cq;
+ qp->recv_cq = attr->recv_cq;
+ qp->srq = attr->srq;
+ qp->rwq_ind_tbl = attr->rwq_ind_tbl;
+ qp->event_handler = attr->event_handler;
+
+ atomic_set(&qp->usecnt, 0);
+ spin_lock_init(&qp->mr_lock);
+ INIT_LIST_HEAD(&qp->rdma_mrs);
+ INIT_LIST_HEAD(&qp->sig_mrs);
+
/*
* We don't track XRC QPs for now, because they don't have PD
* and more importantly they are created internally by the driver,
diff --git a/drivers/infiniband/core/iwcm.c b/drivers/infiniband/core/iwcm.c
index ade71823370f..da8adadf4755 100644
--- a/drivers/infiniband/core/iwcm.c
+++ b/drivers/infiniband/core/iwcm.c
@@ -159,8 +159,10 @@ static void dealloc_work_entries(struct iwcm_id_private *cm_id_priv)
{
struct list_head *e, *tmp;
- list_for_each_safe(e, tmp, &cm_id_priv->work_free_list)
+ list_for_each_safe(e, tmp, &cm_id_priv->work_free_list) {
+ list_del(e);
kfree(list_entry(e, struct iwcm_work, free_list));
+ }
}
static int alloc_work_entries(struct iwcm_id_private *cm_id_priv, int count)
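
Adding list_del() before kfree() keeps the free list from ever pointing at freed memory, even transiently. The safe-iteration analogue in plain C: record the successor before freeing each node, and leave no reachable pointer to freed memory behind:

#include <stdlib.h>

struct node {
        struct node *next;
};

static void destroy_list(struct node **head)
{
        struct node *n, *tmp;

        for (n = *head; n; n = tmp) {
                tmp = n->next;   /* save successor before freeing */
                free(n);         /* node is unreachable from here on */
        }
        *head = NULL;            /* list head no longer dangles */
}

int main(void)
{
        struct node *head = NULL;

        for (int i = 0; i < 3; i++) {
                struct node *n = malloc(sizeof(*n));

                if (!n)
                        break;
                n->next = head;
                head = n;
        }
        destroy_list(&head);
        return 0;
}
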
diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
index 37b433aa7306..e0b0a91da696 100644
--- a/drivers/infiniband/core/nldev.c
+++ b/drivers/infiniband/core/nldev.c
@@ -1757,6 +1757,8 @@ static int nldev_stat_set_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
if (ret)
goto err_msg;
} else {
+ if (!tb[RDMA_NLDEV_ATTR_RES_LQPN])
+ goto err_msg;
qpn = nla_get_u32(tb[RDMA_NLDEV_ATTR_RES_LQPN]);
if (tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]) {
cntn = nla_get_u32(tb[RDMA_NLDEV_ATTR_STAT_COUNTER_ID]);
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index 4fad732f9b3c..06e5b6787443 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -273,6 +273,23 @@ static int rdma_rw_init_single_wr(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
return 1;
}
+static void rdma_rw_unmap_sg(struct ib_device *dev, struct scatterlist *sg,
+ u32 sg_cnt, enum dma_data_direction dir)
+{
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ pci_p2pdma_unmap_sg(dev->dma_device, sg, sg_cnt, dir);
+ else
+ ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+}
+
+static int rdma_rw_map_sg(struct ib_device *dev, struct scatterlist *sg,
+ u32 sg_cnt, enum dma_data_direction dir)
+{
+ if (is_pci_p2pdma_page(sg_page(sg)))
+ return pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
+ return ib_dma_map_sg(dev, sg, sg_cnt, dir);
+}
+
/**
* rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
* @ctx: context to initialize
@@ -295,11 +312,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
struct ib_device *dev = qp->pd->device;
int ret;
- if (is_pci_p2pdma_page(sg_page(sg)))
- ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
- else
- ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);
-
+ ret = rdma_rw_map_sg(dev, sg, sg_cnt, dir);
if (!ret)
return -ENOMEM;
sg_cnt = ret;
@@ -338,7 +351,7 @@ int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
return ret;
out_unmap_sg:
- ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(dev, sg, sg_cnt, dir);
return ret;
}
EXPORT_SYMBOL(rdma_rw_ctx_init);
@@ -588,11 +601,7 @@ void rdma_rw_ctx_destroy(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
break;
}
- if (is_pci_p2pdma_page(sg_page(sg)))
- pci_p2pdma_unmap_sg(qp->pd->device->dma_device, sg,
- sg_cnt, dir);
- else
- ib_dma_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
+ rdma_rw_unmap_sg(qp->pd->device, sg, sg_cnt, dir);
}
EXPORT_SYMBOL(rdma_rw_ctx_destroy);
diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c
index 6eb6d2717ca5..2d5608315dc8 100644
--- a/drivers/infiniband/core/security.c
+++ b/drivers/infiniband/core/security.c
@@ -339,22 +339,20 @@ static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp,
if (!new_pps)
return NULL;
- if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) {
- if (!qp_pps) {
- new_pps->main.port_num = qp_attr->port_num;
- new_pps->main.pkey_index = qp_attr->pkey_index;
- } else {
- new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ?
- qp_attr->port_num :
- qp_pps->main.port_num;
-
- new_pps->main.pkey_index =
- (qp_attr_mask & IB_QP_PKEY_INDEX) ?
- qp_attr->pkey_index :
- qp_pps->main.pkey_index;
- }
+ if (qp_attr_mask & IB_QP_PORT)
+ new_pps->main.port_num = qp_attr->port_num;
+ else if (qp_pps)
+ new_pps->main.port_num = qp_pps->main.port_num;
+
+ if (qp_attr_mask & IB_QP_PKEY_INDEX)
+ new_pps->main.pkey_index = qp_attr->pkey_index;
+ else if (qp_pps)
+ new_pps->main.pkey_index = qp_pps->main.pkey_index;
+
+ if ((qp_attr_mask & IB_QP_PKEY_INDEX) && (qp_attr_mask & IB_QP_PORT))
new_pps->main.state = IB_PORT_PKEY_VALID;
- } else if (qp_pps) {
+
+ if (!(qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) && qp_pps) {
new_pps->main.port_num = qp_pps->main.port_num;
new_pps->main.pkey_index = qp_pps->main.pkey_index;
if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID)
diff --git a/drivers/infiniband/core/umem_odp.c b/drivers/infiniband/core/umem_odp.c
index b8c657b28380..cd656ad4953b 100644
--- a/drivers/infiniband/core/umem_odp.c
+++ b/drivers/infiniband/core/umem_odp.c
@@ -181,14 +181,28 @@ ib_umem_odp_alloc_child(struct ib_umem_odp *root, unsigned long addr,
odp_data->page_shift = PAGE_SHIFT;
odp_data->notifier.ops = ops;
+ /*
+ * An mmget must be held when registering a notifier; the owning_mm only
+ * has an mm_grab at this point.
+ */
+ if (!mmget_not_zero(umem->owning_mm)) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+
odp_data->tgid = get_pid(root->tgid);
ret = ib_init_umem_odp(odp_data, ops);
- if (ret) {
- put_pid(odp_data->tgid);
- kfree(odp_data);
- return ERR_PTR(ret);
- }
+ if (ret)
+ goto out_tgid;
+ mmput(umem->owning_mm);
return odp_data;
+
+out_tgid:
+ put_pid(odp_data->tgid);
+ mmput(umem->owning_mm);
+out_free:
+ kfree(odp_data);
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL(ib_umem_odp_alloc_child);
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index d1407fa378e8..1235ffb2389b 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -1312,6 +1312,9 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
struct ib_umad_file *file;
int id;
+ cdev_device_del(&port->sm_cdev, &port->sm_dev);
+ cdev_device_del(&port->cdev, &port->dev);
+
mutex_lock(&port->file_mutex);
/* Mark ib_dev NULL and block ioctl or other file ops to progress
@@ -1331,8 +1334,6 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
mutex_unlock(&port->file_mutex);
- cdev_device_del(&port->sm_cdev, &port->sm_dev);
- cdev_device_del(&port->cdev, &port->dev);
ida_free(&umad_ida, port->dev_num);
/* balances device_initialize() */
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index c8693f5231dd..060b4ebbd2ba 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -1445,16 +1445,7 @@ static int create_qp(struct uverbs_attr_bundle *attrs,
if (ret)
goto err_cb;
- qp->pd = pd;
- qp->send_cq = attr.send_cq;
- qp->recv_cq = attr.recv_cq;
- qp->srq = attr.srq;
- qp->rwq_ind_tbl = ind_tbl;
- qp->event_handler = attr.event_handler;
- qp->qp_type = attr.qp_type;
- atomic_set(&qp->usecnt, 0);
atomic_inc(&pd->usecnt);
- qp->port = 0;
if (attr.send_cq)
atomic_inc(&attr.send_cq->usecnt);
if (attr.recv_cq)
@@ -2745,12 +2736,6 @@ static int kern_spec_to_ib_spec_action(struct uverbs_attr_bundle *attrs,
return 0;
}
-static size_t kern_spec_filter_sz(const struct ib_uverbs_flow_spec_hdr *spec)
-{
- /* Returns user space filter size, includes padding */
- return (spec->size - sizeof(struct ib_uverbs_flow_spec_hdr)) / 2;
-}
-
static ssize_t spec_filter_size(const void *kern_spec_filter, u16 kern_filter_size,
u16 ib_real_filter_sz)
{
@@ -2894,11 +2879,16 @@ int ib_uverbs_kern_spec_to_ib_spec_filter(enum ib_flow_spec_type type,
static int kern_spec_to_ib_spec_filter(struct ib_uverbs_flow_spec *kern_spec,
union ib_flow_spec *ib_spec)
{
- ssize_t kern_filter_sz;
+ size_t kern_filter_sz;
void *kern_spec_mask;
void *kern_spec_val;
- kern_filter_sz = kern_spec_filter_sz(&kern_spec->hdr);
+ if (check_sub_overflow((size_t)kern_spec->hdr.size,
+ sizeof(struct ib_uverbs_flow_spec_hdr),
+ &kern_filter_sz))
+ return -EINVAL;
+
+ kern_filter_sz /= 2;
kern_spec_val = (void *)kern_spec +
sizeof(struct ib_uverbs_flow_spec_hdr);
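
check_sub_overflow(), a wrapper around the compiler's overflow builtins, rejects a user-supplied hdr.size smaller than the header instead of letting the unsigned subtraction wrap to a huge filter size. The builtin in isolation (GCC and Clang):

#include <stddef.h>
#include <stdio.h>

int main(void)
{
        size_t user_size = 4;          /* attacker-controlled, too small */
        size_t header = 16;
        size_t filter_sz;

        /* true when user_size - header would wrap below zero */
        if (__builtin_sub_overflow(user_size, header, &filter_sz)) {
                puts("rejected: size smaller than header");
                return 1;
        }
        printf("filter bytes: %zu\n", filter_sz / 2);
        return 0;
}
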
diff --git a/drivers/infiniband/core/uverbs_std_types.c b/drivers/infiniband/core/uverbs_std_types.c
index 994d8744b246..3abfc63225cb 100644
--- a/drivers/infiniband/core/uverbs_std_types.c
+++ b/drivers/infiniband/core/uverbs_std_types.c
@@ -220,6 +220,7 @@ void ib_uverbs_free_event_queue(struct ib_uverbs_event_queue *event_queue)
list_for_each_entry_safe(entry, tmp, &event_queue->event_list, list) {
if (entry->counter)
list_del(&entry->obj_list);
+ list_del(&entry->list);
kfree(entry);
}
spin_unlock_irq(&event_queue->lock);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 3ebae3b65c28..e62c9dfc7837 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -1185,16 +1185,6 @@ struct ib_qp *ib_create_qp_user(struct ib_pd *pd,
if (ret)
goto err;
- qp->qp_type = qp_init_attr->qp_type;
- qp->rwq_ind_tbl = qp_init_attr->rwq_ind_tbl;
-
- atomic_set(&qp->usecnt, 0);
- qp->mrs_used = 0;
- spin_lock_init(&qp->mr_lock);
- INIT_LIST_HEAD(&qp->rdma_mrs);
- INIT_LIST_HEAD(&qp->sig_mrs);
- qp->port = 0;
-
if (qp_init_attr->qp_type == IB_QPT_XRC_TGT) {
struct ib_qp *xrc_qp =
create_xrc_qp_user(qp, qp_init_attr, udata);
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index ee1182f9b627..d69dece3b1d5 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -3036,6 +3036,10 @@ static int terminate(struct c4iw_dev *dev, struct sk_buff *skb)
C4IW_QP_ATTR_NEXT_STATE, &attrs, 1);
}
+ /* As per draft-hilland-iwarp-verbs-v1.0, sec 6.2.3,
+ * when entering the TERM state the RNIC MUST initiate a CLOSE.
+ */
+ c4iw_ep_disconnect(ep, 1, GFP_KERNEL);
c4iw_put_ep(&ep->com);
} else
pr_warn("TERM received tid %u no ep/qp\n", tid);
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index bbcac539777a..89ac2f9ae6dd 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1948,10 +1948,10 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
qhp->attr.layer_etype = attrs->layer_etype;
qhp->attr.ecode = attrs->ecode;
ep = qhp->ep;
- c4iw_get_ep(&ep->com);
- disconnect = 1;
if (!internal) {
+ c4iw_get_ep(&ep->com);
terminate = 1;
+ disconnect = 1;
} else {
terminate = qhp->attr.send_term;
ret = rdma_fini(rhp, qhp, ep);
diff --git a/drivers/infiniband/hw/hfi1/affinity.c b/drivers/infiniband/hw/hfi1/affinity.c
index c142b23bb401..1aeea5d65c01 100644
--- a/drivers/infiniband/hw/hfi1/affinity.c
+++ b/drivers/infiniband/hw/hfi1/affinity.c
@@ -479,6 +479,8 @@ static int _dev_comp_vect_mappings_create(struct hfi1_devdata *dd,
rvt_get_ibdev_name(&(dd)->verbs_dev.rdi), i, cpu);
}
+ free_cpumask_var(available_cpus);
+ free_cpumask_var(non_intr_cpus);
return 0;
fail:
diff --git a/drivers/infiniband/hw/hfi1/file_ops.c b/drivers/infiniband/hw/hfi1/file_ops.c
index bef6946861b2..259115886d35 100644
--- a/drivers/infiniband/hw/hfi1/file_ops.c
+++ b/drivers/infiniband/hw/hfi1/file_ops.c
@@ -200,23 +200,24 @@ static int hfi1_file_open(struct inode *inode, struct file *fp)
fd = kzalloc(sizeof(*fd), GFP_KERNEL);
- if (fd) {
- fd->rec_cpu_num = -1; /* no cpu affinity by default */
- fd->mm = current->mm;
- mmgrab(fd->mm);
- fd->dd = dd;
- kobject_get(&fd->dd->kobj);
- fp->private_data = fd;
- } else {
- fp->private_data = NULL;
-
- if (atomic_dec_and_test(&dd->user_refcount))
- complete(&dd->user_comp);
-
- return -ENOMEM;
- }
-
+ if (!fd || init_srcu_struct(&fd->pq_srcu))
+ goto nomem;
+ spin_lock_init(&fd->pq_rcu_lock);
+ spin_lock_init(&fd->tid_lock);
+ spin_lock_init(&fd->invalid_lock);
+ fd->rec_cpu_num = -1; /* no cpu affinity by default */
+ fd->mm = current->mm;
+ mmgrab(fd->mm);
+ fd->dd = dd;
+ kobject_get(&fd->dd->kobj);
+ fp->private_data = fd;
return 0;
+nomem:
+ kfree(fd);
+ fp->private_data = NULL;
+ if (atomic_dec_and_test(&dd->user_refcount))
+ complete(&dd->user_comp);
+ return -ENOMEM;
}
static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
@@ -301,21 +302,30 @@ static long hfi1_file_ioctl(struct file *fp, unsigned int cmd,
static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
{
struct hfi1_filedata *fd = kiocb->ki_filp->private_data;
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+ struct hfi1_user_sdma_pkt_q *pq;
struct hfi1_user_sdma_comp_q *cq = fd->cq;
int done = 0, reqs = 0;
unsigned long dim = from->nr_segs;
+ int idx;
- if (!cq || !pq)
+ idx = srcu_read_lock(&fd->pq_srcu);
+ pq = srcu_dereference(fd->pq, &fd->pq_srcu);
+ if (!cq || !pq) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
return -EIO;
+ }
- if (!iter_is_iovec(from) || !dim)
+ if (!iter_is_iovec(from) || !dim) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
return -EINVAL;
+ }
trace_hfi1_sdma_request(fd->dd, fd->uctxt->ctxt, fd->subctxt, dim);
- if (atomic_read(&pq->n_reqs) == pq->n_max_reqs)
+ if (atomic_read(&pq->n_reqs) == pq->n_max_reqs) {
+ srcu_read_unlock(&fd->pq_srcu, idx);
return -ENOSPC;
+ }
while (dim) {
int ret;
@@ -333,6 +343,7 @@ static ssize_t hfi1_write_iter(struct kiocb *kiocb, struct iov_iter *from)
reqs++;
}
+ srcu_read_unlock(&fd->pq_srcu, idx);
return reqs;
}
@@ -707,6 +718,7 @@ done:
if (atomic_dec_and_test(&dd->user_refcount))
complete(&dd->user_comp);
+ cleanup_srcu_struct(&fdata->pq_srcu);
kfree(fdata);
return 0;
}
diff --git a/drivers/infiniband/hw/hfi1/hfi.h b/drivers/infiniband/hw/hfi1/hfi.h
index 6365e8ffed9d..cae12f416ca0 100644
--- a/drivers/infiniband/hw/hfi1/hfi.h
+++ b/drivers/infiniband/hw/hfi1/hfi.h
@@ -1444,10 +1444,13 @@ struct mmu_rb_handler;
/* Private data for file operations */
struct hfi1_filedata {
+ struct srcu_struct pq_srcu;
struct hfi1_devdata *dd;
struct hfi1_ctxtdata *uctxt;
struct hfi1_user_sdma_comp_q *cq;
- struct hfi1_user_sdma_pkt_q *pq;
+ /* update side lock for SRCU */
+ spinlock_t pq_rcu_lock;
+ struct hfi1_user_sdma_pkt_q __rcu *pq;
u16 subctxt;
/* for cpu affinity; -1 if none */
int rec_cpu_num;
diff --git a/drivers/infiniband/hw/hfi1/user_exp_rcv.c b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
index f05742ac0949..4da03f823474 100644
--- a/drivers/infiniband/hw/hfi1/user_exp_rcv.c
+++ b/drivers/infiniband/hw/hfi1/user_exp_rcv.c
@@ -87,9 +87,6 @@ int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
{
int ret = 0;
- spin_lock_init(&fd->tid_lock);
- spin_lock_init(&fd->invalid_lock);
-
fd->entry_to_rb = kcalloc(uctxt->expected_count,
sizeof(struct rb_node *),
GFP_KERNEL);
@@ -142,10 +139,12 @@ void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
struct hfi1_ctxtdata *uctxt = fd->uctxt;
+ mutex_lock(&uctxt->exp_mutex);
if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
+ mutex_unlock(&uctxt->exp_mutex);
kfree(fd->invalid_tids);
fd->invalid_tids = NULL;
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index fd754a16475a..c2f0d9ba93de 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -179,7 +179,6 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
pq = kzalloc(sizeof(*pq), GFP_KERNEL);
if (!pq)
return -ENOMEM;
-
pq->dd = dd;
pq->ctxt = uctxt->ctxt;
pq->subctxt = fd->subctxt;
@@ -236,7 +235,7 @@ int hfi1_user_sdma_alloc_queues(struct hfi1_ctxtdata *uctxt,
goto pq_mmu_fail;
}
- fd->pq = pq;
+ rcu_assign_pointer(fd->pq, pq);
fd->cq = cq;
return 0;
@@ -264,8 +263,14 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
trace_hfi1_sdma_user_free_queues(uctxt->dd, uctxt->ctxt, fd->subctxt);
- pq = fd->pq;
+ spin_lock(&fd->pq_rcu_lock);
+ pq = srcu_dereference_check(fd->pq, &fd->pq_srcu,
+ lockdep_is_held(&fd->pq_rcu_lock));
if (pq) {
+ rcu_assign_pointer(fd->pq, NULL);
+ spin_unlock(&fd->pq_rcu_lock);
+ synchronize_srcu(&fd->pq_srcu);
+ /* at this point there can be no more new requests */
if (pq->handler)
hfi1_mmu_rb_unregister(pq->handler);
iowait_sdma_drain(&pq->busy);
@@ -277,7 +282,8 @@ int hfi1_user_sdma_free_queues(struct hfi1_filedata *fd,
kfree(pq->req_in_use);
kmem_cache_destroy(pq->txreq_cache);
kfree(pq);
- fd->pq = NULL;
+ } else {
+ spin_unlock(&fd->pq_rcu_lock);
}
if (fd->cq) {
vfree(fd->cq->comps);
@@ -321,7 +327,8 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
{
int ret = 0, i;
struct hfi1_ctxtdata *uctxt = fd->uctxt;
- struct hfi1_user_sdma_pkt_q *pq = fd->pq;
+ struct hfi1_user_sdma_pkt_q *pq =
+ srcu_dereference(fd->pq, &fd->pq_srcu);
struct hfi1_user_sdma_comp_q *cq = fd->cq;
struct hfi1_devdata *dd = pq->dd;
unsigned long idx = 0;
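
The hfi1 changes turn fd->pq into an SRCU-protected pointer: readers dereference it inside an SRCU read-side section, and teardown unpublishes the pointer and then calls synchronize_srcu() before freeing, so no reader can still hold the old queue. A much-simplified sketch of the unpublish-then-free ordering with C11 atomics (it deliberately omits the grace-period machinery that makes the kernel version safe against concurrent readers):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct pkt_q { int n_reqs; };

static _Atomic(struct pkt_q *) pq;

static int submit(void)
{
        struct pkt_q *q = atomic_load(&pq);   /* reader: snapshot pointer */

        if (!q)
                return -1;                    /* queue already torn down */
        q->n_reqs++;
        return 0;
}

static void teardown(void)
{
        /* retire: unpublish first, free only after no reader can see it */
        struct pkt_q *q = atomic_exchange(&pq, NULL);

        /* kernel version waits in synchronize_srcu() here */
        free(q);
}

int main(void)
{
        atomic_store(&pq, calloc(1, sizeof(struct pkt_q)));
        printf("submit: %d\n", submit());     /* 0 */
        teardown();
        printf("submit: %d\n", submit());     /* -1 */
        return 0;
}
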
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 089e201d7550..2f6323ad9c59 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -515,10 +515,11 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
opa_get_lid(packet->dlid, 9B));
if (!mcast)
goto drop;
+ rcu_read_lock();
list_for_each_entry_rcu(p, &mcast->qp_list, list) {
packet->qp = p->qp;
if (hfi1_do_pkey_check(packet))
- goto drop;
+ goto unlock_drop;
spin_lock_irqsave(&packet->qp->r_lock, flags);
packet_handler = qp_ok(packet);
if (likely(packet_handler))
@@ -527,6 +528,7 @@ static inline void hfi1_handle_packet(struct hfi1_packet *packet,
ibp->rvp.n_pkt_drops++;
spin_unlock_irqrestore(&packet->qp->r_lock, flags);
}
+ rcu_read_unlock();
/*
* Notify rvt_multicast_detach() if it is waiting for us
* to finish.
diff --git a/drivers/infiniband/hw/mlx5/devx.c b/drivers/infiniband/hw/mlx5/devx.c
index d7efc9f6daf0..46e1ab771f10 100644
--- a/drivers/infiniband/hw/mlx5/devx.c
+++ b/drivers/infiniband/hw/mlx5/devx.c
@@ -2319,14 +2319,12 @@ static int deliver_event(struct devx_event_subscription *event_sub,
if (ev_file->omit_data) {
spin_lock_irqsave(&ev_file->lock, flags);
- if (!list_empty(&event_sub->event_list)) {
+ if (!list_empty(&event_sub->event_list) ||
+ ev_file->is_destroyed) {
spin_unlock_irqrestore(&ev_file->lock, flags);
return 0;
}
- /* is_destroyed is ignored here because we don't have any memory
- * allocation to clean up for the omit_data case
- */
list_add_tail(&event_sub->event_list, &ev_file->event_list);
spin_unlock_irqrestore(&ev_file->lock, flags);
wake_up_interruptible(&ev_file->poll_wait);
@@ -2473,11 +2471,11 @@ static ssize_t devx_async_cmd_event_read(struct file *filp, char __user *buf,
return -ERESTARTSYS;
}
- if (list_empty(&ev_queue->event_list) &&
- ev_queue->is_destroyed)
- return -EIO;
-
spin_lock_irq(&ev_queue->lock);
+ if (ev_queue->is_destroyed) {
+ spin_unlock_irq(&ev_queue->lock);
+ return -EIO;
+ }
}
event = list_entry(ev_queue->event_list.next,
@@ -2551,10 +2549,6 @@ static ssize_t devx_async_event_read(struct file *filp, char __user *buf,
return -EOVERFLOW;
}
- if (ev_file->is_destroyed) {
- spin_unlock_irq(&ev_file->lock);
- return -EIO;
- }
while (list_empty(&ev_file->event_list)) {
spin_unlock_irq(&ev_file->lock);
@@ -2667,8 +2661,10 @@ static int devx_async_cmd_event_destroy_uobj(struct ib_uobject *uobj,
spin_lock_irq(&comp_ev_file->ev_queue.lock);
list_for_each_entry_safe(entry, tmp,
- &comp_ev_file->ev_queue.event_list, list)
+ &comp_ev_file->ev_queue.event_list, list) {
+ list_del(&entry->list);
kvfree(entry);
+ }
spin_unlock_irq(&comp_ev_file->ev_queue.lock);
return 0;
};
@@ -2680,11 +2676,29 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
container_of(uobj, struct devx_async_event_file,
uobj);
struct devx_event_subscription *event_sub, *event_sub_tmp;
- struct devx_async_event_data *entry, *tmp;
struct mlx5_ib_dev *dev = ev_file->dev;
spin_lock_irq(&ev_file->lock);
ev_file->is_destroyed = 1;
+
+ /* free the pending events allocation */
+ if (ev_file->omit_data) {
+ struct devx_event_subscription *event_sub, *tmp;
+
+ list_for_each_entry_safe(event_sub, tmp, &ev_file->event_list,
+ event_list)
+ list_del_init(&event_sub->event_list);
+
+ } else {
+ struct devx_async_event_data *entry, *tmp;
+
+ list_for_each_entry_safe(entry, tmp, &ev_file->event_list,
+ list) {
+ list_del(&entry->list);
+ kfree(entry);
+ }
+ }
+
spin_unlock_irq(&ev_file->lock);
wake_up_interruptible(&ev_file->poll_wait);
@@ -2699,15 +2713,6 @@ static int devx_async_event_destroy_uobj(struct ib_uobject *uobj,
}
mutex_unlock(&dev->devx_event_table.event_xa_lock);
- /* free the pending events allocation */
- if (!ev_file->omit_data) {
- spin_lock_irq(&ev_file->lock);
- list_for_each_entry_safe(entry, tmp,
- &ev_file->event_list, list)
- kfree(entry); /* read can't come any more */
- spin_unlock_irq(&ev_file->lock);
- }
-
put_device(&dev->ib_dev.dev);
return 0;
};
diff --git a/drivers/infiniband/hw/mlx5/main.c b/drivers/infiniband/hw/mlx5/main.c
index e874d688d040..e4bcfa81b70a 100644
--- a/drivers/infiniband/hw/mlx5/main.c
+++ b/drivers/infiniband/hw/mlx5/main.c
@@ -2283,8 +2283,8 @@ static int mlx5_ib_mmap_offset(struct mlx5_ib_dev *dev,
static u64 mlx5_entry_to_mmap_offset(struct mlx5_user_mmap_entry *entry)
{
- u16 cmd = entry->rdma_entry.start_pgoff >> 16;
- u16 index = entry->rdma_entry.start_pgoff & 0xFFFF;
+ u64 cmd = (entry->rdma_entry.start_pgoff >> 16) & 0xFFFF;
+ u64 index = entry->rdma_entry.start_pgoff & 0xFFFF;
return (((index >> 8) << 16) | (cmd << MLX5_IB_MMAP_CMD_SHIFT) |
(index & 0xFF)) << PAGE_SHIFT;
@@ -6545,7 +6545,7 @@ static int mlx5_ib_init_var_table(struct mlx5_ib_dev *dev)
doorbell_bar_offset);
bar_size = (1ULL << log_doorbell_bar_size) * 4096;
var_table->stride_size = 1ULL << log_doorbell_stride;
- var_table->num_var_hw_entries = bar_size / var_table->stride_size;
+ var_table->num_var_hw_entries = div64_u64(bar_size, var_table->stride_size);
mutex_init(&var_table->bitmap_lock);
var_table->bitmap = bitmap_zalloc(var_table->num_var_hw_entries,
GFP_KERNEL);
diff --git a/drivers/infiniband/hw/mlx5/mlx5_ib.h b/drivers/infiniband/hw/mlx5/mlx5_ib.h
index d9bffcc93587..bb78142bca5e 100644
--- a/drivers/infiniband/hw/mlx5/mlx5_ib.h
+++ b/drivers/infiniband/hw/mlx5/mlx5_ib.h
@@ -636,6 +636,7 @@ struct mlx5_ib_mr {
/* For ODP and implicit */
atomic_t num_deferred_work;
+ wait_queue_head_t q_deferred_work;
struct xarray implicit_children;
union {
struct rcu_head rcu;
diff --git a/drivers/infiniband/hw/mlx5/odp.c b/drivers/infiniband/hw/mlx5/odp.c
index 4216814ba871..bf50cd91f472 100644
--- a/drivers/infiniband/hw/mlx5/odp.c
+++ b/drivers/infiniband/hw/mlx5/odp.c
@@ -235,7 +235,8 @@ static void free_implicit_child_mr(struct mlx5_ib_mr *mr, bool need_imr_xlt)
mr->parent = NULL;
mlx5_mr_cache_free(mr->dev, mr);
ib_umem_odp_release(odp);
- atomic_dec(&imr->num_deferred_work);
+ if (atomic_dec_and_test(&imr->num_deferred_work))
+ wake_up(&imr->q_deferred_work);
}
static void free_implicit_child_mr_work(struct work_struct *work)
@@ -554,6 +555,7 @@ struct mlx5_ib_mr *mlx5_ib_alloc_implicit_mr(struct mlx5_ib_pd *pd,
imr->umem = &umem_odp->umem;
imr->is_odp_implicit = true;
atomic_set(&imr->num_deferred_work, 0);
+ init_waitqueue_head(&imr->q_deferred_work);
xa_init(&imr->implicit_children);
err = mlx5_ib_update_xlt(imr, 0,
@@ -611,10 +613,7 @@ void mlx5_ib_free_implicit_mr(struct mlx5_ib_mr *imr)
* under xa_lock while the child is in the xarray. Thus at this point
* it is only decreasing, and all work holding it is now on the wq.
*/
- if (atomic_read(&imr->num_deferred_work)) {
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&imr->num_deferred_work));
- }
+ wait_event(imr->q_deferred_work, !atomic_read(&imr->num_deferred_work));
/*
* Fence the imr before we destroy the children. This allows us to
@@ -645,10 +644,7 @@ void mlx5_ib_fence_odp_mr(struct mlx5_ib_mr *mr)
/* Wait for all running page-fault handlers to finish. */
synchronize_srcu(&mr->dev->odp_srcu);
- if (atomic_read(&mr->num_deferred_work)) {
- flush_workqueue(system_unbound_wq);
- WARN_ON(atomic_read(&mr->num_deferred_work));
- }
+ wait_event(mr->q_deferred_work, !atomic_read(&mr->num_deferred_work));
dma_fence_odp_mr(mr);
}
@@ -1720,7 +1716,8 @@ static void destroy_prefetch_work(struct prefetch_mr_work *work)
u32 i;
for (i = 0; i < work->num_sge; ++i)
- atomic_dec(&work->frags[i].mr->num_deferred_work);
+ if (atomic_dec_and_test(&work->frags[i].mr->num_deferred_work))
+ wake_up(&work->frags[i].mr->q_deferred_work);
kvfree(work);
}
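
The ODP rework replaces flush_workqueue() with a counted completion: each deferred work item ends with atomic_dec_and_test() and wakes the waiter only on the final decrement, while the destroy path sleeps in wait_event() until the counter reaches zero. A pthread analogue of the pattern:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int deferred = 3;              /* outstanding work items */

static void *worker(void *arg)
{
        (void)arg;
        pthread_mutex_lock(&lock);
        if (--deferred == 0)          /* analogue of atomic_dec_and_test() */
                pthread_cond_signal(&done);   /* analogue of wake_up() */
        pthread_mutex_unlock(&lock);
        return NULL;
}

int main(void)
{
        pthread_t t[3];

        for (int i = 0; i < 3; i++)
                pthread_create(&t[i], NULL, worker, NULL);

        pthread_mutex_lock(&lock);    /* analogue of wait_event() */
        while (deferred)
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < 3; i++)
                pthread_join(t[i], NULL);
        puts("all deferred work finished");
        return 0;
}
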
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index a4f8e7030787..957f3a52589b 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -3441,9 +3441,6 @@ static int __mlx5_ib_qp_set_counter(struct ib_qp *qp,
struct mlx5_ib_qp_base *base;
u32 set_id;
- if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id))
- return 0;
-
if (counter)
set_id = counter->id;
else
@@ -6576,6 +6573,7 @@ void mlx5_ib_drain_rq(struct ib_qp *qp)
*/
int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
{
+ struct mlx5_ib_dev *dev = to_mdev(qp->device);
struct mlx5_ib_qp *mqp = to_mqp(qp);
int err = 0;
@@ -6585,6 +6583,11 @@ int mlx5_ib_qp_set_counter(struct ib_qp *qp, struct rdma_counter *counter)
goto out;
}
+ if (!MLX5_CAP_GEN(dev->mdev, rts2rts_qp_counters_set_id)) {
+ err = -EOPNOTSUPP;
+ goto out;
+ }
+
if (mqp->state == IB_QPS_RTS) {
err = __mlx5_ib_qp_set_counter(qp, counter);
if (!err)
diff --git a/drivers/infiniband/hw/qib/qib_verbs.c b/drivers/infiniband/hw/qib/qib_verbs.c
index 33778d451b82..5ef93f8f17a1 100644
--- a/drivers/infiniband/hw/qib/qib_verbs.c
+++ b/drivers/infiniband/hw/qib/qib_verbs.c
@@ -329,8 +329,10 @@ void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
if (mcast == NULL)
goto drop;
this_cpu_inc(ibp->pmastats->n_multicast_rcv);
+ rcu_read_lock();
list_for_each_entry_rcu(p, &mcast->qp_list, list)
qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
+ rcu_read_unlock();
/*
* Notify rvt_multicast_detach() if it is waiting for us
* to finish.
diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c
index 3cdf75d0c7a4..7858d499db03 100644
--- a/drivers/infiniband/sw/rdmavt/qp.c
+++ b/drivers/infiniband/sw/rdmavt/qp.c
@@ -61,6 +61,8 @@
#define RVT_RWQ_COUNT_THRESHOLD 16
static void rvt_rc_timeout(struct timer_list *t);
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type);
/*
* Convert the AETH RNR timeout code into the number of microseconds.
@@ -452,40 +454,41 @@ no_qp_table:
}
/**
- * free_all_qps - check for QPs still in use
+ * rvt_free_qp_cb - callback function to reset a qp
+ * @qp: the qp to reset
+ * @v: a 64-bit value
+ *
+ * This function resets the qp and removes it from the
+ * qp hash table.
+ */
+static void rvt_free_qp_cb(struct rvt_qp *qp, u64 v)
+{
+ unsigned int *qp_inuse = (unsigned int *)v;
+ struct rvt_dev_info *rdi = ib_to_rvt(qp->ibqp.device);
+
+ /* Reset the qp and remove it from the qp hash list */
+ rvt_reset_qp(rdi, qp, qp->ibqp.qp_type);
+
+ /* Increment the qp_inuse count */
+ (*qp_inuse)++;
+}
+
+/**
+ * rvt_free_all_qps - check for QPs still in use
* @rdi: rvt device info structure
*
* There should not be any QPs still in use.
* Free memory for table.
+ * Return the number of QPs still in use.
*/
static unsigned rvt_free_all_qps(struct rvt_dev_info *rdi)
{
- unsigned long flags;
- struct rvt_qp *qp;
- unsigned n, qp_inuse = 0;
- spinlock_t *ql; /* work around too long line below */
-
- if (rdi->driver_f.free_all_qps)
- qp_inuse = rdi->driver_f.free_all_qps(rdi);
+ unsigned int qp_inuse = 0;
qp_inuse += rvt_mcast_tree_empty(rdi);
- if (!rdi->qp_dev)
- return qp_inuse;
-
- ql = &rdi->qp_dev->qpt_lock;
- spin_lock_irqsave(ql, flags);
- for (n = 0; n < rdi->qp_dev->qp_table_size; n++) {
- qp = rcu_dereference_protected(rdi->qp_dev->qp_table[n],
- lockdep_is_held(ql));
- RCU_INIT_POINTER(rdi->qp_dev->qp_table[n], NULL);
+ rvt_qp_iter(rdi, (u64)&qp_inuse, rvt_free_qp_cb);
- for (; qp; qp = rcu_dereference_protected(qp->next,
- lockdep_is_held(ql)))
- qp_inuse++;
- }
- spin_unlock_irqrestore(ql, flags);
- synchronize_rcu();
return qp_inuse;
}
@@ -902,14 +905,14 @@ static void rvt_init_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
}
/**
- * rvt_reset_qp - initialize the QP state to the reset state
+ * _rvt_reset_qp - initialize the QP state to the reset state
* @qp: the QP to reset
* @type: the QP type
*
* r_lock, s_hlock, and s_lock are required to be held by the caller
*/
-static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
- enum ib_qp_type type)
+static void _rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
__must_hold(&qp->s_lock)
__must_hold(&qp->s_hlock)
__must_hold(&qp->r_lock)
@@ -955,6 +958,27 @@ static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
lockdep_assert_held(&qp->s_lock);
}
+/**
+ * rvt_reset_qp - initialize the QP state to the reset state
+ * @rdi: the device info
+ * @qp: the QP to reset
+ * @type: the QP type
+ *
+ * This is the wrapper function to acquire the r_lock, s_hlock, and s_lock
+ * before calling _rvt_reset_qp().
+ */
+static void rvt_reset_qp(struct rvt_dev_info *rdi, struct rvt_qp *qp,
+ enum ib_qp_type type)
+{
+ spin_lock_irq(&qp->r_lock);
+ spin_lock(&qp->s_hlock);
+ spin_lock(&qp->s_lock);
+ _rvt_reset_qp(rdi, qp, type);
+ spin_unlock(&qp->s_lock);
+ spin_unlock(&qp->s_hlock);
+ spin_unlock_irq(&qp->r_lock);
+}
+
/** rvt_free_qpn - Free a qpn from the bit map
* @qpt: QP table
* @qpn: queue pair number to free
@@ -1546,7 +1570,7 @@ int rvt_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
switch (new_state) {
case IB_QPS_RESET:
if (qp->state != IB_QPS_RESET)
- rvt_reset_qp(rdi, qp, ibqp->qp_type);
+ _rvt_reset_qp(rdi, qp, ibqp->qp_type);
break;
case IB_QPS_RTR:
@@ -1695,13 +1719,7 @@ int rvt_destroy_qp(struct ib_qp *ibqp, struct ib_udata *udata)
struct rvt_qp *qp = ibqp_to_rvtqp(ibqp);
struct rvt_dev_info *rdi = ib_to_rvt(ibqp->device);
- spin_lock_irq(&qp->r_lock);
- spin_lock(&qp->s_hlock);
- spin_lock(&qp->s_lock);
rvt_reset_qp(rdi, qp, ibqp->qp_type);
- spin_unlock(&qp->s_lock);
- spin_unlock(&qp->s_hlock);
- spin_unlock_irq(&qp->r_lock);
wait_event(qp->wait, !atomic_read(&qp->refcount));
/* qpn is now available for use again */
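The rdmavt refactor splits the reset path into a bare _rvt_reset_qp(), which asserts that r_lock, s_hlock and s_lock are already held, and a rvt_reset_qp() wrapper that takes the three locks in one fixed order. Centralizing the acquisition order in the wrapper keeps call sites like rvt_destroy_qp() and rvt_free_qp_cb() from each re-stating it. A hedged sketch of the locked-wrapper idiom:

#include <rdma/rdmavt_qp.h>

static void _reset_qp_locked(struct rvt_qp *qp)
        __must_hold(&qp->s_lock)
{
        /* the real reset work; all three locks are held by the caller */
}

static void reset_qp(struct rvt_qp *qp)
{
        spin_lock_irq(&qp->r_lock);     /* fixed order: r_lock, */
        spin_lock(&qp->s_hlock);        /* then s_hlock, */
        spin_lock(&qp->s_lock);         /* then s_lock */
        _reset_qp_locked(qp);
        spin_unlock(&qp->s_lock);
        spin_unlock(&qp->s_hlock);
        spin_unlock_irq(&qp->r_lock);
}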
diff --git a/drivers/infiniband/sw/rxe/rxe_comp.c b/drivers/infiniband/sw/rxe/rxe_comp.c
index 116cafc9afcf..4bc88708b355 100644
--- a/drivers/infiniband/sw/rxe/rxe_comp.c
+++ b/drivers/infiniband/sw/rxe/rxe_comp.c
@@ -329,7 +329,7 @@ static inline enum comp_state check_ack(struct rxe_qp *qp,
qp->comp.psn = pkt->psn;
if (qp->req.wait_psn) {
qp->req.wait_psn = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
return COMPST_ERROR_RETRY;
@@ -463,7 +463,7 @@ static void do_complete(struct rxe_qp *qp, struct rxe_send_wqe *wqe)
*/
if (qp->req.wait_fence) {
qp->req.wait_fence = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
@@ -479,7 +479,7 @@ static inline enum comp_state complete_ack(struct rxe_qp *qp,
if (qp->req.need_rd_atomic) {
qp->comp.timeout_retry = 0;
qp->req.need_rd_atomic = 0;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
}
@@ -725,7 +725,7 @@ int rxe_completer(void *arg)
RXE_CNT_COMP_RETRY);
qp->req.need_retry = 1;
qp->comp.started_retry = 1;
- rxe_run_task(&qp->req.task, 1);
+ rxe_run_task(&qp->req.task, 0);
}
if (pkt) {
diff --git a/drivers/infiniband/sw/siw/siw_cm.c b/drivers/infiniband/sw/siw/siw_cm.c
index 0c3f0588346e..c5651a96b196 100644
--- a/drivers/infiniband/sw/siw/siw_cm.c
+++ b/drivers/infiniband/sw/siw/siw_cm.c
@@ -1225,10 +1225,9 @@ static void siw_cm_llp_data_ready(struct sock *sk)
read_lock(&sk->sk_callback_lock);
cep = sk_to_cep(sk);
- if (!cep) {
- WARN_ON(1);
+ if (!cep)
goto out;
- }
+
siw_dbg_cep(cep, "state: %d\n", cep->state);
switch (cep->state) {
diff --git a/drivers/infiniband/sw/siw/siw_main.c b/drivers/infiniband/sw/siw/siw_main.c
index 96ed349c0939..5cd40fb9e20c 100644
--- a/drivers/infiniband/sw/siw/siw_main.c
+++ b/drivers/infiniband/sw/siw/siw_main.c
@@ -388,6 +388,9 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
{ .max_segment_size = SZ_2G };
base_dev->num_comp_vectors = num_possible_cpus();
+ xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
+ xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
+
ib_set_device_ops(base_dev, &siw_device_ops);
rv = ib_device_set_netdev(base_dev, netdev, 1);
if (rv)
@@ -415,9 +418,6 @@ static struct siw_device *siw_device_create(struct net_device *netdev)
sdev->attrs.max_srq_wr = SIW_MAX_SRQ_WR;
sdev->attrs.max_srq_sge = SIW_MAX_SGE;
- xa_init_flags(&sdev->qp_xa, XA_FLAGS_ALLOC1);
- xa_init_flags(&sdev->mem_xa, XA_FLAGS_ALLOC1);
-
INIT_LIST_HEAD(&sdev->cep_list);
INIT_LIST_HEAD(&sdev->qp_list);
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index b273e421e910..a1a035270cab 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -2575,6 +2575,17 @@ isert_wait4logout(struct isert_conn *isert_conn)
}
}
+static void
+isert_wait4cmds(struct iscsi_conn *conn)
+{
+ isert_info("iscsi_conn %p\n", conn);
+
+ if (conn->sess) {
+ target_sess_cmd_list_set_waiting(conn->sess->se_sess);
+ target_wait_for_sess_cmds(conn->sess->se_sess);
+ }
+}
+
/**
* isert_put_unsol_pending_cmds() - Drop commands waiting for
* unsolicited dataout
@@ -2622,6 +2633,7 @@ static void isert_wait_conn(struct iscsi_conn *conn)
ib_drain_qp(isert_conn->qp);
isert_put_unsol_pending_cmds(conn);
+ isert_wait4cmds(conn);
isert_wait4logout(isert_conn);
queue_work(isert_release_wq, &isert_conn->release_work);
diff --git a/drivers/input/keyboard/goldfish_events.c b/drivers/input/keyboard/goldfish_events.c
index bc8c85a52a10..57d435fc5c73 100644
--- a/drivers/input/keyboard/goldfish_events.c
+++ b/drivers/input/keyboard/goldfish_events.c
@@ -30,7 +30,7 @@ struct event_dev {
struct input_dev *input;
int irq;
void __iomem *addr;
- char name[0];
+ char name[];
};
static irqreturn_t events_interrupt(int irq, void *dev_id)
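This and the following input-driver hunks convert zero-length trailing arrays ("char name[0];") to C99 flexible array members ("char name[];"): sizeof still excludes the tail, but the compiler and fortify checks can now tell a flexible tail from an off-by-one array access. Allocation conventionally pairs with struct_size(). A minimal sketch (the struct is a hypothetical reduction):

#include <linux/overflow.h>     /* struct_size() */
#include <linux/slab.h>

struct event_dev_like {
        int irq;
        char name[];            /* flexible array member, not name[0] */
};

static struct event_dev_like *alloc_event_dev(size_t namelen)
{
        struct event_dev_like *e;

        /* header plus namelen bytes of tail, with overflow checking */
        e = kzalloc(struct_size(e, name, namelen), GFP_KERNEL);
        return e;
}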
diff --git a/drivers/input/keyboard/gpio_keys.c b/drivers/input/keyboard/gpio_keys.c
index 1f56d53454b2..53c9ff338dea 100644
--- a/drivers/input/keyboard/gpio_keys.c
+++ b/drivers/input/keyboard/gpio_keys.c
@@ -55,7 +55,7 @@ struct gpio_keys_drvdata {
struct input_dev *input;
struct mutex disable_lock;
unsigned short *keymap;
- struct gpio_button_data data[0];
+ struct gpio_button_data data[];
};
/*
diff --git a/drivers/input/keyboard/gpio_keys_polled.c b/drivers/input/keyboard/gpio_keys_polled.c
index 6eb0a2f3f9de..c3937d2fc744 100644
--- a/drivers/input/keyboard/gpio_keys_polled.c
+++ b/drivers/input/keyboard/gpio_keys_polled.c
@@ -38,7 +38,7 @@ struct gpio_keys_polled_dev {
const struct gpio_keys_platform_data *pdata;
unsigned long rel_axis_seen[BITS_TO_LONGS(REL_CNT)];
unsigned long abs_axis_seen[BITS_TO_LONGS(ABS_CNT)];
- struct gpio_keys_button_data data[0];
+ struct gpio_keys_button_data data[];
};
static void gpio_keys_button_event(struct input_dev *input,
diff --git a/drivers/input/keyboard/tca6416-keypad.c b/drivers/input/keyboard/tca6416-keypad.c
index 2a14769de637..21758767ccf0 100644
--- a/drivers/input/keyboard/tca6416-keypad.c
+++ b/drivers/input/keyboard/tca6416-keypad.c
@@ -33,7 +33,7 @@ MODULE_DEVICE_TABLE(i2c, tca6416_id);
struct tca6416_drv_data {
struct input_dev *input;
- struct tca6416_button data[0];
+ struct tca6416_button data[];
};
struct tca6416_keypad_chip {
@@ -48,7 +48,7 @@ struct tca6416_keypad_chip {
int irqnum;
u16 pinmask;
bool use_polling;
- struct tca6416_button buttons[0];
+ struct tca6416_button buttons[];
};
static int tca6416_write_reg(struct tca6416_keypad_chip *chip, int reg, u16 val)
diff --git a/drivers/input/mouse/cyapa_gen5.c b/drivers/input/mouse/cyapa_gen5.c
index 14239fbd72cf..7f012bfa2658 100644
--- a/drivers/input/mouse/cyapa_gen5.c
+++ b/drivers/input/mouse/cyapa_gen5.c
@@ -250,7 +250,7 @@ struct cyapa_tsg_bin_image_data_record {
struct cyapa_tsg_bin_image {
struct cyapa_tsg_bin_image_head image_head;
- struct cyapa_tsg_bin_image_data_record records[0];
+ struct cyapa_tsg_bin_image_data_record records[];
} __packed;
struct pip_bl_packet_start {
@@ -271,7 +271,7 @@ struct pip_bl_cmd_head {
u8 report_id; /* Bootloader output report id, must be 40h */
u8 rsvd; /* Reserved, must be 0 */
struct pip_bl_packet_start packet_start;
- u8 data[0]; /* Command data variable based on commands */
+ u8 data[]; /* Command data variable based on commands */
} __packed;
/* Initiate bootload command data structure. */
@@ -300,7 +300,7 @@ struct tsg_bl_metadata_row_params {
struct tsg_bl_flash_row_head {
u8 flash_array_id;
__le16 flash_row_id;
- u8 flash_data[0];
+ u8 flash_data[];
} __packed;
struct pip_app_cmd_head {
@@ -314,7 +314,7 @@ struct pip_app_cmd_head {
* Bit 6-0: command code.
*/
u8 cmd_code;
- u8 parameter_data[0]; /* Parameter data variable based on cmd_code */
+ u8 parameter_data[]; /* Parameter data variable based on cmd_code */
} __packed;
/* Application get/set parameter command data structure */
diff --git a/drivers/input/mouse/psmouse-smbus.c b/drivers/input/mouse/psmouse-smbus.c
index 027efdd2b2ad..a472489ccbad 100644
--- a/drivers/input/mouse/psmouse-smbus.c
+++ b/drivers/input/mouse/psmouse-smbus.c
@@ -190,6 +190,7 @@ static int psmouse_smbus_create_companion(struct device *dev, void *data)
struct psmouse_smbus_dev *smbdev = data;
unsigned short addr_list[] = { smbdev->board.addr, I2C_CLIENT_END };
struct i2c_adapter *adapter;
+ struct i2c_client *client;
adapter = i2c_verify_adapter(dev);
if (!adapter)
@@ -198,12 +199,13 @@ static int psmouse_smbus_create_companion(struct device *dev, void *data)
if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_HOST_NOTIFY))
return 0;
- smbdev->client = i2c_new_probed_device(adapter, &smbdev->board,
- addr_list, NULL);
- if (!smbdev->client)
+ client = i2c_new_scanned_device(adapter, &smbdev->board,
+ addr_list, NULL);
+ if (IS_ERR(client))
return 0;
/* We have our(?) device, stop iterating i2c bus. */
+ smbdev->client = client;
return 1;
}
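i2c_new_probed_device() reported failure with NULL, while its replacement i2c_new_scanned_device() returns an ERR_PTR()-encoded errno, so the test flips from !ptr to IS_ERR(ptr); the patch also assigns smbdev->client only after the scan succeeds, so no error value ever leaks into the saved pointer. A small sketch of the convention, with hypothetical wrapper naming:

#include <linux/err.h>
#include <linux/i2c.h>

static int attach_companion(struct i2c_adapter *adapter,
                            struct i2c_board_info *board,
                            const unsigned short *addr_list,
                            struct i2c_client **out)
{
        struct i2c_client *client;

        client = i2c_new_scanned_device(adapter, board, addr_list, NULL);
        if (IS_ERR(client))
                return PTR_ERR(client); /* errno, not NULL, on failure */

        *out = client;                  /* publish only on success */
        return 0;
}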
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 1ae6f8bba9ae..2c666fb34625 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -146,7 +146,6 @@ static const char * const topbuttonpad_pnp_ids[] = {
"LEN0042", /* Yoga */
"LEN0045",
"LEN0047",
- "LEN0049",
"LEN2000", /* S540 */
"LEN2001", /* Edge E431 */
"LEN2002", /* Edge E531 */
@@ -166,9 +165,11 @@ static const char * const smbus_pnp_ids[] = {
/* all of the topbuttonpad_pnp_ids are valid, we just add some extras */
"LEN0048", /* X1 Carbon 3 */
"LEN0046", /* X250 */
+ "LEN0049", /* Yoga 11e */
"LEN004a", /* W541 */
"LEN005b", /* P50 */
"LEN005e", /* T560 */
+ "LEN006c", /* T470s */
"LEN0071", /* T480 */
"LEN0072", /* X1 Carbon Gen 5 (2017) - Elan/ALPS trackpoint */
"LEN0073", /* X1 Carbon G5 (Elantech) */
@@ -179,6 +180,7 @@ static const char * const smbus_pnp_ids[] = {
"LEN0097", /* X280 -> ALPS trackpoint */
"LEN009b", /* T580 */
"LEN200f", /* T450s */
+ "LEN2044", /* L470 */
"LEN2054", /* E480 */
"LEN2055", /* E580 */
"SYN3052", /* HP EliteBook 840 G4 */
diff --git a/drivers/input/touchscreen/ili210x.c b/drivers/input/touchscreen/ili210x.c
index 4a17096e83e1..199cf3daec10 100644
--- a/drivers/input/touchscreen/ili210x.c
+++ b/drivers/input/touchscreen/ili210x.c
@@ -167,6 +167,36 @@ static const struct ili2xxx_chip ili211x_chip = {
.resolution = 2048,
};
+static bool ili212x_touchdata_to_coords(const u8 *touchdata,
+ unsigned int finger,
+ unsigned int *x, unsigned int *y)
+{
+ u16 val;
+
+ val = get_unaligned_be16(touchdata + 3 + (finger * 5) + 0);
+ if (!(val & BIT(15))) /* Touch indication */
+ return false;
+
+ *x = val & 0x3fff;
+ *y = get_unaligned_be16(touchdata + 3 + (finger * 5) + 2);
+
+ return true;
+}
+
+static bool ili212x_check_continue_polling(const u8 *data, bool touch)
+{
+ return touch;
+}
+
+static const struct ili2xxx_chip ili212x_chip = {
+ .read_reg = ili210x_read_reg,
+ .get_touch_data = ili210x_read_touch_data,
+ .parse_touch_data = ili212x_touchdata_to_coords,
+ .continue_polling = ili212x_check_continue_polling,
+ .max_touches = 10,
+ .has_calibrate_reg = true,
+};
+
static int ili251x_read_reg(struct i2c_client *client,
u8 reg, void *buf, size_t len)
{
@@ -321,7 +351,7 @@ static umode_t ili210x_calibrate_visible(struct kobject *kobj,
struct i2c_client *client = to_i2c_client(dev);
struct ili210x *priv = i2c_get_clientdata(client);
- return priv->chip->has_calibrate_reg;
+ return priv->chip->has_calibrate_reg ? attr->mode : 0;
}
static const struct attribute_group ili210x_attr_group = {
@@ -447,6 +477,7 @@ static int ili210x_i2c_probe(struct i2c_client *client,
static const struct i2c_device_id ili210x_i2c_id[] = {
{ "ili210x", (long)&ili210x_chip },
{ "ili2117", (long)&ili211x_chip },
+ { "ili2120", (long)&ili212x_chip },
{ "ili251x", (long)&ili251x_chip },
{ }
};
@@ -455,6 +486,7 @@ MODULE_DEVICE_TABLE(i2c, ili210x_i2c_id);
static const struct of_device_id ili210x_dt_ids[] = {
{ .compatible = "ilitek,ili210x", .data = &ili210x_chip },
{ .compatible = "ilitek,ili2117", .data = &ili211x_chip },
+ { .compatible = "ilitek,ili2120", .data = &ili212x_chip },
{ .compatible = "ilitek,ili251x", .data = &ili251x_chip },
{ }
};
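A sysfs .is_visible callback returns a umode_t, not a boolean: returning the raw has_calibrate_reg flag yielded mode 0001 (execute-only) instead of either hiding the attribute or keeping its declared permissions. The fix returns attr->mode when the feature exists and 0 otherwise. A sketch with a hypothetical priv type:

#include <linux/device.h>
#include <linux/sysfs.h>

struct priv_like { bool has_calibrate_reg; };

static umode_t calibrate_visible(struct kobject *kobj,
                                 struct attribute *attr, int index)
{
        struct device *dev = kobj_to_dev(kobj);
        struct priv_like *priv = dev_get_drvdata(dev);

        /* keep the declared mode, or 0 to hide the attribute */
        return priv->has_calibrate_reg ? attr->mode : 0;
}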
diff --git a/drivers/interconnect/core.c b/drivers/interconnect/core.c
index f277e467156f..2c6515e3ecf1 100644
--- a/drivers/interconnect/core.c
+++ b/drivers/interconnect/core.c
@@ -445,6 +445,11 @@ struct icc_path *of_icc_get(struct device *dev, const char *name)
path->name = kasprintf(GFP_KERNEL, "%s-%s",
src_node->name, dst_node->name);
+ if (!path->name) {
+ kfree(path);
+ return ERR_PTR(-ENOMEM);
+ }
+
return path;
}
EXPORT_SYMBOL_GPL(of_icc_get);
@@ -579,6 +584,10 @@ struct icc_path *icc_get(struct device *dev, const int src_id, const int dst_id)
}
path->name = kasprintf(GFP_KERNEL, "%s-%s", src->name, dst->name);
+ if (!path->name) {
+ kfree(path);
+ path = ERR_PTR(-ENOMEM);
+ }
out:
mutex_unlock(&icc_lock);
return path;
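kasprintf() allocates, so its result needs the same NULL check as any allocation; both interconnect paths now free the partially constructed object and report -ENOMEM through ERR_PTR() instead of handing back a path with a NULL name. A reduced sketch (icc_path_like is hypothetical):

#include <linux/err.h>
#include <linux/slab.h>

struct icc_path_like { char *name; };

static struct icc_path_like *name_path(struct icc_path_like *path,
                                       const char *src, const char *dst)
{
        path->name = kasprintf(GFP_KERNEL, "%s-%s", src, dst);
        if (!path->name) {
                kfree(path);            /* undo the earlier allocation */
                return ERR_PTR(-ENOMEM);
        }
        return path;
}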
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 2104fb8afc06..9f33fdb3bb05 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -14,8 +14,8 @@ obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o
obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o amd_iommu_quirks.o
obj-$(CONFIG_AMD_IOMMU_DEBUGFS) += amd_iommu_debugfs.o
obj-$(CONFIG_AMD_IOMMU_V2) += amd_iommu_v2.o
-obj-$(CONFIG_ARM_SMMU) += arm-smmu-mod.o
-arm-smmu-mod-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
+obj-$(CONFIG_ARM_SMMU) += arm_smmu.o
+arm_smmu-objs += arm-smmu.o arm-smmu-impl.o arm-smmu-qcom.o
obj-$(CONFIG_ARM_SMMU_V3) += arm-smmu-v3.o
obj-$(CONFIG_DMAR_TABLE) += dmar.o
obj-$(CONFIG_INTEL_IOMMU) += intel-iommu.o intel-pasid.o
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index 2759a8d57b7f..6be3853a5d97 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2523,6 +2523,7 @@ static int __init early_amd_iommu_init(void)
struct acpi_table_header *ivrs_base;
acpi_status status;
int i, remap_cache_sz, ret = 0;
+ u32 pci_id;
if (!amd_iommu_detected)
return -ENODEV;
@@ -2610,6 +2611,16 @@ static int __init early_amd_iommu_init(void)
if (ret)
goto out;
+ /* Disable IOMMU if there's Stoney Ridge graphics */
+ for (i = 0; i < 32; i++) {
+ pci_id = read_pci_config(0, i, 0, 0);
+ if ((pci_id & 0xffff) == 0x1002 && (pci_id >> 16) == 0x98e4) {
+ pr_info("Disable IOMMU on Stoney Ridge\n");
+ amd_iommu_disabled = true;
+ break;
+ }
+ }
+
/* Disable any previously enabled IOMMUs */
if (!is_kdump_kernel() || amd_iommu_disabled)
disable_iommus();
@@ -2718,7 +2729,7 @@ static int __init state_next(void)
ret = early_amd_iommu_init();
init_state = ret ? IOMMU_INIT_ERROR : IOMMU_ACPI_FINISHED;
if (init_state == IOMMU_ACPI_FINISHED && amd_iommu_disabled) {
- pr_info("AMD IOMMU disabled on kernel command-line\n");
+ pr_info("AMD IOMMU disabled\n");
init_state = IOMMU_CMDLINE_DISABLED;
ret = -EINVAL;
}
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 9dc37672bf89..6fa6de2b6ad5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -762,6 +762,11 @@ static int iommu_dummy(struct device *dev)
return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
}
+static bool attach_deferred(struct device *dev)
+{
+ return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+}
+
/**
* is_downstream_to_pci_bridge - test if a device belongs to the PCI
* sub-hierarchy of a candidate PCI-PCI bridge
@@ -2510,8 +2515,7 @@ struct dmar_domain *find_domain(struct device *dev)
{
struct device_domain_info *info;
- if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO ||
- dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO))
+ if (unlikely(attach_deferred(dev) || iommu_dummy(dev)))
return NULL;
if (dev_is_pci(dev))
@@ -2525,18 +2529,14 @@ struct dmar_domain *find_domain(struct device *dev)
return NULL;
}
-static struct dmar_domain *deferred_attach_domain(struct device *dev)
+static void do_deferred_attach(struct device *dev)
{
- if (unlikely(dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO)) {
- struct iommu_domain *domain;
-
- dev->archdata.iommu = NULL;
- domain = iommu_get_domain_for_dev(dev);
- if (domain)
- intel_iommu_attach_device(domain, dev);
- }
+ struct iommu_domain *domain;
- return find_domain(dev);
+ dev->archdata.iommu = NULL;
+ domain = iommu_get_domain_for_dev(dev);
+ if (domain)
+ intel_iommu_attach_device(domain, dev);
}
static inline struct device_domain_info *
@@ -2916,7 +2916,7 @@ static int identity_mapping(struct device *dev)
struct device_domain_info *info;
info = dev->archdata.iommu;
- if (info && info != DUMMY_DEVICE_DOMAIN_INFO && info != DEFER_DEVICE_DOMAIN_INFO)
+ if (info)
return (info->domain == si_domain);
return 0;
@@ -3587,6 +3587,9 @@ static bool iommu_need_mapping(struct device *dev)
if (iommu_dummy(dev))
return false;
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
+
ret = identity_mapping(dev);
if (ret) {
u64 dma_mask = *dev->dma_mask;
@@ -3635,7 +3638,7 @@ static dma_addr_t __intel_map_single(struct device *dev, phys_addr_t paddr,
BUG_ON(dir == DMA_NONE);
- domain = deferred_attach_domain(dev);
+ domain = find_domain(dev);
if (!domain)
return DMA_MAPPING_ERROR;
@@ -3855,7 +3858,7 @@ static int intel_map_sg(struct device *dev, struct scatterlist *sglist, int nele
if (!iommu_need_mapping(dev))
return dma_direct_map_sg(dev, sglist, nelems, dir, attrs);
- domain = deferred_attach_domain(dev);
+ domain = find_domain(dev);
if (!domain)
return 0;
@@ -3950,7 +3953,11 @@ bounce_map_single(struct device *dev, phys_addr_t paddr, size_t size,
int prot = 0;
int ret;
- domain = deferred_attach_domain(dev);
+ if (unlikely(attach_deferred(dev)))
+ do_deferred_attach(dev);
+
+ domain = find_domain(dev);
+
if (WARN_ON(dir == DMA_NONE || !domain))
return DMA_MAPPING_ERROR;
@@ -6133,7 +6140,7 @@ intel_iommu_aux_get_pasid(struct iommu_domain *domain, struct device *dev)
static bool intel_iommu_is_attach_deferred(struct iommu_domain *domain,
struct device *dev)
{
- return dev->archdata.iommu == DEFER_DEVICE_DOMAIN_INFO;
+ return attach_deferred(dev);
}
static int
diff --git a/drivers/iommu/qcom_iommu.c b/drivers/iommu/qcom_iommu.c
index 39759db4f003..4328da0b0a9f 100644
--- a/drivers/iommu/qcom_iommu.c
+++ b/drivers/iommu/qcom_iommu.c
@@ -344,21 +344,19 @@ static void qcom_iommu_domain_free(struct iommu_domain *domain)
{
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
- if (WARN_ON(qcom_domain->iommu)) /* forgot to detach? */
- return;
-
iommu_put_dma_cookie(domain);
- /* NOTE: unmap can be called after client device is powered off,
- * for example, with GPUs or anything involving dma-buf. So we
- * cannot rely on the device_link. Make sure the IOMMU is on to
- * avoid unclocked accesses in the TLB inv path:
- */
- pm_runtime_get_sync(qcom_domain->iommu->dev);
-
- free_io_pgtable_ops(qcom_domain->pgtbl_ops);
-
- pm_runtime_put_sync(qcom_domain->iommu->dev);
+ if (qcom_domain->iommu) {
+ /*
+ * NOTE: unmap can be called after client device is powered
+ * off, for example, with GPUs or anything involving dma-buf.
+ * So we cannot rely on the device_link. Make sure the IOMMU
+ * is on to avoid unclocked accesses in the TLB inv path:
+ */
+ pm_runtime_get_sync(qcom_domain->iommu->dev);
+ free_io_pgtable_ops(qcom_domain->pgtbl_ops);
+ pm_runtime_put_sync(qcom_domain->iommu->dev);
+ }
kfree(qcom_domain);
}
@@ -404,7 +402,7 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
struct qcom_iommu_domain *qcom_domain = to_qcom_iommu_domain(domain);
unsigned i;
- if (!qcom_domain->iommu)
+ if (WARN_ON(!qcom_domain->iommu))
return;
pm_runtime_get_sync(qcom_iommu->dev);
@@ -417,8 +415,6 @@ static void qcom_iommu_detach_dev(struct iommu_domain *domain, struct device *de
ctx->domain = NULL;
}
pm_runtime_put_sync(qcom_iommu->dev);
-
- qcom_domain->iommu = NULL;
}
static int qcom_iommu_map(struct iommu_domain *domain, unsigned long iova,
diff --git a/drivers/macintosh/therm_windtunnel.c b/drivers/macintosh/therm_windtunnel.c
index 8c744578122a..a0d87ed9da69 100644
--- a/drivers/macintosh/therm_windtunnel.c
+++ b/drivers/macintosh/therm_windtunnel.c
@@ -300,9 +300,11 @@ static int control_loop(void *dummy)
/* i2c probing and setup */
/************************************************************************/
-static int
-do_attach( struct i2c_adapter *adapter )
+static void do_attach(struct i2c_adapter *adapter)
{
+ struct i2c_board_info info = { };
+ struct device_node *np;
+
/* scan 0x48-0x4f (DS1775) and 0x2c-0x2f (ADM1030) */
static const unsigned short scan_ds1775[] = {
0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f,
@@ -313,25 +315,24 @@ do_attach( struct i2c_adapter *adapter )
I2C_CLIENT_END
};
- if( strncmp(adapter->name, "uni-n", 5) )
- return 0;
-
- if( !x.running ) {
- struct i2c_board_info info;
+ if (x.running || strncmp(adapter->name, "uni-n", 5))
+ return;
- memset(&info, 0, sizeof(struct i2c_board_info));
- strlcpy(info.type, "therm_ds1775", I2C_NAME_SIZE);
+ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,ds1775");
+ if (np) {
+ of_node_put(np);
+ } else {
+ strlcpy(info.type, "MAC,ds1775", I2C_NAME_SIZE);
i2c_new_probed_device(adapter, &info, scan_ds1775, NULL);
+ }
- strlcpy(info.type, "therm_adm1030", I2C_NAME_SIZE);
+ np = of_find_compatible_node(adapter->dev.of_node, NULL, "MAC,adm1030");
+ if (np) {
+ of_node_put(np);
+ } else {
+ strlcpy(info.type, "MAC,adm1030", I2C_NAME_SIZE);
i2c_new_probed_device(adapter, &info, scan_adm1030, NULL);
-
- if( x.thermostat && x.fan ) {
- x.running = 1;
- x.poll_task = kthread_run(control_loop, NULL, "g4fand");
- }
}
- return 0;
}
static int
@@ -404,8 +405,8 @@ out:
enum chip { ds1775, adm1030 };
static const struct i2c_device_id therm_windtunnel_id[] = {
- { "therm_ds1775", ds1775 },
- { "therm_adm1030", adm1030 },
+ { "MAC,ds1775", ds1775 },
+ { "MAC,adm1030", adm1030 },
{ }
};
MODULE_DEVICE_TABLE(i2c, therm_windtunnel_id);
@@ -414,6 +415,7 @@ static int
do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
{
struct i2c_adapter *adapter = cl->adapter;
+ int ret = 0;
if( !i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA
| I2C_FUNC_SMBUS_WRITE_BYTE) )
@@ -421,11 +423,19 @@ do_probe(struct i2c_client *cl, const struct i2c_device_id *id)
switch (id->driver_data) {
case adm1030:
- return attach_fan( cl );
+ ret = attach_fan(cl);
+ break;
case ds1775:
- return attach_thermostat(cl);
+ ret = attach_thermostat(cl);
+ break;
}
- return 0;
+
+ if (!x.running && x.thermostat && x.fan) {
+ x.running = 1;
+ x.poll_task = kthread_run(control_loop, NULL, "g4fand");
+ }
+
+ return ret;
}
static struct i2c_driver g4fan_driver = {
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index 6730820780b0..0e3ff9745ac7 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -417,8 +417,6 @@ err:
/* Journalling */
-#define nr_to_fifo_front(p, front_p, mask) (((p) - (front_p)) & (mask))
-
static void btree_flush_write(struct cache_set *c)
{
struct btree *b, *t, *btree_nodes[BTREE_FLUSH_NR];
@@ -510,9 +508,8 @@ static void btree_flush_write(struct cache_set *c)
* journal entry can be reclaimed). These selected nodes
* will be ignored and skipped in the following for-loop.
*/
- if (nr_to_fifo_front(btree_current_write(b)->journal,
- fifo_front_p,
- mask) != 0) {
+ if (((btree_current_write(b)->journal - fifo_front_p) &
+ mask) != 0) {
mutex_unlock(&b->write_lock);
continue;
}
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 2749daf09724..0c3c5419c52b 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -1917,23 +1917,6 @@ static int run_cache_set(struct cache_set *c)
if (bch_btree_check(c))
goto err;
- /*
- * bch_btree_check() may occupy too much system memory which
- * has negative effects to user space application (e.g. data
- * base) performance. Shrink the mca cache memory proactively
- * here to avoid competing memory with user space workloads..
- */
- if (!c->shrinker_disabled) {
- struct shrink_control sc;
-
- sc.gfp_mask = GFP_KERNEL;
- sc.nr_to_scan = c->btree_cache_used * c->btree_pages;
- /* first run to clear b->accessed tag */
- c->shrink.scan_objects(&c->shrink, &sc);
- /* second run to reap non-accessed nodes */
- c->shrink.scan_objects(&c->shrink, &sc);
- }
-
bch_journal_mark(c, &journal);
bch_initial_gc_finish(c);
pr_debug("btree_check() done");
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index c82578af56a5..2ea0360108e1 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -20,8 +20,13 @@
struct dm_bio_details {
struct gendisk *bi_disk;
u8 bi_partno;
+ int __bi_remaining;
unsigned long bi_flags;
struct bvec_iter bi_iter;
+ bio_end_io_t *bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ struct bio_integrity_payload *bi_integrity;
+#endif
};
static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
@@ -30,6 +35,11 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
bd->bi_partno = bio->bi_partno;
bd->bi_flags = bio->bi_flags;
bd->bi_iter = bio->bi_iter;
+ bd->__bi_remaining = atomic_read(&bio->__bi_remaining);
+ bd->bi_end_io = bio->bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ bd->bi_integrity = bio_integrity(bio);
+#endif
}
static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
@@ -38,6 +48,11 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
bio->bi_partno = bd->bi_partno;
bio->bi_flags = bd->bi_flags;
bio->bi_iter = bd->bi_iter;
+ atomic_set(&bio->__bi_remaining, bd->__bi_remaining);
+ bio->bi_end_io = bd->bi_end_io;
+#if defined(CONFIG_BLK_DEV_INTEGRITY)
+ bio->bi_integrity = bd->bi_integrity;
+#endif
}
#endif
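dm_bio_record() now also snapshots __bi_remaining, bi_end_io and, under CONFIG_BLK_DEV_INTEGRITY, bi_integrity, so dm_bio_restore() fully reverses everything a target patched into the bio before resubmitting it. A hedged usage sketch (the surrounding function is hypothetical):

#include "dm-bio-record.h"

static void redirect_then_restore(struct bio *bio, struct block_device *bdev)
{
        struct dm_bio_details bd;

        dm_bio_record(&bd, bio);        /* snapshot every field we may touch */
        bio_set_dev(bio, bdev);         /* ... temporary redirection ... */

        /* later, on the completion path: */
        dm_bio_restore(&bd, bio);       /* symmetric undo of the snapshot */
}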
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 2d32821b3a5b..d3bb355819a4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2846,8 +2846,8 @@ static void cache_postsuspend(struct dm_target *ti)
prevent_background_work(cache);
BUG_ON(atomic_read(&cache->nr_io_migrations));
- cancel_delayed_work(&cache->waker);
- flush_workqueue(cache->wq);
+ cancel_delayed_work_sync(&cache->waker);
+ drain_workqueue(cache->wq);
WARN_ON(cache->tracker.in_flight);
/*
@@ -3492,7 +3492,7 @@ static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
static struct target_type cache_target = {
.name = "cache",
- .version = {2, 1, 0},
+ .version = {2, 2, 0},
.module = THIS_MODULE,
.ctr = cache_ctr,
.dtr = cache_dtr,
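cancel_delayed_work() plus flush_workqueue() leaves two windows: a waker that is already executing can re-arm itself after the cancel, and a flushed work item can queue new work that the single flush never sees. cancel_delayed_work_sync() waits out a running waker and drain_workqueue() keeps flushing until the queue stays empty. A minimal sketch of the quiesce sequence:

#include <linux/workqueue.h>

static void quiesce(struct delayed_work *waker, struct workqueue_struct *wq)
{
        cancel_delayed_work_sync(waker);  /* also waits for a running waker */
        drain_workqueue(wq);              /* re-flush until nothing requeues */
}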
diff --git a/drivers/md/dm-integrity.c b/drivers/md/dm-integrity.c
index b225b3e445fa..2f03fecd312d 100644
--- a/drivers/md/dm-integrity.c
+++ b/drivers/md/dm-integrity.c
@@ -6,6 +6,8 @@
* This file is released under the GPL.
*/
+#include "dm-bio-record.h"
+
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/device-mapper.h>
@@ -201,17 +203,19 @@ struct dm_integrity_c {
__u8 log2_blocks_per_bitmap_bit;
unsigned char mode;
- int suspending;
int failed;
struct crypto_shash *internal_hash;
+ struct dm_target *ti;
+
/* these variables are locked with endio_wait.lock */
struct rb_root in_progress;
struct list_head wait_list;
wait_queue_head_t endio_wait;
struct workqueue_struct *wait_wq;
+ struct workqueue_struct *offload_wq;
unsigned char commit_seq;
commit_id_t commit_ids[N_COMMIT_IDS];
@@ -293,11 +297,7 @@ struct dm_integrity_io {
struct completion *completion;
- struct gendisk *orig_bi_disk;
- u8 orig_bi_partno;
- bio_end_io_t *orig_bi_end_io;
- struct bio_integrity_payload *orig_bi_integrity;
- struct bvec_iter orig_bi_iter;
+ struct dm_bio_details bio_details;
};
struct journal_completion {
@@ -1439,7 +1439,7 @@ static void dec_in_flight(struct dm_integrity_io *dio)
dio->range.logical_sector += dio->range.n_sectors;
bio_advance(bio, dio->range.n_sectors << SECTOR_SHIFT);
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->wait_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
return;
}
do_endio_flush(ic, dio);
@@ -1450,14 +1450,9 @@ static void integrity_end_io(struct bio *bio)
{
struct dm_integrity_io *dio = dm_per_bio_data(bio, sizeof(struct dm_integrity_io));
- bio->bi_iter = dio->orig_bi_iter;
- bio->bi_disk = dio->orig_bi_disk;
- bio->bi_partno = dio->orig_bi_partno;
- if (dio->orig_bi_integrity) {
- bio->bi_integrity = dio->orig_bi_integrity;
+ dm_bio_restore(&dio->bio_details, bio);
+ if (bio->bi_integrity)
bio->bi_opf |= REQ_INTEGRITY;
- }
- bio->bi_end_io = dio->orig_bi_end_io;
if (dio->completion)
complete(dio->completion);
@@ -1542,7 +1537,7 @@ static void integrity_metadata(struct work_struct *w)
}
}
- __bio_for_each_segment(bv, bio, iter, dio->orig_bi_iter) {
+ __bio_for_each_segment(bv, bio, iter, dio->bio_details.bi_iter) {
unsigned pos;
char *mem, *checksums_ptr;
@@ -1586,7 +1581,7 @@ again:
if (likely(checksums != checksums_onstack))
kfree(checksums);
} else {
- struct bio_integrity_payload *bip = dio->orig_bi_integrity;
+ struct bio_integrity_payload *bip = dio->bio_details.bi_integrity;
if (bip) {
struct bio_vec biv;
@@ -1865,7 +1860,7 @@ static void dm_integrity_map_continue(struct dm_integrity_io *dio, bool from_map
if (need_sync_io && from_map) {
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->metadata_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
return;
}
@@ -2005,20 +2000,13 @@ offload_to_thread:
} else
dio->completion = NULL;
- dio->orig_bi_iter = bio->bi_iter;
-
- dio->orig_bi_disk = bio->bi_disk;
- dio->orig_bi_partno = bio->bi_partno;
+ dm_bio_record(&dio->bio_details, bio);
bio_set_dev(bio, ic->dev->bdev);
-
- dio->orig_bi_integrity = bio_integrity(bio);
bio->bi_integrity = NULL;
bio->bi_opf &= ~REQ_INTEGRITY;
-
- dio->orig_bi_end_io = bio->bi_end_io;
bio->bi_end_io = integrity_end_io;
-
bio->bi_iter.bi_size = dio->range.n_sectors << SECTOR_SHIFT;
+
generic_make_request(bio);
if (need_sync_io) {
@@ -2315,7 +2303,7 @@ static void integrity_writer(struct work_struct *w)
unsigned prev_free_sectors;
/* the following test is not needed, but it tests the replay code */
- if (READ_ONCE(ic->suspending) && !ic->meta_dev)
+ if (unlikely(dm_suspended(ic->ti)) && !ic->meta_dev)
return;
spin_lock_irq(&ic->endio_wait.lock);
@@ -2376,7 +2364,7 @@ static void integrity_recalc(struct work_struct *w)
next_chunk:
- if (unlikely(READ_ONCE(ic->suspending)))
+ if (unlikely(dm_suspended(ic->ti)))
goto unlock_ret;
range.logical_sector = le64_to_cpu(ic->sb->recalc_sector);
@@ -2501,7 +2489,7 @@ static void bitmap_block_work(struct work_struct *w)
dio->range.n_sectors, BITMAP_OP_TEST_ALL_SET)) {
remove_range(ic, &dio->range);
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->wait_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
} else {
block_bitmap_op(ic, ic->journal, dio->range.logical_sector,
dio->range.n_sectors, BITMAP_OP_SET);
@@ -2524,7 +2512,7 @@ static void bitmap_block_work(struct work_struct *w)
remove_range(ic, &dio->range);
INIT_WORK(&dio->work, integrity_bio_wait);
- queue_work(ic->wait_wq, &dio->work);
+ queue_work(ic->offload_wq, &dio->work);
}
queue_delayed_work(ic->commit_wq, &ic->bitmap_flush_work, ic->bitmap_flush_interval);
@@ -2804,8 +2792,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
del_timer_sync(&ic->autocommit_timer);
- WRITE_ONCE(ic->suspending, 1);
-
if (ic->recalc_wq)
drain_workqueue(ic->recalc_wq);
@@ -2834,8 +2820,6 @@ static void dm_integrity_postsuspend(struct dm_target *ti)
#endif
}
- WRITE_ONCE(ic->suspending, 0);
-
BUG_ON(!RB_EMPTY_ROOT(&ic->in_progress));
ic->journal_uptodate = true;
@@ -2888,17 +2872,24 @@ static void dm_integrity_resume(struct dm_target *ti)
} else {
replay_journal(ic);
if (ic->mode == 'B') {
- int mode;
ic->sb->flags |= cpu_to_le32(SB_FLAG_DIRTY_BITMAP);
ic->sb->log2_blocks_per_bitmap_bit = ic->log2_blocks_per_bitmap_bit;
r = sync_rw_sb(ic, REQ_OP_WRITE, REQ_FUA);
if (unlikely(r))
dm_integrity_io_error(ic, "writing superblock", r);
- mode = ic->recalculate_flag ? BITMAP_OP_SET : BITMAP_OP_CLEAR;
- block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, mode);
- block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, mode);
- block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, mode);
+ block_bitmap_op(ic, ic->journal, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->recalc_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
+ block_bitmap_op(ic, ic->may_write_bitmap, 0, ic->provided_data_sectors, BITMAP_OP_CLEAR);
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING) &&
+ le64_to_cpu(ic->sb->recalc_sector) < ic->provided_data_sectors) {
+ block_bitmap_op(ic, ic->journal, le64_to_cpu(ic->sb->recalc_sector),
+ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->recalc_bitmap, le64_to_cpu(ic->sb->recalc_sector),
+ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
+ block_bitmap_op(ic, ic->may_write_bitmap, le64_to_cpu(ic->sb->recalc_sector),
+ ic->provided_data_sectors - le64_to_cpu(ic->sb->recalc_sector), BITMAP_OP_SET);
+ }
rw_journal_sectors(ic, REQ_OP_WRITE, REQ_FUA | REQ_SYNC, 0,
ic->n_bitmap_blocks * (BITMAP_BLOCK_SIZE >> SECTOR_SHIFT), NULL);
}
@@ -2967,7 +2958,7 @@ static void dm_integrity_status(struct dm_target *ti, status_type_t type,
DMEMIT(" meta_device:%s", ic->meta_dev->name);
if (ic->sectors_per_block != 1)
DMEMIT(" block_size:%u", ic->sectors_per_block << SECTOR_SHIFT);
- if (ic->recalculate_flag)
+ if (ic->sb->flags & cpu_to_le32(SB_FLAG_RECALCULATING))
DMEMIT(" recalculate");
DMEMIT(" journal_sectors:%u", ic->initial_sectors - SB_SECTORS);
DMEMIT(" interleave_sectors:%u", 1U << ic->sb->log2_interleave_sectors);
@@ -3623,6 +3614,7 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
}
ti->private = ic;
ti->per_io_data_size = sizeof(struct dm_integrity_io);
+ ic->ti = ti;
ic->in_progress = RB_ROOT;
INIT_LIST_HEAD(&ic->wait_list);
@@ -3836,6 +3828,14 @@ static int dm_integrity_ctr(struct dm_target *ti, unsigned argc, char **argv)
goto bad;
}
+ ic->offload_wq = alloc_workqueue("dm-integrity-offload", WQ_MEM_RECLAIM,
+ METADATA_WORKQUEUE_MAX_ACTIVE);
+ if (!ic->offload_wq) {
+ ti->error = "Cannot allocate workqueue";
+ r = -ENOMEM;
+ goto bad;
+ }
+
ic->commit_wq = alloc_workqueue("dm-integrity-commit", WQ_MEM_RECLAIM, 1);
if (!ic->commit_wq) {
ti->error = "Cannot allocate workqueue";
@@ -4140,6 +4140,8 @@ static void dm_integrity_dtr(struct dm_target *ti)
destroy_workqueue(ic->metadata_wq);
if (ic->wait_wq)
destroy_workqueue(ic->wait_wq);
+ if (ic->offload_wq)
+ destroy_workqueue(ic->offload_wq);
if (ic->commit_wq)
destroy_workqueue(ic->commit_wq);
if (ic->writer_wq)
@@ -4200,7 +4202,7 @@ static void dm_integrity_dtr(struct dm_target *ti)
static struct target_type integrity_target = {
.name = "integrity",
- .version = {1, 4, 0},
+ .version = {1, 5, 0},
.module = THIS_MODULE,
.features = DM_TARGET_SINGLETON | DM_TARGET_INTEGRITY,
.ctr = dm_integrity_ctr,
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 2bc18c9c3abc..58fd137b6ae1 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -2053,7 +2053,7 @@ static int multipath_busy(struct dm_target *ti)
*---------------------------------------------------------------*/
static struct target_type multipath_target = {
.name = "multipath",
- .version = {1, 13, 0},
+ .version = {1, 14, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE |
DM_TARGET_PASSES_INTEGRITY,
.module = THIS_MODULE,
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index fc9947d6210c..76b6b323bf4b 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -960,9 +960,9 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
DMWARN("%s: __commit_transaction() failed, error = %d",
__func__, r);
}
+ pmd_write_unlock(pmd);
if (!pmd->fail_io)
__destroy_persistent_data_objects(pmd);
- pmd_write_unlock(pmd);
kfree(pmd);
return 0;
diff --git a/drivers/md/dm-verity-target.c b/drivers/md/dm-verity-target.c
index 0d61e9c67986..eec9f252e935 100644
--- a/drivers/md/dm-verity-target.c
+++ b/drivers/md/dm-verity-target.c
@@ -1221,7 +1221,7 @@ bad:
static struct target_type verity_target = {
.name = "verity",
- .version = {1, 5, 0},
+ .version = {1, 6, 0},
.module = THIS_MODULE,
.ctr = verity_ctr,
.dtr = verity_dtr,
diff --git a/drivers/md/dm-writecache.c b/drivers/md/dm-writecache.c
index b9e27e37a943..a09bdc000e64 100644
--- a/drivers/md/dm-writecache.c
+++ b/drivers/md/dm-writecache.c
@@ -625,6 +625,12 @@ static void writecache_add_to_freelist(struct dm_writecache *wc, struct wc_entry
wc->freelist_size++;
}
+static inline void writecache_verify_watermark(struct dm_writecache *wc)
+{
+ if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
+ queue_work(wc->writeback_wq, &wc->writeback_work);
+}
+
static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, sector_t expected_sector)
{
struct wc_entry *e;
@@ -650,8 +656,8 @@ static struct wc_entry *writecache_pop_from_freelist(struct dm_writecache *wc, s
list_del(&e->lru);
}
wc->freelist_size--;
- if (unlikely(wc->freelist_size + wc->writeback_size <= wc->freelist_high_watermark))
- queue_work(wc->writeback_wq, &wc->writeback_work);
+
+ writecache_verify_watermark(wc);
return e;
}
@@ -842,7 +848,7 @@ static void writecache_suspend(struct dm_target *ti)
}
wc_unlock(wc);
- flush_workqueue(wc->writeback_wq);
+ drain_workqueue(wc->writeback_wq);
wc_lock(wc);
if (flush_on_suspend)
@@ -965,6 +971,8 @@ erase_this:
writecache_commit_flushed(wc, false);
}
+ writecache_verify_watermark(wc);
+
wc_unlock(wc);
}
@@ -2312,7 +2320,7 @@ static void writecache_status(struct dm_target *ti, status_type_t type,
static struct target_type writecache_target = {
.name = "writecache",
- .version = {1, 1, 1},
+ .version = {1, 2, 0},
.module = THIS_MODULE,
.ctr = writecache_ctr,
.dtr = writecache_dtr,
diff --git a/drivers/md/dm-zoned-target.c b/drivers/md/dm-zoned-target.c
index 70a1063161c0..f4f83d39b3dc 100644
--- a/drivers/md/dm-zoned-target.c
+++ b/drivers/md/dm-zoned-target.c
@@ -533,8 +533,9 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
/* Get the BIO chunk work. If one is not active yet, create one */
cw = radix_tree_lookup(&dmz->chunk_rxtree, chunk);
- if (!cw) {
-
+ if (cw) {
+ dmz_get_chunk_work(cw);
+ } else {
/* Create a new chunk work */
cw = kmalloc(sizeof(struct dm_chunk_work), GFP_NOIO);
if (unlikely(!cw)) {
@@ -543,7 +544,7 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
}
INIT_WORK(&cw->work, dmz_chunk_work);
- refcount_set(&cw->refcount, 0);
+ refcount_set(&cw->refcount, 1);
cw->target = dmz;
cw->chunk = chunk;
bio_list_init(&cw->bio_list);
@@ -556,7 +557,6 @@ static int dmz_queue_chunk_work(struct dmz_target *dmz, struct bio *bio)
}
bio_list_add(&cw->bio_list, bio);
- dmz_get_chunk_work(cw);
dmz_reclaim_bio_acc(dmz->reclaim);
if (queue_work(dmz->chunk_wq, &cw->work))
@@ -967,7 +967,7 @@ static int dmz_iterate_devices(struct dm_target *ti,
static struct target_type dmz_type = {
.name = "zoned",
- .version = {1, 0, 0},
+ .version = {1, 1, 0},
.features = DM_TARGET_SINGLETON | DM_TARGET_ZONED_HM,
.module = THIS_MODULE,
.ctr = dmz_ctr,
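The dm-zoned fix initializes the chunk work's refcount to 1 so the creator owns a reference for the whole setup path, and the lookup branch now takes its own reference before touching the work; the old refcount_set(..., 0) followed by a later increment could race with a concurrent put driving the count through zero. A sketch of the creator-owns-one idiom (types reduced and hypothetical):

#include <linux/refcount.h>
#include <linux/slab.h>

struct chunk_work_like {
        refcount_t refcount;
};

static struct chunk_work_like *get_or_create(struct chunk_work_like *cw)
{
        if (cw) {
                refcount_inc(&cw->refcount);    /* lookup takes its own ref */
                return cw;
        }

        cw = kmalloc(sizeof(*cw), GFP_NOIO);
        if (cw)
                refcount_set(&cw->refcount, 1); /* the creator's reference */
        return cw;
}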
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b89f07ee2eff..0413018c8305 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1788,7 +1788,8 @@ static int dm_any_congested(void *congested_data, int bdi_bits)
* With request-based DM we only need to check the
* top-level queue for congestion.
*/
- r = md->queue->backing_dev_info->wb.state & bdi_bits;
+ struct backing_dev_info *bdi = md->queue->backing_dev_info;
+ r = bdi->wb.congested->state & bdi_bits;
} else {
map = dm_get_live_table_fast(md);
if (map)
@@ -1854,15 +1855,6 @@ static const struct dax_operations dm_dax_ops;
static void dm_wq_work(struct work_struct *work);
-static void dm_init_normal_md_queue(struct mapped_device *md)
-{
- /*
- * Initialize aspects of queue that aren't relevant for blk-mq
- */
- md->queue->backing_dev_info->congested_data = md;
- md->queue->backing_dev_info->congested_fn = dm_any_congested;
-}
-
static void cleanup_mapped_device(struct mapped_device *md)
{
if (md->wq)
@@ -2249,6 +2241,12 @@ struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);
+static void dm_init_congested_fn(struct mapped_device *md)
+{
+ md->queue->backing_dev_info->congested_data = md;
+ md->queue->backing_dev_info->congested_fn = dm_any_congested;
+}
+
/*
* Setup the DM device's queue based on md's type
*/
@@ -2265,11 +2263,12 @@ int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
DMERR("Cannot initialize queue for request-based dm-mq mapped device");
return r;
}
+ dm_init_congested_fn(md);
break;
case DM_TYPE_BIO_BASED:
case DM_TYPE_DAX_BIO_BASED:
case DM_TYPE_NVME_BIO_BASED:
- dm_init_normal_md_queue(md);
+ dm_init_congested_fn(md);
break;
case DM_TYPE_NONE:
WARN_ON_ONCE(true);
@@ -2368,6 +2367,7 @@ static void __dm_destroy(struct mapped_device *md, bool wait)
map = dm_get_live_table(md, &srcu_idx);
if (!dm_suspended_md(md)) {
dm_table_presuspend_targets(map);
+ set_bit(DMF_SUSPENDED, &md->flags);
dm_table_postsuspend_targets(map);
}
/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
diff --git a/drivers/media/mc/mc-entity.c b/drivers/media/mc/mc-entity.c
index 7c429ce98bae..668770e9f609 100644
--- a/drivers/media/mc/mc-entity.c
+++ b/drivers/media/mc/mc-entity.c
@@ -639,9 +639,9 @@ int media_get_pad_index(struct media_entity *entity, bool is_sink,
return -EINVAL;
for (i = 0; i < entity->num_pads; i++) {
- if (entity->pads[i].flags == MEDIA_PAD_FL_SINK)
+ if (entity->pads[i].flags & MEDIA_PAD_FL_SINK)
pad_is_sink = true;
- else if (entity->pads[i].flags == MEDIA_PAD_FL_SOURCE)
+ else if (entity->pads[i].flags & MEDIA_PAD_FL_SOURCE)
pad_is_sink = false;
else
continue; /* This is an error! */
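Pad flags form a bitmask, so membership must be tested with & rather than ==; the equality test breaks as soon as any additional flag (for example MEDIA_PAD_FL_MUST_CONNECT) is set on the same pad. Minimal sketch:

#include <media/media-entity.h>

static bool pad_is_sink(const struct media_pad *pad)
{
        /* & tests the bit; == fails whenever another flag is also set */
        return pad->flags & MEDIA_PAD_FL_SINK;
}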
diff --git a/drivers/media/platform/vicodec/codec-v4l2-fwht.c b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
index 3c93d9232c3c..b6e39fbd8ad5 100644
--- a/drivers/media/platform/vicodec/codec-v4l2-fwht.c
+++ b/drivers/media/platform/vicodec/codec-v4l2-fwht.c
@@ -27,17 +27,17 @@ static const struct v4l2_fwht_pixfmt_info v4l2_fwht_pixfmts[] = {
{ V4L2_PIX_FMT_BGR24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_RGB24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_HSV24, 3, 3, 1, 3, 3, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
- { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XBGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_ABGR32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_XRGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_ARGB32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_BGRX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_BGRA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_RGB},
+ { V4L2_PIX_FMT_RGBX32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
{ V4L2_PIX_FMT_RGBA32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_RGB},
- { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 3, 1, FWHT_FL_PIXENC_HSV},
+ { V4L2_PIX_FMT_HSV32, 4, 4, 1, 4, 4, 1, 1, 4, 1, FWHT_FL_PIXENC_HSV},
{ V4L2_PIX_FMT_GREY, 1, 1, 1, 1, 0, 1, 1, 1, 1, FWHT_FL_PIXENC_RGB},
};
@@ -175,22 +175,14 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
case V4L2_PIX_FMT_RGB32:
case V4L2_PIX_FMT_XRGB32:
case V4L2_PIX_FMT_HSV32:
- rf->cr = rf->luma + 1;
- rf->cb = rf->cr + 2;
- rf->luma += 2;
- break;
- case V4L2_PIX_FMT_BGR32:
- case V4L2_PIX_FMT_XBGR32:
- rf->cb = rf->luma;
- rf->cr = rf->cb + 2;
- rf->luma++;
- break;
case V4L2_PIX_FMT_ARGB32:
rf->alpha = rf->luma;
rf->cr = rf->luma + 1;
rf->cb = rf->cr + 2;
rf->luma += 2;
break;
+ case V4L2_PIX_FMT_BGR32:
+ case V4L2_PIX_FMT_XBGR32:
case V4L2_PIX_FMT_ABGR32:
rf->cb = rf->luma;
rf->cr = rf->cb + 2;
@@ -198,10 +190,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
rf->alpha = rf->cr + 1;
break;
case V4L2_PIX_FMT_BGRX32:
- rf->cb = rf->luma + 1;
- rf->cr = rf->cb + 2;
- rf->luma += 2;
- break;
case V4L2_PIX_FMT_BGRA32:
rf->alpha = rf->luma;
rf->cb = rf->luma + 1;
@@ -209,10 +197,6 @@ static int prepare_raw_frame(struct fwht_raw_frame *rf,
rf->luma += 2;
break;
case V4L2_PIX_FMT_RGBX32:
- rf->cr = rf->luma;
- rf->cb = rf->cr + 2;
- rf->luma++;
- break;
case V4L2_PIX_FMT_RGBA32:
rf->alpha = rf->luma + 3;
rf->cr = rf->luma;
diff --git a/drivers/media/usb/pulse8-cec/pulse8-cec.c b/drivers/media/usb/pulse8-cec/pulse8-cec.c
index afda438d4e0a..0655aa9ecf28 100644
--- a/drivers/media/usb/pulse8-cec/pulse8-cec.c
+++ b/drivers/media/usb/pulse8-cec/pulse8-cec.c
@@ -635,8 +635,6 @@ static void pulse8_cec_adap_free(struct cec_adapter *adap)
cancel_delayed_work_sync(&pulse8->ping_eeprom_work);
cancel_work_sync(&pulse8->irq_work);
cancel_work_sync(&pulse8->tx_work);
- serio_close(pulse8->serio);
- serio_set_drvdata(pulse8->serio, NULL);
kfree(pulse8);
}
@@ -652,6 +650,9 @@ static void pulse8_disconnect(struct serio *serio)
struct pulse8 *pulse8 = serio_get_drvdata(serio);
cec_unregister_adapter(pulse8->adap);
+ pulse8->serio = NULL;
+ serio_set_drvdata(serio, NULL);
+ serio_close(serio);
}
static int pulse8_setup(struct pulse8 *pulse8, struct serio *serio,
@@ -840,6 +841,8 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
serio_set_drvdata(serio, pulse8);
INIT_WORK(&pulse8->irq_work, pulse8_irq_work_handler);
INIT_WORK(&pulse8->tx_work, pulse8_tx_work_handler);
+ INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
+ pulse8_ping_eeprom_work_handler);
mutex_init(&pulse8->lock);
spin_lock_init(&pulse8->msg_lock);
pulse8->config_pending = false;
@@ -865,17 +868,16 @@ static int pulse8_connect(struct serio *serio, struct serio_driver *drv)
pulse8->restoring_config = true;
}
- INIT_DELAYED_WORK(&pulse8->ping_eeprom_work,
- pulse8_ping_eeprom_work_handler);
schedule_delayed_work(&pulse8->ping_eeprom_work, PING_PERIOD);
return 0;
close_serio:
+ pulse8->serio = NULL;
+ serio_set_drvdata(serio, NULL);
serio_close(serio);
delete_adap:
cec_delete_adapter(pulse8->adap);
- serio_set_drvdata(serio, NULL);
free_device:
kfree(pulse8);
return err;
diff --git a/drivers/media/v4l2-core/v4l2-mem2mem.c b/drivers/media/v4l2-core/v4l2-mem2mem.c
index 1afd9c6ad908..cc34c5ab7009 100644
--- a/drivers/media/v4l2-core/v4l2-mem2mem.c
+++ b/drivers/media/v4l2-core/v4l2-mem2mem.c
@@ -880,12 +880,12 @@ int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
goto err_rel_entity1;
/* Connect the three entities */
- ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 1,
+ ret = media_create_pad_link(m2m_dev->source, 0, &m2m_dev->proc, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rel_entity2;
- ret = media_create_pad_link(&m2m_dev->proc, 0, &m2m_dev->sink, 0,
+ ret = media_create_pad_link(&m2m_dev->proc, 1, &m2m_dev->sink, 0,
MEDIA_LNK_FL_IMMUTABLE | MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rm_links0;
diff --git a/drivers/misc/altera-stapl/altera.c b/drivers/misc/altera-stapl/altera.c
index 25e5f24b3fec..5bdf57472314 100644
--- a/drivers/misc/altera-stapl/altera.c
+++ b/drivers/misc/altera-stapl/altera.c
@@ -2112,8 +2112,8 @@ exit_done:
return status;
}
-static int altera_get_note(u8 *p, s32 program_size,
- s32 *offset, char *key, char *value, int length)
+static int altera_get_note(u8 *p, s32 program_size, s32 *offset,
+ char *key, char *value, int keylen, int vallen)
/*
* Gets key and value of NOTE fields in the JBC file.
* Can be called in two modes: if offset pointer is NULL,
@@ -2170,7 +2170,7 @@ static int altera_get_note(u8 *p, s32 program_size,
&p[note_table + (8 * i) + 4])];
if (value != NULL)
- strlcpy(value, value_ptr, length);
+ strlcpy(value, value_ptr, vallen);
}
}
@@ -2189,13 +2189,13 @@ static int altera_get_note(u8 *p, s32 program_size,
strlcpy(key, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i)])],
- length);
+ keylen);
if (value != NULL)
strlcpy(value, &p[note_strings +
get_unaligned_be32(
&p[note_table + (8 * i) + 4])],
- length);
+ vallen);
*offset = i + 1;
}
@@ -2449,7 +2449,7 @@ int altera_init(struct altera_config *config, const struct firmware *fw)
__func__, (format_version == 2) ? "Jam STAPL" :
"pre-standardized Jam 1.1");
while (altera_get_note((u8 *)fw->data, fw->size,
- &offset, key, value, 256) == 0)
+ &offset, key, value, 32, 256) == 0)
printk(KERN_INFO "%s: NOTE \"%s\" = \"%s\"\n",
__func__, key, value);
}
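altera_get_note() copied into two differently sized buffers with a single length argument, so the 256-byte bound was applied to the 32-byte key buffer as well; splitting the parameter into keylen and vallen bounds each strlcpy() by its own destination. A reduced sketch:

#include <linux/string.h>

static void copy_note(char *key, size_t keylen,
                      char *value, size_t vallen,
                      const char *k, const char *v)
{
        strlcpy(key, k, keylen);        /* bounded by the key buffer */
        strlcpy(value, v, vallen);      /* bounded by the value buffer */
}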
diff --git a/drivers/misc/habanalabs/device.c b/drivers/misc/habanalabs/device.c
index b155e9549076..b680b0caa69b 100644
--- a/drivers/misc/habanalabs/device.c
+++ b/drivers/misc/habanalabs/device.c
@@ -598,7 +598,9 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable)
goto out;
}
- hdev->asic_funcs->halt_coresight(hdev);
+ if (!hdev->hard_reset_pending)
+ hdev->asic_funcs->halt_coresight(hdev);
+
hdev->in_debug = 0;
goto out;
@@ -1189,6 +1191,7 @@ int hl_device_init(struct hl_device *hdev, struct class *hclass)
if (hdev->asic_funcs->get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) {
dev_info(hdev->dev,
"H/W state is dirty, must reset before initializing\n");
+ hdev->asic_funcs->halt_engines(hdev, true);
hdev->asic_funcs->hw_fini(hdev, true);
}
diff --git a/drivers/misc/habanalabs/goya/goya.c b/drivers/misc/habanalabs/goya/goya.c
index 7344e8a222ae..b8a8de24aaf7 100644
--- a/drivers/misc/habanalabs/goya/goya.c
+++ b/drivers/misc/habanalabs/goya/goya.c
@@ -895,6 +895,11 @@ void goya_init_dma_qmans(struct hl_device *hdev)
*/
static void goya_disable_external_queues(struct hl_device *hdev)
{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
+ return;
+
WREG32(mmDMA_QM_0_GLBL_CFG0, 0);
WREG32(mmDMA_QM_1_GLBL_CFG0, 0);
WREG32(mmDMA_QM_2_GLBL_CFG0, 0);
@@ -956,6 +961,11 @@ static int goya_stop_external_queues(struct hl_device *hdev)
{
int rc, retval = 0;
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
+ return retval;
+
rc = goya_stop_queue(hdev,
mmDMA_QM_0_GLBL_CFG1,
mmDMA_QM_0_CP_STS,
@@ -1744,9 +1754,18 @@ void goya_init_tpc_qmans(struct hl_device *hdev)
*/
static void goya_disable_internal_queues(struct hl_device *hdev)
{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MME))
+ goto disable_tpc;
+
WREG32(mmMME_QM_GLBL_CFG0, 0);
WREG32(mmMME_CMDQ_GLBL_CFG0, 0);
+disable_tpc:
+ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
+ return;
+
WREG32(mmTPC0_QM_GLBL_CFG0, 0);
WREG32(mmTPC0_CMDQ_GLBL_CFG0, 0);
@@ -1782,8 +1801,12 @@ static void goya_disable_internal_queues(struct hl_device *hdev)
*/
static int goya_stop_internal_queues(struct hl_device *hdev)
{
+ struct goya_device *goya = hdev->asic_specific;
int rc, retval = 0;
+ if (!(goya->hw_cap_initialized & HW_CAP_MME))
+ goto stop_tpc;
+
/*
* Each queue (QMAN) is a separate H/W logic. That means that each
* QMAN can be stopped independently and failure to stop one does NOT
@@ -1810,6 +1833,10 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
retval = -EIO;
}
+stop_tpc:
+ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
+ return retval;
+
rc = goya_stop_queue(hdev,
mmTPC0_QM_GLBL_CFG1,
mmTPC0_QM_CP_STS,
@@ -1975,6 +2002,11 @@ static int goya_stop_internal_queues(struct hl_device *hdev)
static void goya_dma_stall(struct hl_device *hdev)
{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_DMA))
+ return;
+
WREG32(mmDMA_QM_0_GLBL_CFG1, 1 << DMA_QM_0_GLBL_CFG1_DMA_STOP_SHIFT);
WREG32(mmDMA_QM_1_GLBL_CFG1, 1 << DMA_QM_1_GLBL_CFG1_DMA_STOP_SHIFT);
WREG32(mmDMA_QM_2_GLBL_CFG1, 1 << DMA_QM_2_GLBL_CFG1_DMA_STOP_SHIFT);
@@ -1984,6 +2016,11 @@ static void goya_dma_stall(struct hl_device *hdev)
static void goya_tpc_stall(struct hl_device *hdev)
{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_TPC))
+ return;
+
WREG32(mmTPC0_CFG_TPC_STALL, 1 << TPC0_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC1_CFG_TPC_STALL, 1 << TPC1_CFG_TPC_STALL_V_SHIFT);
WREG32(mmTPC2_CFG_TPC_STALL, 1 << TPC2_CFG_TPC_STALL_V_SHIFT);
@@ -1996,6 +2033,11 @@ static void goya_tpc_stall(struct hl_device *hdev)
static void goya_mme_stall(struct hl_device *hdev)
{
+ struct goya_device *goya = hdev->asic_specific;
+
+ if (!(goya->hw_cap_initialized & HW_CAP_MME))
+ return;
+
WREG32(mmMME_STALL, 0xFFFFFFFF);
}
@@ -4648,8 +4690,6 @@ static int goya_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size,
rc = goya_send_job_on_qman0(hdev, job);
- hl_cb_put(job->patched_cb);
-
hl_debugfs_remove_job(hdev, job);
kfree(job);
cb->cs_cnt--;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 48d5ec770b94..d10805e5e623 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3526,6 +3526,47 @@ static void bond_fold_stats(struct rtnl_link_stats64 *_res,
}
}
+#ifdef CONFIG_LOCKDEP
+static int bond_get_lowest_level_rcu(struct net_device *dev)
+{
+ struct net_device *ldev, *next, *now, *dev_stack[MAX_NEST_DEV + 1];
+ struct list_head *niter, *iter, *iter_stack[MAX_NEST_DEV + 1];
+ int cur = 0, max = 0;
+
+ now = dev;
+ iter = &dev->adj_list.lower;
+
+ while (1) {
+ next = NULL;
+ while (1) {
+ ldev = netdev_next_lower_dev_rcu(now, &iter);
+ if (!ldev)
+ break;
+
+ next = ldev;
+ niter = &ldev->adj_list.lower;
+ dev_stack[cur] = now;
+ iter_stack[cur++] = iter;
+ if (max <= cur)
+ max = cur;
+ break;
+ }
+
+ if (!next) {
+ if (!cur)
+ return max;
+ next = dev_stack[--cur];
+ niter = iter_stack[cur];
+ }
+
+ now = next;
+ iter = niter;
+ }
+
+ return max;
+}
+#endif
+
static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats)
{
@@ -3533,11 +3574,17 @@ static void bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 temp;
struct list_head *iter;
struct slave *slave;
+ int nest_level = 0;
- spin_lock(&bond->stats_lock);
- memcpy(stats, &bond->bond_stats, sizeof(*stats));
rcu_read_lock();
+#ifdef CONFIG_LOCKDEP
+ nest_level = bond_get_lowest_level_rcu(bond_dev);
+#endif
+
+ spin_lock_nested(&bond->stats_lock, nest_level);
+ memcpy(stats, &bond->bond_stats, sizeof(*stats));
+
bond_for_each_slave_rcu(bond, slave, iter) {
const struct rtnl_link_stats64 *new =
dev_get_stats(slave->dev, &temp);
@@ -3547,10 +3594,10 @@ static void bond_get_stats(struct net_device *bond_dev,
/* save off the slave stats for the next run */
memcpy(&slave->slave_stats, new, sizeof(*new));
}
- rcu_read_unlock();
memcpy(&bond->bond_stats, stats, sizeof(*stats));
spin_unlock(&bond->stats_lock);
+ rcu_read_unlock();
}
static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd)
@@ -3640,6 +3687,8 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
case BOND_RELEASE_OLD:
case SIOCBONDRELEASE:
res = bond_release(bond_dev, slave_dev);
+ if (!res)
+ netdev_update_lockdep_key(slave_dev);
break;
case BOND_SETHWADDR_OLD:
case SIOCBONDSETHWADDR:
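With stacked bonds, lockdep sees the same stats_lock class acquired recursively and reports a false deadlock. The patch derives the device's depth in the lower-device graph (under CONFIG_LOCKDEP) and hands it to spin_lock_nested(), which files the acquisition under a per-depth subclass; the lock is also taken inside the RCU section so the computed depth cannot go stale. A sketch of the annotation:

#include <linux/spinlock.h>

static void stats_fold(spinlock_t *stats_lock, int nest_level)
{
        /*
         * Same lock class at different stacking depths: tell lockdep
         * which level this acquisition belongs to (0 = outermost).
         */
        spin_lock_nested(stats_lock, nest_level);
        /* ... fold per-slave stats into the aggregate ... */
        spin_unlock(stats_lock);
}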
diff --git a/drivers/net/bonding/bond_options.c b/drivers/net/bonding/bond_options.c
index ddb3916d3506..215c10923289 100644
--- a/drivers/net/bonding/bond_options.c
+++ b/drivers/net/bonding/bond_options.c
@@ -1398,6 +1398,8 @@ static int bond_option_slaves_set(struct bonding *bond,
case '-':
slave_dbg(bond->dev, dev, "Releasing interface\n");
ret = bond_release(bond->dev, dev);
+ if (!ret)
+ netdev_update_lockdep_key(dev);
break;
default:
diff --git a/drivers/net/dsa/b53/b53_common.c b/drivers/net/dsa/b53/b53_common.c
index 449a22172e07..1a69286daa8d 100644
--- a/drivers/net/dsa/b53/b53_common.c
+++ b/drivers/net/dsa/b53/b53_common.c
@@ -1366,6 +1366,9 @@ void b53_vlan_add(struct dsa_switch *ds, int port,
b53_get_vlan_entry(dev, vid, vl);
+ if (vid == 0 && vid == b53_default_pvid(dev))
+ untagged = true;
+
vl->members |= BIT(port);
if (untagged && !dsa_is_cpu_port(ds, port))
vl->untag |= BIT(port);
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index d1955543acd1..b0f5280a83cb 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -69,8 +69,7 @@ static void bcm_sf2_imp_setup(struct dsa_switch *ds, int port)
/* Force link status for IMP port */
reg = core_readl(priv, offset);
reg |= (MII_SW_OR | LINK_STS);
- if (priv->type == BCM7278_DEVICE_ID)
- reg |= GMII_SPEED_UP_2G;
+ reg &= ~GMII_SPEED_UP_2G;
core_writel(priv, reg, offset);
/* Enable Broadcast, Multicast, Unicast forwarding to IMP port */
diff --git a/drivers/net/dsa/mv88e6xxx/chip.h b/drivers/net/dsa/mv88e6xxx/chip.h
index f332cb4b2fbf..79cad5e751c6 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.h
+++ b/drivers/net/dsa/mv88e6xxx/chip.h
@@ -236,7 +236,7 @@ struct mv88e6xxx_port {
bool mirror_ingress;
bool mirror_egress;
unsigned int serdes_irq;
- char serdes_irq_name[32];
+ char serdes_irq_name[64];
};
struct mv88e6xxx_chip {
@@ -293,16 +293,16 @@ struct mv88e6xxx_chip {
struct mv88e6xxx_irq g1_irq;
struct mv88e6xxx_irq g2_irq;
int irq;
- char irq_name[32];
+ char irq_name[64];
int device_irq;
- char device_irq_name[32];
+ char device_irq_name[64];
int watchdog_irq;
- char watchdog_irq_name[32];
+ char watchdog_irq_name[64];
int atu_prob_irq;
- char atu_prob_irq_name[32];
+ char atu_prob_irq_name[64];
int vtu_prob_irq;
- char vtu_prob_irq_name[32];
+ char vtu_prob_irq_name[64];
struct kthread_worker *kworker;
struct kthread_delayed_work irq_poll_work;
diff --git a/drivers/net/dsa/mv88e6xxx/global1.c b/drivers/net/dsa/mv88e6xxx/global1.c
index b016cc205f81..ca3a7a7a73c3 100644
--- a/drivers/net/dsa/mv88e6xxx/global1.c
+++ b/drivers/net/dsa/mv88e6xxx/global1.c
@@ -278,13 +278,13 @@ int mv88e6095_g1_set_egress_port(struct mv88e6xxx_chip *chip,
switch (direction) {
case MV88E6XXX_EGRESS_DIR_INGRESS:
dest_port_chip = &chip->ingress_dest_port;
- reg &= MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_INGRESS_DEST_MASK);
break;
case MV88E6XXX_EGRESS_DIR_EGRESS:
dest_port_chip = &chip->egress_dest_port;
- reg &= MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
+ reg &= ~MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK;
reg |= port <<
__bf_shf(MV88E6185_G1_MONITOR_CTL_EGRESS_DEST_MASK);
break;
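
Note on the mv88e6xxx fix: this is the classic read-modify-write slip — `reg &= MASK` keeps only the field, while the intended `reg &= ~MASK` clears it before OR-ing in the new value. A self-contained sketch of the correct pattern with illustrative mask names, not the driver's:

	#include <stdint.h>
	#include <stdio.h>

	#define DEST_MASK	0x00F0u		/* example 4-bit field */
	#define DEST_SHIFT	4

	static uint16_t set_dest(uint16_t reg, uint16_t port)
	{
		reg &= ~DEST_MASK;			 /* clear the field first */
		reg |= (port << DEST_SHIFT) & DEST_MASK; /* then insert the value */
		return reg;
	}

	int main(void)
	{
		printf("0x%04x\n", set_dest(0xABCD, 0x3));	/* -> 0xab3d */
		return 0;
	}
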
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.c b/drivers/net/ethernet/amazon/ena/ena_com.c
index ea62604fdf8c..1fb58f9ad80b 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.c
+++ b/drivers/net/ethernet/amazon/ena/ena_com.c
@@ -200,6 +200,11 @@ static void comp_ctxt_release(struct ena_com_admin_queue *queue,
static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
u16 command_id, bool capture)
{
+ if (unlikely(!queue->comp_ctx)) {
+ pr_err("Completion context is NULL\n");
+ return NULL;
+ }
+
if (unlikely(command_id >= queue->q_depth)) {
pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
command_id, queue->q_depth);
@@ -1041,9 +1046,41 @@ static int ena_com_get_feature(struct ena_com_dev *ena_dev,
feature_ver);
}
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
+{
+ return ena_dev->rss.hash_func;
+}
+
+static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
+{
+ struct ena_admin_feature_rss_flow_hash_control *hash_key =
+ (ena_dev->rss).hash_key;
+
+ netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
+ /* The key is stored in the device as an array of u32, and the
+ * API likewise expects it in that format, so the array length is
+ * the key's byte size divided by 4
+ */
+ hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
+}
+
static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
struct ena_rss *rss = &ena_dev->rss;
+ struct ena_admin_feature_rss_flow_hash_control *hash_key;
+ struct ena_admin_get_feat_resp get_resp;
+ int rc;
+
+ hash_key = (ena_dev->rss).hash_key;
+
+ rc = ena_com_get_feature_ex(ena_dev, &get_resp,
+ ENA_ADMIN_RSS_HASH_FUNCTION,
+ ena_dev->rss.hash_key_dma_addr,
+ sizeof(ena_dev->rss.hash_key), 0);
+ if (unlikely(rc)) {
+ hash_key = NULL;
+ return -EOPNOTSUPP;
+ }
rss->hash_key =
dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
@@ -1254,30 +1291,6 @@ static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
return 0;
}
-static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
-{
- u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
- struct ena_rss *rss = &ena_dev->rss;
- u8 idx;
- u16 i;
-
- for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
- dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
-
- for (i = 0; i < 1 << rss->tbl_log_size; i++) {
- if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
- idx = (u8)rss->rss_ind_tbl[i].cq_idx;
-
- if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
- return -EINVAL;
-
- rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
- }
-
- return 0;
-}
-
static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
u16 intr_delay_resolution)
{
@@ -2297,15 +2310,16 @@ int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
switch (func) {
case ENA_ADMIN_TOEPLITZ:
- if (key_len > sizeof(hash_key->key)) {
- pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
- key_len, sizeof(hash_key->key));
- return -EINVAL;
+ if (key) {
+ if (key_len != sizeof(hash_key->key)) {
+ pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
+ key_len, sizeof(hash_key->key));
+ return -EINVAL;
+ }
+ memcpy(hash_key->key, key, key_len);
+ rss->hash_init_val = init_val;
+ hash_key->keys_num = key_len >> 2;
}
-
- memcpy(hash_key->key, key, key_len);
- rss->hash_init_val = init_val;
- hash_key->keys_num = key_len >> 2;
break;
case ENA_ADMIN_CRC32:
rss->hash_init_val = init_val;
@@ -2342,7 +2356,11 @@ int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
if (unlikely(rc))
return rc;
- rss->hash_func = get_resp.u.flow_hash_func.selected_func;
+ /* ffs() returns 1 in case the lsb is set */
+ rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
+ if (rss->hash_func)
+ rss->hash_func--;
+
if (func)
*func = rss->hash_func;
@@ -2606,10 +2624,6 @@ int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
if (!ind_tbl)
return 0;
- rc = ena_com_ind_tbl_convert_from_device(ena_dev);
- if (unlikely(rc))
- return rc;
-
for (i = 0; i < (1 << rss->tbl_log_size); i++)
ind_tbl[i] = rss->host_rss_ind_tbl[i];
@@ -2626,9 +2640,15 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
if (unlikely(rc))
goto err_indr_tbl;
+ /* The following function might return unsupported in case the
+ * device doesn't support setting the key / hash function. We can safely
+ * ignore this error and have indirection table support only.
+ */
rc = ena_com_hash_key_allocate(ena_dev);
- if (unlikely(rc))
+ if (unlikely(rc) && rc != -EOPNOTSUPP)
goto err_hash_key;
+ else if (rc != -EOPNOTSUPP)
+ ena_com_hash_key_fill_default_key(ena_dev);
rc = ena_com_hash_ctrl_init(ena_dev);
if (unlikely(rc))
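
Note on the ena_com hash-function change: the value reported by the device is now treated as a one-hot bitmask and converted to an enum index with ffs(), which returns the 1-based position of the least significant set bit (0 when no bit is set). A compilable sketch of that conversion, assuming a one-hot input:

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	/* Map a one-hot capability mask to a 0-based index; -1 if empty. */
	static int selected_func(unsigned int mask)
	{
		int bit = ffs(mask);	/* 1-based lsb position, 0 if mask == 0 */

		return bit ? bit - 1 : -1;
	}

	int main(void)
	{
		printf("%d %d %d\n", selected_func(0x1), selected_func(0x4),
		       selected_func(0));	/* -> 0 2 -1 */
		return 0;
	}
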
diff --git a/drivers/net/ethernet/amazon/ena/ena_com.h b/drivers/net/ethernet/amazon/ena/ena_com.h
index 0ce37d54ed10..469f298199a7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_com.h
+++ b/drivers/net/ethernet/amazon/ena/ena_com.h
@@ -44,6 +44,7 @@
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>
+#include <linux/netdevice.h>
#include "ena_common_defs.h"
#include "ena_admin_defs.h"
@@ -655,6 +656,14 @@ int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 log_size);
*/
void ena_com_rss_destroy(struct ena_com_dev *ena_dev);
+/* ena_com_get_current_hash_function - Get RSS hash function
+ * @ena_dev: ENA communication layer struct
+ *
+ * Return the current hash function.
+ * @return: 0 or one of the ena_admin_hash_functions values.
+ */
+int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev);
+
/* ena_com_fill_hash_function - Fill RSS hash function
* @ena_dev: ENA communication layer struct
* @func: The hash function (Toeplitz or crc)
diff --git a/drivers/net/ethernet/amazon/ena/ena_ethtool.c b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
index b4e891d49a94..ced1d577b62a 100644
--- a/drivers/net/ethernet/amazon/ena/ena_ethtool.c
+++ b/drivers/net/ethernet/amazon/ena/ena_ethtool.c
@@ -636,6 +636,28 @@ static u32 ena_get_rxfh_key_size(struct net_device *netdev)
return ENA_HASH_KEY_SIZE;
}
+static int ena_indirection_table_get(struct ena_adapter *adapter, u32 *indir)
+{
+ struct ena_com_dev *ena_dev = adapter->ena_dev;
+ int i, rc;
+
+ if (!indir)
+ return 0;
+
+ rc = ena_com_indirect_table_get(ena_dev, indir);
+ if (rc)
+ return rc;
+
+ /* Our internal representation of the indices is: even indices
+ * for Tx and odd indices for Rx. We need to convert the Rx
+ * indices to be consecutive
+ */
+ for (i = 0; i < ENA_RX_RSS_TABLE_SIZE; i++)
+ indir[i] = ENA_IO_RXQ_IDX_TO_COMBINED_IDX(indir[i]);
+
+ return rc;
+}
+
static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 *hfunc)
{
@@ -644,11 +666,25 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
u8 func;
int rc;
- rc = ena_com_indirect_table_get(adapter->ena_dev, indir);
+ rc = ena_indirection_table_get(adapter, indir);
if (rc)
return rc;
+ /* We call this function in order to check if the device
+ * supports getting/setting the hash function.
+ */
rc = ena_com_get_hash_function(adapter->ena_dev, &ena_func, key);
+
+ if (rc) {
+ if (rc == -EOPNOTSUPP) {
+ key = NULL;
+ hfunc = NULL;
+ rc = 0;
+ }
+
+ return rc;
+ }
+
if (rc)
return rc;
@@ -657,7 +693,7 @@ static int ena_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
func = ETH_RSS_HASH_TOP;
break;
case ENA_ADMIN_CRC32:
- func = ETH_RSS_HASH_XOR;
+ func = ETH_RSS_HASH_CRC32;
break;
default:
netif_err(adapter, drv, netdev,
@@ -700,10 +736,13 @@ static int ena_set_rxfh(struct net_device *netdev, const u32 *indir,
}
switch (hfunc) {
+ case ETH_RSS_HASH_NO_CHANGE:
+ func = ena_com_get_current_hash_function(ena_dev);
+ break;
case ETH_RSS_HASH_TOP:
func = ENA_ADMIN_TOEPLITZ;
break;
- case ETH_RSS_HASH_XOR:
+ case ETH_RSS_HASH_CRC32:
func = ENA_ADMIN_CRC32;
break;
default:
@@ -814,6 +853,7 @@ static const struct ethtool_ops ena_ethtool_ops = {
.set_channels = ena_set_channels,
.get_tunable = ena_get_tunable,
.set_tunable = ena_set_tunable,
+ .get_ts_info = ethtool_op_get_ts_info,
};
void ena_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 894e8c1a8cf1..0b2fd96b93d7 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -3706,8 +3706,8 @@ static void check_for_missing_keep_alive(struct ena_adapter *adapter)
if (adapter->keep_alive_timeout == ENA_HW_HINTS_NO_TIMEOUT)
return;
- keep_alive_expired = round_jiffies(adapter->last_keep_alive_jiffies +
- adapter->keep_alive_timeout);
+ keep_alive_expired = adapter->last_keep_alive_jiffies +
+ adapter->keep_alive_timeout;
if (unlikely(time_is_before_jiffies(keep_alive_expired))) {
netif_err(adapter, drv, adapter->netdev,
"Keep alive watchdog timeout.\n");
@@ -3809,7 +3809,7 @@ static void ena_timer_service(struct timer_list *t)
}
/* Reset the timer */
- mod_timer(&adapter->timer_service, jiffies + HZ);
+ mod_timer(&adapter->timer_service, round_jiffies(jiffies + HZ));
}
static int ena_calc_max_io_queue_num(struct pci_dev *pdev,
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index 094324fd0edc..8795e0b1dc3c 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -130,6 +130,8 @@
#define ENA_IO_TXQ_IDX(q) (2 * (q))
#define ENA_IO_RXQ_IDX(q) (2 * (q) + 1)
+#define ENA_IO_TXQ_IDX_TO_COMBINED_IDX(q) ((q) / 2)
+#define ENA_IO_RXQ_IDX_TO_COMBINED_IDX(q) (((q) - 1) / 2)
#define ENA_MGMNT_IRQ_IDX 0
#define ENA_IO_IRQ_FIRST_IDX 1
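
Note on the new *_TO_COMBINED_IDX macros: they invert the existing even/odd interleaving, where Tx queue q maps to combined index 2q and Rx queue q to 2q + 1. The small harness below checks the round trip; the macro bodies mirror the driver's, the main() is illustrative only:

	#include <assert.h>

	#define IO_TXQ_IDX(q)			(2 * (q))
	#define IO_RXQ_IDX(q)			(2 * (q) + 1)
	#define IO_TXQ_IDX_TO_COMBINED_IDX(q)	((q) / 2)
	#define IO_RXQ_IDX_TO_COMBINED_IDX(q)	(((q) - 1) / 2)

	int main(void)
	{
		for (int q = 0; q < 64; q++) {
			assert(IO_TXQ_IDX_TO_COMBINED_IDX(IO_TXQ_IDX(q)) == q);
			assert(IO_RXQ_IDX_TO_COMBINED_IDX(IO_RXQ_IDX(q)) == q);
		}
		return 0;
	}
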
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
index a1f99bef4a68..7b55633d2cb9 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ethtool.c
@@ -722,6 +722,11 @@ static int aq_ethtool_set_priv_flags(struct net_device *ndev, u32 flags)
if (flags & ~AQ_PRIV_FLAGS_MASK)
return -EOPNOTSUPP;
+ if (hweight32((flags | priv_flags) & AQ_HW_LOOPBACK_MASK) > 1) {
+ netdev_info(ndev, "Can't enable more than one loopback simultaneously\n");
+ return -EINVAL;
+ }
+
cfg->priv_flags = flags;
if ((priv_flags ^ flags) & BIT(AQ_HW_LOOPBACK_DMA_NET)) {
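
Note on the aq_ethtool loopback guard: hweight32() is the kernel's 32-bit popcount, so the check rejects any configuration in which more than one AQ_HW_LOOPBACK_* bit would be set at once. A userspace equivalent using the compiler builtin, with an illustrative mask value:

	#include <stdbool.h>
	#include <stdint.h>

	#define LOOPBACK_MASK	0x3Fu	/* illustrative: six loopback flag bits */

	/* Allow at most one loopback mode across old and requested flags. */
	static bool loopback_flags_valid(uint32_t old_flags, uint32_t new_flags)
	{
		return __builtin_popcount((old_flags | new_flags) &
					  LOOPBACK_MASK) <= 1;
	}
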
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
index 6102251bb909..03ff92bc4a7f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_filters.c
@@ -163,7 +163,7 @@ aq_check_approve_fvlan(struct aq_nic_s *aq_nic,
}
if ((aq_nic->ndev->features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
- (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci),
+ (!test_bit(be16_to_cpu(fsp->h_ext.vlan_tci) & VLAN_VID_MASK,
aq_nic->active_vlans))) {
netdev_err(aq_nic->ndev,
"ethtool: unknown vlan-id specified");
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
index cc70c606b6ef..251767c31f7e 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_hw.h
@@ -337,6 +337,8 @@ struct aq_fw_ops {
void (*enable_ptp)(struct aq_hw_s *self, int enable);
+ void (*adjust_ptp)(struct aq_hw_s *self, uint64_t adj);
+
int (*set_eee_rate)(struct aq_hw_s *self, u32 speed);
int (*get_eee_rate)(struct aq_hw_s *self, u32 *rate,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
index c85e3e29012c..e95f6a6bef73 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_nic.c
@@ -533,8 +533,10 @@ unsigned int aq_nic_map_skb(struct aq_nic_s *self, struct sk_buff *skb,
dx_buff->len,
DMA_TO_DEVICE);
- if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa)))
+ if (unlikely(dma_mapping_error(aq_nic_get_dev(self), dx_buff->pa))) {
+ ret = 0;
goto exit;
+ }
first = dx_buff;
dx_buff->len_pkt = skb->len;
@@ -655,10 +657,6 @@ int aq_nic_xmit(struct aq_nic_s *self, struct sk_buff *skb)
if (likely(frags)) {
err = self->aq_hw_ops->hw_ring_tx_xmit(self->aq_hw,
ring, frags);
- if (err >= 0) {
- ++ring->stats.tx.packets;
- ring->stats.tx.bytes += skb->len;
- }
} else {
err = NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
index 6b27af0db499..78b6f3248756 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_pci_func.c
@@ -359,7 +359,8 @@ static int aq_suspend_common(struct device *dev, bool deep)
netif_device_detach(nic->ndev);
netif_tx_stop_all_queues(nic->ndev);
- aq_nic_stop(nic);
+ if (netif_running(nic->ndev))
+ aq_nic_stop(nic);
if (deep) {
aq_nic_deinit(nic, !nic->aq_hw->aq_nic_cfg->wol);
@@ -375,7 +376,7 @@ static int atl_resume_common(struct device *dev, bool deep)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct aq_nic_s *nic;
- int ret;
+ int ret = 0;
nic = pci_get_drvdata(pdev);
@@ -390,9 +391,11 @@ static int atl_resume_common(struct device *dev, bool deep)
goto err_exit;
}
- ret = aq_nic_start(nic);
- if (ret)
- goto err_exit;
+ if (netif_running(nic->ndev)) {
+ ret = aq_nic_start(nic);
+ if (ret)
+ goto err_exit;
+ }
netif_device_attach(nic->ndev);
netif_tx_start_all_queues(nic->ndev);
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
index 951d86f8b66e..bae95a618560 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.c
@@ -272,9 +272,12 @@ bool aq_ring_tx_clean(struct aq_ring_s *self)
}
}
- if (unlikely(buff->is_eop))
- dev_kfree_skb_any(buff->skb);
+ if (unlikely(buff->is_eop)) {
+ ++self->stats.tx.packets;
+ self->stats.tx.bytes += buff->skb->len;
+ dev_kfree_skb_any(buff->skb);
+ }
buff->pa = 0U;
buff->eop_index = 0xffffU;
self->sw_head = aq_ring_next_dx(self, self->sw_head);
@@ -351,7 +354,8 @@ int aq_ring_rx_clean(struct aq_ring_s *self,
err = 0;
goto err_exit;
}
- if (buff->is_error || buff->is_cso_err) {
+ if (buff->is_error ||
+ (buff->is_lro && buff->is_cso_err)) {
buff_ = buff;
do {
next_ = buff_->next,
diff --git a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
index 991e4d31b094..2c96f20f6289 100644
--- a/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
+++ b/drivers/net/ethernet/aquantia/atlantic/aq_ring.h
@@ -78,7 +78,8 @@ struct __packed aq_ring_buff_s {
u32 is_cleaned:1;
u32 is_error:1;
u32 is_vlan:1;
- u32 rsvd3:4;
+ u32 is_lro:1;
+ u32 rsvd3:3;
u16 eop_index;
u16 rsvd4;
};
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
index ec041f78d063..d20d91cdece8 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_b0.c
@@ -823,6 +823,8 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
}
}
+ buff->is_lro = !!(HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
+ rxd_wb->status);
if (HW_ATL_B0_RXD_WB_STAT2_EOP & rxd_wb->status) {
buff->len = rxd_wb->pkt_len %
AQ_CFG_RX_FRAME_MAX;
@@ -835,8 +837,7 @@ static int hw_atl_b0_hw_ring_rx_receive(struct aq_hw_s *self,
rxd_wb->pkt_len > AQ_CFG_RX_FRAME_MAX ?
AQ_CFG_RX_FRAME_MAX : rxd_wb->pkt_len;
- if (HW_ATL_B0_RXD_WB_STAT2_RSCCNT &
- rxd_wb->status) {
+ if (buff->is_lro) {
/* LRO */
buff->next = rxd_wb->next_desc_ptr;
++ring->stats.rx.lro_packets;
@@ -884,13 +885,16 @@ static int hw_atl_b0_hw_packet_filter_set(struct aq_hw_s *self,
{
struct aq_nic_cfg_s *cfg = self->aq_nic_cfg;
unsigned int i = 0U;
+ u32 vlan_promisc;
+ u32 l2_promisc;
- hw_atl_rpfl2promiscuous_mode_en_set(self,
- IS_FILTER_ENABLED(IFF_PROMISC));
+ l2_promisc = IS_FILTER_ENABLED(IFF_PROMISC) ||
+ !!(cfg->priv_flags & BIT(AQ_HW_LOOPBACK_DMA_NET));
+ vlan_promisc = l2_promisc || cfg->is_vlan_force_promisc;
- hw_atl_rpf_vlan_prom_mode_en_set(self,
- IS_FILTER_ENABLED(IFF_PROMISC) ||
- cfg->is_vlan_force_promisc);
+ hw_atl_rpfl2promiscuous_mode_en_set(self, l2_promisc);
+
+ hw_atl_rpf_vlan_prom_mode_en_set(self, vlan_promisc);
hw_atl_rpfl2multicast_flr_en_set(self,
IS_FILTER_ENABLED(IFF_ALLMULTI) &&
@@ -1161,6 +1165,8 @@ static int hw_atl_b0_adj_sys_clock(struct aq_hw_s *self, s64 delta)
{
self->ptp_clk_offset += delta;
+ self->aq_fw_ops->adjust_ptp(self, self->ptp_clk_offset);
+
return 0;
}
@@ -1211,7 +1217,7 @@ static int hw_atl_b0_gpio_pulse(struct aq_hw_s *self, u32 index,
fwreq.ptp_gpio_ctrl.index = index;
fwreq.ptp_gpio_ctrl.period = period;
/* Apply time offset */
- fwreq.ptp_gpio_ctrl.start = start - self->ptp_clk_offset;
+ fwreq.ptp_gpio_ctrl.start = start;
size = sizeof(fwreq.msg_id) + sizeof(fwreq.ptp_gpio_ctrl);
return self->aq_fw_ops->send_fw_request(self, &fwreq, size);
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
index f547baa6c954..354705f9bc49 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils.c
@@ -22,6 +22,7 @@
#define HW_ATL_MIF_ADDR 0x0208U
#define HW_ATL_MIF_VAL 0x020CU
+#define HW_ATL_MPI_RPC_ADDR 0x0334U
#define HW_ATL_RPC_CONTROL_ADR 0x0338U
#define HW_ATL_RPC_STATE_ADR 0x033CU
@@ -53,15 +54,14 @@ enum mcp_area {
};
static int hw_atl_utils_ver_match(u32 ver_expected, u32 ver_actual);
-
static int hw_atl_utils_mpi_set_state(struct aq_hw_s *self,
enum hal_atl_utils_fw_state_e state);
-
static u32 hw_atl_utils_get_mpi_mbox_tid(struct aq_hw_s *self);
static u32 hw_atl_utils_mpi_get_state(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_cmd_get(struct aq_hw_s *self);
static u32 hw_atl_utils_mif_addr_get(struct aq_hw_s *self);
static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self);
+static u32 aq_fw1x_rpc_get(struct aq_hw_s *self);
int hw_atl_utils_initfw(struct aq_hw_s *self, const struct aq_fw_ops **fw_ops)
{
@@ -476,6 +476,10 @@ static int hw_atl_utils_init_ucp(struct aq_hw_s *self,
self, self->mbox_addr,
self->mbox_addr != 0U,
1000U, 10000U);
+ err = readx_poll_timeout_atomic(aq_fw1x_rpc_get, self,
+ self->rpc_addr,
+ self->rpc_addr != 0U,
+ 1000U, 100000U);
return err;
}
@@ -531,6 +535,12 @@ int hw_atl_utils_fw_rpc_wait(struct aq_hw_s *self,
self, fw.val,
sw.tid == fw.tid,
1000U, 100000U);
+ if (err < 0)
+ goto err_exit;
+
+ err = aq_hw_err_from_flags(self);
+ if (err < 0)
+ goto err_exit;
if (fw.len == 0xFFFFU) {
err = hw_atl_utils_fw_rpc_call(self, sw.len);
@@ -1025,6 +1035,11 @@ static u32 hw_atl_utils_rpc_state_get(struct aq_hw_s *self)
return aq_hw_read_reg(self, HW_ATL_RPC_STATE_ADR);
}
+static u32 aq_fw1x_rpc_get(struct aq_hw_s *self)
+{
+ return aq_hw_read_reg(self, HW_ATL_MPI_RPC_ADDR);
+}
+
const struct aq_fw_ops aq_fw_1x_ops = {
.init = hw_atl_utils_mpi_create,
.deinit = hw_atl_fw1x_deinit,
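
Note on the hw_atl_utils change: firmware init now also polls for a nonzero RPC address with readx_poll_timeout_atomic(), which calls the accessor repeatedly until the condition holds or the timeout (in microseconds) expires. A hedged kernel-style sketch of the pattern; struct my_hw, my_read_status() and the register offset are stand-ins, the polling macro and its signature are real:

	#include <linux/io.h>
	#include <linux/iopoll.h>

	struct my_hw { void __iomem *base; };

	static u32 my_read_status(struct my_hw *hw)
	{
		return readl(hw->base + 0x334);	/* illustrative offset */
	}

	static int wait_fw_rpc_addr(struct my_hw *hw)
	{
		u32 val;

		/* Calls my_read_status(hw) every 1000 us, for up to 100 ms,
		 * and succeeds once the value becomes nonzero; returns
		 * -ETIMEDOUT otherwise.
		 */
		return readx_poll_timeout_atomic(my_read_status, hw, val,
						 val != 0U, 1000U, 100000U);
	}
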
diff --git a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
index 97ebf849695f..77a4ed64830f 100644
--- a/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
+++ b/drivers/net/ethernet/aquantia/atlantic/hw_atl/hw_atl_utils_fw2x.c
@@ -30,6 +30,9 @@
#define HW_ATL_FW3X_EXT_CONTROL_ADDR 0x378
#define HW_ATL_FW3X_EXT_STATE_ADDR 0x37c
+#define HW_ATL_FW3X_PTP_ADJ_LSW_ADDR 0x50a0
+#define HW_ATL_FW3X_PTP_ADJ_MSW_ADDR 0x50a4
+
#define HW_ATL_FW2X_CAP_PAUSE BIT(CAPS_HI_PAUSE)
#define HW_ATL_FW2X_CAP_ASYM_PAUSE BIT(CAPS_HI_ASYMMETRIC_PAUSE)
#define HW_ATL_FW2X_CAP_SLEEP_PROXY BIT(CAPS_HI_SLEEP_PROXY)
@@ -475,6 +478,14 @@ static void aq_fw3x_enable_ptp(struct aq_hw_s *self, int enable)
aq_hw_write_reg(self, HW_ATL_FW3X_EXT_CONTROL_ADDR, ptp_opts);
}
+static void aq_fw3x_adjust_ptp(struct aq_hw_s *self, uint64_t adj)
+{
+ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_LSW_ADDR,
+ (adj >> 0) & 0xffffffff);
+ aq_hw_write_reg(self, HW_ATL_FW3X_PTP_ADJ_MSW_ADDR,
+ (adj >> 32) & 0xffffffff);
+}
+
static int aq_fw2x_led_control(struct aq_hw_s *self, u32 mode)
{
if (self->fw_ver_actual < HW_ATL_FW_VER_LED)
@@ -633,4 +644,5 @@ const struct aq_fw_ops aq_fw_2x_ops = {
.enable_ptp = aq_fw3x_enable_ptp,
.led_control = aq_fw2x_led_control,
.set_phyloopback = aq_fw2x_set_phyloopback,
+ .adjust_ptp = aq_fw3x_adjust_ptp,
};
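
Note on aq_fw3x_adjust_ptp(): the 64-bit clock offset is handed to firmware through a pair of 32-bit registers, LSW first. The split itself is plain shift-and-mask arithmetic:

	#include <stdint.h>

	/* Split a 64-bit value into the two halves a pair of LSW/MSW
	 * 32-bit registers expects.
	 */
	static void split_u64(uint64_t v, uint32_t *lsw, uint32_t *msw)
	{
		*lsw = (uint32_t)(v & 0xffffffffu);
		*msw = (uint32_t)(v >> 32);
	}
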
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 597e6fd5bfea..f9a8151f092c 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -11252,7 +11252,7 @@ static void bnxt_cfg_ntp_filters(struct bnxt *bp)
}
}
if (test_and_clear_bit(BNXT_HWRM_PF_UNLOAD_SP_EVENT, &bp->sp_event))
- netdev_info(bp->dev, "Receive PF driver unload event!");
+ netdev_info(bp->dev, "Receive PF driver unload event!\n");
}
#else
@@ -11759,7 +11759,7 @@ static int bnxt_pcie_dsn_get(struct bnxt *bp, u8 dsn[])
u32 dw;
if (!pos) {
- netdev_info(bp->dev, "Unable do read adapter's DSN");
+ netdev_info(bp->dev, "Unable to read adapter's DSN\n");
return -EOPNOTSUPP;
}
@@ -11786,6 +11786,14 @@ static int bnxt_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
if (version_printed++ == 0)
pr_info("%s", version);
+ /* Clear any pending DMA transactions from crash kernel
+ * while loading driver in capture kernel.
+ */
+ if (is_kdump_kernel()) {
+ pci_clear_master(pdev);
+ pcie_flr(pdev);
+ }
+
max_irqs = bnxt_get_max_irq(pdev);
dev = alloc_etherdev_mq(sizeof(*bp), max_irqs);
if (!dev)
@@ -11983,10 +11991,10 @@ static void bnxt_shutdown(struct pci_dev *pdev)
dev_close(dev);
bnxt_ulp_shutdown(bp);
+ bnxt_clear_int_mode(bp);
+ pci_disable_device(pdev);
if (system_state == SYSTEM_POWER_OFF) {
- bnxt_clear_int_mode(bp);
- pci_disable_device(pdev);
pci_wake_from_d3(pdev, bp->wol);
pci_set_power_state(pdev, PCI_D3hot);
}
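
Note on the bnxt kdump change: in a capture kernel, DMA left in flight by the crashed kernel could scribble over capture-kernel memory, so the probe disables bus mastering and issues a Function Level Reset before touching the device. is_kdump_kernel(), pci_clear_master() and pcie_flr() are the real kernel APIs; the probe body here is an elided sketch:

	#include <linux/crash_dump.h>	/* is_kdump_kernel() */
	#include <linux/pci.h>

	static int my_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
	{
		/* The device may still be DMA-ing into the old kernel's
		 * buffers: stop bus mastering and reset the function.
		 */
		if (is_kdump_kernel()) {
			pci_clear_master(pdev);
			pcie_flr(pdev);
		}

		/* ... normal probe continues ... */
		return 0;
	}
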
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
index eec0168330b7..d3c93ccee86a 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_devlink.c
@@ -641,14 +641,14 @@ static int bnxt_dl_params_register(struct bnxt *bp)
rc = devlink_params_register(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
if (rc) {
- netdev_warn(bp->dev, "devlink_params_register failed. rc=%d",
+ netdev_warn(bp->dev, "devlink_params_register failed. rc=%d\n",
rc);
return rc;
}
rc = devlink_port_params_register(&bp->dl_port, bnxt_dl_port_params,
ARRAY_SIZE(bnxt_dl_port_params));
if (rc) {
- netdev_err(bp->dev, "devlink_port_params_register failed");
+ netdev_err(bp->dev, "devlink_port_params_register failed\n");
devlink_params_unregister(bp->dl, bnxt_dl_params,
ARRAY_SIZE(bnxt_dl_params));
return rc;
@@ -679,7 +679,7 @@ int bnxt_dl_register(struct bnxt *bp)
else
dl = devlink_alloc(&bnxt_vf_dl_ops, sizeof(struct bnxt_dl));
if (!dl) {
- netdev_warn(bp->dev, "devlink_alloc failed");
+ netdev_warn(bp->dev, "devlink_alloc failed\n");
return -ENOMEM;
}
@@ -692,7 +692,7 @@ int bnxt_dl_register(struct bnxt *bp)
rc = devlink_register(dl, &bp->pdev->dev);
if (rc) {
- netdev_warn(bp->dev, "devlink_register failed. rc=%d", rc);
+ netdev_warn(bp->dev, "devlink_register failed. rc=%d\n", rc);
goto err_dl_free;
}
@@ -704,7 +704,7 @@ int bnxt_dl_register(struct bnxt *bp)
sizeof(bp->dsn));
rc = devlink_port_register(dl, &bp->dl_port, bp->pf.port_id);
if (rc) {
- netdev_err(bp->dev, "devlink_port_register failed");
+ netdev_err(bp->dev, "devlink_port_register failed\n");
goto err_dl_unreg;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 6171fa8b3677..e8fc1671c581 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -2028,7 +2028,7 @@ int bnxt_flash_package_from_file(struct net_device *dev, const char *filename,
}
if (fw->size > item_len) {
- netdev_err(dev, "PKG insufficient update area in nvram: %lu",
+ netdev_err(dev, "PKG insufficient update area in nvram: %lu\n",
(unsigned long)fw->size);
rc = -EFBIG;
} else {
@@ -3338,7 +3338,7 @@ err:
kfree(coredump.data);
*dump_len += sizeof(struct bnxt_coredump_record);
if (rc == -ENOBUFS)
- netdev_err(bp->dev, "Firmware returned large coredump buffer");
+ netdev_err(bp->dev, "Firmware returned large coredump buffer\n");
return rc;
}
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
index 0cc6ec51f45f..9bec256b0934 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_tc.c
@@ -50,7 +50,7 @@ static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
/* check if dev belongs to the same switch */
if (!netdev_port_same_parent_id(pf_bp->dev, dev)) {
- netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
+ netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch\n",
dev->ifindex);
return BNXT_FID_INVALID;
}
@@ -70,7 +70,7 @@ static int bnxt_tc_parse_redir(struct bnxt *bp,
struct net_device *dev = act->dev;
if (!dev) {
- netdev_info(bp->dev, "no dev in mirred action");
+ netdev_info(bp->dev, "no dev in mirred action\n");
return -EINVAL;
}
@@ -106,7 +106,7 @@ static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
const struct ip_tunnel_key *tun_key = &tun_info->key;
if (ip_tunnel_info_af(tun_info) != AF_INET) {
- netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
+ netdev_info(bp->dev, "only IPv4 tunnel-encap is supported\n");
return -EOPNOTSUPP;
}
@@ -295,7 +295,7 @@ static int bnxt_tc_parse_actions(struct bnxt *bp,
int i, rc;
if (!flow_action_has_entries(flow_action)) {
- netdev_info(bp->dev, "no actions");
+ netdev_info(bp->dev, "no actions\n");
return -EINVAL;
}
@@ -370,7 +370,7 @@ static int bnxt_tc_parse_flow(struct bnxt *bp,
/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
(dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
- netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
+ netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x\n",
dissector->used_keys);
return -EOPNOTSUPP;
}
@@ -508,7 +508,7 @@ static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@@ -841,7 +841,7 @@ static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*decap_filter_handle = resp->decap_filter_id;
} else {
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -859,7 +859,7 @@ static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@@ -906,7 +906,7 @@ static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
resp = bnxt_get_hwrm_resp_addr(bp, &req);
*encap_record_handle = resp->encap_record_id;
} else {
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -924,7 +924,7 @@ static int hwrm_cfa_encap_record_free(struct bnxt *bp,
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s: Error rc=%d\n", __func__, rc);
return rc;
}
@@ -943,7 +943,7 @@ static int bnxt_tc_put_l2_node(struct bnxt *bp,
tc_info->l2_ht_params);
if (rc)
netdev_err(bp->dev,
- "Error: %s: rhashtable_remove_fast: %d",
+ "Error: %s: rhashtable_remove_fast: %d\n",
__func__, rc);
kfree_rcu(l2_node, rcu);
}
@@ -972,7 +972,7 @@ bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
if (rc) {
kfree_rcu(l2_node, rcu);
netdev_err(bp->dev,
- "Error: %s: rhashtable_insert_fast: %d",
+ "Error: %s: rhashtable_insert_fast: %d\n",
__func__, rc);
return NULL;
}
@@ -1031,7 +1031,7 @@ static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
(flow->l4_key.ip_proto != IPPROTO_TCP &&
flow->l4_key.ip_proto != IPPROTO_UDP)) {
- netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
+ netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports\n",
flow->l4_key.ip_proto);
return false;
}
@@ -1088,7 +1088,7 @@ static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
*ht_params);
if (rc) {
- netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+ netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
rc = -1;
}
kfree_rcu(tunnel_node, rcu);
@@ -1129,7 +1129,7 @@ bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
tunnel_node->refcount++;
return tunnel_node;
err:
- netdev_info(bp->dev, "error rc=%d", rc);
+ netdev_info(bp->dev, "error rc=%d\n", rc);
return NULL;
}
@@ -1187,7 +1187,7 @@ static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
&decap_l2_node->node,
tc_info->decap_l2_ht_params);
if (rc)
- netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
+ netdev_err(bp->dev, "rhashtable_remove_fast rc=%d\n", rc);
kfree_rcu(decap_l2_node, rcu);
}
}
@@ -1227,7 +1227,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
if (IS_ERR(rt)) {
- netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
+ netdev_info(bp->dev, "no route to %pI4b\n", &flow.daddr);
return -EOPNOTSUPP;
}
@@ -1241,7 +1241,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
if (vlan->real_dev != real_dst_dev) {
netdev_info(bp->dev,
- "dst_dev(%s) doesn't use PF-if(%s)",
+ "dst_dev(%s) doesn't use PF-if(%s)\n",
netdev_name(dst_dev),
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
@@ -1253,7 +1253,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
#endif
} else if (dst_dev != real_dst_dev) {
netdev_info(bp->dev,
- "dst_dev(%s) for %pI4b is not PF-if(%s)",
+ "dst_dev(%s) for %pI4b is not PF-if(%s)\n",
netdev_name(dst_dev), &flow.daddr,
netdev_name(real_dst_dev));
rc = -EOPNOTSUPP;
@@ -1262,7 +1262,7 @@ static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
if (!nbr) {
- netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
+ netdev_info(bp->dev, "can't lookup neighbor for %pI4b\n",
&flow.daddr);
rc = -EOPNOTSUPP;
goto put_rt;
@@ -1472,7 +1472,7 @@ static int __bnxt_tc_del_flow(struct bnxt *bp,
rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
tc_info->flow_ht_params);
if (rc)
- netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
+ netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d\n",
__func__, rc);
kfree_rcu(flow_node, rcu);
@@ -1587,7 +1587,7 @@ unlock:
free_node:
kfree_rcu(new_node, rcu);
done:
- netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
+ netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d\n",
__func__, tc_flow_cmd->cookie, rc);
return rc;
}
@@ -1700,7 +1700,7 @@ bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
le64_to_cpu(resp_bytes[i]);
}
} else {
- netdev_info(bp->dev, "error rc=%d", rc);
+ netdev_info(bp->dev, "error rc=%d\n", rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -1970,7 +1970,7 @@ static int bnxt_tc_indr_block_event(struct notifier_block *nb,
bp);
if (rc)
netdev_info(bp->dev,
- "Failed to register indirect blk: dev: %s",
+ "Failed to register indirect blk: dev: %s\n",
netdev->name);
break;
case NETDEV_UNREGISTER:
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
index b010b34cdaf8..6f2faf81c1ae 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_vfr.c
@@ -43,7 +43,7 @@ static int hwrm_cfa_vfr_alloc(struct bnxt *bp, u16 vf_idx,
netdev_dbg(bp->dev, "tx_cfa_action=0x%x, rx_cfa_code=0x%x",
*tx_cfa_action, *rx_cfa_code);
} else {
- netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
}
mutex_unlock(&bp->hwrm_cmd_lock);
@@ -60,7 +60,7 @@ static int hwrm_cfa_vfr_free(struct bnxt *bp, u16 vf_idx)
rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
if (rc)
- netdev_info(bp->dev, "%s error rc=%d", __func__, rc);
+ netdev_info(bp->dev, "%s error rc=%d\n", __func__, rc);
return rc;
}
@@ -465,7 +465,7 @@ static int bnxt_vf_reps_create(struct bnxt *bp)
return 0;
err:
- netdev_info(bp->dev, "%s error=%d", __func__, rc);
+ netdev_info(bp->dev, "%s error=%d\n", __func__, rc);
kfree(cfa_code_map);
__bnxt_vf_reps_destroy(bp);
return rc;
@@ -488,7 +488,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
mutex_lock(&bp->sriov_lock);
if (bp->eswitch_mode == mode) {
- netdev_info(bp->dev, "already in %s eswitch mode",
+ netdev_info(bp->dev, "already in %s eswitch mode\n",
mode == DEVLINK_ESWITCH_MODE_LEGACY ?
"legacy" : "switchdev");
rc = -EINVAL;
@@ -508,7 +508,7 @@ int bnxt_dl_eswitch_mode_set(struct devlink *devlink, u16 mode,
}
if (pci_num_vf(bp->pdev) == 0) {
- netdev_info(bp->dev, "Enable VFs before setting switchdev mode");
+ netdev_info(bp->dev, "Enable VFs before setting switchdev mode\n");
rc = -EPERM;
goto done;
}
diff --git a/drivers/net/ethernet/broadcom/cnic_defs.h b/drivers/net/ethernet/broadcom/cnic_defs.h
index b38499774071..99e2c6d4d8c3 100644
--- a/drivers/net/ethernet/broadcom/cnic_defs.h
+++ b/drivers/net/ethernet/broadcom/cnic_defs.h
@@ -543,13 +543,13 @@ struct l4_kwq_update_pg {
#define L4_KWQ_UPDATE_PG_RESERVERD2_SHIFT 2
#endif
#if defined(__BIG_ENDIAN)
- u16 reserverd3;
+ u16 reserved3;
u8 da0;
u8 da1;
#elif defined(__LITTLE_ENDIAN)
u8 da1;
u8 da0;
- u16 reserverd3;
+ u16 reserved3;
#endif
#if defined(__BIG_ENDIAN)
u8 da2;
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 6392a2530183..10244941a7a6 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -294,6 +294,7 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
*/
if (priv->ext_phy) {
reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+ reg &= ~ID_MODE_DIS;
reg |= id_mode_dis;
if (GENET_IS_V1(priv) || GENET_IS_V2(priv) || GENET_IS_V3(priv))
reg |= RGMII_MODE_EN_V123;
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index dbf7070fcdba..a3f0f27fc79a 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -652,6 +652,7 @@
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
+#define MACB_CAPS_MACB_IS_EMAC 0x08000000
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 4508f0d150da..2c28da1737fe 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -572,8 +572,21 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
/* Clear all the bits we might set later */
- ctrl &= ~(GEM_BIT(GBE) | MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE) |
- GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
+ ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD) | MACB_BIT(PAE));
+
+ if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
+ if (state->interface == PHY_INTERFACE_MODE_RMII)
+ ctrl |= MACB_BIT(RM9200_RMII);
+ } else {
+ ctrl &= ~(GEM_BIT(GBE) | GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
+
+ /* We do not support MLO_PAUSE_RX yet */
+ if (state->pause & MLO_PAUSE_TX)
+ ctrl |= MACB_BIT(PAE);
+
+ if (state->interface == PHY_INTERFACE_MODE_SGMII)
+ ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
+ }
if (state->speed == SPEED_1000)
ctrl |= GEM_BIT(GBE);
@@ -583,13 +596,6 @@ static void macb_mac_config(struct phylink_config *config, unsigned int mode,
if (state->duplex)
ctrl |= MACB_BIT(FD);
- /* We do not support MLO_PAUSE_RX yet */
- if (state->pause & MLO_PAUSE_TX)
- ctrl |= MACB_BIT(PAE);
-
- if (state->interface == PHY_INTERFACE_MODE_SGMII)
- ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
-
/* Apply the new configuration, if any */
if (old_ctrl ^ ctrl)
macb_or_gem_writel(bp, NCFGR, ctrl);
@@ -608,9 +614,10 @@ static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
unsigned int q;
u32 ctrl;
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- queue_writel(queue, IDR,
- bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IDR,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
/* Disable Rx and Tx */
ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
@@ -627,17 +634,19 @@ static void macb_mac_link_up(struct phylink_config *config, unsigned int mode,
struct macb_queue *queue;
unsigned int q;
- macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
+ if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
+ macb_set_tx_clk(bp->tx_clk, bp->speed, ndev);
- /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
- * cleared the pipeline and control registers.
- */
- bp->macbgem_ops.mog_init_rings(bp);
- macb_init_buffers(bp);
+ /* Initialize rings & buffers as clearing MACB_BIT(TE) in link down
+ * cleared the pipeline and control registers.
+ */
+ bp->macbgem_ops.mog_init_rings(bp);
+ macb_init_buffers(bp);
- for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
- queue_writel(queue, IER,
- bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+ for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+ queue_writel(queue, IER,
+ bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
+ }
/* Enable Rx and Tx */
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(RE) | MACB_BIT(TE));
@@ -3790,6 +3799,10 @@ static int at91ether_open(struct net_device *dev)
u32 ctl;
int ret;
+ ret = pm_runtime_get_sync(&lp->pdev->dev);
+ if (ret < 0)
+ return ret;
+
/* Clear internal statistics */
ctl = macb_readl(lp, NCR);
macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
@@ -3854,7 +3867,7 @@ static int at91ether_close(struct net_device *dev)
q->rx_buffers, q->rx_buffers_dma);
q->rx_buffers = NULL;
- return 0;
+ return pm_runtime_put(&lp->pdev->dev);
}
/* Transmit packet */
@@ -4037,7 +4050,6 @@ static int at91ether_init(struct platform_device *pdev)
struct net_device *dev = platform_get_drvdata(pdev);
struct macb *bp = netdev_priv(dev);
int err;
- u32 reg;
bp->queues[0].bp = bp;
@@ -4051,11 +4063,7 @@ static int at91ether_init(struct platform_device *pdev)
macb_writel(bp, NCR, 0);
- reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
- if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
- reg |= MACB_BIT(RM9200_RMII);
-
- macb_writel(bp, NCFGR, reg);
+ macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
return 0;
}
@@ -4214,7 +4222,7 @@ static const struct macb_config sama5d4_config = {
};
static const struct macb_config emac_config = {
- .caps = MACB_CAPS_NEEDS_RSTONUBR,
+ .caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
};
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 17a4110c2e49..8ff28ed04b7f 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -410,10 +410,19 @@ void bgx_lmac_rx_tx_enable(int node, int bgx_idx, int lmacid, bool enable)
lmac = &bgx->lmac[lmacid];
cfg = bgx_reg_read(bgx, lmacid, BGX_CMRX_CFG);
- if (enable)
+ if (enable) {
cfg |= CMR_PKT_RX_EN | CMR_PKT_TX_EN;
- else
+
+ /* enable TX FIFO Underflow interrupt */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1S,
+ GMI_TXX_INT_UNDFLW);
+ } else {
cfg &= ~(CMR_PKT_RX_EN | CMR_PKT_TX_EN);
+
+ /* Disable TX FIFO Underflow interrupt */
+ bgx_reg_modify(bgx, lmacid, BGX_GMP_GMI_TXX_INT_ENA_W1C,
+ GMI_TXX_INT_UNDFLW);
+ }
bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cfg);
if (bgx->is_rgx)
@@ -1535,6 +1544,48 @@ static int bgx_init_phy(struct bgx *bgx)
return bgx_init_of_phy(bgx);
}
+static irqreturn_t bgx_intr_handler(int irq, void *data)
+{
+ struct bgx *bgx = (struct bgx *)data;
+ u64 status, val;
+ int lmac;
+
+ for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
+ status = bgx_reg_read(bgx, lmac, BGX_GMP_GMI_TXX_INT);
+ if (status & GMI_TXX_INT_UNDFLW) {
+ pci_err(bgx->pdev, "BGX%d lmac%d UNDFLW\n",
+ bgx->bgx_id, lmac);
+ val = bgx_reg_read(bgx, lmac, BGX_CMRX_CFG);
+ val &= ~CMR_EN;
+ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
+ val |= CMR_EN;
+ bgx_reg_write(bgx, lmac, BGX_CMRX_CFG, val);
+ }
+ /* clear interrupts */
+ bgx_reg_write(bgx, lmac, BGX_GMP_GMI_TXX_INT, status);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void bgx_register_intr(struct pci_dev *pdev)
+{
+ struct bgx *bgx = pci_get_drvdata(pdev);
+ int ret;
+
+ ret = pci_alloc_irq_vectors(pdev, BGX_LMAC_VEC_OFFSET,
+ BGX_LMAC_VEC_OFFSET, PCI_IRQ_ALL_TYPES);
+ if (ret < 0) {
+ pci_err(pdev, "Req for #%d msix vectors failed\n",
+ BGX_LMAC_VEC_OFFSET);
+ return;
+ }
+ ret = pci_request_irq(pdev, GMPX_GMI_TX_INT, bgx_intr_handler, NULL,
+ bgx, "BGX%d", bgx->bgx_id);
+ if (ret)
+ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+}
+
static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
int err;
@@ -1550,7 +1601,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
pci_set_drvdata(pdev, bgx);
- err = pci_enable_device(pdev);
+ err = pcim_enable_device(pdev);
if (err) {
dev_err(dev, "Failed to enable PCI device\n");
pci_set_drvdata(pdev, NULL);
@@ -1604,6 +1655,8 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
bgx_init_hw(bgx);
+ bgx_register_intr(pdev);
+
/* Enable all LMACs */
for (lmac = 0; lmac < bgx->lmac_count; lmac++) {
err = bgx_lmac_enable(bgx, lmac);
@@ -1620,6 +1673,7 @@ static int bgx_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
err_enable:
bgx_vnic[bgx->bgx_id] = NULL;
+ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
err_release_regions:
pci_release_regions(pdev);
err_disable_device:
@@ -1637,6 +1691,8 @@ static void bgx_remove(struct pci_dev *pdev)
for (lmac = 0; lmac < bgx->lmac_count; lmac++)
bgx_lmac_disable(bgx, lmac);
+ pci_free_irq(pdev, GMPX_GMI_TX_INT, bgx);
+
bgx_vnic[bgx->bgx_id] = NULL;
pci_release_regions(pdev);
pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
index 25888706bdcd..cdea49392185 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.h
@@ -180,6 +180,15 @@
#define BGX_GMP_GMI_TXX_BURST 0x38228
#define BGX_GMP_GMI_TXX_MIN_PKT 0x38240
#define BGX_GMP_GMI_TXX_SGMII_CTL 0x38300
+#define BGX_GMP_GMI_TXX_INT 0x38500
+#define BGX_GMP_GMI_TXX_INT_W1S 0x38508
+#define BGX_GMP_GMI_TXX_INT_ENA_W1C 0x38510
+#define BGX_GMP_GMI_TXX_INT_ENA_W1S 0x38518
+#define GMI_TXX_INT_PTP_LOST BIT_ULL(4)
+#define GMI_TXX_INT_LATE_COL BIT_ULL(3)
+#define GMI_TXX_INT_XSDEF BIT_ULL(2)
+#define GMI_TXX_INT_XSCOL BIT_ULL(1)
+#define GMI_TXX_INT_UNDFLW BIT_ULL(0)
#define BGX_MSIX_VEC_0_29_ADDR 0x400000 /* +(0..29) << 4 */
#define BGX_MSIX_VEC_0_29_CTL 0x400008
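
Note on the new BGX register block: it follows the common write-1-to-set / write-1-to-clear convention — INT is the latched status (write 1 to acknowledge), ENA_W1S sets enable bits, ENA_W1C clears them — so a single source can be toggled without a read-modify-write race. A sketch of the idiom; reg_write() is a hypothetical MMIO accessor (a real driver would use writeq() on an ioremap()ed base) and the offsets only mirror the BGX layout:

	#include <stdint.h>

	#define INT_ENA_W1C	0x510	/* write 1 to disable an IRQ source */
	#define INT_ENA_W1S	0x518	/* write 1 to enable an IRQ source */
	#define INT_UNDFLW	(1ULL << 0)

	/* Hypothetical MMIO write used for illustration only. */
	static void reg_write(volatile uint64_t *base, uint64_t off,
			      uint64_t val)
	{
		base[off / 8] = val;
	}

	/* W1S/W1C registers change only the bits written as 1, so no
	 * read-modify-write is needed and other bits cannot be clobbered.
	 */
	static void set_underflow_irq(volatile uint64_t *base, int enable)
	{
		reg_write(base, enable ? INT_ENA_W1S : INT_ENA_W1C,
			  INT_UNDFLW);
	}
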
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index bbd7b3175f09..ddf60dc9ad16 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -2013,10 +2013,10 @@ static int enic_stop(struct net_device *netdev)
napi_disable(&enic->napi[i]);
netif_carrier_off(netdev);
- netif_tx_disable(netdev);
if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
for (i = 0; i < enic->wq_count; i++)
napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
+ netif_tx_disable(netdev);
if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
enic_dev_del_station_addr(enic);
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index 1ea3372775e6..e94ae9b94dbf 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -1405,6 +1405,8 @@ static struct dm9000_plat_data *dm9000_parse_dt(struct device *dev)
mac_addr = of_get_mac_address(np);
if (!IS_ERR(mac_addr))
ether_addr_copy(pdata->dev_addr, mac_addr);
+ else if (PTR_ERR(mac_addr) == -EPROBE_DEFER)
+ return ERR_CAST(mac_addr);
return pdata;
}
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
index ec5f6eeb639b..492bc9446463 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_main.c
@@ -6113,6 +6113,9 @@ static int hclge_get_all_rules(struct hnae3_handle *handle,
static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
struct hclge_fd_rule_tuples *tuples)
{
+#define flow_ip6_src fkeys->addrs.v6addrs.src.in6_u.u6_addr32
+#define flow_ip6_dst fkeys->addrs.v6addrs.dst.in6_u.u6_addr32
+
tuples->ether_proto = be16_to_cpu(fkeys->basic.n_proto);
tuples->ip_proto = fkeys->basic.ip_proto;
tuples->dst_port = be16_to_cpu(fkeys->ports.dst);
@@ -6121,12 +6124,12 @@ static void hclge_fd_get_flow_tuples(const struct flow_keys *fkeys,
tuples->src_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.src);
tuples->dst_ip[3] = be32_to_cpu(fkeys->addrs.v4addrs.dst);
} else {
- memcpy(tuples->src_ip,
- fkeys->addrs.v6addrs.src.in6_u.u6_addr32,
- sizeof(tuples->src_ip));
- memcpy(tuples->dst_ip,
- fkeys->addrs.v6addrs.dst.in6_u.u6_addr32,
- sizeof(tuples->dst_ip));
+ int i;
+
+ for (i = 0; i < IPV6_SIZE; i++) {
+ tuples->src_ip[i] = be32_to_cpu(flow_ip6_src[i]);
+ tuples->dst_ip[i] = be32_to_cpu(flow_ip6_dst[i]);
+ }
}
}
@@ -9834,6 +9837,13 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
+ ret = init_mgr_tbl(hdev);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "failed to reinit manager table, ret = %d\n", ret);
+ return ret;
+ }
+
ret = hclge_init_fd_config(hdev);
if (ret) {
dev_err(&pdev->dev, "fd table init fail, ret=%d\n", ret);
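
Note on the hclge tuple fix: instead of memcpy()-ing the IPv6 address words verbatim, each 32-bit word is now byte-swapped with be32_to_cpu(), so on little-endian machines the tuple table holds host-order values just as the IPv4 path does. A userspace equivalent of the per-word loop using ntohl():

	#include <arpa/inet.h>	/* ntohl() */
	#include <stdint.h>

	#define IPV6_WORDS	4

	/* Convert the four big-endian 32-bit words of an IPv6 address
	 * to host order, one word at a time.
	 */
	static void ipv6_words_to_host(const uint32_t be[IPV6_WORDS],
				       uint32_t host[IPV6_WORDS])
	{
		for (int i = 0; i < IPV6_WORDS; i++)
			host[i] = ntohl(be[i]);
	}
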
diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
index 180224eab1ca..28db13253a5e 100644
--- a/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
+++ b/drivers/net/ethernet/hisilicon/hns3/hns3pf/hclge_tm.c
@@ -566,7 +566,7 @@ static void hclge_tm_vport_tc_info_update(struct hclge_vport *vport)
*/
kinfo->num_tc = vport->vport_id ? 1 :
min_t(u16, vport->alloc_tqps, hdev->tm_info.num_tc);
- vport->qs_offset = (vport->vport_id ? hdev->tm_info.num_tc : 0) +
+ vport->qs_offset = (vport->vport_id ? HNAE3_MAX_TC : 0) +
(vport->vport_id ? (vport->vport_id - 1) : 0);
max_rss_size = min_t(u16, hdev->rss_size_max,
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
index 6f2cf569a283..79b3d53f2fbf 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.c
@@ -297,6 +297,7 @@ static int set_hw_ioctxt(struct hinic_hwdev *hwdev, unsigned int rq_depth,
}
hw_ioctxt.func_idx = HINIC_HWIF_FUNC_IDX(hwif);
+ hw_ioctxt.ppf_idx = HINIC_HWIF_PPF_IDX(hwif);
hw_ioctxt.set_cmdq_depth = HW_IOCTXT_SET_CMDQ_DEPTH_DEFAULT;
hw_ioctxt.cmdq_depth = 0;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
index b069045de416..66fd2340d447 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_dev.h
@@ -151,8 +151,8 @@ struct hinic_cmd_hw_ioctxt {
u8 lro_en;
u8 rsvd3;
+ u8 ppf_idx;
u8 rsvd4;
- u8 rsvd5;
u16 rq_depth;
u16 rx_buf_sz_idx;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
index 517794509eb2..c7bb9ceca72c 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_if.h
@@ -137,6 +137,7 @@
#define HINIC_HWIF_FUNC_IDX(hwif) ((hwif)->attr.func_idx)
#define HINIC_HWIF_PCI_INTF(hwif) ((hwif)->attr.pci_intf_idx)
#define HINIC_HWIF_PF_IDX(hwif) ((hwif)->attr.pf_idx)
+#define HINIC_HWIF_PPF_IDX(hwif) ((hwif)->attr.ppf_idx)
#define HINIC_FUNC_TYPE(hwif) ((hwif)->attr.func_type)
#define HINIC_IS_PF(hwif) (HINIC_FUNC_TYPE(hwif) == HINIC_PF)
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
index f4a339b10b10..79091e131418 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
+++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_qp.h
@@ -94,6 +94,7 @@ struct hinic_rq {
struct hinic_wq *wq;
+ struct cpumask affinity_mask;
u32 irq;
u16 msix_entry;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c
index 02a14f5e7fe3..13560975c103 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_main.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c
@@ -356,7 +356,8 @@ static void hinic_enable_rss(struct hinic_dev *nic_dev)
if (!num_cpus)
num_cpus = num_online_cpus();
- nic_dev->num_qps = min_t(u16, nic_dev->max_qps, num_cpus);
+ nic_dev->num_qps = hinic_hwdev_num_qps(hwdev);
+ nic_dev->num_qps = min_t(u16, nic_dev->num_qps, num_cpus);
nic_dev->rss_limit = nic_dev->num_qps;
nic_dev->num_rss = nic_dev->num_qps;
diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
index 56ea6d692f1c..2695ad69fca6 100644
--- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c
+++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c
@@ -475,7 +475,6 @@ static int rx_request_irq(struct hinic_rxq *rxq)
struct hinic_hwdev *hwdev = nic_dev->hwdev;
struct hinic_rq *rq = rxq->rq;
struct hinic_qp *qp;
- struct cpumask mask;
int err;
rx_add_napi(rxq);
@@ -492,8 +491,8 @@ static int rx_request_irq(struct hinic_rxq *rxq)
}
qp = container_of(rq, struct hinic_qp, rq);
- cpumask_set_cpu(qp->q_id % num_online_cpus(), &mask);
- return irq_set_affinity_hint(rq->irq, &mask);
+ cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask);
+ return irq_set_affinity_hint(rq->irq, &rq->affinity_mask);
}
static void rx_free_irq(struct hinic_rxq *rxq)
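
Note on the hinic affinity fix: irq_set_affinity_hint() stores the mask pointer it is given rather than copying it, so a cpumask on the caller's stack dangles as soon as the function returns; the mask now lives in struct hinic_rq for the IRQ's lifetime. Kernel-style sketch of the rule, with a hypothetical struct my_queue:

	#include <linux/cpumask.h>
	#include <linux/interrupt.h>

	struct my_queue {
		unsigned int irq;
		struct cpumask affinity_mask;	/* must outlive the hint */
	};

	static int set_queue_affinity(struct my_queue *q, int cpu)
	{
		/* irq_set_affinity_hint() keeps the pointer, not a copy,
		 * so the mask must not live on the stack.
		 */
		cpumask_clear(&q->affinity_mask);
		cpumask_set_cpu(cpu, &q->affinity_mask);
		return irq_set_affinity_hint(q->irq, &q->affinity_mask);
	}
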
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index 69523ac85639..56b9e445732b 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -2362,7 +2362,7 @@ static int i40e_vc_enable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
@@ -2424,7 +2424,7 @@ static int i40e_vc_disable_queues_msg(struct i40e_vf *vf, u8 *msg)
goto error_param;
}
- if (i40e_vc_validate_vqs_bitmaps(vqs)) {
+ if (!i40e_vc_validate_vqs_bitmaps(vqs)) {
aq_ret = I40E_ERR_PARAM;
goto error_param;
}
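
Note on the i40e fix: i40e_vc_validate_vqs_bitmaps() returns true for a valid bitmap, so the error path must trigger on the negation — the unnegated test silently rejected every valid request. A small sketch of keeping the predicate named for the true case so the call site reads naturally (names and bounds here are illustrative; num_queues < 32 is assumed to keep the shift defined):

	#include <errno.h>
	#include <stdbool.h>
	#include <stdint.h>

	/* Valid when nonzero and no bit beyond num_queues is set. */
	static bool vqs_bitmap_valid(uint32_t qmask, unsigned int num_queues)
	{
		return qmask && !(qmask & ~((1u << num_queues) - 1));
	}

	static int enable_queues(uint32_t qmask, unsigned int num_queues)
	{
		if (!vqs_bitmap_valid(qmask, num_queues))	/* note the '!' */
			return -EINVAL;
		/* ... enable queues ... */
		return 0;
	}
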
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 4459bc564b11..6873998cf145 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -1660,6 +1660,7 @@ struct ice_aqc_get_pkg_info_resp {
__le32 count;
struct ice_aqc_get_pkg_info pkg_info[1];
};
+
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
diff --git a/drivers/net/ethernet/intel/ice/ice_base.c b/drivers/net/ethernet/intel/ice/ice_base.c
index d8e975cceb21..81885efadc7a 100644
--- a/drivers/net/ethernet/intel/ice/ice_base.c
+++ b/drivers/net/ethernet/intel/ice/ice_base.c
@@ -324,7 +324,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
if (err)
return err;
- dev_info(&vsi->back->pdev->dev, "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+ dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
ring->q_index);
} else {
ring->zca.free = NULL;
@@ -405,8 +405,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
/* Absolute queue number out of 2K needs to be passed */
err = ice_write_rxq_ctx(hw, &rlan_ctx, pf_q);
if (err) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set LAN Rx queue context for absolute Rx queue %d error: %d\n",
pf_q, err);
return -EIO;
}
@@ -428,8 +427,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
if (err)
- dev_info(&vsi->back->pdev->dev,
- "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
+ dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
ring->xsk_umem ? "UMEM enabled " : "",
ring->q_index, pf_q);
@@ -490,8 +488,7 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
/* wait for the change to finish */
ret = ice_pf_rxq_wait(pf, pf_q, ena);
if (ret)
- dev_err(ice_pf_to_dev(pf),
- "VSI idx %d Rx ring %d %sable timeout\n",
+ dev_err(ice_pf_to_dev(pf), "VSI idx %d Rx ring %d %sable timeout\n",
vsi->idx, pf_q, (ena ? "en" : "dis"));
return ret;
@@ -506,20 +503,15 @@ int ice_vsi_ctrl_rx_ring(struct ice_vsi *vsi, bool ena, u16 rxq_idx)
*/
int ice_vsi_alloc_q_vectors(struct ice_vsi *vsi)
{
- struct ice_pf *pf = vsi->back;
- int v_idx = 0, num_q_vectors;
- struct device *dev;
- int err;
+ struct device *dev = ice_pf_to_dev(vsi->back);
+ int v_idx, err;
- dev = ice_pf_to_dev(pf);
if (vsi->q_vectors[0]) {
dev_dbg(dev, "VSI %d has existing q_vectors\n", vsi->vsi_num);
return -EEXIST;
}
- num_q_vectors = vsi->num_q_vectors;
-
- for (v_idx = 0; v_idx < num_q_vectors; v_idx++) {
+ for (v_idx = 0; v_idx < vsi->num_q_vectors; v_idx++) {
err = ice_vsi_alloc_q_vector(vsi, v_idx);
if (err)
goto err_out;
@@ -648,8 +640,7 @@ ice_vsi_cfg_txq(struct ice_vsi *vsi, struct ice_ring *ring,
status = ice_ena_vsi_txq(vsi->port_info, vsi->idx, tc, ring->q_handle,
1, qg_buf, buf_len, NULL);
if (status) {
- dev_err(ice_pf_to_dev(pf),
- "Failed to set LAN Tx queue context, error: %d\n",
+ dev_err(ice_pf_to_dev(pf), "Failed to set LAN Tx queue context, error: %d\n",
status);
return -ENODEV;
}
@@ -815,14 +806,12 @@ ice_vsi_stop_tx_ring(struct ice_vsi *vsi, enum ice_disq_rst_src rst_src,
* queues at the hardware level anyway.
*/
if (status == ICE_ERR_RESET_ONGOING) {
- dev_dbg(&vsi->back->pdev->dev,
- "Reset in progress. LAN Tx queues already disabled\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "Reset in progress. LAN Tx queues already disabled\n");
} else if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(&vsi->back->pdev->dev,
- "LAN Tx queues do not exist, nothing to disable\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "LAN Tx queues do not exist, nothing to disable\n");
} else if (status) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to disable LAN Tx queues, error: %d\n", status);
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to disable LAN Tx queues, error: %d\n",
+ status);
return -ENODEV;
}
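The ice_base.c churn is two mechanical cleanups applied together: every &vsi->back->pdev->dev becomes ice_pf_to_dev(vsi->back), and message strings that had been wrapped to fit 80 columns are rejoined onto one line, following the kernel style rule that user-visible strings stay unbroken so they remain greppable. The accessor itself is a one-liner; the helper in ice.h reduces to essentially the following (reconstructed here from the call sites, so treat the exact form as an assumption):

/* One accessor so every caller agrees on how a struct device is
 * derived from the PF; if the backing device ever changes, the
 * edit happens in exactly one place.
 */
static inline struct device *ice_pf_to_dev(struct ice_pf *pf)
{
	return &pf->pdev->dev;
}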
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 0207e28c2682..04d5db0a25bf 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -25,20 +25,6 @@ static enum ice_status ice_set_mac_type(struct ice_hw *hw)
}
/**
- * ice_dev_onetime_setup - Temporary HW/FW workarounds
- * @hw: pointer to the HW structure
- *
- * This function provides temporary workarounds for certain issues
- * that are expected to be fixed in the HW/FW.
- */
-void ice_dev_onetime_setup(struct ice_hw *hw)
-{
-#define MBX_PF_VT_PFALLOC 0x00231E80
- /* set VFs per PF */
- wr32(hw, MBX_PF_VT_PFALLOC, rd32(hw, PF_VT_PFALLOC_HIF));
-}
-
-/**
* ice_clear_pf_cfg - Clear PF configuration
* @hw: pointer to the hardware structure
*
@@ -602,10 +588,10 @@ void ice_output_fw_log(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf)
}
/**
- * ice_get_itr_intrl_gran - determine int/intrl granularity
+ * ice_get_itr_intrl_gran
* @hw: pointer to the HW struct
*
- * Determines the ITR/intrl granularities based on the maximum aggregate
+ * Determines the ITR/INTRL granularities based on the maximum aggregate
* bandwidth according to the device's configuration during power-on.
*/
static void ice_get_itr_intrl_gran(struct ice_hw *hw)
@@ -763,8 +749,6 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
if (status)
goto err_unroll_sched;
- ice_dev_onetime_setup(hw);
-
/* Get MAC information */
/* A single port can report up to two (LAN and WoL) addresses */
mac_buf = devm_kcalloc(ice_hw_to_dev(hw), 2,
@@ -834,7 +818,7 @@ void ice_deinit_hw(struct ice_hw *hw)
*/
enum ice_status ice_check_reset(struct ice_hw *hw)
{
- u32 cnt, reg = 0, grst_delay;
+ u32 cnt, reg = 0, grst_delay, uld_mask;
/* Poll for Device Active state in case a recent CORER, GLOBR,
* or EMPR has occurred. The grst delay value is in 100ms units.
@@ -856,13 +840,20 @@ enum ice_status ice_check_reset(struct ice_hw *hw)
return ICE_ERR_RESET_FAILED;
}
-#define ICE_RESET_DONE_MASK (GLNVM_ULD_CORER_DONE_M | \
- GLNVM_ULD_GLOBR_DONE_M)
+#define ICE_RESET_DONE_MASK (GLNVM_ULD_PCIER_DONE_M |\
+ GLNVM_ULD_PCIER_DONE_1_M |\
+ GLNVM_ULD_CORER_DONE_M |\
+ GLNVM_ULD_GLOBR_DONE_M |\
+ GLNVM_ULD_POR_DONE_M |\
+ GLNVM_ULD_POR_DONE_1_M |\
+ GLNVM_ULD_PCIER_DONE_2_M)
+
+ uld_mask = ICE_RESET_DONE_MASK;
/* Device is Active; check Global Reset processes are done */
for (cnt = 0; cnt < ICE_PF_RESET_WAIT_COUNT; cnt++) {
- reg = rd32(hw, GLNVM_ULD) & ICE_RESET_DONE_MASK;
- if (reg == ICE_RESET_DONE_MASK) {
+ reg = rd32(hw, GLNVM_ULD) & uld_mask;
+ if (reg == uld_mask) {
ice_debug(hw, ICE_DBG_INIT,
"Global reset processes done. %d\n", cnt);
break;
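ice_check_reset() previously declared a global reset finished once only CORER and GLOBR reported done; the hunk above widens the wait to seven GLNVM_ULD completion bits (PCIER, POR and their siblings, defined in the ice_hw_autogen.h hunk further down), so the driver no longer proceeds while other reset stages are still in flight. The loop itself is the stock poll-a-masked-register idiom, sketched here with hypothetical names and the driver's rd32() register read declared as a stub:

#include <linux/types.h>
#include <linux/delay.h>
#include <linux/errno.h>

struct my_hw;				/* hypothetical hw handle */
u32 rd32(struct my_hw *hw, u32 reg);	/* driver register read */

static int wait_reset_done(struct my_hw *hw, u32 reg_addr, u32 done_mask,
			   unsigned int max_tries)
{
	unsigned int cnt;

	for (cnt = 0; cnt < max_tries; cnt++) {
		u32 reg = rd32(hw, reg_addr) & done_mask;

		if (reg == done_mask)
			return 0;	/* every stage reported done */
		msleep(100);
	}

	return -ETIMEDOUT;
}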
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index b5c013fdaaf9..f9fc005d35a7 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -54,8 +54,6 @@ enum ice_status ice_get_caps(struct ice_hw *hw);
void ice_set_safe_mode_caps(struct ice_hw *hw);
-void ice_dev_onetime_setup(struct ice_hw *hw);
-
enum ice_status
ice_write_rxq_ctx(struct ice_hw *hw, struct ice_rlan_ctx *rlan_ctx,
u32 rxq_index);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb.c b/drivers/net/ethernet/intel/ice/ice_dcb.c
index 713e8a892e14..adb8dab765c8 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb.c
@@ -1323,13 +1323,13 @@ enum ice_status ice_set_dcb_cfg(struct ice_port_info *pi)
}
/**
- * ice_aq_query_port_ets - query port ets configuration
+ * ice_aq_query_port_ets - query port ETS configuration
* @pi: port information structure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
- * query current port ets configuration
+ * query current port ETS configuration
*/
static enum ice_status
ice_aq_query_port_ets(struct ice_port_info *pi,
@@ -1416,13 +1416,13 @@ ice_update_port_tc_tree_cfg(struct ice_port_info *pi,
}
/**
- * ice_query_port_ets - query port ets configuration
+ * ice_query_port_ets - query port ETS configuration
* @pi: port information structure
* @buf: pointer to buffer
* @buf_size: buffer size in bytes
* @cd: pointer to command details structure or NULL
*
- * query current port ets configuration and update the
+ * query current port ETS configuration and update the
* SW DB with the TC changes
*/
enum ice_status
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 0664e5b8d130..7108fb41b604 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -315,9 +315,9 @@ ice_dcb_need_recfg(struct ice_pf *pf, struct ice_dcbx_cfg *old_cfg,
*/
void ice_dcb_rebuild(struct ice_pf *pf)
{
- struct ice_dcbx_cfg *local_dcbx_cfg, *desired_dcbx_cfg, *prev_cfg;
struct ice_aqc_port_ets_elem buf = { 0 };
struct device *dev = ice_pf_to_dev(pf);
+ struct ice_dcbx_cfg *err_cfg;
enum ice_status ret;
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
@@ -330,53 +330,25 @@ void ice_dcb_rebuild(struct ice_pf *pf)
if (!test_bit(ICE_FLAG_DCB_ENA, pf->flags))
return;
- local_dcbx_cfg = &pf->hw.port_info->local_dcbx_cfg;
- desired_dcbx_cfg = &pf->hw.port_info->desired_dcbx_cfg;
+ mutex_lock(&pf->tc_mutex);
- /* Save current willing state and force FW to unwilling */
- local_dcbx_cfg->etscfg.willing = 0x0;
- local_dcbx_cfg->pfc.willing = 0x0;
- local_dcbx_cfg->app_mode = ICE_DCBX_APPS_NON_WILLING;
+ if (!pf->hw.port_info->is_sw_lldp)
+ ice_cfg_etsrec_defaults(pf->hw.port_info);
- ice_cfg_etsrec_defaults(pf->hw.port_info);
ret = ice_set_dcb_cfg(pf->hw.port_info);
if (ret) {
- dev_err(dev, "Failed to set DCB to unwilling\n");
+ dev_err(dev, "Failed to set DCB config in rebuild\n");
goto dcb_error;
}
- /* Retrieve DCB config and ensure same as current in SW */
- prev_cfg = kmemdup(local_dcbx_cfg, sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg)
- goto dcb_error;
-
- ice_init_dcb(&pf->hw, true);
- if (pf->hw.port_info->dcbx_status == ICE_DCBX_STATUS_DIS)
- pf->hw.port_info->is_sw_lldp = true;
- else
- pf->hw.port_info->is_sw_lldp = false;
-
- if (ice_dcb_need_recfg(pf, prev_cfg, local_dcbx_cfg)) {
- /* difference in cfg detected - disable DCB till next MIB */
- dev_err(dev, "Set local MIB not accurate\n");
- kfree(prev_cfg);
- goto dcb_error;
+ if (!pf->hw.port_info->is_sw_lldp) {
+ ret = ice_cfg_lldp_mib_change(&pf->hw, true);
+ if (ret && !pf->hw.port_info->is_sw_lldp) {
+ dev_err(dev, "Failed to register for MIB changes\n");
+ goto dcb_error;
+ }
}
- /* fetched config congruent to previous configuration */
- kfree(prev_cfg);
-
- /* Set the local desired config */
- if (local_dcbx_cfg->dcbx_mode == ICE_DCBX_MODE_CEE)
- memcpy(local_dcbx_cfg, desired_dcbx_cfg,
- sizeof(*local_dcbx_cfg));
-
- ice_cfg_etsrec_defaults(pf->hw.port_info);
- ret = ice_set_dcb_cfg(pf->hw.port_info);
- if (ret) {
- dev_err(dev, "Failed to set desired config\n");
- goto dcb_error;
- }
dev_info(dev, "DCB restored after reset\n");
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
@@ -384,26 +356,32 @@ void ice_dcb_rebuild(struct ice_pf *pf)
goto dcb_error;
}
+ mutex_unlock(&pf->tc_mutex);
+
return;
dcb_error:
dev_err(dev, "Disabling DCB until new settings occur\n");
- prev_cfg = kzalloc(sizeof(*prev_cfg), GFP_KERNEL);
- if (!prev_cfg)
+ err_cfg = kzalloc(sizeof(*err_cfg), GFP_KERNEL);
+ if (!err_cfg) {
+ mutex_unlock(&pf->tc_mutex);
return;
+ }
- prev_cfg->etscfg.willing = true;
- prev_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
- prev_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
- memcpy(&prev_cfg->etsrec, &prev_cfg->etscfg, sizeof(prev_cfg->etsrec));
+ err_cfg->etscfg.willing = true;
+ err_cfg->etscfg.tcbwtable[0] = ICE_TC_MAX_BW;
+ err_cfg->etscfg.tsatable[0] = ICE_IEEE_TSA_ETS;
+ memcpy(&err_cfg->etsrec, &err_cfg->etscfg, sizeof(err_cfg->etsrec));
/* Coverity warns the return code of ice_pf_dcb_cfg() is not checked
* here as is done for other calls to that function. That check is
* not necessary since this is in this function's error cleanup path.
* Suppress the Coverity warning with the following comment...
*/
/* coverity[check_return] */
- ice_pf_dcb_cfg(pf, prev_cfg, false);
- kfree(prev_cfg);
+ ice_pf_dcb_cfg(pf, err_cfg, false);
+ kfree(err_cfg);
+
+ mutex_unlock(&pf->tc_mutex);
}
/**
@@ -434,9 +412,9 @@ static int ice_dcb_init_cfg(struct ice_pf *pf, bool locked)
}
/**
- * ice_dcb_sw_default_config - Apply a default DCB config
+ * ice_dcb_sw_dflt_cfg - Apply a default DCB config
* @pf: PF to apply config to
- * @ets_willing: configure ets willing
+ * @ets_willing: configure ETS willing
* @locked: was this function called with RTNL held
*/
static int ice_dcb_sw_dflt_cfg(struct ice_pf *pf, bool ets_willing, bool locked)
@@ -599,8 +577,7 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
goto dcb_init_err;
}
- dev_info(dev,
- "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
+ dev_info(dev, "DCB is enabled in the hardware, max number of TCs supported on this port are %d\n",
pf->hw.func_caps.common_cap.maxtc);
if (err) {
struct ice_vsi *pf_vsi;
@@ -610,8 +587,8 @@ int ice_init_pf_dcb(struct ice_pf *pf, bool locked)
clear_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags);
err = ice_dcb_sw_dflt_cfg(pf, true, locked);
if (err) {
- dev_err(dev,
- "Failed to set local DCB config %d\n", err);
+ dev_err(dev, "Failed to set local DCB config %d\n",
+ err);
err = -EIO;
goto dcb_init_err;
}
@@ -777,6 +754,8 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
}
}
+ mutex_lock(&pf->tc_mutex);
+
/* store the old configuration */
tmp_dcbx_cfg = pf->hw.port_info->local_dcbx_cfg;
@@ -787,20 +766,20 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
ret = ice_get_dcb_cfg(pf->hw.port_info);
if (ret) {
dev_err(dev, "Failed to get DCB config\n");
- return;
+ goto out;
}
/* No change detected in DCBX configs */
if (!memcmp(&tmp_dcbx_cfg, &pi->local_dcbx_cfg, sizeof(tmp_dcbx_cfg))) {
dev_dbg(dev, "No change detected in DCBX configuration.\n");
- return;
+ goto out;
}
need_reconfig = ice_dcb_need_recfg(pf, &tmp_dcbx_cfg,
&pi->local_dcbx_cfg);
ice_dcbnl_flush_apps(pf, &tmp_dcbx_cfg, &pi->local_dcbx_cfg);
if (!need_reconfig)
- return;
+ goto out;
/* Enable DCB tagging only when more than one TC */
if (ice_dcb_get_num_tc(&pi->local_dcbx_cfg) > 1) {
@@ -814,7 +793,7 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
pf_vsi = ice_get_main_vsi(pf);
if (!pf_vsi) {
dev_dbg(dev, "PF VSI doesn't exist\n");
- return;
+ goto out;
}
rtnl_lock();
@@ -823,13 +802,15 @@ ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
ret = ice_query_port_ets(pf->hw.port_info, &buf, sizeof(buf), NULL);
if (ret) {
dev_err(dev, "Query Port ETS failed\n");
- rtnl_unlock();
- return;
+ goto unlock_rtnl;
}
/* changes in configuration update VSI */
ice_pf_dcb_recfg(pf);
ice_ena_vsi(pf_vsi, true);
+unlock_rtnl:
rtnl_unlock();
+out:
+ mutex_unlock(&pf->tc_mutex);
}
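The ice_dcb_lib.c rework does two things at once: ice_dcb_rebuild() stops forcing an unwilling configuration and simply replays the stored config, and both the rebuild and the LLDP MIB-change handler now run under pf->tc_mutex. Taking a mutex at the top converts every early return into a goto that funnels through a single unlock, and where rtnl_lock() nests inside, the labels stack so the inner lock is always released first. The control-flow idiom, reduced to a hedged sketch (the fetch/apply/commit steps are hypothetical):

#include <linux/mutex.h>
#include <linux/rtnetlink.h>

int fetch_config(void);		/* hypothetical steps */
int apply_config(void);
void commit_config(void);

static void process_cfg_change(struct mutex *tc_mutex)
{
	mutex_lock(tc_mutex);

	if (fetch_config())
		goto out;		/* unlock on every exit path */

	rtnl_lock();
	if (apply_config())
		goto unlock_rtnl;	/* inner lock drops first */

	commit_config();

unlock_rtnl:
	rtnl_unlock();
out:
	mutex_unlock(tc_mutex);
}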
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
index d870c1aedc17..b61aba428adb 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_nl.c
@@ -297,8 +297,7 @@ ice_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio, u8 *setting)
return;
*setting = (pi->local_dcbx_cfg.pfc.pfcena >> prio) & 0x1;
- dev_dbg(ice_pf_to_dev(pf),
- "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
+ dev_dbg(ice_pf_to_dev(pf), "Get PFC Config up=%d, setting=%d, pfcenable=0x%x\n",
prio, *setting, pi->local_dcbx_cfg.pfc.pfcena);
}
@@ -418,8 +417,8 @@ ice_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int prio,
return;
*pgid = pi->local_dcbx_cfg.etscfg.prio_table[prio];
- dev_dbg(ice_pf_to_dev(pf),
- "Get PG config prio=%d tc=%d\n", prio, *pgid);
+ dev_dbg(ice_pf_to_dev(pf), "Get PG config prio=%d tc=%d\n", prio,
+ *pgid);
}
/**
@@ -713,13 +712,13 @@ static int ice_dcbnl_delapp(struct net_device *netdev, struct dcb_app *app)
return -EINVAL;
mutex_lock(&pf->tc_mutex);
- ret = dcb_ieee_delapp(netdev, app);
- if (ret)
- goto delapp_out;
-
old_cfg = &pf->hw.port_info->local_dcbx_cfg;
- if (old_cfg->numapps == 1)
+ if (old_cfg->numapps <= 1)
+ goto delapp_out;
+
+ ret = dcb_ieee_delapp(netdev, app);
+ if (ret)
goto delapp_out;
new_cfg = &pf->hw.port_info->desired_dcbx_cfg;
@@ -882,8 +881,7 @@ ice_dcbnl_vsi_del_app(struct ice_vsi *vsi,
sapp.protocol = app->prot_id;
sapp.priority = app->priority;
err = ice_dcbnl_delapp(vsi->netdev, &sapp);
- dev_dbg(&vsi->back->pdev->dev,
- "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Deleting app for VSI idx=%d err=%d sel=%d proto=0x%x, prio=%d\n",
vsi->idx, err, app->selector, app->prot_id, app->priority);
}
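Reordering ice_dcbnl_delapp() looks like a consistency fix: dcb_ieee_delapp() mutates the kernel's DCB app table, so the driver now refuses the deletion first (at numapps <= 1, keeping the default entry) before asking the stack to drop the entry, whereas the old order could unregister the app from the kernel and then bail out of updating the driver's own table. Validate-before-mutate in miniature (types and the stack call are hypothetical):

#include <errno.h>

struct app_table { unsigned int numapps; };

int stack_del_app(struct app_table *tbl);	/* hypothetical stack call */

static int del_app_entry(struct app_table *tbl)
{
	if (tbl->numapps <= 1)
		return -EINVAL;		/* keep the default entry */

	return stack_del_app(tbl);	/* mutate only after the check */
}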
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index 90c6a3ca20c9..77c412a7e7a4 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -166,13 +166,24 @@ static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
struct ice_netdev_priv *np = netdev_priv(netdev);
+ u8 oem_ver, oem_patch, nvm_ver_hi, nvm_ver_lo;
struct ice_vsi *vsi = np->vsi;
struct ice_pf *pf = vsi->back;
+ struct ice_hw *hw = &pf->hw;
+ u16 oem_build;
strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver));
strlcpy(drvinfo->version, ice_drv_ver, sizeof(drvinfo->version));
- strlcpy(drvinfo->fw_version, ice_nvm_version_str(&pf->hw),
- sizeof(drvinfo->fw_version));
+
+ /* Display NVM version (from which the firmware version can be
+ * determined) which contains more pertinent information.
+ */
+ ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch,
+ &nvm_ver_hi, &nvm_ver_lo);
+ snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version),
+ "%x.%02x 0x%x %d.%d.%d", nvm_ver_hi, nvm_ver_lo,
+ hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+
strlcpy(drvinfo->bus_info, pci_name(pf->pdev),
sizeof(drvinfo->bus_info));
drvinfo->n_priv_flags = ICE_PRIV_FLAG_ARRAY_SIZE;
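Inlining the version formatting into ice_get_drvinfo() pairs with the removal of ice_nvm_version_str() in the ice_lib.c hunk below: that helper formatted into a function-local static buffer, shared across all callers and therefore unsafe if two contexts ask for the version at once. Writing with snprintf() into the caller-owned, fixed-size drvinfo field needs no shared state. The shape of the fix, as a small user-space sketch:

#include <stdint.h>
#include <stdio.h>

struct nvm_info {
	uint8_t ver_hi, ver_lo, oem_ver, oem_patch;
	uint16_t oem_build;
	uint32_t eetrack;
};

/* Format into the caller's buffer: no static storage, so the
 * function is reentrant. Layout mirrors the driver's
 * "hi.lo eetrack oem_ver.build.patch" convention.
 */
static void format_nvm_version(char *buf, size_t len,
			       const struct nvm_info *nvm)
{
	snprintf(buf, len, "%x.%02x 0x%x %d.%d.%d",
		 nvm->ver_hi, nvm->ver_lo, nvm->eetrack,
		 nvm->oem_ver, nvm->oem_build, nvm->oem_patch);
}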
@@ -363,8 +374,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
val = rd32(hw, reg);
if (val == pattern)
continue;
- dev_err(dev,
- "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
+ dev_err(dev, "%s: reg pattern test failed - reg 0x%08x pat 0x%08x val 0x%08x\n"
, __func__, reg, pattern, val);
return 1;
}
@@ -372,8 +382,7 @@ static int ice_reg_pattern_test(struct ice_hw *hw, u32 reg, u32 mask)
wr32(hw, reg, orig_val);
val = rd32(hw, reg);
if (val != orig_val) {
- dev_err(dev,
- "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
+ dev_err(dev, "%s: reg restore test failed - reg 0x%08x orig 0x%08x val 0x%08x\n"
, __func__, reg, orig_val, val);
return 1;
}
@@ -791,8 +800,7 @@ ice_self_test(struct net_device *netdev, struct ethtool_test *eth_test,
set_bit(__ICE_TESTING, pf->state);
if (ice_active_vfs(pf)) {
- dev_warn(dev,
- "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
+ dev_warn(dev, "Please take active VFs and Netqueues offline and restart the adapter before running NIC diagnostics\n");
data[ICE_ETH_TEST_REG] = 1;
data[ICE_ETH_TEST_EEPROM] = 1;
data[ICE_ETH_TEST_INTR] = 1;
@@ -1047,7 +1055,7 @@ ice_set_fecparam(struct net_device *netdev, struct ethtool_fecparam *fecparam)
fec = ICE_FEC_NONE;
break;
default:
- dev_warn(&vsi->back->pdev->dev, "Unsupported FEC mode: %d\n",
+ dev_warn(ice_pf_to_dev(vsi->back), "Unsupported FEC mode: %d\n",
fecparam->fec);
return -EINVAL;
}
@@ -1200,8 +1208,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
* events to respond to.
*/
if (status)
- dev_info(dev,
- "Failed to unreg for LLDP events\n");
+ dev_info(dev, "Failed to unreg for LLDP events\n");
/* The AQ call to stop the FW LLDP agent will generate
* an error if the agent is already stopped.
@@ -1256,8 +1263,7 @@ static int ice_set_priv_flags(struct net_device *netdev, u32 flags)
/* Register for MIB change events */
status = ice_cfg_lldp_mib_change(&pf->hw, true);
if (status)
- dev_dbg(dev,
- "Fail to enable MIB change events\n");
+ dev_dbg(dev, "Fail to enable MIB change events\n");
}
}
if (test_bit(ICE_FLAG_LEGACY_RX, change_flags)) {
@@ -1710,291 +1716,13 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
{
struct ice_netdev_priv *np = netdev_priv(netdev);
struct ice_port_info *pi = np->vsi->port_info;
- struct ethtool_link_ksettings cap_ksettings;
struct ice_link_status *link_info;
struct ice_vsi *vsi = np->vsi;
- bool unrecog_phy_high = false;
- bool unrecog_phy_low = false;
link_info = &vsi->port_info->phy.link_info;
- /* Initialize supported and advertised settings based on PHY settings */
- switch (link_info->phy_type_low) {
- case ICE_PHY_TYPE_LOW_100BASE_TX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_100M_SGMII:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1G_SGMII:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_SX:
- case ICE_PHY_TYPE_LOW_1000BASE_LX:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_1000BASE_KX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 1000baseKX_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 1000baseKX_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_X:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_2500BASE_KX:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 2500baseX_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 2500baseX_Full);
- break;
- case ICE_PHY_TYPE_LOW_5GBASE_T:
- case ICE_PHY_TYPE_LOW_5GBASE_KR:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 5000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 5000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_T:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10G_SFI_DA:
- case ICE_PHY_TYPE_LOW_10G_SFI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_10G_SFI_C2C:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseT_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_SR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseSR_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseLR_Full);
- break;
- case ICE_PHY_TYPE_LOW_10GBASE_KR_CR1:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 10000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 10000baseKR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_T:
- case ICE_PHY_TYPE_LOW_25GBASE_CR:
- case ICE_PHY_TYPE_LOW_25GBASE_CR_S:
- case ICE_PHY_TYPE_LOW_25GBASE_CR1:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseCR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25G_AUI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_25G_AUI_C2C:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseCR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_SR:
- case ICE_PHY_TYPE_LOW_25GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseSR_Full);
- break;
- case ICE_PHY_TYPE_LOW_25GBASE_KR:
- case ICE_PHY_TYPE_LOW_25GBASE_KR1:
- case ICE_PHY_TYPE_LOW_25GBASE_KR_S:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 25000baseKR_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 25000baseKR_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_CR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40G_XLAUI_AOC_ACC:
- case ICE_PHY_TYPE_LOW_40G_XLAUI:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_SR4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseSR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_LR4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseLR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_40GBASE_KR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 40000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 40000baseKR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_CR2:
- case ICE_PHY_TYPE_LOW_50GBASE_CP:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseCR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50G_LAUI2_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_LAUI2:
- case ICE_PHY_TYPE_LOW_50G_AUI2_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_AUI2:
- case ICE_PHY_TYPE_LOW_50GBASE_SR:
- case ICE_PHY_TYPE_LOW_50G_AUI1_AOC_ACC:
- case ICE_PHY_TYPE_LOW_50G_AUI1:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseCR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_KR2:
- case ICE_PHY_TYPE_LOW_50GBASE_KR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseKR2_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 50000baseKR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_50GBASE_SR2:
- case ICE_PHY_TYPE_LOW_50GBASE_LR2:
- case ICE_PHY_TYPE_LOW_50GBASE_FR:
- case ICE_PHY_TYPE_LOW_50GBASE_LR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 50000baseSR2_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_CR4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100G_CAUI4_AOC_ACC:
- case ICE_PHY_TYPE_LOW_100G_CAUI4:
- case ICE_PHY_TYPE_LOW_100G_AUI4_AOC_ACC:
- case ICE_PHY_TYPE_LOW_100G_AUI4:
- case ICE_PHY_TYPE_LOW_100GBASE_CR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_CP2:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseCR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_SR4:
- case ICE_PHY_TYPE_LOW_100GBASE_SR2:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseSR4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_LR4:
- case ICE_PHY_TYPE_LOW_100GBASE_DR:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseLR4_ER4_Full);
- break;
- case ICE_PHY_TYPE_LOW_100GBASE_KR4:
- case ICE_PHY_TYPE_LOW_100GBASE_KR_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
- break;
- default:
- unrecog_phy_low = true;
- }
-
- switch (link_info->phy_type_high) {
- case ICE_PHY_TYPE_HIGH_100GBASE_KR2_PAM4:
- ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseKR4_Full);
- ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
- ethtool_link_ksettings_add_link_mode(ks, advertising,
- 100000baseKR4_Full);
- break;
- case ICE_PHY_TYPE_HIGH_100G_CAUI2_AOC_ACC:
- case ICE_PHY_TYPE_HIGH_100G_CAUI2:
- case ICE_PHY_TYPE_HIGH_100G_AUI2_AOC_ACC:
- case ICE_PHY_TYPE_HIGH_100G_AUI2:
- ethtool_link_ksettings_add_link_mode(ks, supported,
- 100000baseCR4_Full);
- break;
- default:
- unrecog_phy_high = true;
- }
-
- if (unrecog_phy_low && unrecog_phy_high) {
- /* if we got here and link is up something bad is afoot */
- netdev_info(netdev,
- "WARNING: Unrecognized PHY_Low (0x%llx).\n",
- (u64)link_info->phy_type_low);
- netdev_info(netdev,
- "WARNING: Unrecognized PHY_High (0x%llx).\n",
- (u64)link_info->phy_type_high);
- }
-
- /* Now that we've worked out everything that could be supported by the
- * current PHY type, get what is supported by the NVM and intersect
- * them to get what is truly supported
- */
- memset(&cap_ksettings, 0, sizeof(cap_ksettings));
- ice_phy_type_to_ethtool(netdev, &cap_ksettings);
- ethtool_intersect_link_masks(ks, &cap_ksettings);
+ /* Get supported and advertised settings from PHY ability with media */
+ ice_phy_type_to_ethtool(netdev, ks);
switch (link_info->link_speed) {
case ICE_AQ_LINK_SPEED_100GB:
@@ -2028,8 +1756,7 @@ ice_get_settings_link_up(struct ethtool_link_ksettings *ks,
ks->base.speed = SPEED_100;
break;
default:
- netdev_info(netdev,
- "WARNING: Unrecognized link_speed (0x%x).\n",
+ netdev_info(netdev, "WARNING: Unrecognized link_speed (0x%x).\n",
link_info->link_speed);
break;
}
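The large deletion above collapses a roughly 280-line per-PHY-type switch (plus the phy_type_high variant and the capability intersection that followed) into a single call: ice_phy_type_to_ethtool() already reports supported and advertised modes from the PHY abilities with media present, so a second hand-maintained table could only subtract information and drift as new PHY types appeared. The remaining switch only maps link_speed to the base speed. For reference, the reporting API the driver keeps using works like this (the capability flags here are illustrative):

#include <linux/types.h>
#include <linux/ethtool.h>

/* Table-free reporting: derive ethtool link modes from one
 * capability query instead of mirroring hardware enums in a
 * parallel switch statement.
 */
static void fill_modes(struct ethtool_link_ksettings *ks, bool an,
		       bool has_10g_t)
{
	if (an) {
		ethtool_link_ksettings_add_link_mode(ks, supported, Autoneg);
		ethtool_link_ksettings_add_link_mode(ks, advertising, Autoneg);
	}
	if (has_10g_t)
		ethtool_link_ksettings_add_link_mode(ks, supported,
						     10000baseT_Full);
}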
@@ -2845,13 +2572,11 @@ ice_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
new_tx_cnt = ALIGN(ring->tx_pending, ICE_REQ_DESC_MULTIPLE);
if (new_tx_cnt != ring->tx_pending)
- netdev_info(netdev,
- "Requested Tx descriptor count rounded up to %d\n",
+ netdev_info(netdev, "Requested Tx descriptor count rounded up to %d\n",
new_tx_cnt);
new_rx_cnt = ALIGN(ring->rx_pending, ICE_REQ_DESC_MULTIPLE);
if (new_rx_cnt != ring->rx_pending)
- netdev_info(netdev,
- "Requested Rx descriptor count rounded up to %d\n",
+ netdev_info(netdev, "Requested Rx descriptor count rounded up to %d\n",
new_rx_cnt);
/* if nothing to do return success */
@@ -3211,13 +2936,6 @@ ice_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *pause)
else
return -EINVAL;
- /* Tell the OS link is going down, the link will go back up when fw
- * says it is ready asynchronously
- */
- ice_print_link_msg(vsi, false);
- netif_carrier_off(netdev);
- netif_tx_stop_all_queues(netdev);
-
/* Set the FC mode and only restart AN if link is up */
status = ice_set_fc(pi, &aq_failures, link_up);
@@ -3718,8 +3436,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
if (ec->rx_coalesce_usecs_high > ICE_MAX_INTRL ||
(ec->rx_coalesce_usecs_high &&
ec->rx_coalesce_usecs_high < pf->hw.intrl_gran)) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs-high valid values are 0 (disabled), %d-%d\n",
c_type_str, pf->hw.intrl_gran,
ICE_MAX_INTRL);
return -EINVAL;
@@ -3737,8 +3454,7 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
break;
case ICE_TX_CONTAINER:
if (ec->tx_coalesce_usecs_high) {
- netdev_info(vsi->netdev,
- "setting %s-usecs-high is not supported\n",
+ netdev_info(vsi->netdev, "setting %s-usecs-high is not supported\n",
c_type_str);
return -EINVAL;
}
@@ -3755,35 +3471,24 @@ ice_set_rc_coalesce(enum ice_container_type c_type, struct ethtool_coalesce *ec,
itr_setting = rc->itr_setting & ~ICE_ITR_DYNAMIC;
if (coalesce_usecs != itr_setting && use_adaptive_coalesce) {
- netdev_info(vsi->netdev,
- "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
+ netdev_info(vsi->netdev, "%s interrupt throttling cannot be changed if adaptive-%s is enabled\n",
c_type_str, c_type_str);
return -EINVAL;
}
if (coalesce_usecs > ICE_ITR_MAX) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs range is 0-%d\n",
+ netdev_info(vsi->netdev, "Invalid value, %s-usecs range is 0-%d\n",
c_type_str, ICE_ITR_MAX);
return -EINVAL;
}
- /* hardware only supports an ITR granularity of 2us */
- if (coalesce_usecs % 2 != 0) {
- netdev_info(vsi->netdev,
- "Invalid value, %s-usecs must be even\n",
- c_type_str);
- return -EINVAL;
- }
-
if (use_adaptive_coalesce) {
rc->itr_setting |= ICE_ITR_DYNAMIC;
} else {
- /* store user facing value how it was set */
+ /* save the user set usecs */
rc->itr_setting = coalesce_usecs;
- /* set to static and convert to value HW understands */
- rc->target_itr =
- ITR_TO_REG(ITR_REG_ALIGN(rc->itr_setting));
+ /* device ITR granularity is in 2 usec increments */
+ rc->target_itr = ITR_REG_ALIGN(rc->itr_setting);
}
return 0;
@@ -3877,6 +3582,30 @@ ice_is_coalesce_param_invalid(struct net_device *netdev,
}
/**
+ * ice_print_if_odd_usecs - print message if user tries to set odd [tx|rx]-usecs
+ * @netdev: netdev used for print
+ * @itr_setting: previous user setting
+ * @use_adaptive_coalesce: if adaptive coalesce is enabled or being enabled
+ * @coalesce_usecs: requested value of [tx|rx]-usecs
+ * @c_type_str: either "rx" or "tx" to match user set field of [tx|rx]-usecs
+ */
+static void
+ice_print_if_odd_usecs(struct net_device *netdev, u16 itr_setting,
+ u32 use_adaptive_coalesce, u32 coalesce_usecs,
+ const char *c_type_str)
+{
+ if (use_adaptive_coalesce)
+ return;
+
+ itr_setting = ITR_TO_REG(itr_setting);
+
+ if (itr_setting != coalesce_usecs && (coalesce_usecs % 2))
+ netdev_info(netdev, "User set %s-usecs to %d, device only supports even values. Rounding down and attempting to set %s-usecs to %d\n",
+ c_type_str, coalesce_usecs, c_type_str,
+ ITR_REG_ALIGN(coalesce_usecs));
+}
+
+/**
* __ice_set_coalesce - set ITR/INTRL values for the device
* @netdev: pointer to the netdev associated with this query
* @ec: ethtool structure to fill with driver's coalesce settings
@@ -3896,8 +3625,19 @@ __ice_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *ec,
return -EINVAL;
if (q_num < 0) {
+ struct ice_q_vector *q_vector = vsi->q_vectors[0];
int v_idx;
+ if (q_vector) {
+ ice_print_if_odd_usecs(netdev, q_vector->rx.itr_setting,
+ ec->use_adaptive_rx_coalesce,
+ ec->rx_coalesce_usecs, "rx");
+
+ ice_print_if_odd_usecs(netdev, q_vector->tx.itr_setting,
+ ec->use_adaptive_tx_coalesce,
+ ec->tx_coalesce_usecs, "tx");
+ }
+
ice_for_each_q_vector(vsi, v_idx) {
/* In some cases if DCB is configured the num_[rx|tx]q
* can be less than vsi->num_q_vectors. This check
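Together, the coalesce hunks above soften a hard failure: instead of returning -EINVAL for odd [tx|rx]-usecs values, the driver now rounds the request down to the device's 2-microsecond ITR granularity via ITR_REG_ALIGN() and, through ice_print_if_odd_usecs(), tells the user once per set what will actually be programmed. The alignment is a mask of the low bit; a hedged user-space sketch:

#include <stdint.h>
#include <stdio.h>

#define ITR_GRAN_US	2u	/* device programs ITR in 2 us steps */

static uint16_t itr_align(uint16_t usecs)
{
	return usecs & ~(ITR_GRAN_US - 1);	/* round down to even */
}

static uint16_t set_itr_usecs(uint16_t requested)
{
	uint16_t aligned = itr_align(requested);

	if (aligned != requested)
		printf("requested %u us, programming %u us\n",
		       (unsigned int)requested, (unsigned int)aligned);
	return aligned;
}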
@@ -4012,8 +3752,7 @@ ice_get_module_info(struct net_device *netdev,
}
break;
default:
- netdev_warn(netdev,
- "SFF Module Type not recognized.\n");
+ netdev_warn(netdev, "SFF Module Type not recognized.\n");
return -EINVAL;
}
return 0;
@@ -4081,11 +3820,11 @@ ice_get_module_eeprom(struct net_device *netdev,
static const struct ethtool_ops ice_ethtool_ops = {
.get_link_ksettings = ice_get_link_ksettings,
.set_link_ksettings = ice_set_link_ksettings,
- .get_drvinfo = ice_get_drvinfo,
- .get_regs_len = ice_get_regs_len,
- .get_regs = ice_get_regs,
- .get_msglevel = ice_get_msglevel,
- .set_msglevel = ice_set_msglevel,
+ .get_drvinfo = ice_get_drvinfo,
+ .get_regs_len = ice_get_regs_len,
+ .get_regs = ice_get_regs,
+ .get_msglevel = ice_get_msglevel,
+ .set_msglevel = ice_set_msglevel,
.self_test = ice_self_test,
.get_link = ethtool_op_get_link,
.get_eeprom_len = ice_get_eeprom_len,
@@ -4112,8 +3851,8 @@ static const struct ethtool_ops ice_ethtool_ops = {
.get_channels = ice_get_channels,
.set_channels = ice_set_channels,
.get_ts_info = ethtool_op_get_ts_info,
- .get_per_queue_coalesce = ice_get_per_q_coalesce,
- .set_per_queue_coalesce = ice_set_per_q_coalesce,
+ .get_per_queue_coalesce = ice_get_per_q_coalesce,
+ .set_per_queue_coalesce = ice_set_per_q_coalesce,
.get_fecparam = ice_get_fecparam,
.set_fecparam = ice_set_fecparam,
.get_module_info = ice_get_module_info,
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index f2cababf2561..6db3d0494127 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -267,8 +267,14 @@
#define GLNVM_GENS_SR_SIZE_S 5
#define GLNVM_GENS_SR_SIZE_M ICE_M(0x7, 5)
#define GLNVM_ULD 0x000B6008
+#define GLNVM_ULD_PCIER_DONE_M BIT(0)
+#define GLNVM_ULD_PCIER_DONE_1_M BIT(1)
#define GLNVM_ULD_CORER_DONE_M BIT(3)
#define GLNVM_ULD_GLOBR_DONE_M BIT(4)
+#define GLNVM_ULD_POR_DONE_M BIT(5)
+#define GLNVM_ULD_POR_DONE_1_M BIT(8)
+#define GLNVM_ULD_PCIER_DONE_2_M BIT(9)
+#define GLNVM_ULD_PE_DONE_M BIT(10)
#define GLPCI_CNF2 0x000BE004
#define GLPCI_CNF2_CACHELINE_SIZE_M BIT(1)
#define PF_FUNC_RID 0x0009E880
@@ -331,7 +337,6 @@
#define GLV_TEPC(_VSI) (0x00312000 + ((_VSI) * 4))
#define GLV_UPRCL(_i) (0x003B2000 + ((_i) * 8))
#define GLV_UPTCL(_i) (0x0030A000 + ((_i) * 8))
-#define PF_VT_PFALLOC_HIF 0x0009DD80
#define VSIQF_HKEY_MAX_INDEX 12
#define VSIQF_HLUT_MAX_INDEX 15
#define VFINT_DYN_CTLN(_i) (0x00003800 + ((_i) * 4))
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 1874c9f51a32..d974e2fa3e63 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -117,8 +117,7 @@ static void ice_vsi_set_num_desc(struct ice_vsi *vsi)
vsi->num_tx_desc = ICE_DFLT_NUM_TX_DESC;
break;
default:
- dev_dbg(&vsi->back->pdev->dev,
- "Not setting number of Tx/Rx descriptors for VSI type %d\n",
+ dev_dbg(ice_pf_to_dev(vsi->back), "Not setting number of Tx/Rx descriptors for VSI type %d\n",
vsi->type);
break;
}
@@ -724,7 +723,7 @@ static void ice_vsi_setup_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctxt)
vsi->num_txq = tx_count;
if (vsi->type == ICE_VSI_VF && vsi->num_txq != vsi->num_rxq) {
- dev_dbg(&vsi->back->pdev->dev, "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
+ dev_dbg(ice_pf_to_dev(vsi->back), "VF VSI should have same number of Tx and Rx queues. Hence making them equal\n");
/* since there is a chance that num_rxq could have been changed
* in the above for loop, make num_txq equal to num_rxq.
*/
@@ -929,8 +928,7 @@ static int ice_vsi_setup_vector_base(struct ice_vsi *vsi)
vsi->base_vector = ice_get_res(pf, pf->irq_tracker, num_q_vectors,
vsi->idx);
if (vsi->base_vector < 0) {
- dev_err(dev,
- "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
+ dev_err(dev, "Failed to get tracking for %d vectors for VSI %d, err=%d\n",
num_q_vectors, vsi->vsi_num, vsi->base_vector);
return -ENOENT;
}
@@ -1232,8 +1230,9 @@ static void ice_vsi_set_rss_flow_fld(struct ice_vsi *vsi)
*
* Returns 0 on success or ENOMEM on failure.
*/
-int ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
- const u8 *macaddr)
+int
+ice_add_mac_to_list(struct ice_vsi *vsi, struct list_head *add_list,
+ const u8 *macaddr)
{
struct ice_fltr_list_entry *tmp;
struct ice_pf *pf = vsi->back;
@@ -1392,12 +1391,10 @@ int ice_vsi_kill_vlan(struct ice_vsi *vsi, u16 vid)
status = ice_remove_vlan(&pf->hw, &tmp_add_list);
if (status == ICE_ERR_DOES_NOT_EXIST) {
- dev_dbg(dev,
- "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
+ dev_dbg(dev, "Failed to remove VLAN %d on VSI %i, it does not exist, status: %d\n",
vid, vsi->vsi_num, status);
} else if (status) {
- dev_err(dev,
- "Error removing VLAN %d on vsi %i error: %d\n",
+ dev_err(dev, "Error removing VLAN %d on vsi %i error: %d\n",
vid, vsi->vsi_num, status);
err = -EIO;
}
@@ -1453,8 +1450,7 @@ setup_rings:
err = ice_setup_rx_ctx(vsi->rx_rings[i]);
if (err) {
- dev_err(&vsi->back->pdev->dev,
- "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "ice_setup_rx_ctx failed for RxQ %d, err %d\n",
i, err);
return err;
}
@@ -1623,7 +1619,7 @@ int ice_vsi_manage_vlan_insertion(struct ice_vsi *vsi)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN insert failed, err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN insert failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1669,7 +1665,7 @@ int ice_vsi_manage_vlan_stripping(struct ice_vsi *vsi, bool ena)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for VLAN strip failed, ena = %d err %d aq_err %d\n",
ena, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1834,8 +1830,7 @@ ice_vsi_set_q_vectors_reg_idx(struct ice_vsi *vsi)
struct ice_q_vector *q_vector = vsi->q_vectors[i];
if (!q_vector) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to set reg_idx on q_vector %d VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to set reg_idx on q_vector %d VSI %d\n",
i, vsi->vsi_num);
goto clear_reg_idx;
}
@@ -1898,8 +1893,7 @@ ice_vsi_add_rem_eth_mac(struct ice_vsi *vsi, bool add_rule)
status = ice_remove_eth_mac(&pf->hw, &tmp_add_list);
if (status)
- dev_err(dev,
- "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
+ dev_err(dev, "Failure Adding or Removing Ethertype on VSI %i error: %d\n",
vsi->vsi_num, status);
ice_free_fltr_list(dev, &tmp_add_list);
@@ -2384,8 +2378,7 @@ ice_get_res(struct ice_pf *pf, struct ice_res_tracker *res, u16 needed, u16 id)
return -EINVAL;
if (!needed || needed > res->num_entries || id >= ICE_RES_VALID_BIT) {
- dev_err(ice_pf_to_dev(pf),
- "param err: needed=%d, num_entries = %d id=0x%04x\n",
+ dev_err(ice_pf_to_dev(pf), "param err: needed=%d, num_entries = %d id=0x%04x\n",
needed, res->num_entries, id);
return -EINVAL;
}
@@ -2686,7 +2679,6 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
ice_vsi_put_qs(vsi);
ice_vsi_clear_rings(vsi);
ice_vsi_free_arrays(vsi);
- ice_dev_onetime_setup(&pf->hw);
if (vsi->type == ICE_VSI_VF)
ice_vsi_set_num_qs(vsi, vf->vf_id);
else
@@ -2765,8 +2757,7 @@ int ice_vsi_rebuild(struct ice_vsi *vsi, bool init_vsi)
status = ice_cfg_vsi_lan(vsi->port_info, vsi->idx, vsi->tc_cfg.ena_tc,
max_txqs);
if (status) {
- dev_err(ice_pf_to_dev(pf),
- "VSI %d failed lan queue config, error %d\n",
+ dev_err(ice_pf_to_dev(pf), "VSI %d failed lan queue config, error %d\n",
vsi->vsi_num, status);
if (init_vsi) {
ret = -EIO;
@@ -2834,8 +2825,8 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
int ice_vsi_cfg_tc(struct ice_vsi *vsi, u8 ena_tc)
{
u16 max_txqs[ICE_MAX_TRAFFIC_CLASS] = { 0 };
- struct ice_vsi_ctx *ctx;
struct ice_pf *pf = vsi->back;
+ struct ice_vsi_ctx *ctx;
enum ice_status status;
struct device *dev;
int i, ret = 0;
@@ -2892,25 +2883,6 @@ out:
#endif /* CONFIG_DCB */
/**
- * ice_nvm_version_str - format the NVM version strings
- * @hw: ptr to the hardware info
- */
-char *ice_nvm_version_str(struct ice_hw *hw)
-{
- u8 oem_ver, oem_patch, ver_hi, ver_lo;
- static char buf[ICE_NVM_VER_LEN];
- u16 oem_build;
-
- ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
- &ver_lo);
-
- snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
- hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
-
- return buf;
-}
-
-/**
* ice_update_ring_stats - Update ring statistics
* @ring: ring to update
* @cont: used to increment per-vector counters
@@ -2981,7 +2953,7 @@ ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set)
status = ice_remove_mac(&vsi->back->hw, &tmp_add_list);
cfg_mac_fltr_exit:
- ice_free_fltr_list(&vsi->back->pdev->dev, &tmp_add_list);
+ ice_free_fltr_list(ice_pf_to_dev(vsi->back), &tmp_add_list);
return status;
}
@@ -3043,16 +3015,14 @@ int ice_set_dflt_vsi(struct ice_sw *sw, struct ice_vsi *vsi)
/* another VSI is already the default VSI for this switch */
if (ice_is_dflt_vsi_in_use(sw)) {
- dev_err(dev,
- "Default forwarding VSI %d already in use, disable it and try again\n",
+ dev_err(dev, "Default forwarding VSI %d already in use, disable it and try again\n",
sw->dflt_vsi->vsi_num);
return -EEXIST;
}
status = ice_cfg_dflt_vsi(&vsi->back->hw, vsi->idx, true, ICE_FLTR_RX);
if (status) {
- dev_err(dev,
- "Failed to set VSI %d as the default forwarding VSI, error %d\n",
+ dev_err(dev, "Failed to set VSI %d as the default forwarding VSI, error %d\n",
vsi->vsi_num, status);
return -EIO;
}
@@ -3091,8 +3061,7 @@ int ice_clear_dflt_vsi(struct ice_sw *sw)
status = ice_cfg_dflt_vsi(&dflt_vsi->back->hw, dflt_vsi->idx, false,
ICE_FLTR_RX);
if (status) {
- dev_err(dev,
- "Failed to clear the default forwarding VSI %d, error %d\n",
+ dev_err(dev, "Failed to clear the default forwarding VSI %d, error %d\n",
dflt_vsi->vsi_num, status);
return -EIO;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 68fd0d4505c2..e2c0dadce920 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -97,8 +97,6 @@ void ice_vsi_cfg_frame_size(struct ice_vsi *vsi);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
-char *ice_nvm_version_str(struct ice_hw *hw);
-
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index 5ae671609f98..5ef28052c0f8 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -162,8 +162,7 @@ unregister:
* had an error
*/
if (status && vsi->netdev->reg_state == NETREG_REGISTERED) {
- dev_err(ice_pf_to_dev(pf),
- "Could not add MAC filters error %d. Unregistering device\n",
+ dev_err(ice_pf_to_dev(pf), "Could not add MAC filters error %d. Unregistering device\n",
status);
unregister_netdev(vsi->netdev);
free_netdev(vsi->netdev);
@@ -269,7 +268,7 @@ static int ice_cfg_promisc(struct ice_vsi *vsi, u8 promisc_m, bool set_promisc)
*/
static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
+ struct device *dev = ice_pf_to_dev(vsi->back);
struct net_device *netdev = vsi->netdev;
bool promisc_forced_on = false;
struct ice_pf *pf = vsi->back;
@@ -335,8 +334,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
!test_and_set_bit(__ICE_FLTR_OVERFLOW_PROMISC,
vsi->state)) {
promisc_forced_on = true;
- netdev_warn(netdev,
- "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
+ netdev_warn(netdev, "Reached MAC filter limit, forcing promisc mode on VSI %d\n",
vsi->vsi_num);
} else {
err = -EIO;
@@ -382,8 +380,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (!ice_is_dflt_vsi_in_use(pf->first_sw)) {
err = ice_set_dflt_vsi(pf->first_sw, vsi);
if (err && err != -EEXIST) {
- netdev_err(netdev,
- "Error %d setting default VSI %i Rx rule\n",
+ netdev_err(netdev, "Error %d setting default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags &=
~IFF_PROMISC;
@@ -395,8 +392,7 @@ static int ice_vsi_sync_fltr(struct ice_vsi *vsi)
if (ice_is_vsi_dflt_vsi(pf->first_sw, vsi)) {
err = ice_clear_dflt_vsi(pf->first_sw);
if (err) {
- netdev_err(netdev,
- "Error %d clearing default VSI %i Rx rule\n",
+ netdev_err(netdev, "Error %d clearing default VSI %i Rx rule\n",
err, vsi->vsi_num);
vsi->current_netdev_flags |=
IFF_PROMISC;
@@ -752,7 +748,7 @@ void ice_print_link_msg(struct ice_vsi *vsi, bool isup)
kfree(caps);
done:
- netdev_info(vsi->netdev, "NIC Link is up %sbps, Requested FEC: %s, FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ netdev_info(vsi->netdev, "NIC Link is up %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
speed, fec_req, fec, an, fc);
ice_print_topo_conflict(vsi);
}
@@ -815,8 +811,7 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
*/
result = ice_update_link_info(pi);
if (result)
- dev_dbg(dev,
- "Failed to update link status and re-enable link events for port %d\n",
+ dev_dbg(dev, "Failed to update link status and re-enable link events for port %d\n",
pi->lport);
/* if the old link up/down and speed is the same as the new */
@@ -834,13 +829,13 @@ ice_link_event(struct ice_pf *pf, struct ice_port_info *pi, bool link_up,
result = ice_aq_set_link_restart_an(pi, false, NULL);
if (result) {
- dev_dbg(dev,
- "Failed to set link down, VSI %d error %d\n",
+ dev_dbg(dev, "Failed to set link down, VSI %d error %d\n",
vsi->vsi_num, result);
return result;
}
}
+ ice_dcb_rebuild(pf);
ice_vsi_link_event(vsi, link_up);
ice_print_link_msg(vsi, link_up);
@@ -892,15 +887,13 @@ static int ice_init_link_events(struct ice_port_info *pi)
ICE_AQ_LINK_EVENT_MODULE_QUAL_FAIL));
if (ice_aq_set_event_mask(pi->hw, pi->lport, mask, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to set link event mask for port %d\n",
+ dev_dbg(ice_hw_to_dev(pi->hw), "Failed to set link event mask for port %d\n",
pi->lport);
return -EIO;
}
if (ice_aq_get_link_info(pi, true, NULL, NULL)) {
- dev_dbg(ice_hw_to_dev(pi->hw),
- "Failed to enable link events for port %d\n",
+ dev_dbg(ice_hw_to_dev(pi->hw), "Failed to enable link events for port %d\n",
pi->lport);
return -EIO;
}
@@ -929,8 +922,8 @@ ice_handle_link_event(struct ice_pf *pf, struct ice_rq_event_info *event)
!!(link_data->link_info & ICE_AQ_LINK_UP),
le16_to_cpu(link_data->link_speed));
if (status)
- dev_dbg(ice_pf_to_dev(pf),
- "Could not process link event, error %d\n", status);
+ dev_dbg(ice_pf_to_dev(pf), "Could not process link event, error %d\n",
+ status);
return status;
}
@@ -979,13 +972,11 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
dev_dbg(dev, "%s Receive Queue VF Error detected\n",
qtype);
if (val & PF_FW_ARQLEN_ARQOVFL_M) {
- dev_dbg(dev,
- "%s Receive Queue Overflow Error detected\n",
+ dev_dbg(dev, "%s Receive Queue Overflow Error detected\n",
qtype);
}
if (val & PF_FW_ARQLEN_ARQCRIT_M)
- dev_dbg(dev,
- "%s Receive Queue Critical Error detected\n",
+ dev_dbg(dev, "%s Receive Queue Critical Error detected\n",
qtype);
val &= ~(PF_FW_ARQLEN_ARQVFE_M | PF_FW_ARQLEN_ARQOVFL_M |
PF_FW_ARQLEN_ARQCRIT_M);
@@ -998,8 +989,8 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
PF_FW_ATQLEN_ATQCRIT_M)) {
oldval = val;
if (val & PF_FW_ATQLEN_ATQVFE_M)
- dev_dbg(dev,
- "%s Send Queue VF Error detected\n", qtype);
+ dev_dbg(dev, "%s Send Queue VF Error detected\n",
+ qtype);
if (val & PF_FW_ATQLEN_ATQOVFL_M) {
dev_dbg(dev, "%s Send Queue Overflow Error detected\n",
qtype);
@@ -1048,8 +1039,7 @@ static int __ice_clean_ctrlq(struct ice_pf *pf, enum ice_ctl_q q_type)
ice_dcb_process_lldp_set_mib_change(pf, &event);
break;
default:
- dev_dbg(dev,
- "%s Receive Queue unknown event 0x%04x ignored\n",
+ dev_dbg(dev, "%s Receive Queue unknown event 0x%04x ignored\n",
qtype, opcode);
break;
}
@@ -1238,7 +1228,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
u16 queue = ((reg & GL_MDET_TX_TCLAN_QNUM_M) >>
GL_MDET_TX_TCLAN_QNUM_S);
- if (netif_msg_rx_err(pf))
+ if (netif_msg_tx_err(pf))
dev_info(dev, "Malicious Driver Detection event %d on TX queue %d PF# %d VF# %d\n",
event, queue, pf_num, vf_num);
wr32(hw, GL_MDET_TX_TCLAN, 0xffffffff);
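The one-character-class change above (netif_msg_rx_err to netif_msg_tx_err) is a copy-paste correction: this branch reports a Malicious Driver Detection event on a Tx queue, so it must be gated by the Tx error bit of msg_enable, not the Rx one; with the old test, enabling only tx_err logging would silently hide Tx MDD events. The netif_msg_*_err() helpers are plain bit tests, along these lines:

#include <linux/netdevice.h>

/* Illustrative: gate a Tx-path diagnostic on the Tx error bit of
 * the interface's message-level mask.
 */
static bool should_log_tx_err(u32 msg_enable)
{
	return msg_enable & NETIF_MSG_TX_ERR;
}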
@@ -1335,8 +1325,7 @@ static void ice_handle_mdd_event(struct ice_pf *pf)
vf->num_mdd_events++;
if (vf->num_mdd_events &&
vf->num_mdd_events <= ICE_MDD_EVENTS_THRESHOLD)
- dev_info(dev,
- "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
+ dev_info(dev, "VF %d has had %llu MDD events since last boot, Admin might need to reload AVF driver with this number of events\n",
i, vf->num_mdd_events);
}
}
@@ -1367,7 +1356,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
if (vsi->type != ICE_VSI_PF)
return 0;
- dev = &vsi->back->pdev->dev;
+ dev = ice_pf_to_dev(vsi->back);
pi = vsi->port_info;
@@ -1378,8 +1367,7 @@ static int ice_force_phys_link_state(struct ice_vsi *vsi, bool link_up)
retcode = ice_aq_get_phy_caps(pi, false, ICE_AQC_REPORT_SW_CFG, pcaps,
NULL);
if (retcode) {
- dev_err(dev,
- "Failed to get phy capabilities, VSI %d error %d\n",
+ dev_err(dev, "Failed to get phy capabilities, VSI %d error %d\n",
vsi->vsi_num, retcode);
retcode = -EIO;
goto out;
@@ -1649,8 +1637,8 @@ static int ice_vsi_req_irq_msix(struct ice_vsi *vsi, char *basename)
err = devm_request_irq(dev, irq_num, vsi->irq_handler, 0,
q_vector->name, q_vector);
if (err) {
- netdev_err(vsi->netdev,
- "MSIX request_irq failed, error: %d\n", err);
+ netdev_err(vsi->netdev, "MSIX request_irq failed, error: %d\n",
+ err);
goto free_q_irqs;
}
@@ -1685,7 +1673,7 @@ free_q_irqs:
*/
static int ice_xdp_alloc_setup_rings(struct ice_vsi *vsi)
{
- struct device *dev = &vsi->back->pdev->dev;
+ struct device *dev = ice_pf_to_dev(vsi->back);
int i;
for (i = 0; i < vsi->num_xdp_txq; i++) {
@@ -2664,14 +2652,12 @@ static void ice_set_pf_caps(struct ice_pf *pf)
clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
-#ifdef CONFIG_PCI_IOV
clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
-#endif /* CONFIG_PCI_IOV */
clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
if (func_caps->common_cap.rss_table_size)
set_bit(ICE_FLAG_RSS_ENA, pf->flags);
@@ -2764,8 +2750,7 @@ static int ice_ena_msix_range(struct ice_pf *pf)
}
if (v_actual < v_budget) {
- dev_warn(dev,
- "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
+ dev_warn(dev, "not enough OS MSI-X vectors. requested = %d, obtained = %d\n",
v_budget, v_actual);
/* 2 vectors for LAN (traffic + OICR) */
#define ICE_MIN_LAN_VECS 2
@@ -2787,8 +2772,7 @@ msix_err:
goto exit_err;
no_hw_vecs_left_err:
- dev_err(dev,
- "not enough device MSI-X vectors. requested = %d, available = %d\n",
+ dev_err(dev, "not enough device MSI-X vectors. requested = %d, available = %d\n",
needed, v_left);
err = -ERANGE;
exit_err:
@@ -2921,16 +2905,14 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
!memcmp(hw->pkg_name, hw->active_pkg_name,
sizeof(hw->pkg_name))) {
if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
- dev_info(dev,
- "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ dev_info(dev, "DDP package already present on device: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
hw->active_pkg_ver.update,
hw->active_pkg_ver.draft);
else
- dev_info(dev,
- "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ dev_info(dev, "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2938,8 +2920,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->active_pkg_ver.draft);
} else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
- dev_err(dev,
- "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ dev_err(dev, "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2947,8 +2928,7 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
*status = ICE_ERR_NOT_SUPPORTED;
} else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
- dev_info(dev,
- "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ dev_info(dev, "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
hw->active_pkg_name,
hw->active_pkg_ver.major,
hw->active_pkg_ver.minor,
@@ -2960,54 +2940,46 @@ ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
hw->pkg_ver.update,
hw->pkg_ver.draft);
} else {
- dev_err(dev,
- "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
+ dev_err(dev, "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
*status = ICE_ERR_NOT_SUPPORTED;
}
break;
case ICE_ERR_BUF_TOO_SHORT:
/* fall-through */
case ICE_ERR_CFG:
- dev_err(dev,
- "The DDP package file is invalid. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file is invalid. Entering Safe Mode.\n");
break;
case ICE_ERR_NOT_SUPPORTED:
/* Package File version not supported */
if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
- dev_err(dev,
- "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
(hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
- dev_err(dev,
- "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ dev_err(dev, "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
break;
case ICE_ERR_AQ_ERROR:
switch (hw->pkg_dwnld_status) {
case ICE_AQ_RC_ENOSEC:
case ICE_AQ_RC_EBADSIG:
- dev_err(dev,
- "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_ESVN:
- dev_err(dev,
- "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
return;
case ICE_AQ_RC_EBADMAN:
case ICE_AQ_RC_EBADBUF:
- dev_err(dev,
- "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ dev_err(dev, "An error occurred on the device while loading the DDP package. The device will be reset.\n");
return;
default:
break;
}
/* fall-through */
default:
- dev_err(dev,
- "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
+ dev_err(dev, "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
*status);
break;
}
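
Most of the ice hunks in this patch are the same mechanical change: user-visible format strings that had been wrapped to satisfy the old 80-column limit are rejoined onto one line, since kernel coding style says never to break printable strings (they must stay greppable). A minimal sketch of the before/after shape, with a hypothetical message:

    /* before: string split to stay under 80 columns, hard to grep for */
    dev_err(dev,
            "something went wrong, entering Safe Mode\n");

    /* after: one greppable string; only the argument list may wrap */
    dev_err(dev, "something went wrong, entering Safe Mode\n");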
@@ -3038,8 +3010,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
ice_log_pkg_init(hw, &status);
} else {
- dev_err(dev,
- "The DDP package file failed to load. Entering Safe Mode.\n");
+ dev_err(dev, "The DDP package file failed to load. Entering Safe Mode.\n");
}
if (status) {
@@ -3065,8 +3036,7 @@ ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
static void ice_verify_cacheline_size(struct ice_pf *pf)
{
if (rd32(&pf->hw, GLPCI_CNF2) & GLPCI_CNF2_CACHELINE_SIZE_M)
- dev_warn(ice_pf_to_dev(pf),
- "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
+ dev_warn(ice_pf_to_dev(pf), "%d Byte cache line assumption is invalid, driver may have Tx timeouts!\n",
ICE_CACHE_LINE_BYTES);
}
@@ -3159,8 +3129,7 @@ static void ice_request_fw(struct ice_pf *pf)
dflt_pkg_load:
err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
if (err) {
- dev_err(dev,
- "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+ dev_err(dev, "The DDP package file was not found or could not be read. Entering Safe Mode\n");
return;
}
@@ -3184,7 +3153,9 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
struct ice_hw *hw;
int err;
- /* this driver uses devres, see Documentation/driver-api/driver-model/devres.rst */
+ /* this driver uses devres, see
+ * Documentation/driver-api/driver-model/devres.rst
+ */
err = pcim_enable_device(pdev);
if (err)
return err;
@@ -3245,11 +3216,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
- dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
- hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
- hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
- ice_nvm_version_str(hw), hw->fw_build);
-
ice_request_fw(pf);
/* if ice_request_fw fails, ICE_FLAG_ADV_FEATURES bit won't be
@@ -3257,8 +3223,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
* true
*/
if (ice_is_safe_mode(pf)) {
- dev_err(dev,
- "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
+ dev_err(dev, "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
/* we already got function/device capabilities but these don't
* reflect what the driver needs to do in safe mode. Instead of
* adding conditional logic everywhere to ignore these
@@ -3335,8 +3300,7 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
/* tell the firmware we are up */
err = ice_send_version(pf);
if (err) {
- dev_err(dev,
- "probe failed sending driver version %s. error: %d\n",
+ dev_err(dev, "probe failed sending driver version %s. error: %d\n",
ice_drv_ver, err);
goto err_alloc_sw_unroll;
}
@@ -3477,8 +3441,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
err = pci_enable_device_mem(pdev);
if (err) {
- dev_err(&pdev->dev,
- "Cannot re-enable PCI device after reset, error %d\n",
+ dev_err(&pdev->dev, "Cannot re-enable PCI device after reset, error %d\n",
err);
result = PCI_ERS_RESULT_DISCONNECT;
} else {
@@ -3497,8 +3460,7 @@ static pci_ers_result_t ice_pci_err_slot_reset(struct pci_dev *pdev)
err = pci_cleanup_aer_uncorrect_error_status(pdev);
if (err)
- dev_dbg(&pdev->dev,
- "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
+ dev_dbg(&pdev->dev, "pci_cleanup_aer_uncorrect_error_status failed, error %d\n",
err);
/* non-fatal, continue */
@@ -3517,8 +3479,8 @@ static void ice_pci_err_resume(struct pci_dev *pdev)
struct ice_pf *pf = pci_get_drvdata(pdev);
if (!pf) {
- dev_err(&pdev->dev,
- "%s failed, device is unrecoverable\n", __func__);
+ dev_err(&pdev->dev, "%s failed, device is unrecoverable\n",
+ __func__);
return;
}
@@ -3766,8 +3728,7 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
/* Validate maxrate requested is within permitted range */
if (maxrate && (maxrate > (ICE_SCHED_MAX_BW / 1000))) {
- netdev_err(netdev,
- "Invalid max rate %d specified for the queue %d\n",
+ netdev_err(netdev, "Invalid max rate %d specified for the queue %d\n",
maxrate, queue_index);
return -EINVAL;
}
@@ -3783,8 +3744,8 @@ ice_set_tx_maxrate(struct net_device *netdev, int queue_index, u32 maxrate)
status = ice_cfg_q_bw_lmt(vsi->port_info, vsi->idx, tc,
q_handle, ICE_MAX_BW, maxrate * 1000);
if (status) {
- netdev_err(netdev,
- "Unable to set Tx max rate, error %d\n", status);
+ netdev_err(netdev, "Unable to set Tx max rate, error %d\n",
+ status);
return -EIO;
}
@@ -3876,15 +3837,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
/* Don't set any netdev advanced features with device in Safe Mode */
if (ice_is_safe_mode(vsi->back)) {
- dev_err(&vsi->back->pdev->dev,
- "Device is in Safe Mode - not enabling advanced netdev features\n");
+ dev_err(ice_pf_to_dev(vsi->back), "Device is in Safe Mode - not enabling advanced netdev features\n");
return ret;
}
/* Do not change setting during reset */
if (ice_is_reset_in_progress(pf->state)) {
- dev_err(&vsi->back->pdev->dev,
- "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
+ dev_err(ice_pf_to_dev(vsi->back), "Device is resetting, changing advanced netdev features temporarily unavailable.\n");
return -EBUSY;
}
@@ -4372,21 +4331,18 @@ int ice_down(struct ice_vsi *vsi)
tx_err = ice_vsi_stop_lan_tx_rings(vsi, ICE_NO_RESET, 0);
if (tx_err)
- netdev_err(vsi->netdev,
- "Failed stop Tx rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop Tx rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
if (!tx_err && ice_is_xdp_ena_vsi(vsi)) {
tx_err = ice_vsi_stop_xdp_tx_rings(vsi);
if (tx_err)
- netdev_err(vsi->netdev,
- "Failed stop XDP rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop XDP rings, VSI %d error %d\n",
vsi->vsi_num, tx_err);
}
rx_err = ice_vsi_stop_rx_rings(vsi);
if (rx_err)
- netdev_err(vsi->netdev,
- "Failed stop Rx rings, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed stop Rx rings, VSI %d error %d\n",
vsi->vsi_num, rx_err);
ice_napi_disable_all(vsi);
@@ -4394,8 +4350,7 @@ int ice_down(struct ice_vsi *vsi)
if (test_bit(ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA, vsi->back->flags)) {
link_err = ice_force_phys_link_state(vsi, false);
if (link_err)
- netdev_err(vsi->netdev,
- "Failed to set physical link down, VSI %d error %d\n",
+ netdev_err(vsi->netdev, "Failed to set physical link down, VSI %d error %d\n",
vsi->vsi_num, link_err);
}
@@ -4406,8 +4361,7 @@ int ice_down(struct ice_vsi *vsi)
ice_clean_rx_ring(vsi->rx_rings[i]);
if (tx_err || rx_err || link_err) {
- netdev_err(vsi->netdev,
- "Failed to close VSI 0x%04X on switch 0x%04X\n",
+ netdev_err(vsi->netdev, "Failed to close VSI 0x%04X on switch 0x%04X\n",
vsi->vsi_num, vsi->vsw->sw_id);
return -EIO;
}
@@ -4426,7 +4380,7 @@ int ice_vsi_setup_tx_rings(struct ice_vsi *vsi)
int i, err = 0;
if (!vsi->num_txq) {
- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Tx queues\n",
+ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Tx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4457,7 +4411,7 @@ int ice_vsi_setup_rx_rings(struct ice_vsi *vsi)
int i, err = 0;
if (!vsi->num_rxq) {
- dev_err(&vsi->back->pdev->dev, "VSI %d has 0 Rx queues\n",
+ dev_err(ice_pf_to_dev(vsi->back), "VSI %d has 0 Rx queues\n",
vsi->vsi_num);
return -EINVAL;
}
@@ -4554,8 +4508,7 @@ static void ice_vsi_release_all(struct ice_pf *pf)
err = ice_vsi_release(pf->vsi[i]);
if (err)
- dev_dbg(ice_pf_to_dev(pf),
- "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
+ dev_dbg(ice_pf_to_dev(pf), "Failed to release pf->vsi[%d], err %d, vsi_num = %d\n",
i, err, pf->vsi[i]->vsi_num);
}
}
@@ -4582,8 +4535,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* rebuild the VSI */
err = ice_vsi_rebuild(vsi, true);
if (err) {
- dev_err(dev,
- "rebuild VSI failed, err %d, VSI index %d, type %s\n",
+ dev_err(dev, "rebuild VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4591,8 +4543,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* replay filters for the VSI */
status = ice_replay_vsi(&pf->hw, vsi->idx);
if (status) {
- dev_err(dev,
- "replay VSI failed, status %d, VSI index %d, type %s\n",
+ dev_err(dev, "replay VSI failed, status %d, VSI index %d, type %s\n",
status, vsi->idx, ice_vsi_type_str(type));
return -EIO;
}
@@ -4605,8 +4556,7 @@ static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
/* enable the VSI */
err = ice_ena_vsi(vsi, false);
if (err) {
- dev_err(dev,
- "enable VSI failed, err %d, VSI index %d, type %s\n",
+ dev_err(dev, "enable VSI failed, err %d, VSI index %d, type %s\n",
err, vsi->idx, ice_vsi_type_str(type));
return err;
}
@@ -4684,8 +4634,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
}
if (pf->first_sw->dflt_vsi_ena)
- dev_info(dev,
- "Clearing default VSI, re-enable after reset completes\n");
+ dev_info(dev, "Clearing default VSI, re-enable after reset completes\n");
/* clear the default VSI configuration if it exists */
pf->first_sw->dflt_vsi = NULL;
pf->first_sw->dflt_vsi_ena = false;
@@ -4736,8 +4685,7 @@ static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
/* tell the firmware we are up */
ret = ice_send_version(pf);
if (ret) {
- dev_err(dev,
- "Rebuild failed due to error sending driver version: %d\n",
+ dev_err(dev, "Rebuild failed due to error sending driver version: %d\n",
ret);
goto err_vsi_rebuild;
}
@@ -4993,7 +4941,7 @@ static int ice_vsi_update_bridge_mode(struct ice_vsi *vsi, u16 bmode)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_err(&vsi->back->pdev->dev, "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "update VSI for bridge mode failed, bmode = %d err %d aq_err %d\n",
bmode, status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -5185,8 +5133,7 @@ int ice_open(struct net_device *netdev)
if (pi->phy.link_info.link_info & ICE_AQ_MEDIA_AVAILABLE) {
err = ice_force_phys_link_state(vsi, true);
if (err) {
- netdev_err(netdev,
- "Failed to set physical link up, error %d\n",
+ netdev_err(netdev, "Failed to set physical link up, error %d\n",
err);
return err;
}
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.c b/drivers/net/ethernet/intel/ice/ice_txrx.c
index fd17ace6b226..4de61dbedd36 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.c
@@ -644,7 +644,7 @@ static bool ice_page_is_reserved(struct page *page)
* Update the offset within page so that Rx buf will be ready to be reused.
* For systems with PAGE_SIZE < 8192 this function will flip the page offset
* so the second half of page assigned to Rx buffer will be used, otherwise
- * the offset is moved by the @size bytes
+ * the offset is moved by "size" bytes
*/
static void
ice_rx_buf_adjust_pg_offset(struct ice_rx_buf *rx_buf, unsigned int size)
@@ -1078,8 +1078,6 @@ construct_skb:
skb = ice_build_skb(rx_ring, rx_buf, &xdp);
else
skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
- } else {
- skb = ice_construct_skb(rx_ring, rx_buf, &xdp);
}
/* exit if we failed to retrieve a buffer */
if (!skb) {
@@ -1621,11 +1619,11 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
{
u64 td_offset, td_tag, td_cmd;
u16 i = tx_ring->next_to_use;
- skb_frag_t *frag;
unsigned int data_len, size;
struct ice_tx_desc *tx_desc;
struct ice_tx_buf *tx_buf;
struct sk_buff *skb;
+ skb_frag_t *frag;
dma_addr_t dma;
td_tag = off->td_l2tag1;
@@ -1738,9 +1736,8 @@ ice_tx_map(struct ice_ring *tx_ring, struct ice_tx_buf *first,
ice_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* notify HW of packet */
- if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more()) {
+ if (netif_xmit_stopped(txring_txq(tx_ring)) || !netdev_xmit_more())
writel(i, tx_ring->tail);
- }
return;
@@ -2078,7 +2075,7 @@ static bool __ice_chk_linearize(struct sk_buff *skb)
frag = &skb_shinfo(skb)->frags[0];
/* Initialize size to the negative value of gso_size minus 1. We
- * use this as the worst case scenerio in which the frag ahead
+ * use this as the worst case scenario in which the frag ahead
* of us only provides one byte which is why we are limited to 6
* descriptors for a single transmit as the header and previous
* fragment are already consuming 2 descriptors.
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx.h b/drivers/net/ethernet/intel/ice/ice_txrx.h
index a86270696df1..7ee00a128663 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx.h
+++ b/drivers/net/ethernet/intel/ice/ice_txrx.h
@@ -33,8 +33,8 @@
* frame.
*
* Note: For cache line sizes 256 or larger this value is going to end
- * up negative. In these cases we should fall back to the legacy
- * receive path.
+ * up negative. In these cases we should fall back to the legacy
+ * receive path.
*/
#if (PAGE_SIZE < 8192)
#define ICE_2K_TOO_SMALL_WITH_PADDING \
@@ -222,7 +222,7 @@ enum ice_rx_dtype {
#define ICE_ITR_GRAN_S 1 /* ITR granularity is always 2us */
#define ICE_ITR_GRAN_US BIT(ICE_ITR_GRAN_S)
#define ICE_ITR_MASK 0x1FFE /* ITR register value alignment mask */
-#define ITR_REG_ALIGN(setting) __ALIGN_MASK(setting, ~ICE_ITR_MASK)
+#define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)
#define ICE_ITR_ADAPTIVE_MIN_INC 0x0002
#define ICE_ITR_ADAPTIVE_MIN_USECS 0x0002
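
The new ITR_REG_ALIGN() appears to round the setting down by masking, rather than rounding up via __ALIGN_MASK(): with ICE_ITR_MASK = 0x1FFE, bit 0 is cleared (matching the 2-usec ITR granularity) and bits above the field are dropped. A standalone check of the arithmetic:

    #include <stdio.h>

    #define ICE_ITR_MASK 0x1FFE
    #define ITR_REG_ALIGN(setting) ((setting) & ICE_ITR_MASK)

    int main(void)
    {
            printf("%#x\n", ITR_REG_ALIGN(0x0003)); /* 0x2: odd value rounds down */
            printf("%#x\n", ITR_REG_ALIGN(0x1fff)); /* 0x1ffe: top of the field */
            printf("%#x\n", ITR_REG_ALIGN(0x2001)); /* 0: out-of-field bits dropped */
            return 0;
    }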
diff --git a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
index 35bbc4ff603c..6da048a6ca7c 100644
--- a/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_txrx_lib.c
@@ -10,7 +10,7 @@
*/
void ice_release_rx_desc(struct ice_ring *rx_ring, u32 val)
{
- u16 prev_ntu = rx_ring->next_to_use;
+ u16 prev_ntu = rx_ring->next_to_use & ~0x7;
rx_ring->next_to_use = val;
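
The & ~0x7 rounds the cached next_to_use down to a multiple of 8 before it is compared with the new value, presumably because the hardware wants the Rx tail bumped only at 8-descriptor granularity. The arithmetic in isolation:

    #include <stdio.h>

    int main(void)
    {
            unsigned int ntu;

            /* & ~0x7 clears the three low bits: any index rounds down
             * to the previous multiple of 8
             */
            for (ntu = 5; ntu <= 21; ntu += 8)
                    printf("%u -> %u\n", ntu, ntu & ~0x7); /* 5->0, 13->8, 21->16 */
            return 0;
    }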
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index b361ffabb0ca..db0ef6ba907f 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -517,7 +517,7 @@ struct ice_hw {
struct ice_fw_log_cfg fw_log;
/* Device max aggregate bandwidths corresponding to the GL_PWR_MODE_CTL
- * register. Used for determining the ITR/intrl granularity during
+ * register. Used for determining the ITR/INTRL granularity during
* initialization.
*/
#define ICE_MAX_AGG_BW_200G 0x0
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 82b1e7a4cb92..75c70d432c72 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -199,8 +199,7 @@ static void ice_dis_vf_mappings(struct ice_vf *vf)
if (vsi->rx_mapping_mode == ICE_VSI_MAP_CONTIG)
wr32(hw, VPLAN_RX_QBASE(vf->vf_id), 0);
else
- dev_err(dev,
- "Scattered mode for VF Rx queues is not yet implemented\n");
+ dev_err(dev, "Scattered mode for VF Rx queues is not yet implemented\n");
}
/**
@@ -402,8 +401,7 @@ static void ice_trigger_vf_reset(struct ice_vf *vf, bool is_vflr, bool is_pfr)
if ((reg & VF_TRANS_PENDING_M) == 0)
break;
- dev_err(dev,
- "VF %d PCI transactions stuck\n", vf->vf_id);
+ dev_err(dev, "VF %d PCI transactions stuck\n", vf->vf_id);
udelay(ICE_PCI_CIAD_WAIT_DELAY_US);
}
}
@@ -462,7 +460,7 @@ static int ice_vsi_manage_pvid(struct ice_vsi *vsi, u16 vid, bool enable)
status = ice_update_vsi(hw, vsi->idx, ctxt, NULL);
if (status) {
- dev_info(&vsi->back->pdev->dev, "update VSI for port VLAN failed, err %d aq_err %d\n",
+ dev_info(ice_pf_to_dev(vsi->back), "update VSI for port VLAN failed, err %d aq_err %d\n",
status, hw->adminq.sq_last_status);
ret = -EIO;
goto out;
@@ -1095,7 +1093,6 @@ bool ice_reset_all_vfs(struct ice_pf *pf, bool is_vflr)
* finished resetting.
*/
for (i = 0, v = 0; i < 10 && v < pf->num_alloc_vfs; i++) {
-
/* Check each VF in sequence */
while (v < pf->num_alloc_vfs) {
u32 reg;
@@ -1553,8 +1550,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
dev_info(dev, "VF %d failed opcode %d, retval: %d\n", vf->vf_id,
v_opcode, v_retval);
if (vf->num_inval_msgs > ICE_DFLT_NUM_INVAL_MSGS_ALLOWED) {
- dev_err(dev,
- "Number of invalid messages exceeded for VF %d\n",
+ dev_err(dev, "Number of invalid messages exceeded for VF %d\n",
vf->vf_id);
dev_err(dev, "Use PF Control I/F to enable the VF\n");
set_bit(ICE_VF_STATE_DIS, vf->vf_states);
@@ -1569,8 +1565,7 @@ ice_vc_send_msg_to_vf(struct ice_vf *vf, u32 v_opcode,
aq_ret = ice_aq_send_msg_to_vf(&pf->hw, vf->vf_id, v_opcode, v_retval,
msg, msglen, NULL);
if (aq_ret && pf->hw.mailboxq.sq_last_status != ICE_AQ_RC_ENOSYS) {
- dev_info(dev,
- "Unable to send the message to VF %d ret %d aq_err %d\n",
+ dev_info(dev, "Unable to send the message to VF %d ret %d aq_err %d\n",
vf->vf_id, aq_ret, pf->hw.mailboxq.sq_last_status);
return -EIO;
}
@@ -1879,6 +1874,48 @@ error_param:
}
/**
+ * ice_wait_on_vf_reset - poll to make sure a given VF is ready after reset
+ * @vf: The VF being reset
+ *
+ * The max poll time is about 800ms, which is roughly the maximum time it takes
+ * for a VF to be reset and/or a VF driver to be removed.
+ */
+static void ice_wait_on_vf_reset(struct ice_vf *vf)
+{
+ int i;
+
+ for (i = 0; i < ICE_MAX_VF_RESET_TRIES; i++) {
+ if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
+ break;
+ msleep(ICE_MAX_VF_RESET_SLEEP_MS);
+ }
+}
+
+/**
+ * ice_check_vf_ready_for_cfg - check if VF is ready to be configured/queried
+ * @vf: VF to check if it's ready to be configured/queried
+ *
+ * The purpose of this function is to make sure the VF is not in reset, not
+ * disabled, and initialized so it can be configured and/or queried by a host
+ * administrator.
+ */
+static int ice_check_vf_ready_for_cfg(struct ice_vf *vf)
+{
+ struct ice_pf *pf;
+
+ ice_wait_on_vf_reset(vf);
+
+ if (ice_is_vf_disabled(vf))
+ return -EINVAL;
+
+ pf = vf->pf;
+ if (ice_check_vf_init(pf, vf))
+ return -EBUSY;
+
+ return 0;
+}
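
ice_check_vf_ready_for_cfg() consolidates the open-coded wait-then-validate sequences that later hunks delete from ice_set_vf_mac() and ice_set_vf_trust(). A sketch of the calling convention the .ndo handlers converge on (ice_set_vf_something is a hypothetical name; the helpers are the ones added above):

    int ice_set_vf_something(struct net_device *netdev, int vf_id)
    {
            struct ice_pf *pf = ice_netdev_to_pf(netdev);
            struct ice_vf *vf;
            int ret;

            if (ice_validate_vf_id(pf, vf_id))
                    return -EINVAL;

            vf = &pf->vf[vf_id];
            /* polls up to 40 * 20 ms = ~800 ms for ICE_VF_STATE_INIT */
            ret = ice_check_vf_ready_for_cfg(vf);
            if (ret)
                    return ret;

            /* VF is initialized and not disabled; safe to configure */
            return 0;
    }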
+
+/**
* ice_set_vf_spoofchk
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -1895,16 +1932,16 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
enum ice_status status;
struct device *dev;
struct ice_vf *vf;
- int ret = 0;
+ int ret;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
vf_vsi = pf->vsi[vf->lan_vsi_idx];
if (!vf_vsi) {
@@ -1914,8 +1951,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
}
if (vf_vsi->type != ICE_VSI_VF) {
- netdev_err(netdev,
- "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
+ netdev_err(netdev, "Type %d of VSI %d for VF %d is no ICE_VSI_VF\n",
vf_vsi->type, vf_vsi->vsi_num, vf->vf_id);
return -ENODEV;
}
@@ -1945,8 +1981,7 @@ int ice_set_vf_spoofchk(struct net_device *netdev, int vf_id, bool ena)
status = ice_update_vsi(&pf->hw, vf_vsi->idx, ctx, NULL);
if (status) {
- dev_err(dev,
- "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
+ dev_err(dev, "Failed to %sable spoofchk on VF %d VSI %d\n error %d",
ena ? "en" : "dis", vf->vf_id, vf_vsi->vsi_num, status);
ret = -EIO;
goto out;
@@ -2063,8 +2098,7 @@ static int ice_vc_ena_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
if (ice_vsi_ctrl_rx_ring(vsi, true, vf_q_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to enable Rx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to enable Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2166,8 +2200,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
if (ice_vsi_stop_tx_ring(vsi, ICE_NO_RESET, vf->vf_id,
ring, &txq_meta)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop Tx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Tx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2193,8 +2226,7 @@ static int ice_vc_dis_qs_msg(struct ice_vf *vf, u8 *msg)
continue;
if (ice_vsi_ctrl_rx_ring(vsi, false, vf_q_id)) {
- dev_err(&vsi->back->pdev->dev,
- "Failed to stop Rx ring %d on VSI %d\n",
+ dev_err(ice_pf_to_dev(vsi->back), "Failed to stop Rx ring %d on VSI %d\n",
vf_q_id, vsi->vsi_num);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2357,8 +2389,7 @@ static int ice_vc_cfg_qs_msg(struct ice_vf *vf, u8 *msg)
if (qci->num_queue_pairs > ICE_MAX_BASE_QS_PER_VF ||
qci->num_queue_pairs > min_t(u16, vsi->alloc_txq, vsi->alloc_rxq)) {
- dev_err(ice_pf_to_dev(pf),
- "VF-%d requesting more than supported number of queues: %d\n",
+ dev_err(ice_pf_to_dev(pf), "VF-%d requesting more than supported number of queues: %d\n",
vf->vf_id, min_t(u16, vsi->alloc_txq, vsi->alloc_rxq));
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto error_param;
@@ -2570,8 +2601,7 @@ ice_vc_handle_mac_addr_msg(struct ice_vf *vf, u8 *msg, bool set)
*/
if (set && !ice_is_vf_trusted(vf) &&
(vf->num_mac + al->num_elements) > ICE_MAX_MACADDR_PER_VF) {
- dev_err(ice_pf_to_dev(pf),
- "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
+ dev_err(ice_pf_to_dev(pf), "Can't add more MAC addresses, because VF-%d is not trusted, switch the VF to trusted mode in order to add more functionalities\n",
vf->vf_id);
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
goto handle_mac_exit;
@@ -2648,8 +2678,8 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
struct ice_pf *pf = vf->pf;
u16 max_allowed_vf_queues;
u16 tx_rx_queue_left;
- u16 cur_queues;
struct device *dev;
+ u16 cur_queues;
dev = ice_pf_to_dev(pf);
if (!test_bit(ICE_VF_STATE_ACTIVE, vf->vf_states)) {
@@ -2670,8 +2700,7 @@ static int ice_vc_request_qs_msg(struct ice_vf *vf, u8 *msg)
vfres->num_queue_pairs = ICE_MAX_BASE_QS_PER_VF;
} else if (req_queues > cur_queues &&
req_queues - cur_queues > tx_rx_queue_left) {
- dev_warn(dev,
- "VF %d requested %u more queues, but only %u left.\n",
+ dev_warn(dev, "VF %d requested %u more queues, but only %u left.\n",
vf->vf_id, req_queues - cur_queues, tx_rx_queue_left);
vfres->num_queue_pairs = min_t(u16, max_allowed_vf_queues,
ICE_MAX_BASE_QS_PER_VF);
@@ -2709,7 +2738,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
struct ice_vsi *vsi;
struct device *dev;
struct ice_vf *vf;
- int ret = 0;
+ int ret;
dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
@@ -2727,13 +2756,15 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
vf = &pf->vf[vf_id];
vsi = pf->vsi[vf->lan_vsi_idx];
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
+
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
if (le16_to_cpu(vsi->info.pvid) == vlanprio) {
/* duplicate request, so just return success */
dev_dbg(dev, "Duplicate pvid %d request\n", vlanprio);
- return ret;
+ return 0;
}
/* If PVID, then remove all filters on the old VLAN */
@@ -2744,7 +2775,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
if (vlan_id || qos) {
ret = ice_vsi_manage_pvid(vsi, vlanprio, true);
if (ret)
- goto error_set_pvid;
+ return ret;
} else {
ice_vsi_manage_pvid(vsi, 0, false);
vsi->info.pvid = 0;
@@ -2757,7 +2788,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
/* add new VLAN filter for each MAC */
ret = ice_vsi_add_vlan(vsi, vlan_id);
if (ret)
- goto error_set_pvid;
+ return ret;
}
/* The Port VLAN needs to be saved across resets the same as the
@@ -2765,8 +2796,7 @@ ice_set_vf_port_vlan(struct net_device *netdev, int vf_id, u16 vlan_id, u8 qos,
*/
vf->port_vlan_id = le16_to_cpu(vsi->info.pvid);
-error_set_pvid:
- return ret;
+ return 0;
}
/**
@@ -2821,8 +2851,8 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
for (i = 0; i < vfl->num_elements; i++) {
if (vfl->vlan_id[i] > ICE_MAX_VLANID) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "invalid VF VLAN id %d\n", vfl->vlan_id[i]);
+ dev_err(dev, "invalid VF VLAN id %d\n",
+ vfl->vlan_id[i]);
goto error_param;
}
}
@@ -2836,8 +2866,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (add_v && !ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being not trusted,
* so we can just return success message here
@@ -2860,8 +2889,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
if (!ice_is_vf_trusted(vf) &&
vsi->num_vlan >= ICE_MAX_VLAN_PER_VF) {
- dev_info(dev,
- "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
+ dev_info(dev, "VF-%d is not trusted, switch the VF to trusted mode, in order to add more VLAN addresses\n",
vf->vf_id);
/* There is no need to let VF know about being
* not trusted, so we can just return success
@@ -2889,8 +2917,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
status = ice_cfg_vlan_pruning(vsi, true, false);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
+ dev_err(dev, "Enable VLAN pruning on VLAN ID: %d failed error-%d\n",
vid, status);
goto error_param;
}
@@ -2903,8 +2930,7 @@ static int ice_vc_process_vlan_msg(struct ice_vf *vf, u8 *msg, bool add_v)
promisc_m, vid);
if (status) {
v_ret = VIRTCHNL_STATUS_ERR_PARAM;
- dev_err(dev,
- "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
+ dev_err(dev, "Enable Unicast/multicast promiscuous mode on VLAN ID:%d failed error-%d\n",
vid, status);
}
}
@@ -3140,8 +3166,7 @@ error_handler:
case VIRTCHNL_OP_GET_VF_RESOURCES:
err = ice_vc_get_vf_res_msg(vf, msg);
if (ice_vf_init_vlan_stripping(vf))
- dev_err(dev,
- "Failed to initialize VLAN stripping for VF %d\n",
+ dev_err(dev, "Failed to initialize VLAN stripping for VF %d\n",
vf->vf_id);
ice_vc_notify_vf_link_state(vf);
break;
@@ -3255,23 +3280,6 @@ ice_get_vf_cfg(struct net_device *netdev, int vf_id, struct ifla_vf_info *ivi)
}
/**
- * ice_wait_on_vf_reset
- * @vf: The VF being resseting
- *
- * Poll to make sure a given VF is ready after reset
- */
-static void ice_wait_on_vf_reset(struct ice_vf *vf)
-{
- int i;
-
- for (i = 0; i < ICE_MAX_VF_RESET_WAIT; i++) {
- if (test_bit(ICE_VF_STATE_INIT, vf->vf_states))
- break;
- msleep(20);
- }
-}
-
-/**
* ice_set_vf_mac
* @netdev: network interface device structure
* @vf_id: VF identifier
@@ -3283,29 +3291,21 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vf *vf;
- int ret = 0;
+ int ret;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
- vf = &pf->vf[vf_id];
- /* Don't set MAC on disabled VF */
- if (ice_is_vf_disabled(vf))
- return -EINVAL;
-
- /* In case VF is in reset mode, wait until it is completed. Depending
- * on factors like queue disabling routine, this could take ~250ms
- */
- ice_wait_on_vf_reset(vf);
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
-
if (is_zero_ether_addr(mac) || is_multicast_ether_addr(mac)) {
netdev_err(netdev, "%pM not a valid unicast address\n", mac);
return -EINVAL;
}
+ vf = &pf->vf[vf_id];
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
+
/* copy MAC into dflt_lan_addr and trigger a VF reset. The reset
* flow will use the updated dflt_lan_addr and add a MAC filter
* using ice_add_mac. Also set pf_set_mac to indicate that the PF has
@@ -3313,12 +3313,11 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
*/
ether_addr_copy(vf->dflt_lan_addr.addr, mac);
vf->pf_set_mac = true;
- netdev_info(netdev,
- "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
+ netdev_info(netdev, "MAC on VF %d set to %pM. VF driver will be reinitialized\n",
vf_id, mac);
ice_vc_reset_vf(vf);
- return ret;
+ return 0;
}
/**
@@ -3332,25 +3331,16 @@ int ice_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
- struct device *dev;
struct ice_vf *vf;
+ int ret;
- dev = ice_pf_to_dev(pf);
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- /* Don't set Trusted Mode on disabled VF */
- if (ice_is_vf_disabled(vf))
- return -EINVAL;
-
- /* In case VF is in reset mode, wait until it is completed. Depending
- * on factors like queue disabling routine, this could take ~250ms
- */
- ice_wait_on_vf_reset(vf);
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
/* Check if already trusted */
if (trusted == vf->trusted)
@@ -3358,7 +3348,7 @@ int ice_set_vf_trust(struct net_device *netdev, int vf_id, bool trusted)
vf->trusted = trusted;
ice_vc_reset_vf(vf);
- dev_info(dev, "VF %u is now %strusted\n",
+ dev_info(ice_pf_to_dev(pf), "VF %u is now %strusted\n",
vf_id, trusted ? "" : "un");
return 0;
@@ -3376,13 +3366,15 @@ int ice_set_vf_link_state(struct net_device *netdev, int vf_id, int link_state)
{
struct ice_pf *pf = ice_netdev_to_pf(netdev);
struct ice_vf *vf;
+ int ret;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
switch (link_state) {
case IFLA_VF_LINK_STATE_AUTO:
@@ -3418,14 +3410,15 @@ int ice_get_vf_stats(struct net_device *netdev, int vf_id,
struct ice_eth_stats *stats;
struct ice_vsi *vsi;
struct ice_vf *vf;
+ int ret;
if (ice_validate_vf_id(pf, vf_id))
return -EINVAL;
vf = &pf->vf[vf_id];
-
- if (ice_check_vf_init(pf, vf))
- return -EBUSY;
+ ret = ice_check_vf_ready_for_cfg(vf);
+ if (ret)
+ return ret;
vsi = pf->vsi[vf->lan_vsi_idx];
if (!vsi)
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
index 4647d636ed36..ac67982751df 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.h
@@ -38,7 +38,8 @@
#define ICE_MAX_POLICY_INTR_PER_VF 33
#define ICE_MIN_INTR_PER_VF (ICE_MIN_QS_PER_VF + 1)
#define ICE_DFLT_INTR_PER_VF (ICE_DFLT_QS_PER_VF + 1)
-#define ICE_MAX_VF_RESET_WAIT 15
+#define ICE_MAX_VF_RESET_TRIES 40
+#define ICE_MAX_VF_RESET_SLEEP_MS 20
#define ice_for_each_vf(pf, i) \
for ((i) = 0; (i) < (pf)->num_alloc_vfs; (i)++)
diff --git a/drivers/net/ethernet/intel/ice/ice_xsk.c b/drivers/net/ethernet/intel/ice/ice_xsk.c
index 149dca0012ba..4d3407bbd4c4 100644
--- a/drivers/net/ethernet/intel/ice/ice_xsk.c
+++ b/drivers/net/ethernet/intel/ice/ice_xsk.c
@@ -338,8 +338,8 @@ static int ice_xsk_umem_dma_map(struct ice_vsi *vsi, struct xdp_umem *umem)
DMA_BIDIRECTIONAL,
ICE_RX_DMA_ATTR);
if (dma_mapping_error(dev, dma)) {
- dev_dbg(dev,
- "XSK UMEM DMA mapping error on page num %d", i);
+ dev_dbg(dev, "XSK UMEM DMA mapping error on page num %d\n",
+ i);
goto out_unmap;
}
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
index 3a975641f902..20b907dc1e29 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/health.c
@@ -200,7 +200,7 @@ int mlx5e_health_report(struct mlx5e_priv *priv,
netdev_err(priv->netdev, err_str);
if (!reporter)
- return err_ctx->recover(&err_ctx->ctx);
+ return err_ctx->recover(err_ctx->ctx);
return devlink_health_report(reporter, err_str, err_ctx);
}
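
A one-character fix with real effect: ctx is presumably a void * member stored next to the recover callback, so passing &err_ctx->ctx handed the callback a void **, one level of indirection too many. Schematically, with a hypothetical stand-in struct:

    struct err_ctx_like {
            int (*recover)(void *ctx);
            void *ctx;
    };

    static int run_recover(struct err_ctx_like *e)
    {
            return e->recover(e->ctx); /* not &e->ctx, which is a void ** */
    }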
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
index 7c8796d9743f..a226277b0980 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h
@@ -179,6 +179,14 @@ mlx5e_tx_dma_unmap(struct device *pdev, struct mlx5e_sq_dma *dma)
}
}
+static inline void mlx5e_rqwq_reset(struct mlx5e_rq *rq)
+{
+ if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
+ mlx5_wq_ll_reset(&rq->mpwqe.wq);
+ else
+ mlx5_wq_cyc_reset(&rq->wqe.wq);
+}
+
/* SW parser related functions */
struct mlx5e_swp_spec {
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 454d3459bd8b..21de4764d4c0 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -712,6 +712,9 @@ int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state, int next_state)
if (!in)
return -ENOMEM;
+ if (curr_state == MLX5_RQC_STATE_RST && next_state == MLX5_RQC_STATE_RDY)
+ mlx5e_rqwq_reset(rq);
+
rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
MLX5_SET(modify_rq_in, in, rq_state, curr_state);
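
With this hunk the RST->RDY transition rewinds the software work-queue state internally: mlx5e_rqwq_reset() (added in en/txrx.h above) dispatches to the mlx5_wq_ll_reset()/mlx5_wq_cyc_reset() helpers added in wq.c below, so the SW producer/consumer counters and, for linked-list WQs, the free list match the hardware's post-reset view. A recovery-path caller then reduces to, as a sketch:

    /* no explicit WQ rewind needed anymore; it happens inside */
    err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
    if (err)
            return err;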
@@ -5144,7 +5147,6 @@ static void mlx5e_nic_enable(struct mlx5e_priv *priv)
static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
- struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
#ifdef CONFIG_MLX5_CORE_EN_DCB
@@ -5165,7 +5167,7 @@ static void mlx5e_nic_disable(struct mlx5e_priv *priv)
mlx5e_monitor_counter_cleanup(priv);
mlx5e_disable_async_events(priv);
- mlx5_lag_remove(mdev, netdev);
+ mlx5_lag_remove(mdev);
}
int mlx5e_update_nic_rx(struct mlx5e_priv *priv)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
index 7b48ccacebe2..6ed307d7f191 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c
@@ -1861,7 +1861,6 @@ static void mlx5e_uplink_rep_enable(struct mlx5e_priv *priv)
static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
{
- struct net_device *netdev = priv->netdev;
struct mlx5_core_dev *mdev = priv->mdev;
struct mlx5e_rep_priv *rpriv = priv->ppriv;
@@ -1870,7 +1869,7 @@ static void mlx5e_uplink_rep_disable(struct mlx5e_priv *priv)
#endif
mlx5_notifier_unregister(mdev, &priv->events_nb);
cancel_work_sync(&rpriv->uplink_priv.reoffload_flows_work);
- mlx5_lag_remove(mdev, netdev);
+ mlx5_lag_remove(mdev);
}
static MLX5E_DEFINE_STATS_GRP(sw_rep, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
index 5acf60b1bbfe..e49acd0c5da5 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch.c
@@ -459,12 +459,16 @@ static void esw_destroy_legacy_table(struct mlx5_eswitch *esw)
static int esw_legacy_enable(struct mlx5_eswitch *esw)
{
- int ret;
+ struct mlx5_vport *vport;
+ int ret, i;
ret = esw_create_legacy_table(esw);
if (ret)
return ret;
+ mlx5_esw_for_each_vf_vport(esw, i, vport, esw->esw_funcs.num_vfs)
+ vport->info.link_state = MLX5_VPORT_ADMIN_STATE_AUTO;
+
ret = mlx5_eswitch_enable_pf_vf_vports(esw, MLX5_LEGACY_SRIOV_VPORT_EVENTS);
if (ret)
esw_destroy_legacy_table(esw);
@@ -2452,25 +2456,17 @@ out:
int mlx5_eswitch_get_vepa(struct mlx5_eswitch *esw, u8 *setting)
{
- int err = 0;
-
if (!esw)
return -EOPNOTSUPP;
if (!ESW_ALLOWED(esw))
return -EPERM;
- mutex_lock(&esw->state_lock);
- if (esw->mode != MLX5_ESWITCH_LEGACY) {
- err = -EOPNOTSUPP;
- goto out;
- }
+ if (esw->mode != MLX5_ESWITCH_LEGACY)
+ return -EOPNOTSUPP;
*setting = esw->fdb_table.legacy.vepa_uplink_rule ? 1 : 0;
-
-out:
- mutex_unlock(&esw->state_lock);
- return err;
+ return 0;
}
int mlx5_eswitch_set_vport_trust(struct mlx5_eswitch *esw,
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
index 979f13bdc203..1a57b2bd74b8 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c
@@ -1172,7 +1172,7 @@ static int esw_offloads_start(struct mlx5_eswitch *esw,
return -EINVAL;
}
- mlx5_eswitch_disable(esw, true);
+ mlx5_eswitch_disable(esw, false);
mlx5_eswitch_update_num_of_vfs(esw, esw->dev->priv.sriov.num_vfs);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_OFFLOADS);
if (err) {
@@ -2065,7 +2065,7 @@ static int esw_offloads_stop(struct mlx5_eswitch *esw,
{
int err, err1;
- mlx5_eswitch_disable(esw, true);
+ mlx5_eswitch_disable(esw, false);
err = mlx5_eswitch_enable(esw, MLX5_ESWITCH_LEGACY);
if (err) {
NL_SET_ERR_MSG_MOD(extack, "Failed setting eswitch to legacy");
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
index c5a446e295aa..4276194b633f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads_chains.c
@@ -35,7 +35,7 @@
static const unsigned int ESW_POOLS[] = { 4 * 1024 * 1024,
1 * 1024 * 1024,
64 * 1024,
- 4 * 1024, };
+ 128 };
struct mlx5_esw_chains_priv {
struct rhashtable chains_ht;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.c b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
index b91eabc09fbc..8e19f6ab8393 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.c
@@ -464,9 +464,6 @@ static int mlx5_lag_netdev_event(struct notifier_block *this,
struct mlx5_lag *ldev;
int changed = 0;
- if (!net_eq(dev_net(ndev), &init_net))
- return NOTIFY_DONE;
-
if ((event != NETDEV_CHANGEUPPER) && (event != NETDEV_CHANGELOWERSTATE))
return NOTIFY_DONE;
@@ -586,8 +583,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
if (!ldev->nb.notifier_call) {
ldev->nb.notifier_call = mlx5_lag_netdev_event;
- if (register_netdevice_notifier_dev_net(netdev, &ldev->nb,
- &ldev->nn)) {
+ if (register_netdevice_notifier_net(&init_net, &ldev->nb)) {
ldev->nb.notifier_call = NULL;
mlx5_core_err(dev, "Failed to register LAG netdev notifier\n");
}
@@ -600,7 +596,7 @@ void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev)
}
/* Must be called with intf_mutex held */
-void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
+void mlx5_lag_remove(struct mlx5_core_dev *dev)
{
struct mlx5_lag *ldev;
int i;
@@ -620,8 +616,7 @@ void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev)
if (i == MLX5_MAX_PORTS) {
if (ldev->nb.notifier_call)
- unregister_netdevice_notifier_dev_net(netdev, &ldev->nb,
- &ldev->nn);
+ unregister_netdevice_notifier_net(&init_net, &ldev->nb);
mlx5_lag_mp_cleanup(ldev);
cancel_delayed_work_sync(&ldev->bond_work);
mlx5_lag_dev_free(ldev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lag.h b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
index 316ab09e2664..f1068aac6406 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/lag.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/lag.h
@@ -44,7 +44,6 @@ struct mlx5_lag {
struct workqueue_struct *wq;
struct delayed_work bond_work;
struct notifier_block nb;
- struct netdev_net_notifier nn;
struct lag_mp lag_mp;
};
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
index fcce9e0fc82c..da67b28d6e23 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mlx5_core.h
@@ -157,7 +157,7 @@ int mlx5_query_qcam_reg(struct mlx5_core_dev *mdev, u32 *qcam,
u8 feature_group, u8 access_reg_group);
void mlx5_lag_add(struct mlx5_core_dev *dev, struct net_device *netdev);
-void mlx5_lag_remove(struct mlx5_core_dev *dev, struct net_device *netdev);
+void mlx5_lag_remove(struct mlx5_core_dev *dev);
int mlx5_irq_table_init(struct mlx5_core_dev *dev);
void mlx5_irq_table_cleanup(struct mlx5_core_dev *dev);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
index c6c7d1defbd7..aade62a9ee5c 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/dr_ste.c
@@ -2307,7 +2307,9 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
struct mlx5dr_cmd_vport_cap *vport_cap;
struct mlx5dr_domain *dmn = sb->dmn;
struct mlx5dr_cmd_caps *caps;
+ u8 *bit_mask = sb->bit_mask;
u8 *tag = hw_ste->tag;
+ bool source_gvmi_set;
DR_STE_SET_TAG(src_gvmi_qp, tag, source_qp, misc, source_sqn);
@@ -2328,7 +2330,8 @@ static int dr_ste_build_src_gvmi_qpn_tag(struct mlx5dr_match_param *value,
if (!vport_cap)
return -EINVAL;
- if (vport_cap->vport_gvmi)
+ source_gvmi_set = MLX5_GET(ste_src_gvmi_qp, bit_mask, source_gvmi);
+ if (vport_cap->vport_gvmi && source_gvmi_set)
MLX5_SET(ste_src_gvmi_qp, tag, source_gvmi, vport_cap->vport_gvmi);
misc->source_eswitch_owner_vhca_id = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
index 3abfc8125926..c2027192e21e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/steering/fs_dr.c
@@ -66,15 +66,20 @@ static int mlx5_cmd_dr_create_flow_table(struct mlx5_flow_root_namespace *ns,
struct mlx5_flow_table *next_ft)
{
struct mlx5dr_table *tbl;
+ u32 flags;
int err;
if (mlx5_dr_is_fw_table(ft->flags))
return mlx5_fs_cmd_get_fw_cmds()->create_flow_table(ns, ft,
log_size,
next_ft);
+ flags = ft->flags;
+ /* turn off encap/decap if not supported for sw-str by fw */
+ if (!MLX5_CAP_FLOWTABLE(ns->dev, sw_owner_reformat_supported))
+ flags = ft->flags & ~(MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT |
+ MLX5_FLOW_TABLE_TUNNEL_EN_DECAP);
- tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain,
- ft->level, ft->flags);
+ tbl = mlx5dr_table_create(ns->fs_dr_domain.dr_domain, ft->level, flags);
if (!tbl) {
mlx5_core_err(ns->dev, "Failed creating dr flow_table\n");
return -EINVAL;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.c b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
index 02f7e4a39578..01f075fac276 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.c
@@ -94,6 +94,13 @@ void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides)
print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET, 16, 1, wqe, len, false);
}
+void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq)
+{
+ wq->wqe_ctr = 0;
+ wq->cur_sz = 0;
+ mlx5_wq_cyc_update_db_record(wq);
+}
+
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
struct mlx5_wq_ctrl *wq_ctrl)
@@ -192,6 +199,19 @@ err_db_free:
return err;
}
+static void mlx5_wq_ll_init_list(struct mlx5_wq_ll *wq)
+{
+ struct mlx5_wqe_srq_next_seg *next_seg;
+ int i;
+
+ for (i = 0; i < wq->fbc.sz_m1; i++) {
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ next_seg->next_wqe_index = cpu_to_be16(i + 1);
+ }
+ next_seg = mlx5_wq_ll_get_wqe(wq, i);
+ wq->tail_next = &next_seg->next_wqe_index;
+}
+
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl)
@@ -199,9 +219,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
u8 log_wq_stride = MLX5_GET(wq, wqc, log_wq_stride);
u8 log_wq_sz = MLX5_GET(wq, wqc, log_wq_sz);
struct mlx5_frag_buf_ctrl *fbc = &wq->fbc;
- struct mlx5_wqe_srq_next_seg *next_seg;
int err;
- int i;
err = mlx5_db_alloc_node(mdev, &wq_ctrl->db, param->db_numa_node);
if (err) {
@@ -220,13 +238,7 @@ int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
mlx5_init_fbc(wq_ctrl->buf.frags, log_wq_stride, log_wq_sz, fbc);
- for (i = 0; i < fbc->sz_m1; i++) {
- next_seg = mlx5_wq_ll_get_wqe(wq, i);
- next_seg->next_wqe_index = cpu_to_be16(i + 1);
- }
- next_seg = mlx5_wq_ll_get_wqe(wq, i);
- wq->tail_next = &next_seg->next_wqe_index;
-
+ mlx5_wq_ll_init_list(wq);
wq_ctrl->mdev = mdev;
return 0;
@@ -237,6 +249,15 @@ err_db_free:
return err;
}
+void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq)
+{
+ wq->head = 0;
+ wq->wqe_ctr = 0;
+ wq->cur_sz = 0;
+ mlx5_wq_ll_init_list(wq);
+ mlx5_wq_ll_update_db_record(wq);
+}
+
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl)
{
mlx5_frag_buf_free(wq_ctrl->mdev, &wq_ctrl->buf);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/wq.h b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
index d9a94bc223c0..4cadc336593f 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/wq.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/wq.h
@@ -80,6 +80,7 @@ int mlx5_wq_cyc_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_cyc *wq,
struct mlx5_wq_ctrl *wq_ctrl);
void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc *wq, u16 ix, u8 nstrides);
+void mlx5_wq_cyc_reset(struct mlx5_wq_cyc *wq);
int mlx5_wq_qp_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *qpc, struct mlx5_wq_qp *wq,
@@ -92,6 +93,7 @@ int mlx5_cqwq_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
int mlx5_wq_ll_create(struct mlx5_core_dev *mdev, struct mlx5_wq_param *param,
void *wqc, struct mlx5_wq_ll *wq,
struct mlx5_wq_ctrl *wq_ctrl);
+void mlx5_wq_ll_reset(struct mlx5_wq_ll *wq);
void mlx5_wq_destroy(struct mlx5_wq_ctrl *wq_ctrl);
diff --git a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
index e0d7d2d9a0c8..43fa8c85b5d9 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/pci_hw.h
@@ -28,7 +28,7 @@
#define MLXSW_PCI_SW_RESET 0xF0010
#define MLXSW_PCI_SW_RESET_RST_BIT BIT(0)
#define MLXSW_PCI_SW_RESET_TIMEOUT_MSECS 900000
-#define MLXSW_PCI_SW_RESET_WAIT_MSECS 100
+#define MLXSW_PCI_SW_RESET_WAIT_MSECS 200
#define MLXSW_PCI_FW_READY 0xA1844
#define MLXSW_PCI_FW_READY_MASK 0xFFFF
#define MLXSW_PCI_FW_READY_MAGIC 0x5E
diff --git a/drivers/net/ethernet/micrel/ks8851_mll.c b/drivers/net/ethernet/micrel/ks8851_mll.c
index a41a90c589db..58579baf3f7a 100644
--- a/drivers/net/ethernet/micrel/ks8851_mll.c
+++ b/drivers/net/ethernet/micrel/ks8851_mll.c
@@ -157,24 +157,6 @@ static int msg_enable;
*/
/**
- * ks_rdreg8 - read 8 bit register from device
- * @ks : The chip information
- * @offset: The register address
- *
- * Read a 8bit register from the chip, returning the result
- */
-static u8 ks_rdreg8(struct ks_net *ks, int offset)
-{
- u16 data;
- u8 shift_bit = offset & 0x03;
- u8 shift_data = (offset & 1) << 3;
- ks->cmd_reg_cache = (u16) offset | (u16)(BE0 << shift_bit);
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- data = ioread16(ks->hw_addr);
- return (u8)(data >> shift_data);
-}
-
-/**
* ks_rdreg16 - read 16 bit register from device
* @ks : The chip information
* @offset: The register address
@@ -184,28 +166,12 @@ static u8 ks_rdreg8(struct ks_net *ks, int offset)
static u16 ks_rdreg16(struct ks_net *ks, int offset)
{
- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
return ioread16(ks->hw_addr);
}
/**
- * ks_wrreg8 - write 8bit register value to chip
- * @ks: The chip information
- * @offset: The register address
- * @value: The value to write
- *
- */
-static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
-{
- u8 shift_bit = (offset & 0x03);
- u16 value_write = (u16)(value << ((offset & 1) << 3));
- ks->cmd_reg_cache = (u16)offset | (BE0 << shift_bit);
- iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
- iowrite16(value_write, ks->hw_addr);
-}
-
-/**
* ks_wrreg16 - write 16bit register value to chip
* @ks: The chip information
* @offset: The register address
@@ -215,7 +181,7 @@ static void ks_wrreg8(struct ks_net *ks, int offset, u8 value)
static void ks_wrreg16(struct ks_net *ks, int offset, u16 value)
{
- ks->cmd_reg_cache = (u16)offset | ((BE1 | BE0) << (offset & 0x02));
+ ks->cmd_reg_cache = (u16)offset | ((BE3 | BE2) >> (offset & 0x02));
iowrite16(ks->cmd_reg_cache, ks->hw_addr_cmd);
iowrite16(value, ks->hw_addr);
}
@@ -231,7 +197,7 @@ static inline void ks_inblk(struct ks_net *ks, u16 *wptr, u32 len)
{
len >>= 1;
while (len--)
- *wptr++ = (u16)ioread16(ks->hw_addr);
+ *wptr++ = be16_to_cpu(ioread16(ks->hw_addr));
}
/**
@@ -245,7 +211,7 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
{
len >>= 1;
while (len--)
- iowrite16(*wptr++, ks->hw_addr);
+ iowrite16(cpu_to_be16(*wptr++), ks->hw_addr);
}
static void ks_disable_int(struct ks_net *ks)
@@ -324,8 +290,7 @@ static void ks_read_config(struct ks_net *ks)
u16 reg_data = 0;
/* Regardless of bus width, 8 bit read should always work.*/
- reg_data = ks_rdreg8(ks, KS_CCR) & 0x00FF;
- reg_data |= ks_rdreg8(ks, KS_CCR+1) << 8;
+ reg_data = ks_rdreg16(ks, KS_CCR);
/* addr/data bus are multiplexed */
ks->sharedbus = (reg_data & CCR_SHARED) == CCR_SHARED;
@@ -429,7 +394,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
/* 1. set pseudo-DMA mode */
ks_wrreg16(ks, KS_RXFDPR, RXFDPR_RXFPAI);
- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
/* 2. read prepend data */
/**
@@ -446,7 +411,7 @@ static inline void ks_read_qmu(struct ks_net *ks, u16 *buf, u32 len)
ks_inblk(ks, buf, ALIGN(len, 4));
/* 4. reset pseudo-DMA mode */
- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
}
/**
@@ -548,14 +513,17 @@ static irqreturn_t ks_irq(int irq, void *pw)
{
struct net_device *netdev = pw;
struct ks_net *ks = netdev_priv(netdev);
+ unsigned long flags;
u16 status;
+ spin_lock_irqsave(&ks->statelock, flags);
/*this should be the first in IRQ handler */
ks_save_cmd_reg(ks);
status = ks_rdreg16(ks, KS_ISR);
if (unlikely(!status)) {
ks_restore_cmd_reg(ks);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_NONE;
}
@@ -581,6 +549,7 @@ static irqreturn_t ks_irq(int irq, void *pw)
ks->netdev->stats.rx_over_errors++;
/* this should be the last in IRQ handler*/
ks_restore_cmd_reg(ks);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return IRQ_HANDLED;
}
@@ -650,6 +619,7 @@ static int ks_net_stop(struct net_device *netdev)
/* shutdown RX/TX QMU */
ks_disable_qmu(ks);
+ ks_disable_int(ks);
/* set powermode to soft power down to save power */
ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -679,13 +649,13 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
ks->txh.txw[1] = cpu_to_le16(len);
/* 1. set pseudo-DMA mode */
- ks_wrreg8(ks, KS_RXQCR, (ks->rc_rxqcr | RXQCR_SDA) & 0xff);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr | RXQCR_SDA);
/* 2. write status/length info */
ks_outblk(ks, ks->txh.txw, 4);
/* 3. write pkt data */
ks_outblk(ks, (u16 *)pdata, ALIGN(len, 4));
/* 4. reset pseudo-DMA mode */
- ks_wrreg8(ks, KS_RXQCR, ks->rc_rxqcr);
+ ks_wrreg16(ks, KS_RXQCR, ks->rc_rxqcr);
/* 5. Enqueue Tx(move the pkt from TX buffer into TXQ) */
ks_wrreg16(ks, KS_TXQCR, TXQCR_METFE);
/* 6. wait until TXQCR_METFE is auto-cleared */
@@ -706,10 +676,9 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
netdev_tx_t retv = NETDEV_TX_OK;
struct ks_net *ks = netdev_priv(netdev);
+ unsigned long flags;
- disable_irq(netdev->irq);
- ks_disable_int(ks);
- spin_lock(&ks->statelock);
+ spin_lock_irqsave(&ks->statelock, flags);
/* Extra space is required:
* 4 bytes for alignment, 4 for status/length, 4 for CRC
@@ -723,9 +692,7 @@ static netdev_tx_t ks_start_xmit(struct sk_buff *skb, struct net_device *netdev)
dev_kfree_skb(skb);
} else
retv = NETDEV_TX_BUSY;
- spin_unlock(&ks->statelock);
- ks_enable_int(ks);
- enable_irq(netdev->irq);
+ spin_unlock_irqrestore(&ks->statelock, flags);
return retv;
}
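
The xmit path had bracketed its critical section with disable_irq()/enable_irq() plus a plain spin_lock(); disable_irq() can sleep waiting for a running handler, which is not allowed in the transmit path. After the change, statelock is the single guard, taken with irqsave both in process context and in ks_irq() itself. The generic shape of the pattern, as a sketch:

    #include <linux/interrupt.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(statelock);

    static irqreturn_t example_irq(int irq, void *dev)
    {
            unsigned long flags;

            spin_lock_irqsave(&statelock, flags);
            /* ... touch state shared with the xmit path ... */
            spin_unlock_irqrestore(&statelock, flags);
            return IRQ_HANDLED;
    }

    static void example_xmit(void)
    {
            unsigned long flags;

            /* replaces disable_irq(), which may sleep */
            spin_lock_irqsave(&statelock, flags);
            /* ... write the packet to the chip ... */
            spin_unlock_irqrestore(&statelock, flags);
    }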
diff --git a/drivers/net/ethernet/mscc/ocelot_board.c b/drivers/net/ethernet/mscc/ocelot_board.c
index b38820849faa..1135a18019c7 100644
--- a/drivers/net/ethernet/mscc/ocelot_board.c
+++ b/drivers/net/ethernet/mscc/ocelot_board.c
@@ -114,6 +114,14 @@ static irqreturn_t ocelot_xtr_irq_handler(int irq, void *arg)
if (err != 4)
break;
+ /* At this point the IFH was read correctly, so it is safe to
+ * presume that there is no error. The err needs to be reset;
+ * otherwise a frame could arrive in the CPU queue between the
+ * while condition and the later error check, and in that case
+ * the new frame would just be removed and not processed.
+ */
+ err = 0;
+
ocelot_parse_ifh(ifh, &info);
ocelot_port = ocelot->ports[info.port];
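
The new comment describes a loop-shape issue; reduced to a sketch (read_ifh() and process_frame() are hypothetical stand-ins), err must be cleared once a full IFH has been consumed, or a frame arriving between the loop condition and the post-loop error check is dropped on a stale value:

    while (frames_pending()) {
            err = read_ifh(ifh);
            if (err != 4) /* short read: a real error */
                    break;
            err = 0; /* full IFH consumed: clear the stale value */
            process_frame(ifh);
    }
    if (err)
            handle_read_error();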
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_dev.c b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
index 87f82f36812f..46107de5e6c3 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_dev.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_dev.c
@@ -103,7 +103,7 @@ int ionic_heartbeat_check(struct ionic *ionic)
{
struct ionic_dev *idev = &ionic->idev;
unsigned long hb_time;
- u32 fw_status;
+ u8 fw_status;
u32 hb;
/* wait a little more than one second before testing again */
@@ -111,9 +111,12 @@ int ionic_heartbeat_check(struct ionic *ionic)
if (time_before(hb_time, (idev->last_hb_time + ionic->watchdog_period)))
return 0;
- /* firmware is useful only if fw_status is non-zero */
- fw_status = ioread32(&idev->dev_info_regs->fw_status);
- if (!fw_status)
+ /* firmware is useful only if the running bit is set and
+ * fw_status != 0xff (bad PCI read)
+ */
+ fw_status = ioread8(&idev->dev_info_regs->fw_status);
+ if (fw_status == 0xff ||
+ !(fw_status & IONIC_FW_STS_F_RUNNING))
return -ENXIO;
/* early FW has no heartbeat, else FW will return non-zero */
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_if.h b/drivers/net/ethernet/pensando/ionic/ionic_if.h
index ce07c2931a72..54547d53b0f2 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_if.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_if.h
@@ -2445,6 +2445,7 @@ union ionic_dev_info_regs {
u8 version;
u8 asic_type;
u8 asic_rev;
+#define IONIC_FW_STS_F_RUNNING 0x1
u8 fw_status;
u32 fw_heartbeat;
char fw_version[IONIC_DEVINFO_FWVERS_BUFLEN];
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index e8a1b27db84d..234c6f30effb 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -163,6 +163,8 @@ struct qede_rdma_dev {
struct list_head entry;
struct list_head rdma_event_list;
struct workqueue_struct *rdma_wq;
+ struct kref refcnt;
+ struct completion event_comp;
bool exp_recovery;
};
diff --git a/drivers/net/ethernet/qlogic/qede/qede_rdma.c b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
index ffabc2d2f082..2d873ae8a234 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_rdma.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_rdma.c
@@ -59,6 +59,9 @@ static void _qede_rdma_dev_add(struct qede_dev *edev)
static int qede_rdma_create_wq(struct qede_dev *edev)
{
INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
+ kref_init(&edev->rdma_info.refcnt);
+ init_completion(&edev->rdma_info.event_comp);
+
edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
if (!edev->rdma_info.rdma_wq) {
DP_NOTICE(edev, "qedr: Could not create workqueue\n");
@@ -83,8 +86,23 @@ static void qede_rdma_cleanup_event(struct qede_dev *edev)
}
}
+static void qede_rdma_complete_event(struct kref *ref)
+{
+ struct qede_rdma_dev *rdma_dev =
+ container_of(ref, struct qede_rdma_dev, refcnt);
+
+ /* no more events will be added after this */
+ complete(&rdma_dev->event_comp);
+}
+
static void qede_rdma_destroy_wq(struct qede_dev *edev)
{
+ /* Avoid racing with the add_event flow; make sure it finishes before
+ * we start accessing the list and cleaning up the work.
+ */
+ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
+ wait_for_completion(&edev->rdma_info.event_comp);
+
qede_rdma_cleanup_event(edev);
destroy_workqueue(edev->rdma_info.rdma_wq);
}
@@ -310,15 +328,24 @@ static void qede_rdma_add_event(struct qede_dev *edev,
if (!edev->rdma_info.qedr_dev)
return;
+ /* We don't want the cleanup flow to start while we're allocating and
+ * scheduling the work
+ */
+ if (!kref_get_unless_zero(&edev->rdma_info.refcnt))
+ return; /* already being destroyed */
+
event_node = qede_rdma_get_free_event_node(edev);
if (!event_node)
- return;
+ goto out;
event_node->event = event;
event_node->ptr = edev;
INIT_WORK(&event_node->work, qede_rdma_handle_event);
queue_work(edev->rdma_info.rdma_wq, &event_node->work);
+
+out:
+ kref_put(&edev->rdma_info.refcnt, qede_rdma_complete_event);
}
void qede_rdma_dev_event_open(struct qede_dev *edev)
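
The kref/completion pairing above is a general teardown protocol: each add_event takes a reference with kref_get_unless_zero() before queueing work, and destroy drops the initial reference and then waits for the completion that fires when the count reaches zero. A minimal sketch of the same shape in C11 atomics (not the kernel API, and a single-threaded demo of the protocol only):

#include <stdatomic.h>
#include <stdio.h>

/* refcnt starts at 1: the destroyer's own reference. */
static atomic_int refcnt = 1;
static atomic_bool all_done;	/* stands in for the completion */

static int get_unless_zero(void)
{
	int v = atomic_load(&refcnt);

	while (v != 0)
		if (atomic_compare_exchange_weak(&refcnt, &v, v + 1))
			return 1;
	return 0;	/* count already hit zero: refuse */
}

static void put(void)
{
	if (atomic_fetch_sub(&refcnt, 1) == 1)
		atomic_store(&all_done, 1);	/* complete(&event_comp) */
}

static void add_event(void)
{
	if (!get_unless_zero())
		return;		/* already being destroyed */
	/* ... allocate and queue work here ... */
	put();
}

static void destroy(void)
{
	put();				/* drop the initial reference */
	while (!atomic_load(&all_done))
		;			/* wait_for_completion() */
	/* now safe to flush the workqueue and free */
}

int main(void)
{
	add_event();
	destroy();
	puts("torn down");
	return 0;
}
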
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 06de59521fc4..fbf4cbcf1a65 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -13,25 +13,6 @@
#include "rmnet_vnd.h"
#include "rmnet_private.h"
-/* Locking scheme -
- * The shared resource which needs to be protected is realdev->rx_handler_data.
- * For the writer path, this is using rtnl_lock(). The writer paths are
- * rmnet_newlink(), rmnet_dellink() and rmnet_force_unassociate_device(). These
- * paths are already called with rtnl_lock() acquired in. There is also an
- * ASSERT_RTNL() to ensure that we are calling with rtnl acquired. For
- * dereference here, we will need to use rtnl_dereference(). Dev list writing
- * needs to happen with rtnl_lock() acquired for netdev_master_upper_dev_link().
- * For the reader path, the real_dev->rx_handler_data is called in the TX / RX
- * path. We only need rcu_read_lock() for these scenarios. In these cases,
- * the rcu_read_lock() is held in __dev_queue_xmit() and
- * netif_receive_skb_internal(), so readers need to use rcu_dereference_rtnl()
- * to get the relevant information. For dev list reading, we again acquire
- * rcu_read_lock() in rmnet_dellink() for netdev_master_upper_dev_get_rcu().
- * We also use unregister_netdevice_many() to free all rmnet devices in
- * rmnet_force_unassociate_device() so we dont lose the rtnl_lock() and free in
- * same context.
- */
-
/* Local Definitions and Declarations */
static const struct nla_policy rmnet_policy[IFLA_RMNET_MAX + 1] = {
@@ -51,9 +32,10 @@ rmnet_get_port_rtnl(const struct net_device *real_dev)
return rtnl_dereference(real_dev->rx_handler_data);
}
-static int rmnet_unregister_real_device(struct net_device *real_dev,
- struct rmnet_port *port)
+static int rmnet_unregister_real_device(struct net_device *real_dev)
{
+ struct rmnet_port *port = rmnet_get_port_rtnl(real_dev);
+
if (port->nr_rmnet_devs)
return -EINVAL;
@@ -61,9 +43,6 @@ static int rmnet_unregister_real_device(struct net_device *real_dev,
kfree(port);
- /* release reference on real_dev */
- dev_put(real_dev);
-
netdev_dbg(real_dev, "Removed from rmnet\n");
return 0;
}
@@ -89,9 +68,6 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return -EBUSY;
}
- /* hold on to real dev for MAP data */
- dev_hold(real_dev);
-
for (entry = 0; entry < RMNET_MAX_LOGICAL_EP; entry++)
INIT_HLIST_HEAD(&port->muxed_ep[entry]);
@@ -99,28 +75,33 @@ static int rmnet_register_real_device(struct net_device *real_dev)
return 0;
}
-static void rmnet_unregister_bridge(struct net_device *dev,
- struct rmnet_port *port)
+static void rmnet_unregister_bridge(struct rmnet_port *port)
{
- struct rmnet_port *bridge_port;
- struct net_device *bridge_dev;
+ struct net_device *bridge_dev, *real_dev, *rmnet_dev;
+ struct rmnet_port *real_port;
if (port->rmnet_mode != RMNET_EPMODE_BRIDGE)
return;
- /* bridge slave handling */
+ rmnet_dev = port->rmnet_dev;
if (!port->nr_rmnet_devs) {
- bridge_dev = port->bridge_ep;
+ /* bridge device */
+ real_dev = port->bridge_ep;
+ bridge_dev = port->dev;
- bridge_port = rmnet_get_port_rtnl(bridge_dev);
- bridge_port->bridge_ep = NULL;
- bridge_port->rmnet_mode = RMNET_EPMODE_VND;
+ real_port = rmnet_get_port_rtnl(real_dev);
+ real_port->bridge_ep = NULL;
+ real_port->rmnet_mode = RMNET_EPMODE_VND;
} else {
+ /* real device */
bridge_dev = port->bridge_ep;
- bridge_port = rmnet_get_port_rtnl(bridge_dev);
- rmnet_unregister_real_device(bridge_dev, bridge_port);
+ port->bridge_ep = NULL;
+ port->rmnet_mode = RMNET_EPMODE_VND;
}
+
+ netdev_upper_dev_unlink(bridge_dev, rmnet_dev);
+ rmnet_unregister_real_device(bridge_dev);
}
static int rmnet_newlink(struct net *src_net, struct net_device *dev,
@@ -135,6 +116,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
int err = 0;
u16 mux_id;
+ if (!tb[IFLA_LINK]) {
+ NL_SET_ERR_MSG_MOD(extack, "link not specified");
+ return -EINVAL;
+ }
+
real_dev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
if (!real_dev || !dev)
return -ENODEV;
@@ -157,7 +143,12 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
if (err)
goto err1;
+ err = netdev_upper_dev_link(real_dev, dev, extack);
+ if (err < 0)
+ goto err2;
+
port->rmnet_mode = mode;
+ port->rmnet_dev = dev;
hlist_add_head_rcu(&ep->hlnode, &port->muxed_ep[mux_id]);
@@ -173,8 +164,11 @@ static int rmnet_newlink(struct net *src_net, struct net_device *dev,
return 0;
+err2:
+ unregister_netdevice(dev);
+ rmnet_vnd_dellink(mux_id, port, ep);
err1:
- rmnet_unregister_real_device(real_dev, port);
+ rmnet_unregister_real_device(real_dev);
err0:
kfree(ep);
return err;
@@ -183,77 +177,74 @@ err0:
static void rmnet_dellink(struct net_device *dev, struct list_head *head)
{
struct rmnet_priv *priv = netdev_priv(dev);
- struct net_device *real_dev;
+ struct net_device *real_dev, *bridge_dev;
+ struct rmnet_port *real_port, *bridge_port;
struct rmnet_endpoint *ep;
- struct rmnet_port *port;
- u8 mux_id;
+ u8 mux_id = priv->mux_id;
real_dev = priv->real_dev;
- if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+ if (!rmnet_is_real_dev_registered(real_dev))
return;
- port = rmnet_get_port_rtnl(real_dev);
-
- mux_id = rmnet_vnd_get_mux(dev);
+ real_port = rmnet_get_port_rtnl(real_dev);
+ bridge_dev = real_port->bridge_ep;
+ if (bridge_dev) {
+ bridge_port = rmnet_get_port_rtnl(bridge_dev);
+ rmnet_unregister_bridge(bridge_port);
+ }
- ep = rmnet_get_endpoint(port, mux_id);
+ ep = rmnet_get_endpoint(real_port, mux_id);
if (ep) {
hlist_del_init_rcu(&ep->hlnode);
- rmnet_unregister_bridge(dev, port);
- rmnet_vnd_dellink(mux_id, port, ep);
+ rmnet_vnd_dellink(mux_id, real_port, ep);
kfree(ep);
}
- rmnet_unregister_real_device(real_dev, port);
+ netdev_upper_dev_unlink(real_dev, dev);
+ rmnet_unregister_real_device(real_dev);
unregister_netdevice_queue(dev, head);
}
-static void rmnet_force_unassociate_device(struct net_device *dev)
+static void rmnet_force_unassociate_device(struct net_device *real_dev)
{
- struct net_device *real_dev = dev;
struct hlist_node *tmp_ep;
struct rmnet_endpoint *ep;
struct rmnet_port *port;
unsigned long bkt_ep;
LIST_HEAD(list);
- if (!rmnet_is_real_dev_registered(real_dev))
- return;
-
- ASSERT_RTNL();
-
- port = rmnet_get_port_rtnl(dev);
-
- rcu_read_lock();
- rmnet_unregister_bridge(dev, port);
-
- hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
- unregister_netdevice_queue(ep->egress_dev, &list);
- rmnet_vnd_dellink(ep->mux_id, port, ep);
+ port = rmnet_get_port_rtnl(real_dev);
- hlist_del_init_rcu(&ep->hlnode);
- kfree(ep);
+ if (port->nr_rmnet_devs) {
+ /* real device */
+ rmnet_unregister_bridge(port);
+ hash_for_each_safe(port->muxed_ep, bkt_ep, tmp_ep, ep, hlnode) {
+ unregister_netdevice_queue(ep->egress_dev, &list);
+ netdev_upper_dev_unlink(real_dev, ep->egress_dev);
+ rmnet_vnd_dellink(ep->mux_id, port, ep);
+ hlist_del_init_rcu(&ep->hlnode);
+ kfree(ep);
+ }
+ rmnet_unregister_real_device(real_dev);
+ unregister_netdevice_many(&list);
+ } else {
+ rmnet_unregister_bridge(port);
}
-
- rcu_read_unlock();
- unregister_netdevice_many(&list);
-
- rmnet_unregister_real_device(real_dev, port);
}
static int rmnet_config_notify_cb(struct notifier_block *nb,
unsigned long event, void *data)
{
- struct net_device *dev = netdev_notifier_info_to_dev(data);
+ struct net_device *real_dev = netdev_notifier_info_to_dev(data);
- if (!dev)
+ if (!rmnet_is_real_dev_registered(real_dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UNREGISTER:
- netdev_dbg(dev, "Kernel unregister\n");
- rmnet_force_unassociate_device(dev);
+ netdev_dbg(real_dev, "Kernel unregister\n");
+ rmnet_force_unassociate_device(real_dev);
break;
default:
@@ -295,16 +286,18 @@ static int rmnet_changelink(struct net_device *dev, struct nlattr *tb[],
if (!dev)
return -ENODEV;
- real_dev = __dev_get_by_index(dev_net(dev),
- nla_get_u32(tb[IFLA_LINK]));
-
- if (!real_dev || !rmnet_is_real_dev_registered(real_dev))
+ real_dev = priv->real_dev;
+ if (!rmnet_is_real_dev_registered(real_dev))
return -ENODEV;
port = rmnet_get_port_rtnl(real_dev);
if (data[IFLA_RMNET_MUX_ID]) {
mux_id = nla_get_u16(data[IFLA_RMNET_MUX_ID]);
+ if (rmnet_get_endpoint(port, mux_id)) {
+ NL_SET_ERR_MSG_MOD(extack, "MUX ID already exists");
+ return -EINVAL;
+ }
ep = rmnet_get_endpoint(port, priv->mux_id);
if (!ep)
return -ENODEV;
@@ -379,11 +372,10 @@ struct rtnl_link_ops rmnet_link_ops __read_mostly = {
.fill_info = rmnet_fill_info,
};
-/* Needs either rcu_read_lock() or rtnl lock */
-struct rmnet_port *rmnet_get_port(struct net_device *real_dev)
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev)
{
if (rmnet_is_real_dev_registered(real_dev))
- return rcu_dereference_rtnl(real_dev->rx_handler_data);
+ return rcu_dereference_bh(real_dev->rx_handler_data);
else
return NULL;
}
@@ -409,7 +401,7 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
struct rmnet_port *port, *slave_port;
int err;
- port = rmnet_get_port(real_dev);
+ port = rmnet_get_port_rtnl(real_dev);
/* If there is more than one rmnet dev attached, it's probably being
* used for muxing. Skip the bridging in that case
@@ -417,6 +409,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (port->nr_rmnet_devs > 1)
return -EINVAL;
+ if (port->rmnet_mode != RMNET_EPMODE_VND)
+ return -EINVAL;
+
if (rmnet_is_real_dev_registered(slave_dev))
return -EBUSY;
@@ -424,9 +419,17 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
if (err)
return -EBUSY;
- slave_port = rmnet_get_port(slave_dev);
+ err = netdev_master_upper_dev_link(slave_dev, rmnet_dev, NULL, NULL,
+ extack);
+ if (err) {
+ rmnet_unregister_real_device(slave_dev);
+ return err;
+ }
+
+ slave_port = rmnet_get_port_rtnl(slave_dev);
slave_port->rmnet_mode = RMNET_EPMODE_BRIDGE;
slave_port->bridge_ep = real_dev;
+ slave_port->rmnet_dev = rmnet_dev;
port->rmnet_mode = RMNET_EPMODE_BRIDGE;
port->bridge_ep = slave_dev;
@@ -438,16 +441,9 @@ int rmnet_add_bridge(struct net_device *rmnet_dev,
int rmnet_del_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev)
{
- struct rmnet_priv *priv = netdev_priv(rmnet_dev);
- struct net_device *real_dev = priv->real_dev;
- struct rmnet_port *port, *slave_port;
+ struct rmnet_port *port = rmnet_get_port_rtnl(slave_dev);
- port = rmnet_get_port(real_dev);
- port->rmnet_mode = RMNET_EPMODE_VND;
- port->bridge_ep = NULL;
-
- slave_port = rmnet_get_port(slave_dev);
- rmnet_unregister_real_device(slave_dev, slave_port);
+ rmnet_unregister_bridge(port);
netdev_dbg(slave_dev, "removed from rmnet as slave\n");
return 0;
@@ -473,8 +469,8 @@ static int __init rmnet_init(void)
static void __exit rmnet_exit(void)
{
- unregister_netdevice_notifier(&rmnet_dev_notifier);
rtnl_link_unregister(&rmnet_link_ops);
+ unregister_netdevice_notifier(&rmnet_dev_notifier);
}
module_init(rmnet_init)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index cd0a6bcbe74a..be515982d628 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -28,6 +28,7 @@ struct rmnet_port {
u8 rmnet_mode;
struct hlist_head muxed_ep[RMNET_MAX_LOGICAL_EP];
struct net_device *bridge_ep;
+ struct net_device *rmnet_dev;
};
extern struct rtnl_link_ops rmnet_link_ops;
@@ -65,7 +66,7 @@ struct rmnet_priv {
struct rmnet_priv_stats stats;
};
-struct rmnet_port *rmnet_get_port(struct net_device *real_dev);
+struct rmnet_port *rmnet_get_port_rcu(struct net_device *real_dev);
struct rmnet_endpoint *rmnet_get_endpoint(struct rmnet_port *port, u8 mux_id);
int rmnet_add_bridge(struct net_device *rmnet_dev,
struct net_device *slave_dev,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index 1b74bc160402..29a7bfa2584d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -159,6 +159,9 @@ static int rmnet_map_egress_handler(struct sk_buff *skb,
static void
rmnet_bridge_handler(struct sk_buff *skb, struct net_device *bridge_dev)
{
+ if (skb_mac_header_was_set(skb))
+ skb_push(skb, skb->mac_len);
+
if (bridge_dev) {
skb->dev = bridge_dev;
dev_queue_xmit(skb);
@@ -184,7 +187,7 @@ rx_handler_result_t rmnet_rx_handler(struct sk_buff **pskb)
return RX_HANDLER_PASS;
dev = skb->dev;
- port = rmnet_get_port(dev);
+ port = rmnet_get_port_rcu(dev);
switch (port->rmnet_mode) {
case RMNET_EPMODE_VND:
@@ -217,7 +220,7 @@ void rmnet_egress_handler(struct sk_buff *skb)
skb->dev = priv->real_dev;
mux_id = priv->mux_id;
- port = rmnet_get_port(skb->dev);
+ port = rmnet_get_port_rcu(skb->dev);
if (!port)
goto drop;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 509dfc895a33..26ad40f19c64 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -266,14 +266,6 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
return 0;
}
-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev)
-{
- struct rmnet_priv *priv;
-
- priv = netdev_priv(rmnet_dev);
- return priv->mux_id;
-}
-
int rmnet_vnd_do_flow_control(struct net_device *rmnet_dev, int enable)
{
netdev_dbg(rmnet_dev, "Setting VND TX queue state to %d\n", enable);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
index 54cbaf3c3bc4..14d77c709d4a 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.h
@@ -16,6 +16,5 @@ int rmnet_vnd_dellink(u8 id, struct rmnet_port *port,
struct rmnet_endpoint *ep);
void rmnet_vnd_rx_fixup(struct sk_buff *skb, struct net_device *dev);
void rmnet_vnd_tx_fixup(struct sk_buff *skb, struct net_device *dev);
-u8 rmnet_vnd_get_mux(struct net_device *rmnet_dev);
void rmnet_vnd_setup(struct net_device *dev);
#endif /* _RMNET_VND_H_ */
diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
index af15a737c675..59b4f16896a8 100644
--- a/drivers/net/ethernet/sfc/ptp.c
+++ b/drivers/net/ethernet/sfc/ptp.c
@@ -560,13 +560,45 @@ efx_ptp_mac_nic_to_ktime_correction(struct efx_nic *efx,
u32 nic_major, u32 nic_minor,
s32 correction)
{
+ u32 sync_timestamp;
ktime_t kt = { 0 };
+ s16 delta;
if (!(nic_major & 0x80000000)) {
WARN_ON_ONCE(nic_major >> 16);
- /* Use the top bits from the latest sync event. */
- nic_major &= 0xffff;
- nic_major |= (last_sync_timestamp_major(efx) & 0xffff0000);
+
+ /* Medford provides 48 bits of timestamp, so we must get the top
+ * 16 bits from the timesync event state.
+ *
+ * We only have the lower 16 bits of the time now, but we do
+ * have a full-resolution timestamp from some point in the past. As
+ * long as the difference between the (real) now and the sync
+ * is less than 2^15, then we can reconstruct the difference
+ * between those two numbers using only the lower 16 bits of
+ * each.
+ *
+ * Put another way
+ *
+ * a - b = ((a mod k) - b) mod k
+ *
+ * when -k/2 < (a-b) < k/2. In our case k is 2^16. We know
+ * (a mod k) and b, so we can calculate the delta, a - b.
+ *
+ */
+ sync_timestamp = last_sync_timestamp_major(efx);
+
+ /* Because delta is s16 this does an implicit mask down to
+ * 16 bits which is what we need, assuming
+ * MEDFORD_TX_SECS_EVENT_BITS is 16. delta is signed so that
+ * we can deal with the (unlikely) case of sync timestamps
+ * arriving from the future.
+ */
+ delta = nic_major - sync_timestamp;
+
+ /* Recover the fully specified time now, by applying the offset
+ * to the (fully specified) sync time.
+ */
+ nic_major = sync_timestamp + delta;
kt = ptp->nic_to_kernel_time(nic_major, nic_minor,
correction);
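
The modular identity quoted in the comment above can be checked standalone: with k = 2^16, letting a signed 16-bit subtraction wrap recovers the true delta whenever |a - b| < 2^15. A small plain-C demonstration, assuming the usual two's-complement wrap on the int16_t conversion:

#include <stdint.h>
#include <stdio.h>

/* Reconstruct a 32-bit "now" from its low 16 bits plus a full-width
 * sync timestamp taken nearby, per a - b = ((a mod k) - b) mod k.
 */
static uint32_t reconstruct(uint16_t now_lo, uint32_t sync)
{
	/* wraps mod 2^16; signed so sync may also lie in the future */
	int16_t delta = now_lo - (uint16_t)sync;

	return sync + delta;
}

int main(void)
{
	uint32_t sync = 0x0001fff0;

	/* the real "now" is sync + 0x20; only its low 16 bits survived */
	printf("0x%08x\n", reconstruct(0x0010, sync)); /* 0x00020010 */
	return 0;
}
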
diff --git a/drivers/net/ethernet/socionext/sni_ave.c b/drivers/net/ethernet/socionext/sni_ave.c
index b7032422393f..67ddf782d98a 100644
--- a/drivers/net/ethernet/socionext/sni_ave.c
+++ b/drivers/net/ethernet/socionext/sni_ave.c
@@ -1810,6 +1810,9 @@ static int ave_pro4_get_pinmode(struct ave_private *priv,
break;
case PHY_INTERFACE_MODE_MII:
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
@@ -1854,6 +1857,9 @@ static int ave_ld20_get_pinmode(struct ave_private *priv,
priv->pinmode_val = SG_ETPINMODE_RMII(0);
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
@@ -1876,6 +1882,9 @@ static int ave_pxs3_get_pinmode(struct ave_private *priv,
priv->pinmode_val = SG_ETPINMODE_RMII(arg);
break;
case PHY_INTERFACE_MODE_RGMII:
+ case PHY_INTERFACE_MODE_RGMII_ID:
+ case PHY_INTERFACE_MODE_RGMII_RXID:
+ case PHY_INTERFACE_MODE_RGMII_TXID:
priv->pinmode_val = 0;
break;
default:
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 5836b21edd7e..7da18c9afa01 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -4405,6 +4405,8 @@ static void stmmac_init_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
+ rtnl_lock();
+
/* Create per netdev entries */
priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
@@ -4416,14 +4418,13 @@ static void stmmac_init_fs(struct net_device *dev)
debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
&stmmac_dma_cap_fops);
- register_netdevice_notifier(&stmmac_notifier);
+ rtnl_unlock();
}
static void stmmac_exit_fs(struct net_device *dev)
{
struct stmmac_priv *priv = netdev_priv(dev);
- unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(priv->dbgfs_dir);
}
#endif /* CONFIG_DEBUG_FS */
@@ -4940,14 +4941,14 @@ int stmmac_dvr_remove(struct device *dev)
netdev_info(priv->dev, "%s: removing driver", __func__);
-#ifdef CONFIG_DEBUG_FS
- stmmac_exit_fs(ndev);
-#endif
stmmac_stop_all_dma(priv);
stmmac_mac_set(priv, priv->ioaddr, false);
netif_carrier_off(ndev);
unregister_netdev(ndev);
+#ifdef CONFIG_DEBUG_FS
+ stmmac_exit_fs(ndev);
+#endif
phylink_destroy(priv->phylink);
if (priv->plat->stmmac_rst)
reset_control_assert(priv->plat->stmmac_rst);
@@ -5166,6 +5167,7 @@ static int __init stmmac_init(void)
/* Create debugfs main directory if it doesn't exist yet */
if (!stmmac_fs_dir)
stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+ register_netdevice_notifier(&stmmac_notifier);
#endif
return 0;
@@ -5174,6 +5176,7 @@ static int __init stmmac_init(void)
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
+ unregister_netdevice_notifier(&stmmac_notifier);
debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
diff --git a/drivers/net/ethernet/sun/sunvnet_common.c b/drivers/net/ethernet/sun/sunvnet_common.c
index c23ce838ff63..8dc6c9ff22e1 100644
--- a/drivers/net/ethernet/sun/sunvnet_common.c
+++ b/drivers/net/ethernet/sun/sunvnet_common.c
@@ -1350,27 +1350,12 @@ sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
if (vio_version_after_eq(&port->vio, 1, 3))
localmtu -= VLAN_HLEN;
- if (skb->protocol == htons(ETH_P_IP)) {
- struct flowi4 fl4;
- struct rtable *rt = NULL;
-
- memset(&fl4, 0, sizeof(fl4));
- fl4.flowi4_oif = dev->ifindex;
- fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
- fl4.daddr = ip_hdr(skb)->daddr;
- fl4.saddr = ip_hdr(skb)->saddr;
-
- rt = ip_route_output_key(dev_net(dev), &fl4);
- if (!IS_ERR(rt)) {
- skb_dst_set(skb, &rt->dst);
- icmp_send(skb, ICMP_DEST_UNREACH,
- ICMP_FRAG_NEEDED,
- htonl(localmtu));
- }
- }
+ if (skb->protocol == htons(ETH_P_IP))
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(localmtu));
#if IS_ENABLED(CONFIG_IPV6)
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
goto out_dropped;
}
diff --git a/drivers/net/ethernet/xilinx/ll_temac.h b/drivers/net/ethernet/xilinx/ll_temac.h
index 276292bca334..53fb8141f1a6 100644
--- a/drivers/net/ethernet/xilinx/ll_temac.h
+++ b/drivers/net/ethernet/xilinx/ll_temac.h
@@ -375,10 +375,14 @@ struct temac_local {
int tx_bd_next;
int tx_bd_tail;
int rx_bd_ci;
+ int rx_bd_tail;
/* DMA channel control setup */
u32 tx_chnl_ctrl;
u32 rx_chnl_ctrl;
+ u8 coalesce_count_rx;
+
+ struct delayed_work restart_work;
};
/* Wrappers for temac_ior()/temac_iow() function pointers above */
diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c
index 6f11f52c9a9e..9461acec6f70 100644
--- a/drivers/net/ethernet/xilinx/ll_temac_main.c
+++ b/drivers/net/ethernet/xilinx/ll_temac_main.c
@@ -51,6 +51,7 @@
#include <linux/ip.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
+#include <linux/workqueue.h>
#include <linux/dma-mapping.h>
#include <linux/processor.h>
#include <linux/platform_data/xilinx-ll-temac.h>
@@ -367,6 +368,8 @@ static int temac_dma_bd_init(struct net_device *ndev)
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr))
+ goto out;
lp->rx_bd_v[i].phys = cpu_to_be32(skb_dma_addr);
lp->rx_bd_v[i].len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
lp->rx_bd_v[i].app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
@@ -387,12 +390,13 @@ static int temac_dma_bd_init(struct net_device *ndev)
lp->tx_bd_next = 0;
lp->tx_bd_tail = 0;
lp->rx_bd_ci = 0;
+ lp->rx_bd_tail = RX_BD_NUM - 1;
/* Enable RX DMA transfers */
wmb();
lp->dma_out(lp, RX_CURDESC_PTR, lp->rx_bd_p);
lp->dma_out(lp, RX_TAILDESC_PTR,
- lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1)));
+ lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * lp->rx_bd_tail));
/* Prepare for TX DMA transfer */
lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p);
@@ -788,6 +792,9 @@ static void temac_start_xmit_done(struct net_device *ndev)
stat = be32_to_cpu(cur_p->app0);
}
+ /* Matches barrier in temac_start_xmit */
+ smp_mb();
+
netif_wake_queue(ndev);
}
@@ -830,9 +837,19 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
if (temac_check_tx_bd_space(lp, num_frag + 1)) {
- if (!netif_queue_stopped(ndev))
- netif_stop_queue(ndev);
- return NETDEV_TX_BUSY;
+ if (netif_queue_stopped(ndev))
+ return NETDEV_TX_BUSY;
+
+ netif_stop_queue(ndev);
+
+ /* Matches barrier in temac_start_xmit_done */
+ smp_mb();
+
+ /* Space might have just been freed - check again */
+ if (temac_check_tx_bd_space(lp, num_frag))
+ return NETDEV_TX_BUSY;
+
+ netif_wake_queue(ndev);
}
cur_p->app0 = 0;
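
The stop/recheck/wake sequence added above is the canonical lockless TX-queue pattern: stop the queue, issue a full barrier, re-check for space the completion path may have freed in the window, and wake if so, with the completion path mirroring the barrier before it wakes. A hedged sketch of the protocol shape in C11 atomics (illustrative stand-ins, not the driver's code; descriptor consumption is omitted):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool queue_stopped;	/* netif queue state */
static atomic_int free_slots = 4;	/* descriptor accounting */

static bool xmit_would_block(int need)
{
	if (atomic_load(&free_slots) >= need)
		return false;

	if (atomic_load(&queue_stopped))
		return true;			/* NETDEV_TX_BUSY */

	atomic_store(&queue_stopped, true);	/* netif_stop_queue() */
	atomic_thread_fence(memory_order_seq_cst); /* smp_mb() */

	/* Completion may have freed space between check and stop. */
	if (atomic_load(&free_slots) < need)
		return true;			/* NETDEV_TX_BUSY */

	atomic_store(&queue_stopped, false);	/* netif_wake_queue() */
	return false;
}

static void tx_done(int freed)
{
	atomic_fetch_add(&free_slots, freed);
	atomic_thread_fence(memory_order_seq_cst); /* matches xmit barrier */
	atomic_store(&queue_stopped, false);	/* netif_wake_queue() */
}

int main(void)
{
	(void)xmit_would_block(2);
	tx_done(2);
	return 0;
}
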
@@ -850,12 +867,16 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
skb_headlen(skb), DMA_TO_DEVICE);
cur_p->len = cpu_to_be32(skb_headlen(skb));
+ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent, skb_dma_addr))) {
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
cur_p->phys = cpu_to_be32(skb_dma_addr);
ptr_to_txbd((void *)skb, cur_p);
for (ii = 0; ii < num_frag; ii++) {
- lp->tx_bd_tail++;
- if (lp->tx_bd_tail >= TX_BD_NUM)
+ if (++lp->tx_bd_tail >= TX_BD_NUM)
lp->tx_bd_tail = 0;
cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -863,6 +884,27 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
skb_frag_address(frag),
skb_frag_size(frag),
DMA_TO_DEVICE);
+ if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
+ if (--lp->tx_bd_tail < 0)
+ lp->tx_bd_tail = TX_BD_NUM - 1;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ while (--ii >= 0) {
+ --frag;
+ dma_unmap_single(ndev->dev.parent,
+ be32_to_cpu(cur_p->phys),
+ skb_frag_size(frag),
+ DMA_TO_DEVICE);
+ if (--lp->tx_bd_tail < 0)
+ lp->tx_bd_tail = TX_BD_NUM - 1;
+ cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
+ }
+ dma_unmap_single(ndev->dev.parent,
+ be32_to_cpu(cur_p->phys),
+ skb_headlen(skb), DMA_TO_DEVICE);
+ dev_kfree_skb_any(skb);
+ ndev->stats.tx_dropped++;
+ return NETDEV_TX_OK;
+ }
cur_p->phys = cpu_to_be32(skb_dma_addr);
cur_p->len = cpu_to_be32(skb_frag_size(frag));
cur_p->app0 = 0;
@@ -884,31 +926,56 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
return NETDEV_TX_OK;
}
+static int ll_temac_recv_buffers_available(struct temac_local *lp)
+{
+ int available;
+
+ if (!lp->rx_skb[lp->rx_bd_ci])
+ return 0;
+ available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
+ if (available <= 0)
+ available += RX_BD_NUM;
+ return available;
+}
static void ll_temac_recv(struct net_device *ndev)
{
struct temac_local *lp = netdev_priv(ndev);
- struct sk_buff *skb, *new_skb;
- unsigned int bdstat;
- struct cdmac_bd *cur_p;
- dma_addr_t tail_p, skb_dma_addr;
- int length;
unsigned long flags;
+ int rx_bd;
+ bool update_tail = false;
spin_lock_irqsave(&lp->rx_lock, flags);
- tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
-
- bdstat = be32_to_cpu(cur_p->app0);
- while ((bdstat & STS_CTRL_APP0_CMPLT)) {
+ /* Process all received buffers, passing them on to the network
+ * stack. After this, the buffer descriptors will be in an
+ * un-allocated state, where no skb is allocated for them, and
+ * they are therefore not available for TEMAC/DMA.
+ */
+ do {
+ struct cdmac_bd *bd = &lp->rx_bd_v[lp->rx_bd_ci];
+ struct sk_buff *skb = lp->rx_skb[lp->rx_bd_ci];
+ unsigned int bdstat = be32_to_cpu(bd->app0);
+ int length;
+
+ /* While this should not normally happen, we can end up
+ * here when GFP_ATOMIC allocations fail, and we
+ * therefore have un-allocated buffers.
+ */
+ if (!skb)
+ break;
- skb = lp->rx_skb[lp->rx_bd_ci];
- length = be32_to_cpu(cur_p->app4) & 0x3FFF;
+ /* Only process completed buffer descriptors */
+ if (!(bdstat & STS_CTRL_APP0_CMPLT))
+ break;
- dma_unmap_single(ndev->dev.parent, be32_to_cpu(cur_p->phys),
+ dma_unmap_single(ndev->dev.parent, be32_to_cpu(bd->phys),
XTE_MAX_JUMBO_FRAME_SIZE, DMA_FROM_DEVICE);
+ /* The buffer is not valid for DMA anymore */
+ bd->phys = 0;
+ bd->len = 0;
+ length = be32_to_cpu(bd->app4) & 0x3FFF;
skb_put(skb, length);
skb->protocol = eth_type_trans(skb, ndev);
skb_checksum_none_assert(skb);
@@ -923,43 +990,102 @@ static void ll_temac_recv(struct net_device *ndev)
* (back) for proper IP checksum byte order
* (be16).
*/
- skb->csum = htons(be32_to_cpu(cur_p->app3) & 0xFFFF);
+ skb->csum = htons(be32_to_cpu(bd->app3) & 0xFFFF);
skb->ip_summed = CHECKSUM_COMPLETE;
}
if (!skb_defer_rx_timestamp(skb))
netif_rx(skb);
+ /* The skb buffer is now owned by network stack above */
+ lp->rx_skb[lp->rx_bd_ci] = NULL;
ndev->stats.rx_packets++;
ndev->stats.rx_bytes += length;
- new_skb = netdev_alloc_skb_ip_align(ndev,
- XTE_MAX_JUMBO_FRAME_SIZE);
- if (!new_skb) {
- spin_unlock_irqrestore(&lp->rx_lock, flags);
- return;
+ rx_bd = lp->rx_bd_ci;
+ if (++lp->rx_bd_ci >= RX_BD_NUM)
+ lp->rx_bd_ci = 0;
+ } while (rx_bd != lp->rx_bd_tail);
+
+ /* DMA operations will halt when the last buffer descriptor is
+ * processed (i.e. the one pointed to by RX_TAILDESC_PTR).
+ * When that happens, no more interrupt events will be
+ * generated. No IRQ_COAL or IRQ_DLY, and not even an
+ * IRQ_ERR. To avoid stalling, we schedule a delayed work
+ * when there is a potential risk of that happening. The work
+ * will call this function, and thus re-schedule itself until
+ * enough buffers are available again.
+ */
+ if (ll_temac_recv_buffers_available(lp) < lp->coalesce_count_rx)
+ schedule_delayed_work(&lp->restart_work, HZ / 1000);
+
+ /* Allocate new buffers for those buffer descriptors that were
+ * passed to network stack. Note that GFP_ATOMIC allocations
+ * can fail (e.g. when a larger burst of GFP_ATOMIC
+ * allocations occurs), so while we try to allocate all
+ * buffers in the same interrupt where they were processed, we
+ * continue with what we could get in case of allocation
+ * failure. Allocation of remaining buffers will be retried
+ * in following calls.
+ */
+ while (1) {
+ struct sk_buff *skb;
+ struct cdmac_bd *bd;
+ dma_addr_t skb_dma_addr;
+
+ rx_bd = lp->rx_bd_tail + 1;
+ if (rx_bd >= RX_BD_NUM)
+ rx_bd = 0;
+ bd = &lp->rx_bd_v[rx_bd];
+
+ if (bd->phys)
+ break; /* All skb's allocated */
+
+ skb = netdev_alloc_skb_ip_align(ndev, XTE_MAX_JUMBO_FRAME_SIZE);
+ if (!skb) {
+ dev_warn(&ndev->dev, "skb alloc failed\n");
+ break;
}
- cur_p->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
- skb_dma_addr = dma_map_single(ndev->dev.parent, new_skb->data,
+ skb_dma_addr = dma_map_single(ndev->dev.parent, skb->data,
XTE_MAX_JUMBO_FRAME_SIZE,
DMA_FROM_DEVICE);
- cur_p->phys = cpu_to_be32(skb_dma_addr);
- cur_p->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
- lp->rx_skb[lp->rx_bd_ci] = new_skb;
+ if (WARN_ON_ONCE(dma_mapping_error(ndev->dev.parent,
+ skb_dma_addr))) {
+ dev_kfree_skb_any(skb);
+ break;
+ }
- lp->rx_bd_ci++;
- if (lp->rx_bd_ci >= RX_BD_NUM)
- lp->rx_bd_ci = 0;
+ bd->phys = cpu_to_be32(skb_dma_addr);
+ bd->len = cpu_to_be32(XTE_MAX_JUMBO_FRAME_SIZE);
+ bd->app0 = cpu_to_be32(STS_CTRL_APP0_IRQONEND);
+ lp->rx_skb[rx_bd] = skb;
- cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
- bdstat = be32_to_cpu(cur_p->app0);
+ lp->rx_bd_tail = rx_bd;
+ update_tail = true;
+ }
+
+ /* Move tail pointer when buffers have been allocated */
+ if (update_tail) {
+ lp->dma_out(lp, RX_TAILDESC_PTR,
+ lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_tail);
}
- lp->dma_out(lp, RX_TAILDESC_PTR, tail_p);
spin_unlock_irqrestore(&lp->rx_lock, flags);
}
+/* Function scheduled to ensure a restart in case of a DMA halt
+ * condition caused by running out of buffer descriptors.
+ */
+static void ll_temac_restart_work_func(struct work_struct *work)
+{
+ struct temac_local *lp = container_of(work, struct temac_local,
+ restart_work.work);
+ struct net_device *ndev = lp->ndev;
+
+ ll_temac_recv(ndev);
+}
+
static irqreturn_t ll_temac_tx_irq(int irq, void *_ndev)
{
struct net_device *ndev = _ndev;
@@ -1052,6 +1178,8 @@ static int temac_stop(struct net_device *ndev)
dev_dbg(&ndev->dev, "temac_close()\n");
+ cancel_delayed_work_sync(&lp->restart_work);
+
free_irq(lp->tx_irq, ndev);
free_irq(lp->rx_irq, ndev);
@@ -1173,6 +1301,7 @@ static int temac_probe(struct platform_device *pdev)
lp->dev = &pdev->dev;
lp->options = XTE_OPTION_DEFAULTS;
spin_lock_init(&lp->rx_lock);
+ INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
/* Setup mutex for synchronization of indirect register access */
if (pdata) {
@@ -1279,6 +1408,7 @@ static int temac_probe(struct platform_device *pdev)
*/
lp->tx_chnl_ctrl = 0x10220000;
lp->rx_chnl_ctrl = 0xff070000;
+ lp->coalesce_count_rx = 0x07;
/* Finished with the DMA node; drop the reference */
of_node_put(dma_np);
@@ -1310,11 +1440,14 @@ static int temac_probe(struct platform_device *pdev)
(pdata->tx_irq_count << 16);
else
lp->tx_chnl_ctrl = 0x10220000;
- if (pdata->rx_irq_timeout || pdata->rx_irq_count)
+ if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
(pdata->rx_irq_count << 16);
- else
+ lp->coalesce_count_rx = pdata->rx_irq_count;
+ } else {
lp->rx_chnl_ctrl = 0xff070000;
+ lp->coalesce_count_rx = 0x07;
+ }
}
/* Error handle returned DMA RX and TX interrupts */
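
The RX accounting behind ll_temac_recv_buffers_available() above is standard circular-ring arithmetic: with ci the next descriptor to consume and tail the last one handed to the DMA engine, the inclusive distance is 1 + tail - ci, wrapped into [1, N]. A quick standalone check (the ring size here is illustrative):

#include <stdio.h>

#define RX_BD_NUM 64	/* illustrative ring size */

/* Inclusive count of descriptors from ci through tail in a ring of
 * RX_BD_NUM entries (mirrors ll_temac_recv_buffers_available()).
 */
static int buffers_available(int ci, int tail)
{
	int available = 1 + tail - ci;

	if (available <= 0)
		available += RX_BD_NUM;
	return available;
}

int main(void)
{
	printf("%d\n", buffers_available(0, RX_BD_NUM - 1)); /* 64: full */
	printf("%d\n", buffers_available(10, 9));  /* 64: full, wrapped */
	printf("%d\n", buffers_available(10, 10)); /* 1 */
	return 0;
}
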
diff --git a/drivers/net/gtp.c b/drivers/net/gtp.c
index af07ea760b35..672cd2caf2fb 100644
--- a/drivers/net/gtp.c
+++ b/drivers/net/gtp.c
@@ -546,8 +546,8 @@ static int gtp_build_skb_ip4(struct sk_buff *skb, struct net_device *dev,
mtu < ntohs(iph->tot_len)) {
netdev_dbg(dev, "packet too big, fragmentation needed\n");
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
goto err_rt;
}
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index ae3f3084c2ed..1b320bcf150a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -99,7 +99,7 @@ static struct netvsc_device *alloc_net_device(void)
init_waitqueue_head(&net_device->wait_drain);
net_device->destroy = false;
- net_device->tx_disable = false;
+ net_device->tx_disable = true;
net_device->max_pkt = RNDIS_MAX_PKT_DEFAULT;
net_device->pkt_align = RNDIS_PKT_ALIGN_DEFAULT;
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 65e12cb07f45..2c0a24c606fc 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1068,6 +1068,7 @@ static int netvsc_attach(struct net_device *ndev,
}
/* In any case device is now ready */
+ nvdev->tx_disable = false;
netif_device_attach(ndev);
/* Note: enable and attach happen when sub-channels setup */
@@ -2476,6 +2477,8 @@ static int netvsc_probe(struct hv_device *dev,
else
net->max_mtu = ETH_DATA_LEN;
+ nvdev->tx_disable = false;
+
ret = register_netdevice(net);
if (ret != 0) {
pr_err("Unable to register netdev.\n");
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index 7d68b28bb893..a62229a8b1a4 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -410,7 +410,7 @@ static int bcm5481_config_aneg(struct phy_device *phydev)
struct device_node *np = phydev->mdio.dev.of_node;
int ret;
- /* Aneg firsly. */
+ /* Aneg firstly. */
ret = genphy_config_aneg(phydev);
/* Then we can set up the delay. */
@@ -463,7 +463,7 @@ static int bcm54616s_config_aneg(struct phy_device *phydev)
{
int ret;
- /* Aneg firsly. */
+ /* Aneg firstly. */
if (phydev->dev_flags & PHY_BCM_FLAGS_MODE_1000BX)
ret = genphy_c37_config_aneg(phydev);
else
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 28e33ece4ce1..9a8badafea8a 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1306,6 +1306,9 @@ static int marvell_read_status_page_an(struct phy_device *phydev,
}
}
+ if (!(status & MII_M1011_PHY_STATUS_RESOLVED))
+ return 0;
+
if (status & MII_M1011_PHY_STATUS_FULLDUPLEX)
phydev->duplex = DUPLEX_FULL;
else
@@ -1365,6 +1368,8 @@ static int marvell_read_status_page(struct phy_device *phydev, int page)
linkmode_zero(phydev->lp_advertising);
phydev->pause = 0;
phydev->asym_pause = 0;
+ phydev->speed = SPEED_UNKNOWN;
+ phydev->duplex = DUPLEX_UNKNOWN;
if (phydev->autoneg == AUTONEG_ENABLE)
err = marvell_read_status_page_an(phydev, fiber, status);
diff --git a/drivers/net/phy/mdio-bcm-iproc.c b/drivers/net/phy/mdio-bcm-iproc.c
index 7e9975d25066..f1ded03f0229 100644
--- a/drivers/net/phy/mdio-bcm-iproc.c
+++ b/drivers/net/phy/mdio-bcm-iproc.c
@@ -178,6 +178,23 @@ static int iproc_mdio_remove(struct platform_device *pdev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+int iproc_mdio_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct iproc_mdio_priv *priv = platform_get_drvdata(pdev);
+
+ /* restore the mii clock configuration */
+ iproc_mdio_config_clk(priv->base);
+
+ return 0;
+}
+
+static const struct dev_pm_ops iproc_mdio_pm_ops = {
+ .resume = iproc_mdio_resume
+};
+#endif /* CONFIG_PM_SLEEP */
+
static const struct of_device_id iproc_mdio_of_match[] = {
{ .compatible = "brcm,iproc-mdio", },
{ /* sentinel */ },
@@ -188,6 +205,9 @@ static struct platform_driver iproc_mdio_driver = {
.driver = {
.name = "iproc-mdio",
.of_match_table = iproc_mdio_of_match,
+#ifdef CONFIG_PM_SLEEP
+ .pm = &iproc_mdio_pm_ops,
+#endif
},
.probe = iproc_mdio_probe,
.remove = iproc_mdio_remove,
diff --git a/drivers/net/phy/mscc.c b/drivers/net/phy/mscc.c
index 937ac7da2789..f686f40f6bdc 100644
--- a/drivers/net/phy/mscc.c
+++ b/drivers/net/phy/mscc.c
@@ -345,11 +345,11 @@ enum macsec_bank {
BIT(VSC8531_FORCE_LED_OFF) | \
BIT(VSC8531_FORCE_LED_ON))
-#define MSCC_VSC8584_REVB_INT8051_FW "mscc_vsc8584_revb_int8051_fb48.bin"
+#define MSCC_VSC8584_REVB_INT8051_FW "microchip/mscc_vsc8584_revb_int8051_fb48.bin"
#define MSCC_VSC8584_REVB_INT8051_FW_START_ADDR 0xe800
#define MSCC_VSC8584_REVB_INT8051_FW_CRC 0xfb48
-#define MSCC_VSC8574_REVB_INT8051_FW "mscc_vsc8574_revb_int8051_29e8.bin"
+#define MSCC_VSC8574_REVB_INT8051_FW "microchip/mscc_vsc8574_revb_int8051_29e8.bin"
#define MSCC_VSC8574_REVB_INT8051_FW_START_ADDR 0x4000
#define MSCC_VSC8574_REVB_INT8051_FW_CRC 0x29e8
diff --git a/drivers/net/phy/phy-c45.c b/drivers/net/phy/phy-c45.c
index a1caeee12236..dd2e23fb67c0 100644
--- a/drivers/net/phy/phy-c45.c
+++ b/drivers/net/phy/phy-c45.c
@@ -167,7 +167,7 @@ EXPORT_SYMBOL_GPL(genphy_c45_restart_aneg);
*/
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
{
- int ret = 0;
+ int ret;
if (!restart) {
/* Configure and restart aneg if it wasn't set before */
@@ -180,9 +180,9 @@ int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart)
}
if (restart)
- ret = genphy_c45_restart_aneg(phydev);
+ return genphy_c45_restart_aneg(phydev);
- return ret;
+ return 0;
}
EXPORT_SYMBOL_GPL(genphy_c45_check_and_restart_aneg);
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index 6a5056e0ae77..c8b0c34030d3 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -247,7 +247,7 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
* MDIO bus driver and clock gated at this point.
*/
if (!netdev)
- return !phydev->suspended;
+ goto out;
if (netdev->wol_enabled)
return false;
@@ -267,7 +267,8 @@ static bool mdio_bus_phy_may_suspend(struct phy_device *phydev)
if (device_may_wakeup(&netdev->dev))
return false;
- return true;
+out:
+ return !phydev->suspended;
}
static int mdio_bus_phy_suspend(struct device *dev)
@@ -1792,7 +1793,7 @@ EXPORT_SYMBOL(genphy_restart_aneg);
*/
int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
{
- int ret = 0;
+ int ret;
if (!restart) {
/* Advertisement hasn't changed, but maybe aneg was never on to
@@ -1807,9 +1808,9 @@ int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart)
}
if (restart)
- ret = genphy_restart_aneg(phydev);
+ return genphy_restart_aneg(phydev);
- return ret;
+ return 0;
}
EXPORT_SYMBOL(genphy_check_and_restart_aneg);
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index 6f4d7ba8b109..babb01888b78 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -863,7 +863,10 @@ err_free_chan:
tty->disc_data = NULL;
clear_bit(SLF_INUSE, &sl->flags);
sl_free_netdev(sl->dev);
+ /* do not call free_netdev before rtnl_unlock */
+ rtnl_unlock();
free_netdev(sl->dev);
+ return err;
err_exit:
rtnl_unlock();
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 9485c8d1de8a..5754bb6ca0ee 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -61,7 +61,6 @@ enum qmi_wwan_flags {
enum qmi_wwan_quirks {
QMI_WWAN_QUIRK_DTR = 1 << 0, /* needs "set DTR" request */
- QMI_WWAN_QUIRK_QUECTEL_DYNCFG = 1 << 1, /* check num. endpoints */
};
struct qmimux_hdr {
@@ -338,6 +337,9 @@ static void qmi_wwan_netdev_setup(struct net_device *net)
netdev_dbg(net, "mode: raw IP\n");
} else if (!net->header_ops) { /* don't bother if already set */
ether_setup(net);
+ /* Restore the min/max MTU values originally set by usbnet */
+ net->min_mtu = 0;
+ net->max_mtu = ETH_MAX_MTU;
clear_bit(EVENT_NO_IP_ALIGN, &dev->flags);
netdev_dbg(net, "mode: Ethernet\n");
}
@@ -916,16 +918,6 @@ static const struct driver_info qmi_wwan_info_quirk_dtr = {
.data = QMI_WWAN_QUIRK_DTR,
};
-static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
- .description = "WWAN/QMI device",
- .flags = FLAG_WWAN | FLAG_SEND_ZLP,
- .bind = qmi_wwan_bind,
- .unbind = qmi_wwan_unbind,
- .manage_power = qmi_wwan_manage_power,
- .rx_fixup = qmi_wwan_rx_fixup,
- .data = QMI_WWAN_QUIRK_DTR | QMI_WWAN_QUIRK_QUECTEL_DYNCFG,
-};
-
#define HUAWEI_VENDOR_ID 0x12D1
/* map QMI/wwan function by a fixed interface number */
@@ -946,14 +938,18 @@ static const struct driver_info qmi_wwan_info_quirk_quectel_dyncfg = {
#define QMI_GOBI_DEVICE(vend, prod) \
QMI_FIXED_INTF(vend, prod, 0)
-/* Quectel does not use fixed interface numbers on at least some of their
- * devices. We need to check the number of endpoints to ensure that we bind to
- * the correct interface.
+/* Many devices have QMI and DIAG functions which are distinguishable
+ * from other vendor specific functions by class, subclass and
+ * protocol all being 0xff. The DIAG function has exactly 2 endpoints
+ * and is silently rejected when probed.
+ *
+ * This makes it possible to match dynamically numbered QMI functions
+ * as seen on e.g. many Quectel modems.
*/
-#define QMI_QUIRK_QUECTEL_DYNCFG(vend, prod) \
+#define QMI_MATCH_FF_FF_FF(vend, prod) \
USB_DEVICE_AND_INTERFACE_INFO(vend, prod, USB_CLASS_VENDOR_SPEC, \
USB_SUBCLASS_VENDOR_SPEC, 0xff), \
- .driver_info = (unsigned long)&qmi_wwan_info_quirk_quectel_dyncfg
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr
static const struct usb_device_id products[] = {
/* 1. CDC ECM like devices match on the control interface */
@@ -1059,10 +1055,10 @@ static const struct usb_device_id products[] = {
USB_DEVICE_AND_INTERFACE_INFO(0x03f0, 0x581d, USB_CLASS_VENDOR_SPEC, 1, 7),
.driver_info = (unsigned long)&qmi_wwan_info,
},
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
- {QMI_QUIRK_QUECTEL_DYNCFG(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0125)}, /* Quectel EC25, EC20 R2.0 Mini PCIe */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0306)}, /* Quectel EP06/EG06/EM06 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0512)}, /* Quectel EG12/EM12 */
+ {QMI_MATCH_FF_FF_FF(0x2c7c, 0x0800)}, /* Quectel RM500Q-GL */
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1363,6 +1359,7 @@ static const struct usb_device_id products[] = {
{QMI_FIXED_INTF(0x413c, 0x81b6, 8)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81b6, 10)}, /* Dell Wireless 5811e */
{QMI_FIXED_INTF(0x413c, 0x81d7, 0)}, /* Dell Wireless 5821e */
+ {QMI_FIXED_INTF(0x413c, 0x81d7, 1)}, /* Dell Wireless 5821e preproduction config */
{QMI_FIXED_INTF(0x413c, 0x81e0, 0)}, /* Dell Wireless 5821e with eSIM support*/
{QMI_FIXED_INTF(0x03f0, 0x4e1d, 8)}, /* HP lt4111 LTE/EV-DO/HSPA+ Gobi 4G Module */
{QMI_FIXED_INTF(0x03f0, 0x9d1d, 1)}, /* HP lt4120 Snapdragon X5 LTE */
@@ -1454,7 +1451,6 @@ static int qmi_wwan_probe(struct usb_interface *intf,
{
struct usb_device_id *id = (struct usb_device_id *)prod;
struct usb_interface_descriptor *desc = &intf->cur_altsetting->desc;
- const struct driver_info *info;
/* Workaround to enable dynamic IDs. This disables usbnet
* blacklisting functionality. Which, if required, can be
@@ -1490,12 +1486,8 @@ static int qmi_wwan_probe(struct usb_interface *intf,
* different. Ignore the current interface if the number of endpoints
* equals the number for the diag interface (two).
*/
- info = (void *)id->driver_info;
-
- if (info->data & QMI_WWAN_QUIRK_QUECTEL_DYNCFG) {
- if (desc->bNumEndpoints == 2)
- return -ENODEV;
- }
+ if (desc->bNumEndpoints == 2)
+ return -ENODEV;
return usbnet_probe(intf, id);
}
diff --git a/drivers/net/wireguard/device.c b/drivers/net/wireguard/device.c
index 16b19824b9ad..cdc96968b0f4 100644
--- a/drivers/net/wireguard/device.c
+++ b/drivers/net/wireguard/device.c
@@ -203,9 +203,9 @@ err_peer:
err:
++dev->stats.tx_errors;
if (skb->protocol == htons(ETH_P_IP))
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_UNREACH, 0);
else if (skb->protocol == htons(ETH_P_IPV6))
- icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
+ icmpv6_ndo_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_ADDR_UNREACH, 0);
kfree_skb(skb);
return ret;
}
@@ -258,6 +258,8 @@ static void wg_setup(struct net_device *dev)
enum { WG_NETDEV_FEATURES = NETIF_F_HW_CSUM | NETIF_F_RXCSUM |
NETIF_F_SG | NETIF_F_GSO |
NETIF_F_GSO_SOFTWARE | NETIF_F_HIGHDMA };
+ const int overhead = MESSAGE_MINIMUM_LENGTH + sizeof(struct udphdr) +
+ max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
dev->netdev_ops = &netdev_ops;
dev->hard_header_len = 0;
@@ -271,9 +273,8 @@ static void wg_setup(struct net_device *dev)
dev->features |= WG_NETDEV_FEATURES;
dev->hw_features |= WG_NETDEV_FEATURES;
dev->hw_enc_features |= WG_NETDEV_FEATURES;
- dev->mtu = ETH_DATA_LEN - MESSAGE_MINIMUM_LENGTH -
- sizeof(struct udphdr) -
- max(sizeof(struct ipv6hdr), sizeof(struct iphdr));
+ dev->mtu = ETH_DATA_LEN - overhead;
+ dev->max_mtu = round_down(INT_MAX, MESSAGE_PADDING_MULTIPLE) - overhead;
SET_NETDEV_DEVTYPE(dev, &device_type);
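
Factoring the overhead out makes the MTU arithmetic above easy to verify by hand. With the standard header sizes (shown here as illustrative constants where the driver uses sizeof() on the real structs), the familiar WireGuard default of 1420 falls out:

#include <stdio.h>

/* Illustrative sizes: WireGuard's data-message envelope is a 16-byte
 * header plus a 16-byte tag; IPv6's 40-byte header exceeds IPv4's 20,
 * so it bounds the worst case.
 */
#define MESSAGE_MINIMUM_LENGTH	32
#define UDP_HDR_LEN		8
#define OUTER_IP_HDR_LEN	40

int main(void)
{
	int overhead = MESSAGE_MINIMUM_LENGTH + UDP_HDR_LEN +
		       OUTER_IP_HDR_LEN;

	/* prints "overhead=80 default_mtu=1420" */
	printf("overhead=%d default_mtu=%d\n", overhead, 1500 - overhead);
	return 0;
}
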
diff --git a/drivers/net/wireguard/receive.c b/drivers/net/wireguard/receive.c
index 9c6bab9c981f..4a153894cee2 100644
--- a/drivers/net/wireguard/receive.c
+++ b/drivers/net/wireguard/receive.c
@@ -118,10 +118,13 @@ static void wg_receive_handshake_packet(struct wg_device *wg,
under_load = skb_queue_len(&wg->incoming_handshakes) >=
MAX_QUEUED_INCOMING_HANDSHAKES / 8;
- if (under_load)
+ if (under_load) {
last_under_load = ktime_get_coarse_boottime_ns();
- else if (last_under_load)
+ } else if (last_under_load) {
under_load = !wg_birthdate_has_expired(last_under_load, 1);
+ if (!under_load)
+ last_under_load = 0;
+ }
mac_state = wg_cookie_validate_packet(&wg->cookie_checker, skb,
under_load);
if ((under_load && mac_state == VALID_MAC_WITH_COOKIE) ||
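
The reworked branch above gives the under-load state proper hysteresis: it stays sticky for one second after the handshake queue drains, and last_under_load is cleared once that window lapses so a stale timestamp cannot re-trigger it. A sketch of the state machine (times in seconds; all names illustrative):

#include <stdbool.h>
#include <stdio.h>

static double last_under_load;	/* 0 means "not recently loaded" */

static bool effective_under_load(bool queue_loaded, double now)
{
	bool under_load = queue_loaded;

	if (under_load) {
		last_under_load = now;
	} else if (last_under_load != 0) {
		under_load = now - last_under_load < 1.0; /* 1s window */
		if (!under_load)
			last_under_load = 0;	/* the fix: reset on expiry */
	}
	return under_load;
}

int main(void)
{
	printf("%d", effective_under_load(true, 10.0));   /* 1 */
	printf("%d", effective_under_load(false, 10.5));  /* 1: in window */
	printf("%d", effective_under_load(false, 11.5));  /* 0: expired */
	printf("%d\n", effective_under_load(false, 11.6));/* 0: stays off */
	return 0;
}
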
diff --git a/drivers/net/wireguard/send.c b/drivers/net/wireguard/send.c
index c13260563446..7348c10cbae3 100644
--- a/drivers/net/wireguard/send.c
+++ b/drivers/net/wireguard/send.c
@@ -143,16 +143,22 @@ static void keep_key_fresh(struct wg_peer *peer)
static unsigned int calculate_skb_padding(struct sk_buff *skb)
{
+ unsigned int padded_size, last_unit = skb->len;
+
+ if (unlikely(!PACKET_CB(skb)->mtu))
+ return ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE) - last_unit;
+
/* We do this modulo business with the MTU, just in case the networking
* layer gives us a packet that's bigger than the MTU. In that case, we
* wouldn't want the final subtraction to overflow in the case of the
- * padded_size being clamped.
+ * padded_size being clamped. Fortunately, that's very rarely the case,
+ * so we optimize for that not happening.
*/
- unsigned int last_unit = skb->len % PACKET_CB(skb)->mtu;
- unsigned int padded_size = ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE);
+ if (unlikely(last_unit > PACKET_CB(skb)->mtu))
+ last_unit %= PACKET_CB(skb)->mtu;
- if (padded_size > PACKET_CB(skb)->mtu)
- padded_size = PACKET_CB(skb)->mtu;
+ padded_size = min(PACKET_CB(skb)->mtu,
+ ALIGN(last_unit, MESSAGE_PADDING_MULTIPLE));
return padded_size - last_unit;
}
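
The rewritten calculate_skb_padding() above rounds the final plaintext unit up to the padding multiple while never exceeding the MTU. A compact standalone version of the same arithmetic (the multiple of 16 matches MESSAGE_PADDING_MULTIPLE in the in-tree driver):

#include <stdio.h>

#define PADDING_MULTIPLE 16
#define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static unsigned int skb_padding(unsigned int len, unsigned int mtu)
{
	unsigned int last_unit = len;
	unsigned int padded_size;

	if (!mtu)	/* no MTU known: just round up */
		return ALIGN_UP(last_unit, PADDING_MULTIPLE) - last_unit;

	if (last_unit > mtu)	/* oversized packet: pad within the MTU */
		last_unit %= mtu;

	padded_size = MIN(mtu, ALIGN_UP(last_unit, PADDING_MULTIPLE));
	return padded_size - last_unit;
}

int main(void)
{
	printf("%u\n", skb_padding(100, 1420));	 /* 12: 100 -> 112 */
	printf("%u\n", skb_padding(1415, 1420)); /* 5: clamped to MTU */
	return 0;
}
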
diff --git a/drivers/net/wireguard/socket.c b/drivers/net/wireguard/socket.c
index 262f3b5c819d..b0d6541582d3 100644
--- a/drivers/net/wireguard/socket.c
+++ b/drivers/net/wireguard/socket.c
@@ -432,7 +432,6 @@ void wg_socket_reinit(struct wg_device *wg, struct sock *new4,
wg->incoming_port = ntohs(inet_sk(new4)->inet_sport);
mutex_unlock(&wg->socket_update_lock);
synchronize_rcu();
- synchronize_net();
sock_free(old4);
sock_free(old6);
}
diff --git a/drivers/nfc/pn544/i2c.c b/drivers/nfc/pn544/i2c.c
index 720c89d6066e..4ac8cb262559 100644
--- a/drivers/nfc/pn544/i2c.c
+++ b/drivers/nfc/pn544/i2c.c
@@ -225,6 +225,7 @@ static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy)
out:
gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity);
+ usleep_range(10000, 15000);
}
static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode)
diff --git a/drivers/nfc/pn544/pn544.c b/drivers/nfc/pn544/pn544.c
index 2b83156efe3f..b788870473e8 100644
--- a/drivers/nfc/pn544/pn544.c
+++ b/drivers/nfc/pn544/pn544.c
@@ -682,7 +682,7 @@ static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb)
static int pn544_hci_check_presence(struct nfc_hci_dev *hdev,
struct nfc_target *target)
{
- pr_debug("supported protocol %d\b", target->supported_protocols);
+ pr_debug("supported protocol %d\n", target->supported_protocols);
if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK |
NFC_PROTO_ISO14443_B_MASK)) {
return nfc_hci_send_cmd(hdev, target->hci_reader_gate,
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 5dc32b72e7fa..a4d8c90ee7cc 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -66,8 +66,8 @@ MODULE_PARM_DESC(streams, "turn on support for Streams write directives");
* nvme_reset_wq - hosts nvme reset works
* nvme_delete_wq - hosts nvme delete works
*
- * nvme_wq will host works such are scan, aen handling, fw activation,
- * keep-alive error recovery, periodic reconnects etc. nvme_reset_wq
+ * nvme_wq will host works such as scan, aen handling, fw activation,
+ * keep-alive, periodic reconnects etc. nvme_reset_wq
* runs reset works which also flush works hosted on nvme_wq for
* serialization purposes. nvme_delete_wq hosts controller deletion
* works which flush reset works for serialization.
@@ -976,7 +976,7 @@ static void nvme_keep_alive_end_io(struct request *rq, blk_status_t status)
startka = true;
spin_unlock_irqrestore(&ctrl->lock, flags);
if (startka)
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static int nvme_keep_alive(struct nvme_ctrl *ctrl)
@@ -1006,7 +1006,7 @@ static void nvme_keep_alive_work(struct work_struct *work)
dev_dbg(ctrl->device,
"reschedule traffic based keep-alive timer\n");
ctrl->comp_seen = false;
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
return;
}
@@ -1023,7 +1023,7 @@ static void nvme_start_keep_alive(struct nvme_ctrl *ctrl)
if (unlikely(ctrl->kato == 0))
return;
- schedule_delayed_work(&ctrl->ka_work, ctrl->kato * HZ);
+ queue_delayed_work(nvme_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
void nvme_stop_keep_alive(struct nvme_ctrl *ctrl)
@@ -1165,8 +1165,8 @@ static int nvme_identify_ns(struct nvme_ctrl *ctrl,
static int nvme_features(struct nvme_ctrl *dev, u8 op, unsigned int fid,
unsigned int dword11, void *buffer, size_t buflen, u32 *result)
{
+ union nvme_result res = { 0 };
struct nvme_command c;
- union nvme_result res;
int ret;
memset(&c, 0, sizeof(c));
@@ -3867,7 +3867,7 @@ static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
if (!log)
return;
- if (nvme_get_log(ctrl, NVME_NSID_ALL, 0, NVME_LOG_FW_SLOT, log,
+ if (nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_FW_SLOT, 0, log,
sizeof(*log), 0))
dev_warn(ctrl->device, "Get FW SLOT INFO log error\n");
kfree(log);
diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
index 797c18337d96..a11900cf3a36 100644
--- a/drivers/nvme/host/multipath.c
+++ b/drivers/nvme/host/multipath.c
@@ -715,6 +715,7 @@ int nvme_mpath_init(struct nvme_ctrl *ctrl, struct nvme_id_ctrl *id)
}
INIT_WORK(&ctrl->ana_work, nvme_ana_work);
+ kfree(ctrl->ana_log_buf);
ctrl->ana_log_buf = kmalloc(ctrl->ana_log_size, GFP_KERNEL);
if (!ctrl->ana_log_buf) {
error = -ENOMEM;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index da392b50f73e..d3f23d6254e4 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -1078,9 +1078,9 @@ static int nvme_poll(struct blk_mq_hw_ctx *hctx)
spin_lock(&nvmeq->cq_poll_lock);
found = nvme_process_cq(nvmeq, &start, &end, -1);
+ nvme_complete_cqes(nvmeq, start, end);
spin_unlock(&nvmeq->cq_poll_lock);
- nvme_complete_cqes(nvmeq, start, end);
return found;
}
@@ -1401,6 +1401,23 @@ static void nvme_disable_admin_queue(struct nvme_dev *dev, bool shutdown)
nvme_poll_irqdisable(nvmeq, -1);
}
+/*
+ * Called only on a device that has been disabled and after all other threads
+ * that can check this device's completion queues have synced. This is the
+ * last chance for the driver to see a natural completion before
+ * nvme_cancel_request() terminates all incomplete requests.
+ */
+static void nvme_reap_pending_cqes(struct nvme_dev *dev)
+{
+ u16 start, end;
+ int i;
+
+ for (i = dev->ctrl.queue_count - 1; i > 0; i--) {
+ nvme_process_cq(&dev->queues[i], &start, &end, -1);
+ nvme_complete_cqes(&dev->queues[i], start, end);
+ }
+}
+
static int nvme_cmb_qdepth(struct nvme_dev *dev, int nr_io_queues,
int entry_size)
{
@@ -2235,11 +2252,6 @@ static bool __nvme_disable_io_queues(struct nvme_dev *dev, u8 opcode)
if (timeout == 0)
return false;
- /* handle any remaining CQEs */
- if (opcode == nvme_admin_delete_cq &&
- !test_bit(NVMEQ_DELETE_ERROR, &nvmeq->flags))
- nvme_poll_irqdisable(nvmeq, -1);
-
sent--;
if (nr_queues)
goto retry;
@@ -2428,6 +2440,7 @@ static void nvme_dev_disable(struct nvme_dev *dev, bool shutdown)
nvme_suspend_io_queues(dev);
nvme_suspend_queue(&dev->queues[0]);
nvme_pci_disable(dev);
+ nvme_reap_pending_cqes(dev);
blk_mq_tagset_busy_iter(&dev->tagset, nvme_cancel_request, &dev->ctrl);
blk_mq_tagset_busy_iter(&dev->admin_tagset, nvme_cancel_request, &dev->ctrl);
@@ -2734,6 +2747,18 @@ static unsigned long check_vendor_combination_bug(struct pci_dev *pdev)
(dmi_match(DMI_BOARD_NAME, "PRIME B350M-A") ||
dmi_match(DMI_BOARD_NAME, "PRIME Z370-A")))
return NVME_QUIRK_NO_APST;
+ } else if ((pdev->vendor == 0x144d && (pdev->device == 0xa801 ||
+ pdev->device == 0xa808 || pdev->device == 0xa809)) ||
+ (pdev->vendor == 0x1e0f && pdev->device == 0x0001)) {
+ /*
+ * Force host-managed NVMe power settings for lowest
+ * idle power and quick resume latency on Samsung and
+ * Toshiba SSDs, based on the suspend behavior observed
+ * on the Coffee Lake board of the LENOVO C640.
+ */
+ if ((dmi_match(DMI_BOARD_VENDOR, "LENOVO")) &&
+ dmi_match(DMI_BOARD_NAME, "LNVNB161216"))
+ return NVME_QUIRK_SIMPLE_SUSPEND;
}
return 0;
@@ -3096,7 +3121,8 @@ static const struct pci_device_id nvme_id_table[] = {
.driver_data = NVME_QUIRK_NO_DEEPEST_PS |
NVME_QUIRK_IGNORE_DEV_SUBNQN, },
{ PCI_DEVICE_CLASS(PCI_CLASS_STORAGE_EXPRESS, 0xffffff) },
- { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001) },
+ { PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2001),
+ .driver_data = NVME_QUIRK_SINGLE_VECTOR },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2003) },
{ PCI_DEVICE(PCI_VENDOR_ID_APPLE, 0x2005),
.driver_data = NVME_QUIRK_SINGLE_VECTOR |
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index 2a47c6c5007e..3e85c5cacefd 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -1088,7 +1088,7 @@ static void nvme_rdma_error_recovery(struct nvme_rdma_ctrl *ctrl)
if (!nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_RESETTING))
return;
- queue_work(nvme_wq, &ctrl->err_work);
+ queue_work(nvme_reset_wq, &ctrl->err_work);
}
static void nvme_rdma_wr_error(struct ib_cq *cq, struct ib_wc *wc,
diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c
index 6d43b23a0fc8..49d4373b84eb 100644
--- a/drivers/nvme/host/tcp.c
+++ b/drivers/nvme/host/tcp.c
@@ -422,7 +422,7 @@ static void nvme_tcp_error_recovery(struct nvme_ctrl *ctrl)
if (!nvme_change_ctrl_state(ctrl, NVME_CTRL_RESETTING))
return;
- queue_work(nvme_wq, &to_tcp_ctrl(ctrl)->err_work);
+ queue_work(nvme_reset_wq, &to_tcp_ctrl(ctrl)->err_work);
}
static int nvme_tcp_process_nvme_cqe(struct nvme_tcp_queue *queue,
@@ -1054,7 +1054,12 @@ static void nvme_tcp_io_work(struct work_struct *w)
} else if (unlikely(result < 0)) {
dev_err(queue->ctrl->ctrl.device,
"failed to send request %d\n", result);
- if (result != -EPIPE)
+
+ /*
+ * Fail the request unless peer closed the connection,
+ * in which case error recovery flow will complete all.
+ */
+ if ((result != -EPIPE) && (result != -ECONNRESET))
nvme_tcp_fail_request(queue->request);
nvme_tcp_done_send_req(queue);
return;
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index d20aabc26273..3a10e678c7f4 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -670,7 +670,7 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
* outbound memory @ 3GB). So instead it will start at the 1x
* multiple of its size
*/
- if (!*rc_bar2_size || *rc_bar2_offset % *rc_bar2_size ||
+ if (!*rc_bar2_size || (*rc_bar2_offset & (*rc_bar2_size - 1)) ||
(*rc_bar2_offset < SZ_4G && *rc_bar2_offset > SZ_2G)) {
dev_err(dev, "Invalid rc_bar2_offset/size: size 0x%llx, off 0x%llx\n",
*rc_bar2_size, *rc_bar2_offset);
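
The modulo-to-mask change is valid because rc_bar2_size is a power of two, and the mask form sidesteps a 64-bit modulo, which becomes an out-of-line helper on 32-bit builds. A minimal sketch of the equivalence, assuming a non-zero power-of-two size:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_aligned(uint64_t off, uint64_t size)
{
	/* assumes size is a non-zero power of two */
	return (off & (size - 1)) == 0;
}

int main(void)
{
	printf("%d\n", is_aligned(0xC0000000ULL, 0x40000000ULL));	/* 1 */
	printf("%d\n", is_aligned(0xC0100000ULL, 0x40000000ULL));	/* 0 */
	return 0;
}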
diff --git a/drivers/perf/arm_pmu_acpi.c b/drivers/perf/arm_pmu_acpi.c
index acce8781c456..f5c7a845cd7b 100644
--- a/drivers/perf/arm_pmu_acpi.c
+++ b/drivers/perf/arm_pmu_acpi.c
@@ -24,8 +24,6 @@ static int arm_pmu_acpi_register_irq(int cpu)
int gsi, trigger;
gicc = acpi_cpu_get_madt_gicc(cpu);
- if (WARN_ON(!gicc))
- return -EINVAL;
gsi = gicc->performance_interrupt;
@@ -64,11 +62,10 @@ static void arm_pmu_acpi_unregister_irq(int cpu)
int gsi;
gicc = acpi_cpu_get_madt_gicc(cpu);
- if (!gicc)
- return;
gsi = gicc->performance_interrupt;
- acpi_unregister_gsi(gsi);
+ if (gsi)
+ acpi_unregister_gsi(gsi);
}
#if IS_ENABLED(CONFIG_ARM_SPE_PMU)
diff --git a/drivers/perf/arm_smmuv3_pmu.c b/drivers/perf/arm_smmuv3_pmu.c
index d704eccc548f..f01a57e5a5f3 100644
--- a/drivers/perf/arm_smmuv3_pmu.c
+++ b/drivers/perf/arm_smmuv3_pmu.c
@@ -771,7 +771,7 @@ static int smmu_pmu_probe(struct platform_device *pdev)
smmu_pmu->reloc_base = smmu_pmu->reg_base;
}
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq_optional(pdev, 0);
if (irq > 0)
smmu_pmu->irq = irq;
diff --git a/drivers/perf/fsl_imx8_ddr_perf.c b/drivers/perf/fsl_imx8_ddr_perf.c
index 95dca2cb5265..90884d14f95f 100644
--- a/drivers/perf/fsl_imx8_ddr_perf.c
+++ b/drivers/perf/fsl_imx8_ddr_perf.c
@@ -388,9 +388,10 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
if (enable) {
/*
- * must disable first, then enable again
- * otherwise, cycle counter will not work
- * if previous state is enabled.
+		 * The cycle counter is special: to clear it, first write 0
+		 * and then 1 to the CLEAR bit. The other counters only need
+		 * 0 written to the CLEAR bit; the hardware then sets it to
+		 * 1 on its own. The enable flow below is harmless for all
+		 * counters.
*/
writel(0, pmu->base + reg);
val = CNTL_EN | CNTL_CLEAR;
@@ -398,7 +399,8 @@ static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
writel(val, pmu->base + reg);
} else {
/* Disable counter */
- writel(0, pmu->base + reg);
+ val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
+ writel(val, pmu->base + reg);
}
}
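
The new disable path reads the control register back and clears only the enable bit rather than zeroing the whole register, which would also wipe the event configuration. A generic read-modify-write sketch with a hypothetical bit layout:

#include <stdio.h>
#include <stdint.h>

#define CNTL_EN		(1u << 0)
#define CNTL_EN_MASK	(~CNTL_EN)	/* keep everything else */

int main(void)
{
	uint32_t reg = 0xA5A5A5A5u | CNTL_EN;	/* enabled, configured */

	reg &= CNTL_EN_MASK;		/* disable, preserve config */
	printf("%#x\n", reg);		/* 0xa5a5a5a4 */
	return 0;
}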
diff --git a/drivers/phy/allwinner/phy-sun50i-usb3.c b/drivers/phy/allwinner/phy-sun50i-usb3.c
index 1169f3e83a6f..b1c04f71a31d 100644
--- a/drivers/phy/allwinner/phy-sun50i-usb3.c
+++ b/drivers/phy/allwinner/phy-sun50i-usb3.c
@@ -49,7 +49,7 @@
#define SUNXI_LOS_BIAS(n) ((n) << 3)
#define SUNXI_LOS_BIAS_MASK GENMASK(5, 3)
#define SUNXI_TXVBOOSTLVL(n) ((n) << 0)
-#define SUNXI_TXVBOOSTLVL_MASK GENMASK(0, 2)
+#define SUNXI_TXVBOOSTLVL_MASK GENMASK(2, 0)
struct sun50i_usb3_phy {
struct phy *phy;
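
The GENMASK() fix is easy to misread: the macro takes the high bit first, so GENMASK(2, 0) is bits 2..0 while GENMASK(0, 2) produces an empty mask. A user-space re-implementation of the macro (a sketch, not the kernel header) makes the difference visible:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	printf("GENMASK(2, 0) = %#lx\n", GENMASK(2, 0));	/* 0x7 */
	printf("GENMASK(0, 2) = %#lx\n", GENMASK(0, 2));	/* 0: empty */
	return 0;
}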
diff --git a/drivers/phy/broadcom/phy-brcm-sata.c b/drivers/phy/broadcom/phy-brcm-sata.c
index 4710cfcc3037..18251f232172 100644
--- a/drivers/phy/broadcom/phy-brcm-sata.c
+++ b/drivers/phy/broadcom/phy-brcm-sata.c
@@ -186,29 +186,6 @@ enum sata_phy_ctrl_regs {
PHY_CTRL_1_RESET = BIT(0),
};
-static inline void __iomem *brcm_sata_pcb_base(struct brcm_sata_port *port)
-{
- struct brcm_sata_phy *priv = port->phy_priv;
- u32 size = 0;
-
- switch (priv->version) {
- case BRCM_SATA_PHY_STB_16NM:
- case BRCM_SATA_PHY_STB_28NM:
- case BRCM_SATA_PHY_IPROC_NS2:
- case BRCM_SATA_PHY_DSL_28NM:
- size = SATA_PCB_REG_28NM_SPACE_SIZE;
- break;
- case BRCM_SATA_PHY_STB_40NM:
- size = SATA_PCB_REG_40NM_SPACE_SIZE;
- break;
- default:
- dev_err(priv->dev, "invalid phy version\n");
- break;
- }
-
- return priv->phy_base + (port->portnum * size);
-}
-
static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
{
struct brcm_sata_phy *priv = port->phy_priv;
@@ -226,19 +203,34 @@ static inline void __iomem *brcm_sata_ctrl_base(struct brcm_sata_port *port)
return priv->ctrl_base + (port->portnum * size);
}
-static void brcm_sata_phy_wr(void __iomem *pcb_base, u32 bank,
+static void brcm_sata_phy_wr(struct brcm_sata_port *port, u32 bank,
u32 ofs, u32 msk, u32 value)
{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ void __iomem *pcb_base = priv->phy_base;
u32 tmp;
+ if (priv->version == BRCM_SATA_PHY_STB_40NM)
+ bank += (port->portnum * SATA_PCB_REG_40NM_SPACE_SIZE);
+ else
+ pcb_base += (port->portnum * SATA_PCB_REG_28NM_SPACE_SIZE);
+
writel(bank, pcb_base + SATA_PCB_BANK_OFFSET);
tmp = readl(pcb_base + SATA_PCB_REG_OFFSET(ofs));
tmp = (tmp & msk) | value;
writel(tmp, pcb_base + SATA_PCB_REG_OFFSET(ofs));
}
-static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
+static u32 brcm_sata_phy_rd(struct brcm_sata_port *port, u32 bank, u32 ofs)
{
+ struct brcm_sata_phy *priv = port->phy_priv;
+ void __iomem *pcb_base = priv->phy_base;
+
+ if (priv->version == BRCM_SATA_PHY_STB_40NM)
+ bank += (port->portnum * SATA_PCB_REG_40NM_SPACE_SIZE);
+ else
+ pcb_base += (port->portnum * SATA_PCB_REG_28NM_SPACE_SIZE);
+
writel(bank, pcb_base + SATA_PCB_BANK_OFFSET);
return readl(pcb_base + SATA_PCB_REG_OFFSET(ofs));
}
@@ -250,16 +242,15 @@ static u32 brcm_sata_phy_rd(void __iomem *pcb_base, u32 bank, u32 ofs)
static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
struct brcm_sata_phy *priv = port->phy_priv;
u32 tmp;
/* override the TX spread spectrum setting */
tmp = TXPMD_CONTROL1_TX_SSC_EN_FRC_VAL | TXPMD_CONTROL1_TX_SSC_EN_FRC;
- brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp);
+ brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_CONTROL1, ~tmp, tmp);
/* set fixed min freq */
- brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
+ brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL2,
~TXPMD_TX_FREQ_CTRL_CONTROL2_FMIN_MASK,
STB_FMIN_VAL_DEFAULT);
@@ -271,7 +262,7 @@ static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
tmp = STB_FMAX_VAL_DEFAULT;
}
- brcm_sata_phy_wr(base, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
+ brcm_sata_phy_wr(port, TXPMD_REG_BANK, TXPMD_TX_FREQ_CTRL_CONTROL3,
~TXPMD_TX_FREQ_CTRL_CONTROL3_FMAX_MASK, tmp);
}
@@ -280,7 +271,6 @@ static void brcm_stb_sata_ssc_init(struct brcm_sata_port *port)
static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
u32 tmp = 0, reg = 0;
switch (port->rxaeq_mode) {
@@ -301,8 +291,8 @@ static int brcm_stb_sata_rxaeq_init(struct brcm_sata_port *port)
break;
}
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, reg, ~tmp, tmp);
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, reg, ~tmp, tmp);
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_0, reg, ~tmp, tmp);
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, reg, ~tmp, tmp);
return 0;
}
@@ -316,18 +306,17 @@ static int brcm_stb_sata_init(struct brcm_sata_port *port)
static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
u32 tmp, value;
/* Reduce CP tail current to 1/16th of its default value */
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0x141);
/* Turn off CP tail current boost */
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL8, 0, 0xc006);
/* Set a specific AEQ equalizer value */
tmp = AEQ_FRC_EQ_FORCE_VAL | AEQ_FRC_EQ_FORCE;
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_0, AEQ_FRC_EQ,
~(tmp | AEQ_RFZ_FRC_VAL |
AEQ_FRC_EQ_VAL_MASK << AEQ_FRC_EQ_VAL_SHIFT),
tmp | 32 << AEQ_FRC_EQ_VAL_SHIFT);
@@ -337,7 +326,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
value = 0x52;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CONTROL1,
~RXPMD_RX_PPM_VAL_MASK, value);
/* Set proportional loop bandwith Gen1/2/3 */
@@ -352,7 +341,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
value = 1 << RXPMD_G1_CDR_PROP_BW_SHIFT |
1 << RXPMD_G2_CDR_PROP_BW_SHIFT |
1 << RXPMD_G3_CDR_PROB_BW_SHIFT;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_PROP_BW, ~tmp,
value);
/* Set CDR integral loop acquisition bandwidth for Gen1/2/3 */
@@ -365,7 +354,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
1 << RXPMD_G3_CDR_ACQ_INT_BW_SHIFT;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_ACQ_INTEG_BW,
~tmp, value);
/* Set CDR integral loop locking bandwidth to 1 for Gen 1/2/3 */
@@ -378,7 +367,7 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
1 << RXPMD_G3_CDR_LOCK_INT_BW_SHIFT;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_CDR_CDR_LOCK_INTEG_BW,
~tmp, value);
/* Set no guard band and clamp CDR */
@@ -387,11 +376,11 @@ static int brcm_stb_sata_16nm_ssc_init(struct brcm_sata_port *port)
value = 0x51;
else
value = 0;
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
~tmp, RXPMD_MON_CORRECT_EN | value);
/* Turn on/off SSC */
- brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
+ brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL5, ~TX_ACTRL5_SSC_EN,
port->ssc_en ? TX_ACTRL5_SSC_EN : 0);
return 0;
@@ -411,7 +400,6 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
{
int try;
unsigned int val;
- void __iomem *base = brcm_sata_pcb_base(port);
void __iomem *ctrl_base = brcm_sata_ctrl_base(port);
struct device *dev = port->phy_priv->dev;
@@ -421,24 +409,24 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
val |= (0x4 << OOB_CTRL1_BURST_MIN_SHIFT);
val |= (0x9 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
val |= (0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
val = 0x0;
val |= (0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
val |= (0x2 << OOB_CTRL2_BURST_CNT_SHIFT);
val |= (0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
/* Configure PHY PLL register bank 1 */
val = NS2_PLL1_ACTRL2_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
val = NS2_PLL1_ACTRL3_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
val = NS2_PLL1_ACTRL4_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
/* Configure PHY BLOCK0 register bank */
/* Set oob_clk_sel to refclk/2 */
- brcm_sata_phy_wr(base, BLOCK0_REG_BANK, BLOCK0_SPARE,
+ brcm_sata_phy_wr(port, BLOCK0_REG_BANK, BLOCK0_SPARE,
~BLOCK0_SPARE_OOB_CLK_SEL_MASK,
BLOCK0_SPARE_OOB_CLK_SEL_REFBY2);
@@ -451,7 +439,7 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
/* Wait for PHY PLL lock by polling pll_lock bit */
try = 50;
while (try) {
- val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -471,9 +459,7 @@ static int brcm_ns2_sata_init(struct brcm_sata_port *port)
static int brcm_nsp_sata_init(struct brcm_sata_port *port)
{
- struct brcm_sata_phy *priv = port->phy_priv;
struct device *dev = port->phy_priv->dev;
- void __iomem *base = priv->phy_base;
unsigned int oob_bank;
unsigned int val, try;
@@ -490,36 +476,36 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port)
val |= (0x06 << OOB_CTRL1_BURST_MIN_SHIFT);
val |= (0x0f << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT);
val |= (0x06 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, oob_bank, OOB_CTRL1, 0x0, val);
+ brcm_sata_phy_wr(port, oob_bank, OOB_CTRL1, 0x0, val);
val = 0x0;
val |= (0x2e << OOB_CTRL2_RESET_IDLE_MAX_SHIFT);
val |= (0x02 << OOB_CTRL2_BURST_CNT_SHIFT);
val |= (0x16 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT);
- brcm_sata_phy_wr(base, oob_bank, OOB_CTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, oob_bank, OOB_CTRL2, 0x0, val);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL2,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_ACTRL2,
~(PLL_ACTRL2_SELDIV_MASK << PLL_ACTRL2_SELDIV_SHIFT),
0x0c << PLL_ACTRL2_SELDIV_SHIFT);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CONTROL,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_CAP_CONTROL,
0xff0, 0x4f0);
val = PLLCONTROL_0_FREQ_DET_RESTART | PLLCONTROL_0_FREQ_MONITOR;
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
~val, val);
val = PLLCONTROL_0_SEQ_START;
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
~val, 0);
mdelay(10);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
~val, val);
/* Wait for pll_seq_done bit */
try = 50;
while (--try) {
- val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -546,27 +532,25 @@ static int brcm_nsp_sata_init(struct brcm_sata_port *port)
static int brcm_sr_sata_init(struct brcm_sata_port *port)
{
- struct brcm_sata_phy *priv = port->phy_priv;
struct device *dev = port->phy_priv->dev;
- void __iomem *base = priv->phy_base;
unsigned int val, try;
/* Configure PHY PLL register bank 1 */
val = SR_PLL1_ACTRL2_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL2, 0x0, val);
val = SR_PLL1_ACTRL3_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL3, 0x0, val);
val = SR_PLL1_ACTRL4_MAGIC;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL4, 0x0, val);
/* Configure PHY PLL register bank 0 */
val = SR_PLL0_ACTRL6_MAGIC;
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_ACTRL6, 0x0, val);
/* Wait for PHY PLL lock by polling pll_lock bit */
try = 50;
do {
- val = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ val = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (val & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -581,7 +565,7 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
}
/* Invert Tx polarity */
- brcm_sata_phy_wr(base, TX_REG_BANK, TX_ACTRL0,
+ brcm_sata_phy_wr(port, TX_REG_BANK, TX_ACTRL0,
~TX_ACTRL0_TXPOL_FLIP, TX_ACTRL0_TXPOL_FLIP);
/* Configure OOB control to handle 100MHz reference clock */
@@ -589,52 +573,51 @@ static int brcm_sr_sata_init(struct brcm_sata_port *port)
(0x4 << OOB_CTRL1_BURST_MIN_SHIFT) |
(0x8 << OOB_CTRL1_WAKE_IDLE_MAX_SHIFT) |
(0x3 << OOB_CTRL1_WAKE_IDLE_MIN_SHIFT));
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL1, 0x0, val);
val = ((0x1b << OOB_CTRL2_RESET_IDLE_MAX_SHIFT) |
(0x2 << OOB_CTRL2_BURST_CNT_SHIFT) |
(0x9 << OOB_CTRL2_RESET_IDLE_MIN_SHIFT));
- brcm_sata_phy_wr(base, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
+ brcm_sata_phy_wr(port, OOB_REG_BANK, OOB_CTRL2, 0x0, val);
return 0;
}
static int brcm_dsl_sata_init(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
struct device *dev = port->phy_priv->dev;
unsigned int try;
u32 tmp;
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL7, 0, 0x873);
- brcm_sata_phy_wr(base, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
+ brcm_sata_phy_wr(port, PLL1_REG_BANK, PLL1_ACTRL6, 0, 0xc000);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
0, 0x3089);
usleep_range(1000, 2000);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_REG_BANK_0_PLLCONTROL_0,
0, 0x3088);
usleep_range(1000, 2000);
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, AEQRX_SLCAL0_CTRL0,
0, 0x3000);
- brcm_sata_phy_wr(base, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
+ brcm_sata_phy_wr(port, AEQRX_REG_BANK_1, AEQRX_SLCAL1_CTRL0,
0, 0x3000);
usleep_range(1000, 2000);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_CAP_CHARGE_TIME, 0, 0x32);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_VCO_CAL_THRESH, 0, 0xa);
- brcm_sata_phy_wr(base, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
+ brcm_sata_phy_wr(port, PLL_REG_BANK_0, PLL_FREQ_DET_TIME, 0, 0x64);
usleep_range(1000, 2000);
/* Acquire PLL lock */
try = 50;
while (try) {
- tmp = brcm_sata_phy_rd(base, BLOCK0_REG_BANK,
+ tmp = brcm_sata_phy_rd(port, BLOCK0_REG_BANK,
BLOCK0_XGXSSTATUS);
if (tmp & BLOCK0_XGXSSTATUS_PLL_LOCK)
break;
@@ -687,10 +670,9 @@ static int brcm_sata_phy_init(struct phy *phy)
static void brcm_stb_sata_calibrate(struct brcm_sata_port *port)
{
- void __iomem *base = brcm_sata_pcb_base(port);
u32 tmp = BIT(8);
- brcm_sata_phy_wr(base, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
+ brcm_sata_phy_wr(port, RXPMD_REG_BANK, RXPMD_RX_FREQ_MON_CONTROL1,
~tmp, tmp);
}
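
The refactor folds brcm_sata_pcb_base() into the register accessors: on the 40nm PHY all ports share one register window and the bank offset scales with the port number, while the newer PHYs give each port its own window. A stand-alone sketch with made-up space sizes (the real SATA_PCB_REG_*_SPACE_SIZE values differ):

#include <stdio.h>
#include <stdint.h>

#define SPACE_28NM 0x1000	/* illustrative only */
#define SPACE_40NM 0x10		/* illustrative only */

enum ver { V_28NM, V_40NM };

static void port_addr(enum ver v, unsigned int port, uint32_t bank,
		      uint32_t *base_off, uint32_t *bank_out)
{
	*base_off = 0;
	*bank_out = bank;
	if (v == V_40NM)
		*bank_out += port * SPACE_40NM;	/* banked per port */
	else
		*base_off += port * SPACE_28NM;	/* window per port */
}

int main(void)
{
	uint32_t off, bank;

	port_addr(V_40NM, 1, 0x50, &off, &bank);
	printf("40nm: base+%#x bank %#x\n", off, bank);
	port_addr(V_28NM, 1, 0x50, &off, &bank);
	printf("28nm: base+%#x bank %#x\n", off, bank);
	return 0;
}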
diff --git a/drivers/phy/motorola/phy-mapphone-mdm6600.c b/drivers/phy/motorola/phy-mapphone-mdm6600.c
index f20524f0c21d..94a34cf75eb3 100644
--- a/drivers/phy/motorola/phy-mapphone-mdm6600.c
+++ b/drivers/phy/motorola/phy-mapphone-mdm6600.c
@@ -20,6 +20,7 @@
#define PHY_MDM6600_PHY_DELAY_MS 4000 /* PHY enable 2.2s to 3.5s */
#define PHY_MDM6600_ENABLED_DELAY_MS 8000 /* 8s more total for MDM6600 */
+#define PHY_MDM6600_WAKE_KICK_MS 600 /* time on after GPIO toggle */
#define MDM6600_MODEM_IDLE_DELAY_MS 1000 /* modem after USB suspend */
#define MDM6600_MODEM_WAKE_DELAY_MS 200 /* modem response after idle */
@@ -243,10 +244,24 @@ static irqreturn_t phy_mdm6600_wakeirq_thread(int irq, void *data)
{
struct phy_mdm6600 *ddata = data;
struct gpio_desc *mode_gpio1;
+ int error, wakeup;
mode_gpio1 = ddata->mode_gpios->desc[PHY_MDM6600_MODE1];
- dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n",
- gpiod_get_value(mode_gpio1));
+ wakeup = gpiod_get_value(mode_gpio1);
+ if (!wakeup)
+ return IRQ_NONE;
+
+ dev_dbg(ddata->dev, "OOB wake on mode_gpio1: %i\n", wakeup);
+ error = pm_runtime_get_sync(ddata->dev);
+ if (error < 0) {
+ pm_runtime_put_noidle(ddata->dev);
+
+ return IRQ_NONE;
+ }
+
+	/* Just wake up and kick the autosuspend timer */
+ pm_runtime_mark_last_busy(ddata->dev);
+ pm_runtime_put_autosuspend(ddata->dev);
return IRQ_HANDLED;
}
@@ -496,8 +511,14 @@ static void phy_mdm6600_modem_wake(struct work_struct *work)
ddata = container_of(work, struct phy_mdm6600, modem_wake_work.work);
phy_mdm6600_wake_modem(ddata);
+
+ /*
+	 * The modem does not always stay awake for 1.2 seconds after
+	 * toggling the wake GPIO, and sometimes it idles after about
+	 * 600 ms, making writes time out.
+ */
schedule_delayed_work(&ddata->modem_wake_work,
- msecs_to_jiffies(MDM6600_MODEM_IDLE_DELAY_MS));
+ msecs_to_jiffies(PHY_MDM6600_WAKE_KICK_MS));
}
static int __maybe_unused phy_mdm6600_runtime_suspend(struct device *dev)
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index cd5a6c95dbdc..a27b8d578d7f 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -688,11 +688,9 @@ struct phy *phy_get(struct device *dev, const char *string)
get_device(&phy->dev);
link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
- if (!link) {
- dev_err(dev, "failed to create device link to %s\n",
+ if (!link)
+ dev_dbg(dev, "failed to create device link to %s\n",
dev_name(phy->dev.parent));
- return ERR_PTR(-EINVAL);
- }
return phy;
}
@@ -803,11 +801,9 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
}
link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
- if (!link) {
- dev_err(dev, "failed to create device link to %s\n",
+ if (!link)
+ dev_dbg(dev, "failed to create device link to %s\n",
dev_name(phy->dev.parent));
- return ERR_PTR(-EINVAL);
- }
return phy;
}
@@ -852,11 +848,9 @@ struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
devres_add(dev, ptr);
link = device_link_add(dev, &phy->dev, DL_FLAG_STATELESS);
- if (!link) {
- dev_err(dev, "failed to create device link to %s\n",
+ if (!link)
+ dev_dbg(dev, "failed to create device link to %s\n",
dev_name(phy->dev.parent));
- return ERR_PTR(-EINVAL);
- }
return phy;
}
diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c
index a28bd15297f5..1c536fc03c83 100644
--- a/drivers/phy/ti/phy-gmii-sel.c
+++ b/drivers/phy/ti/phy-gmii-sel.c
@@ -80,20 +80,20 @@ static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode)
break;
case PHY_INTERFACE_MODE_MII:
- mode = AM33XX_GMII_SEL_MODE_MII;
+ case PHY_INTERFACE_MODE_GMII:
+ gmii_sel_mode = AM33XX_GMII_SEL_MODE_MII;
break;
default:
- dev_warn(dev,
- "port%u: unsupported mode: \"%s\". Defaulting to MII.\n",
- if_phy->id, phy_modes(rgmii_id));
+ dev_warn(dev, "port%u: unsupported mode: \"%s\"\n",
+ if_phy->id, phy_modes(submode));
return -EINVAL;
}
if_phy->phy_if_mode = submode;
dev_dbg(dev, "%s id:%u mode:%u rgmii_id:%d rmii_clk_ext:%d\n",
- __func__, if_phy->id, mode, rgmii_id,
+ __func__, if_phy->id, submode, rgmii_id,
if_phy->rmii_clock_external);
regfield = if_phy->fields[PHY_GMII_SEL_PORT_MODE];
diff --git a/drivers/platform/chrome/wilco_ec/properties.c b/drivers/platform/chrome/wilco_ec/properties.c
index e69682c95ea2..62f27610dd33 100644
--- a/drivers/platform/chrome/wilco_ec/properties.c
+++ b/drivers/platform/chrome/wilco_ec/properties.c
@@ -5,7 +5,7 @@
#include <linux/platform_data/wilco-ec.h>
#include <linux/string.h>
-#include <linux/unaligned/le_memmove.h>
+#include <asm/unaligned.h>
/* Operation code; what the EC should do with the property */
enum ec_property_op {
diff --git a/drivers/regulator/stm32-vrefbuf.c b/drivers/regulator/stm32-vrefbuf.c
index bdfaf7edb75a..992bc18101ef 100644
--- a/drivers/regulator/stm32-vrefbuf.c
+++ b/drivers/regulator/stm32-vrefbuf.c
@@ -88,7 +88,7 @@ static int stm32_vrefbuf_disable(struct regulator_dev *rdev)
}
val = readl_relaxed(priv->base + STM32_VREFBUF_CSR);
- val = (val & ~STM32_ENVR) | STM32_HIZ;
+ val &= ~STM32_ENVR;
writel_relaxed(val, priv->base + STM32_VREFBUF_CSR);
pm_runtime_mark_last_busy(priv->dev);
@@ -175,6 +175,7 @@ static const struct regulator_desc stm32_vrefbuf_regu = {
.volt_table = stm32_vrefbuf_voltages,
.n_voltages = ARRAY_SIZE(stm32_vrefbuf_voltages),
.ops = &stm32_vrefbuf_volt_ops,
+ .off_on_delay = 1000,
.type = REGULATOR_VOLTAGE,
.owner = THIS_MODULE,
};
diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig
index 461b0e506a26..d9efbfd29646 100644
--- a/drivers/reset/Kconfig
+++ b/drivers/reset/Kconfig
@@ -51,6 +51,7 @@ config RESET_BRCMSTB
config RESET_BRCMSTB_RESCAL
bool "Broadcom STB RESCAL reset controller"
+ depends on HAS_IOMEM
default ARCH_BRCMSTB || COMPILE_TEST
help
This enables the RESCAL reset controller for SATA, PCIe0, or PCIe1 on
@@ -73,7 +74,7 @@ config RESET_IMX7
config RESET_INTEL_GW
bool "Intel Reset Controller Driver"
- depends on OF
+ depends on OF && HAS_IOMEM
select REGMAP_MMIO
help
This enables the reset controller driver for Intel Gateway SoCs.
diff --git a/drivers/s390/cio/blacklist.c b/drivers/s390/cio/blacklist.c
index da642e811f7f..4dd2eb634856 100644
--- a/drivers/s390/cio/blacklist.c
+++ b/drivers/s390/cio/blacklist.c
@@ -303,8 +303,10 @@ static void *
cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
{
struct ccwdev_iter *iter;
+ loff_t p = *offset;
- if (*offset >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
+ (*offset)++;
+ if (p >= (__MAX_SUBCHANNEL + 1) * (__MAX_SSID + 1))
return NULL;
iter = it;
if (iter->devno == __MAX_SUBCHANNEL) {
@@ -314,7 +316,6 @@ cio_ignore_proc_seq_next(struct seq_file *s, void *it, loff_t *offset)
return NULL;
} else
iter->devno++;
- (*offset)++;
return iter;
}
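
The reordering makes ->next() advance *offset before the end-of-range check, so the position still moves forward on the final call that returns NULL, which is the contract the seq_file core expects from iterators. A user-space mock of the pattern (hypothetical bound):

#include <stdio.h>
#include <stddef.h>

#define MAX_ITEMS 3

static void *it_next(void *it, long long *pos)
{
	long long p = (*pos)++;	/* advance first, like the patch */

	if (p >= MAX_ITEMS - 1)
		return NULL;
	return it;
}

int main(void)
{
	long long pos = 0;
	int dummy;
	void *it = &dummy;

	while ((it = it_next(it, &pos)))
		printf("pos now %lld\n", pos);
	printf("final pos %lld\n", pos);	/* advanced past the end */
	return 0;
}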
diff --git a/drivers/s390/cio/chp.c b/drivers/s390/cio/chp.c
index 51038ec309c1..dfcbe54591fb 100644
--- a/drivers/s390/cio/chp.c
+++ b/drivers/s390/cio/chp.c
@@ -135,7 +135,7 @@ static ssize_t chp_measurement_chars_read(struct file *filp,
struct channel_path *chp;
struct device *device;
- device = container_of(kobj, struct device, kobj);
+ device = kobj_to_dev(kobj);
chp = to_channelpath(device);
if (chp->cmg == -1)
return 0;
@@ -184,7 +184,7 @@ static ssize_t chp_measurement_read(struct file *filp, struct kobject *kobj,
struct device *device;
unsigned int size;
- device = container_of(kobj, struct device, kobj);
+ device = kobj_to_dev(kobj);
chp = to_channelpath(device);
css = to_css(chp->dev.parent);
diff --git a/drivers/s390/cio/qdio.h b/drivers/s390/cio/qdio.h
index 4b0798472643..ff74eb5fce50 100644
--- a/drivers/s390/cio/qdio.h
+++ b/drivers/s390/cio/qdio.h
@@ -182,11 +182,9 @@ enum qdio_queue_irq_states {
};
struct qdio_input_q {
- /* input buffer acknowledgement flag */
- int polling;
/* first ACK'ed buffer */
int ack_start;
- /* how much sbals are acknowledged with qebsm */
+ /* how many SBALs are acknowledged */
int ack_count;
/* last time of noticing incoming data */
u64 timestamp;
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 35410e6eda2e..9c0370b27426 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -124,9 +124,8 @@ static int qstat_show(struct seq_file *m, void *v)
seq_printf(m, "nr_used: %d ftc: %d\n",
atomic_read(&q->nr_buf_used), q->first_to_check);
if (q->is_input_q) {
- seq_printf(m, "polling: %d ack start: %d ack count: %d\n",
- q->u.in.polling, q->u.in.ack_start,
- q->u.in.ack_count);
+ seq_printf(m, "ack start: %d ack count: %d\n",
+ q->u.in.ack_start, q->u.in.ack_count);
seq_printf(m, "DSCI: %x IRQs disabled: %u\n",
*(u8 *)q->irq_ptr->dsci,
test_bit(QDIO_QUEUE_IRQS_DISABLED,
diff --git a/drivers/s390/cio/qdio_main.c b/drivers/s390/cio/qdio_main.c
index f8b897b7e78b..3475317c42e5 100644
--- a/drivers/s390/cio/qdio_main.c
+++ b/drivers/s390/cio/qdio_main.c
@@ -393,19 +393,15 @@ int debug_get_buf_state(struct qdio_q *q, unsigned int bufnr,
static inline void qdio_stop_polling(struct qdio_q *q)
{
- if (!q->u.in.polling)
+ if (!q->u.in.ack_count)
return;
- q->u.in.polling = 0;
qperf_inc(q, stop_polling);
/* show the card that we are not polling anymore */
- if (is_qebsm(q)) {
- set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
- q->u.in.ack_count);
- q->u.in.ack_count = 0;
- } else
- set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
+ set_buf_states(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT,
+ q->u.in.ack_count);
+ q->u.in.ack_count = 0;
}
static inline void account_sbals(struct qdio_q *q, unsigned int count)
@@ -451,8 +447,7 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start,
/* for QEBSM the ACK was already set by EQBS */
if (is_qebsm(q)) {
- if (!q->u.in.polling) {
- q->u.in.polling = 1;
+ if (!q->u.in.ack_count) {
q->u.in.ack_count = count;
q->u.in.ack_start = start;
return;
@@ -471,12 +466,12 @@ static inline void inbound_primed(struct qdio_q *q, unsigned int start,
* or by the next inbound run.
*/
new = add_buf(start, count - 1);
- if (q->u.in.polling) {
+ if (q->u.in.ack_count) {
/* reset the previous ACK but first set the new one */
set_buf_state(q, new, SLSB_P_INPUT_ACK);
set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
} else {
- q->u.in.polling = 1;
+ q->u.in.ack_count = 1;
set_buf_state(q, new, SLSB_P_INPUT_ACK);
}
@@ -1479,13 +1474,12 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
qperf_inc(q, inbound_call);
- if (!q->u.in.polling)
+ if (!q->u.in.ack_count)
goto set;
/* protect against stop polling setting an ACK for an emptied slsb */
if (count == QDIO_MAX_BUFFERS_PER_Q) {
/* overwriting everything, just delete polling status */
- q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
} else if (buf_in_between(q->u.in.ack_start, bufnr, count)) {
@@ -1495,15 +1489,14 @@ static int handle_inbound(struct qdio_q *q, unsigned int callflags,
diff = sub_buf(diff, q->u.in.ack_start);
q->u.in.ack_count -= diff;
if (q->u.in.ack_count <= 0) {
- q->u.in.polling = 0;
q->u.in.ack_count = 0;
goto set;
}
q->u.in.ack_start = add_buf(q->u.in.ack_start, diff);
+ } else {
+ /* the only ACK will be deleted */
+ q->u.in.ack_count = 0;
}
- else
- /* the only ACK will be deleted, so stop polling */
- q->u.in.polling = 0;
}
set:
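
The qdio rework retires the separate 'polling' flag: ack_count == 0 now means "not polling", and the QEBSM and non-QEBSM paths share one release routine. A tiny sketch of the collapsed state:

#include <stdio.h>

struct inq { int ack_start, ack_count; };

static void stop_polling(struct inq *q)
{
	if (!q->ack_count)	/* was: if (!q->polling) */
		return;
	/* release the ACKed buffers [ack_start, ack_start + ack_count) */
	q->ack_count = 0;
}

int main(void)
{
	struct inq q = { .ack_start = 5, .ack_count = 3 };

	stop_polling(&q);
	printf("ack_count=%d\n", q.ack_count);	/* 0 */
	return 0;
}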
diff --git a/drivers/s390/cio/qdio_setup.c b/drivers/s390/cio/qdio_setup.c
index dc430bd86ade..e115623b86b2 100644
--- a/drivers/s390/cio/qdio_setup.c
+++ b/drivers/s390/cio/qdio_setup.c
@@ -8,6 +8,7 @@
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/export.h>
+#include <linux/io.h>
#include <asm/qdio.h>
#include "cio.h"
@@ -205,7 +206,7 @@ static void setup_storage_lists(struct qdio_q *q, struct qdio_irq *irq_ptr,
/* fill in sl */
for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++)
- q->sl->element[j].sbal = (unsigned long)q->sbal[j];
+ q->sl->element[j].sbal = virt_to_phys(q->sbal[j]);
}
static void setup_queues(struct qdio_irq *irq_ptr,
@@ -536,7 +537,7 @@ void qdio_print_subchannel_info(struct qdio_irq *irq_ptr,
int qdio_enable_async_operation(struct qdio_output_q *outq)
{
outq->aobs = kcalloc(QDIO_MAX_BUFFERS_PER_Q, sizeof(struct qaob *),
- GFP_ATOMIC);
+ GFP_KERNEL);
if (!outq->aobs) {
outq->use_cq = 0;
return -ENOMEM;
diff --git a/drivers/s390/cio/vfio_ccw_trace.h b/drivers/s390/cio/vfio_ccw_trace.h
index 30162a318a8a..f5d31887d413 100644
--- a/drivers/s390/cio/vfio_ccw_trace.h
+++ b/drivers/s390/cio/vfio_ccw_trace.h
@@ -1,5 +1,5 @@
-/* SPDX-License-Identifier: GPL-2.0
- * Tracepoints for vfio_ccw driver
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Tracepoints for vfio_ccw driver
*
* Copyright IBM Corp. 2018
*
diff --git a/drivers/s390/crypto/ap_bus.h b/drivers/s390/crypto/ap_bus.h
index bb35ba4a8d24..4348fdff1c61 100644
--- a/drivers/s390/crypto/ap_bus.h
+++ b/drivers/s390/crypto/ap_bus.h
@@ -162,7 +162,7 @@ struct ap_card {
unsigned int functions; /* AP device function bitfield. */
int queue_depth; /* AP queue depth.*/
int id; /* AP card number. */
- atomic_t total_request_count; /* # requests ever for this AP device.*/
+ atomic64_t total_request_count; /* # requests ever for this AP device.*/
};
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
@@ -179,7 +179,7 @@ struct ap_queue {
enum ap_state state; /* State of the AP device. */
int pendingq_count; /* # requests on pendingq list. */
int requestq_count; /* # requests on requestq list. */
- int total_request_count; /* # requests ever for this AP device.*/
+ u64 total_request_count; /* # requests ever for this AP device.*/
int request_timeout; /* Request timeout in jiffies. */
struct timer_list timeout; /* Timer for request timeouts. */
struct list_head pendingq; /* List of message sent to AP queue. */
diff --git a/drivers/s390/crypto/ap_card.c b/drivers/s390/crypto/ap_card.c
index 63b4cc6cd7e5..e85bfca1ed16 100644
--- a/drivers/s390/crypto/ap_card.c
+++ b/drivers/s390/crypto/ap_card.c
@@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_card *ac = to_ap_card(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
req_cnt = 0;
spin_lock_bh(&ap_list_lock);
- req_cnt = atomic_read(&ac->total_request_count);
+ req_cnt = atomic64_read(&ac->total_request_count);
spin_unlock_bh(&ap_list_lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
for_each_ap_queue(aq, ac)
aq->total_request_count = 0;
spin_unlock_bh(&ap_list_lock);
- atomic_set(&ac->total_request_count, 0);
+ atomic64_set(&ac->total_request_count, 0);
return count;
}
diff --git a/drivers/s390/crypto/ap_queue.c b/drivers/s390/crypto/ap_queue.c
index 37c3bdc3642d..a317ab484932 100644
--- a/drivers/s390/crypto/ap_queue.c
+++ b/drivers/s390/crypto/ap_queue.c
@@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
char *buf)
{
struct ap_queue *aq = to_ap_queue(dev);
- unsigned int req_cnt;
+ u64 req_cnt;
spin_lock_bh(&aq->lock);
req_cnt = aq->total_request_count;
spin_unlock_bh(&aq->lock);
- return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
+ return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
}
static ssize_t request_count_store(struct device *dev,
@@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
list_add_tail(&ap_msg->list, &aq->requestq);
aq->requestq_count++;
aq->total_request_count++;
- atomic_inc(&aq->card->total_request_count);
+ atomic64_inc(&aq->card->total_request_count);
/* Send/receive as many request from the queue as possible. */
ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
spin_unlock_bh(&aq->lock);
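
The int/atomic_t to u64/atomic64_t conversion matters because a busy adapter can serve more than 2^32 requests, at which point a 32-bit counter silently wraps. A stand-alone illustration:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	uint32_t c32 = UINT32_MAX;	/* ~4.3e9 requests served */
	uint64_t c64 = UINT32_MAX;

	c32++;	/* wraps to 0: the count silently resets */
	c64++;	/* keeps counting */

	printf("32-bit: %" PRIu32 "\n", c32);
	printf("64-bit: %" PRIu64 "\n", c64);
	return 0;
}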
diff --git a/drivers/s390/crypto/pkey_api.c b/drivers/s390/crypto/pkey_api.c
index 71dae64ba994..2f33c5fcf676 100644
--- a/drivers/s390/crypto/pkey_api.c
+++ b/drivers/s390/crypto/pkey_api.c
@@ -994,7 +994,7 @@ static long pkey_unlocked_ioctl(struct file *filp, unsigned int cmd,
return -EFAULT;
rc = cca_sec2protkey(ksp.cardnr, ksp.domain,
ksp.seckey.seckey, ksp.protkey.protkey,
- NULL, &ksp.protkey.type);
+ &ksp.protkey.len, &ksp.protkey.type);
DEBUG_DBG("%s cca_sec2protkey()=%d\n", __func__, rc);
if (rc)
break;
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a42257d6c79e..56a405dce8bc 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -606,8 +606,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
weight += atomic_read(&zc->load);
pref_weight += atomic_read(&pref_zc->load);
if (weight == pref_weight)
- return atomic_read(&zc->card->total_request_count) >
- atomic_read(&pref_zc->card->total_request_count);
+ return atomic64_read(&zc->card->total_request_count) >
+ atomic64_read(&pref_zc->card->total_request_count);
return weight > pref_weight;
}
@@ -1226,11 +1226,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
spin_unlock(&zcrypt_list_lock);
}
-static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
+static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
{
struct zcrypt_card *zc;
struct zcrypt_queue *zq;
int card;
+ u64 cnt;
memset(reqcnt, 0, sizeof(int) * max_adapters);
spin_lock(&zcrypt_list_lock);
@@ -1242,8 +1243,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
|| card >= max_adapters)
continue;
spin_lock(&zq->queue->lock);
- reqcnt[card] = zq->queue->total_request_count;
+ cnt = zq->queue->total_request_count;
spin_unlock(&zq->queue->lock);
+ reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
}
}
local_bh_enable();
@@ -1421,9 +1423,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
return 0;
}
case ZCRYPT_PERDEV_REQCNT: {
- int *reqcnt;
+ u32 *reqcnt;
- reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
+ reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
if (!reqcnt)
return -ENOMEM;
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
@@ -1480,7 +1482,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
}
case Z90STAT_PERDEV_REQCNT: {
/* the old ioctl supports only 64 adapters */
- int reqcnt[MAX_ZDEV_CARDIDS];
+ u32 reqcnt[MAX_ZDEV_CARDIDS];
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
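
Since the ioctl ABI still exposes 32-bit counters to userspace, the 64-bit count is saturated at UINT_MAX instead of being truncated. The clamp in isolation:

#include <stdio.h>
#include <stdint.h>
#include <limits.h>

static uint32_t clamp_u64_to_u32(uint64_t cnt)
{
	return (cnt < UINT_MAX) ? (uint32_t)cnt : UINT_MAX;
}

int main(void)
{
	printf("%u\n", clamp_u64_to_u32(42));		/* 42 */
	printf("%u\n", clamp_u64_to_u32(1ULL << 40));	/* saturates */
	return 0;
}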
diff --git a/drivers/s390/crypto/zcrypt_ep11misc.c b/drivers/s390/crypto/zcrypt_ep11misc.c
index d4caf46ff9df..2afe2153b34e 100644
--- a/drivers/s390/crypto/zcrypt_ep11misc.c
+++ b/drivers/s390/crypto/zcrypt_ep11misc.c
@@ -887,7 +887,7 @@ static int ep11_unwrapkey(u16 card, u16 domain,
/* empty pin tag */
*p++ = 0x04;
*p++ = 0;
- /* encrytped key value tag and bytes */
+ /* encrypted key value tag and bytes */
p += asn1tag_write(p, 0x04, enckey, enckeysize);
/* reply cprb and payload */
@@ -1095,7 +1095,7 @@ int ep11_clr2keyblob(u16 card, u16 domain, u32 keybitsize, u32 keygenflags,
/* Step 1: generate AES 256 bit random kek key */
rc = ep11_genaeskey(card, domain, 256,
- 0x00006c00, /* EN/DECRYTP, WRAP/UNWRAP */
+ 0x00006c00, /* EN/DECRYPT, WRAP/UNWRAP */
kek, &keklen);
if (rc) {
DEBUG_ERR(
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 9639938581f5..8ca85c8a01a1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -1128,9 +1128,10 @@ static void qeth_clear_output_buffer(struct qeth_qdio_out_q *queue,
qeth_tx_complete_buf(buf, error, budget);
for (i = 0; i < queue->max_elements; ++i) {
- if (buf->buffer->element[i].addr && buf->is_header[i])
- kmem_cache_free(qeth_core_header_cache,
- buf->buffer->element[i].addr);
+ void *data = phys_to_virt(buf->buffer->element[i].addr);
+
+ if (data && buf->is_header[i])
+ kmem_cache_free(qeth_core_header_cache, data);
buf->is_header[i] = 0;
}
@@ -2641,7 +2642,8 @@ static int qeth_init_input_buffer(struct qeth_card *card,
buf->pool_entry = pool_entry;
for (i = 0; i < QETH_MAX_BUFFER_ELEMENTS(card); ++i) {
buf->buffer->element[i].length = PAGE_SIZE;
- buf->buffer->element[i].addr = pool_entry->elements[i];
+ buf->buffer->element[i].addr =
+ virt_to_phys(pool_entry->elements[i]);
if (i == QETH_MAX_BUFFER_ELEMENTS(card) - 1)
buf->buffer->element[i].eflags = SBAL_EFLAGS_LAST_ENTRY;
else
@@ -3459,9 +3461,8 @@ static void qeth_qdio_cq_handler(struct qeth_card *card, unsigned int qdio_err,
while ((e < QDIO_MAX_ELEMENTS_PER_BUFFER) &&
buffer->element[e].addr) {
- unsigned long phys_aob_addr;
+ unsigned long phys_aob_addr = buffer->element[e].addr;
- phys_aob_addr = (unsigned long) buffer->element[e].addr;
qeth_qdio_handle_aob(card, phys_aob_addr);
++e;
}
@@ -3750,7 +3751,7 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = data;
+ buffer->element[element].addr = virt_to_phys(data);
buffer->element[element].length = elem_length;
length -= elem_length;
if (is_first_elem) {
@@ -3780,7 +3781,7 @@ static unsigned int __qeth_fill_buffer(struct sk_buff *skb,
elem_length = min_t(unsigned int, length,
PAGE_SIZE - offset_in_page(data));
- buffer->element[element].addr = data;
+ buffer->element[element].addr = virt_to_phys(data);
buffer->element[element].length = elem_length;
buffer->element[element].eflags =
SBAL_EFLAGS_MIDDLE_FRAG;
@@ -3820,7 +3821,7 @@ static unsigned int qeth_fill_buffer(struct qeth_qdio_out_buffer *buf,
int element = buf->next_element_to_fill;
is_first_elem = false;
- buffer->element[element].addr = hdr;
+ buffer->element[element].addr = virt_to_phys(hdr);
buffer->element[element].length = hd_len;
buffer->element[element].eflags = SBAL_EFLAGS_FIRST_FRAG;
/* remember to free cache-allocated qeth_hdr: */
@@ -4746,10 +4747,10 @@ static void qeth_qdio_establish_cq(struct qeth_card *card,
if (card->options.cq == QETH_CQ_ENABLED) {
int offset = QDIO_MAX_BUFFERS_PER_Q *
(card->qdio.no_in_queues - 1);
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
- in_sbal_ptrs[offset + i] = (struct qdio_buffer *)
- virt_to_phys(card->qdio.c_q->bufs[i].buffer);
- }
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
+ in_sbal_ptrs[offset + i] =
+ card->qdio.c_q->bufs[i].buffer;
queue_start_poll[card->qdio.no_in_queues - 1] = NULL;
}
@@ -4783,10 +4784,9 @@ static int qeth_qdio_establish(struct qeth_card *card)
rc = -ENOMEM;
goto out_free_qib_param;
}
- for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; ++i) {
- in_sbal_ptrs[i] = (struct qdio_buffer *)
- virt_to_phys(card->qdio.in_q->bufs[i].buffer);
- }
+
+ for (i = 0; i < QDIO_MAX_BUFFERS_PER_Q; i++)
+ in_sbal_ptrs[i] = card->qdio.in_q->bufs[i].buffer;
queue_start_poll = kcalloc(card->qdio.no_in_queues, sizeof(void *),
GFP_KERNEL);
@@ -4807,11 +4807,11 @@ static int qeth_qdio_establish(struct qeth_card *card)
rc = -ENOMEM;
goto out_free_queue_start_poll;
}
+
for (i = 0, k = 0; i < card->qdio.no_out_queues; ++i)
- for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; ++j, ++k) {
- out_sbal_ptrs[k] = (struct qdio_buffer *)virt_to_phys(
- card->qdio.out_qs[i]->bufs[j]->buffer);
- }
+ for (j = 0; j < QDIO_MAX_BUFFERS_PER_Q; j++, k++)
+ out_sbal_ptrs[k] =
+ card->qdio.out_qs[i]->bufs[j]->buffer;
memset(&init_data, 0, sizeof(struct qdio_initialize));
init_data.cdev = CARD_DDEV(card);
@@ -5289,7 +5289,7 @@ next_packet:
offset = 0;
}
- hdr = element->addr + offset;
+ hdr = phys_to_virt(element->addr) + offset;
offset += sizeof(*hdr);
skb = NULL;
@@ -5344,7 +5344,7 @@ next_packet:
}
use_rx_sg = (card->options.cq == QETH_CQ_ENABLED) ||
- ((skb_len >= card->options.rx_sg_cb) &&
+ (skb_len > card->options.rx_sg_cb &&
!atomic_read(&card->force_alloc_skb) &&
!IS_OSN(card));
@@ -5388,7 +5388,7 @@ use_skb:
walk_packet:
while (skb_len) {
int data_len = min(skb_len, (int)(element->length - offset));
- char *data = element->addr + offset;
+ char *data = phys_to_virt(element->addr) + offset;
skb_len -= data_len;
offset += data_len;
@@ -5447,7 +5447,6 @@ static int qeth_extract_skbs(struct qeth_card *card, int budget,
{
int work_done = 0;
- WARN_ON_ONCE(!budget);
*done = false;
while (budget) {
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 692bd2623401..9972d96820f3 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -1707,15 +1707,14 @@ int qeth_l2_vnicc_set_state(struct qeth_card *card, u32 vnicc, bool state)
QETH_CARD_TEXT(card, 2, "vniccsch");
- /* do not change anything if BridgePort is enabled */
- if (qeth_bridgeport_is_in_use(card))
- return -EBUSY;
-
/* check if characteristic and enable/disable are supported */
if (!(card->options.vnicc.sup_chars & vnicc) ||
!(card->options.vnicc.set_char_sup & vnicc))
return -EOPNOTSUPP;
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
/* set enable/disable command and store wanted characteristic */
if (state) {
cmd = IPA_VNICC_ENABLE;
@@ -1761,14 +1760,13 @@ int qeth_l2_vnicc_get_state(struct qeth_card *card, u32 vnicc, bool *state)
QETH_CARD_TEXT(card, 2, "vniccgch");
- /* do not get anything if BridgePort is enabled */
- if (qeth_bridgeport_is_in_use(card))
- return -EBUSY;
-
/* check if characteristic is supported */
if (!(card->options.vnicc.sup_chars & vnicc))
return -EOPNOTSUPP;
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
/* if card is ready, query current VNICC state */
if (qeth_card_hw_is_reachable(card))
rc = qeth_l2_vnicc_query_chars(card);
@@ -1786,15 +1784,14 @@ int qeth_l2_vnicc_set_timeout(struct qeth_card *card, u32 timeout)
QETH_CARD_TEXT(card, 2, "vniccsto");
- /* do not change anything if BridgePort is enabled */
- if (qeth_bridgeport_is_in_use(card))
- return -EBUSY;
-
/* check if characteristic and set_timeout are supported */
if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
!(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
return -EOPNOTSUPP;
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
/* do we need to do anything? */
if (card->options.vnicc.learning_timeout == timeout)
return rc;
@@ -1823,14 +1820,14 @@ int qeth_l2_vnicc_get_timeout(struct qeth_card *card, u32 *timeout)
QETH_CARD_TEXT(card, 2, "vniccgto");
- /* do not get anything if BridgePort is enabled */
- if (qeth_bridgeport_is_in_use(card))
- return -EBUSY;
-
/* check if characteristic and get_timeout are supported */
if (!(card->options.vnicc.sup_chars & QETH_VNICC_LEARNING) ||
!(card->options.vnicc.getset_timeout_sup & QETH_VNICC_LEARNING))
return -EOPNOTSUPP;
+
+ if (qeth_bridgeport_is_in_use(card))
+ return -EBUSY;
+
/* if card is ready, get timeout. Otherwise, just return stored value */
*timeout = card->options.vnicc.learning_timeout;
if (qeth_card_hw_is_reachable(card))
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 223a805f0b0b..cae9b7ff79b0 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -2510,7 +2510,7 @@ void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
sbale = &sbal->element[idx];
- req_id = (unsigned long) sbale->addr;
+ req_id = sbale->addr;
fsf_req = zfcp_reqlist_find_rm(adapter->req_list, req_id);
if (!fsf_req) {
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index 2b1e4da1944f..4bfb79f20588 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -410,7 +410,7 @@ struct fsf_qtcb_bottom_port {
u8 cb_util;
u8 a_util;
u8 res2;
- u16 temperature;
+ s16 temperature;
u16 vcc;
u16 tx_bias;
u16 tx_power;
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index 661436a92f8e..f0d6296e673b 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -98,7 +98,7 @@ static void zfcp_qdio_int_resp(struct ccw_device *cdev, unsigned int qdio_err,
memset(pl, 0,
ZFCP_QDIO_MAX_SBALS_PER_REQ * sizeof(void *));
sbale = qdio->res_q[idx]->element;
- req_id = (u64) sbale->addr;
+ req_id = sbale->addr;
scount = min(sbale->scount + 1,
ZFCP_QDIO_MAX_SBALS_PER_REQ + 1);
/* incl. signaling SBAL */
@@ -199,7 +199,7 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
q_req->sbal_number);
return -EINVAL;
}
- sbale->addr = sg_virt(sg);
+ sbale->addr = sg_phys(sg);
sbale->length = sg->length;
}
return 0;
@@ -418,7 +418,7 @@ int zfcp_qdio_open(struct zfcp_qdio *qdio)
sbale->length = 0;
sbale->eflags = SBAL_EFLAGS_LAST_ENTRY;
sbale->sflags = 0;
- sbale->addr = NULL;
+ sbale->addr = 0;
}
if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0, QDIO_MAX_BUFFERS_PER_Q))
diff --git a/drivers/s390/scsi/zfcp_qdio.h b/drivers/s390/scsi/zfcp_qdio.h
index 2a816a37b3c0..6b43d6b254be 100644
--- a/drivers/s390/scsi/zfcp_qdio.h
+++ b/drivers/s390/scsi/zfcp_qdio.h
@@ -122,14 +122,14 @@ void zfcp_qdio_req_init(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
% QDIO_MAX_BUFFERS_PER_Q;
sbale = zfcp_qdio_sbale_req(qdio, q_req);
- sbale->addr = (void *) req_id;
+ sbale->addr = req_id;
sbale->eflags = 0;
sbale->sflags = SBAL_SFLAGS0_COMMAND | sbtype;
if (unlikely(!data))
return;
sbale++;
- sbale->addr = data;
+ sbale->addr = virt_to_phys(data);
sbale->length = len;
}
@@ -152,7 +152,7 @@ void zfcp_qdio_fill_next(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
BUG_ON(q_req->sbale_curr == qdio->max_sbale_per_sbal - 1);
q_req->sbale_curr++;
sbale = zfcp_qdio_sbale_curr(qdio, q_req);
- sbale->addr = data;
+ sbale->addr = virt_to_phys(data);
sbale->length = len;
}
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 494b9fe9cc94..a711a0d15100 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -800,7 +800,7 @@ static ZFCP_DEV_ATTR(adapter_diag, b2b_credit, 0400,
static ZFCP_DEV_ATTR(adapter_diag_sfp, _name, 0400, \
zfcp_sysfs_adapter_diag_sfp_##_name##_show, NULL)
-ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 5, "%hu");
+ZFCP_DEFINE_DIAG_SFP_ATTR(temperature, temperature, 6, "%hd");
ZFCP_DEFINE_DIAG_SFP_ATTR(vcc, vcc, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(tx_bias, tx_bias, 5, "%hu");
ZFCP_DEFINE_DIAG_SFP_ATTR(tx_power, tx_power, 5, "%hu");
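
The SFP fix pairs two changes: the temperature field becomes signed (s16) and the sysfs format becomes "%hd", with the buffer length bumped from 5 to 6 to leave room for the sign character. What "%hu" did to a sub-zero reading:

#include <stdio.h>

int main(void)
{
	short t = -3;	/* diagnostics report a sub-zero temperature */

	printf("as %%hu: %hu\n", (unsigned short)t);	/* 65533 */
	printf("as %%hd: %hd\n", t);			/* -3 */
	return 0;
}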
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 9c5f7c9178c6..2b865c6423e2 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -628,6 +628,8 @@ redisc:
}
out:
kref_put(&rdata->kref, fc_rport_destroy);
+ if (!IS_ERR(fp))
+ fc_frame_free(fp);
}
/**
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index f3b36fd0a0eb..b2ad96564484 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -623,7 +623,8 @@ retry_alloc:
fusion->io_request_frames =
dma_pool_alloc(fusion->io_request_frames_pool,
- GFP_KERNEL, &fusion->io_request_frames_phys);
+ GFP_KERNEL | __GFP_NOWARN,
+ &fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
if (instance->max_fw_cmds >= (MEGASAS_REDUCE_QD_COUNT * 2)) {
instance->max_fw_cmds -= MEGASAS_REDUCE_QD_COUNT;
@@ -661,7 +662,7 @@ retry_alloc:
fusion->io_request_frames =
dma_pool_alloc(fusion->io_request_frames_pool,
- GFP_KERNEL,
+ GFP_KERNEL | __GFP_NOWARN,
&fusion->io_request_frames_phys);
if (!fusion->io_request_frames) {
diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c
index e4282bce5834..f45c22b09726 100644
--- a/drivers/scsi/sd_zbc.c
+++ b/drivers/scsi/sd_zbc.c
@@ -161,6 +161,7 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
unsigned int nr_zones, report_zones_cb cb, void *data)
{
struct scsi_disk *sdkp = scsi_disk(disk);
+ sector_t capacity = logical_to_sectors(sdkp->device, sdkp->capacity);
unsigned int nr, i;
unsigned char *buf;
size_t offset, buflen = 0;
@@ -171,11 +172,15 @@ int sd_zbc_report_zones(struct gendisk *disk, sector_t sector,
/* Not a zoned device */
return -EOPNOTSUPP;
+ if (!capacity)
+ /* Device gone or invalid */
+ return -ENODEV;
+
buf = sd_zbc_alloc_report_buffer(sdkp, nr_zones, &buflen);
if (!buf)
return -ENOMEM;
- while (zone_idx < nr_zones && sector < get_capacity(disk)) {
+ while (zone_idx < nr_zones && sector < capacity) {
ret = sd_zbc_do_report_zones(sdkp, buf, buflen,
sectors_to_logical(sdkp->device, sector), true);
if (ret)
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 0fbb8fe6e521..e4240e4ae8bb 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -688,7 +688,7 @@ static const struct block_device_operations sr_bdops =
.release = sr_block_release,
.ioctl = sr_block_ioctl,
#ifdef CONFIG_COMPAT
- .ioctl = sr_block_compat_ioctl,
+ .compat_ioctl = sr_block_compat_ioctl,
#endif
.check_events = sr_block_check_events,
.revalidate_disk = sr_block_revalidate_disk,
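
The sr_bdops bug is a designated-initializer hazard: under CONFIG_COMPAT the second .ioctl entry silently overrode the first, leaving .compat_ioctl NULL. A minimal demo with a hypothetical struct (most compilers only warn with -Woverride-init):

#include <stdio.h>

struct ops {
	int (*ioctl)(void);
	int (*compat_ioctl)(void);
};

static int native(void) { return 1; }
static int compat(void) { return 2; }

static const struct ops broken = {
	.ioctl = native,
	.ioctl = compat,	/* overrides .ioctl; .compat_ioctl stays NULL */
};

int main(void)
{
	printf("ioctl=%d compat=%p\n", broken.ioctl(),
	       (void *)broken.compat_ioctl);
	return 0;
}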
diff --git a/drivers/soc/imx/soc-imx-scu.c b/drivers/soc/imx/soc-imx-scu.c
index fb70b8a3f7c5..20d37eaeb5f2 100644
--- a/drivers/soc/imx/soc-imx-scu.c
+++ b/drivers/soc/imx/soc-imx-scu.c
@@ -25,7 +25,7 @@ struct imx_sc_msg_misc_get_soc_id {
u32 id;
} resp;
} data;
-} __packed;
+} __packed __aligned(4);
struct imx_sc_msg_misc_get_soc_uid {
struct imx_sc_rpc_msg hdr;
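
Why __packed alone broke the SCU call: packing drops the struct alignment to 1, while the SCU IPC copies messages as aligned 4-byte words. __aligned(4) restores struct-level alignment without re-padding the members. A stand-alone check with a hypothetical layout:

#include <stdio.h>
#include <stdalign.h>

struct msg_packed {
	unsigned char hdr[3];
	unsigned int  id;
} __attribute__((packed));

struct msg_packed_aligned {
	unsigned char hdr[3];
	unsigned int  id;
} __attribute__((packed, aligned(4)));

int main(void)
{
	printf("packed:         size=%zu align=%zu\n",
	       sizeof(struct msg_packed), alignof(struct msg_packed));
	printf("packed+aligned: size=%zu align=%zu\n",
	       sizeof(struct msg_packed_aligned),
	       alignof(struct msg_packed_aligned));	/* size 8, align 4 */
	return 0;
}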
diff --git a/drivers/soc/tegra/fuse/fuse-tegra30.c b/drivers/soc/tegra/fuse/fuse-tegra30.c
index f68f4e1c215d..e6037f900fb7 100644
--- a/drivers/soc/tegra/fuse/fuse-tegra30.c
+++ b/drivers/soc/tegra/fuse/fuse-tegra30.c
@@ -36,7 +36,8 @@
defined(CONFIG_ARCH_TEGRA_124_SOC) || \
defined(CONFIG_ARCH_TEGRA_132_SOC) || \
defined(CONFIG_ARCH_TEGRA_210_SOC) || \
- defined(CONFIG_ARCH_TEGRA_186_SOC)
+ defined(CONFIG_ARCH_TEGRA_186_SOC) || \
+ defined(CONFIG_ARCH_TEGRA_194_SOC)
static u32 tegra30_fuse_read_early(struct tegra_fuse *fuse, unsigned int offset)
{
if (WARN_ON(!fuse->base))
diff --git a/drivers/spi/atmel-quadspi.c b/drivers/spi/atmel-quadspi.c
index fd8007ebb145..13def7f78b9e 100644
--- a/drivers/spi/atmel-quadspi.c
+++ b/drivers/spi/atmel-quadspi.c
@@ -149,6 +149,7 @@ struct atmel_qspi {
struct clk *qspick;
struct platform_device *pdev;
const struct atmel_qspi_caps *caps;
+ resource_size_t mmap_size;
u32 pending;
u32 mr;
u32 scr;
@@ -329,6 +330,14 @@ static int atmel_qspi_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
u32 sr, offset;
int err;
+ /*
+ * Check if the address exceeds the MMIO window size. An improvement
+ * would be to add support for regular SPI mode and fall back to it
+ * when the flash memories overrun the controller's memory space.
+ */
+ if (op->addr.val + op->data.nbytes > aq->mmap_size)
+ return -ENOTSUPP;
+
err = atmel_qspi_set_cfg(aq, op, &offset);
if (err)
return err;
@@ -480,6 +489,8 @@ static int atmel_qspi_probe(struct platform_device *pdev)
goto exit;
}
+ aq->mmap_size = resource_size(res);
+
/* Get the peripheral clock */
aq->pclk = devm_clk_get(&pdev->dev, "pclk");
if (IS_ERR(aq->pclk))
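
The guard added to atmel_qspi_exec_op() rejects operations whose end would fall outside the memory-mapped window. A stand-alone sketch (hypothetical helper), reordered so the arithmetic cannot wrap:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool op_fits(uint64_t addr, uint32_t nbytes, uint64_t mmap_size)
{
	/* written to avoid addr + nbytes overflowing */
	return nbytes <= mmap_size && addr <= mmap_size - nbytes;
}

int main(void)
{
	uint64_t win = 16 * 1024 * 1024;	/* 16 MiB window */

	printf("%d\n", op_fits(win - 16, 16, win));	/* 1: ends at edge */
	printf("%d\n", op_fits(win - 16, 17, win));	/* 0: overruns */
	return 0;
}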
diff --git a/drivers/spi/spi-bcm63xx-hsspi.c b/drivers/spi/spi-bcm63xx-hsspi.c
index 7327309ea3d5..6c235306c0e4 100644
--- a/drivers/spi/spi-bcm63xx-hsspi.c
+++ b/drivers/spi/spi-bcm63xx-hsspi.c
@@ -366,7 +366,6 @@ static int bcm63xx_hsspi_probe(struct platform_device *pdev)
goto out_disable_clk;
rate = clk_get_rate(pll_clk);
- clk_disable_unprepare(pll_clk);
if (!rate) {
ret = -EINVAL;
goto out_disable_pll_clk;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7e2292c11d12..e9e256718ef4 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -130,6 +130,7 @@ struct omap2_mcspi {
int fifo_depth;
bool slave_aborted;
unsigned int pin_dir:1;
+ size_t max_xfer_len;
};
struct omap2_mcspi_cs {
@@ -974,20 +975,12 @@ static int omap2_mcspi_setup_transfer(struct spi_device *spi,
* Note that we currently allow DMA only if we get a channel
* for both rx and tx. Otherwise we'll do PIO for both rx and tx.
*/
-static int omap2_mcspi_request_dma(struct spi_device *spi)
+static int omap2_mcspi_request_dma(struct omap2_mcspi *mcspi,
+ struct omap2_mcspi_dma *mcspi_dma)
{
- struct spi_master *master = spi->master;
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
int ret = 0;
- mcspi = spi_master_get_devdata(master);
- mcspi_dma = mcspi->dma_channels + spi->chip_select;
-
- init_completion(&mcspi_dma->dma_rx_completion);
- init_completion(&mcspi_dma->dma_tx_completion);
-
- mcspi_dma->dma_rx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_rx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_rx_ch_name);
if (IS_ERR(mcspi_dma->dma_rx)) {
ret = PTR_ERR(mcspi_dma->dma_rx);
@@ -995,7 +988,7 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
goto no_dma;
}
- mcspi_dma->dma_tx = dma_request_chan(&master->dev,
+ mcspi_dma->dma_tx = dma_request_chan(mcspi->dev,
mcspi_dma->dma_tx_ch_name);
if (IS_ERR(mcspi_dma->dma_tx)) {
ret = PTR_ERR(mcspi_dma->dma_tx);
@@ -1004,20 +997,40 @@ static int omap2_mcspi_request_dma(struct spi_device *spi)
mcspi_dma->dma_rx = NULL;
}
+ init_completion(&mcspi_dma->dma_rx_completion);
+ init_completion(&mcspi_dma->dma_tx_completion);
+
no_dma:
return ret;
}
+static void omap2_mcspi_release_dma(struct spi_master *master)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ struct omap2_mcspi_dma *mcspi_dma;
+ int i;
+
+ for (i = 0; i < master->num_chipselect; i++) {
+ mcspi_dma = &mcspi->dma_channels[i];
+
+ if (mcspi_dma->dma_rx) {
+ dma_release_channel(mcspi_dma->dma_rx);
+ mcspi_dma->dma_rx = NULL;
+ }
+ if (mcspi_dma->dma_tx) {
+ dma_release_channel(mcspi_dma->dma_tx);
+ mcspi_dma->dma_tx = NULL;
+ }
+ }
+}
+
static int omap2_mcspi_setup(struct spi_device *spi)
{
int ret;
struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
struct omap2_mcspi_regs *ctx = &mcspi->ctx;
- struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs = spi->controller_state;
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
if (!cs) {
cs = kzalloc(sizeof *cs, GFP_KERNEL);
if (!cs)
@@ -1042,13 +1055,6 @@ static int omap2_mcspi_setup(struct spi_device *spi)
}
}
- if (!mcspi_dma->dma_rx || !mcspi_dma->dma_tx) {
- ret = omap2_mcspi_request_dma(spi);
- if (ret)
- dev_warn(&spi->dev, "not using DMA for McSPI (%d)\n",
- ret);
- }
-
ret = pm_runtime_get_sync(mcspi->dev);
if (ret < 0) {
pm_runtime_put_noidle(mcspi->dev);
@@ -1065,12 +1071,8 @@ static int omap2_mcspi_setup(struct spi_device *spi)
static void omap2_mcspi_cleanup(struct spi_device *spi)
{
- struct omap2_mcspi *mcspi;
- struct omap2_mcspi_dma *mcspi_dma;
struct omap2_mcspi_cs *cs;
- mcspi = spi_master_get_devdata(spi->master);
-
if (spi->controller_state) {
/* Unlink controller state from context save list */
cs = spi->controller_state;
@@ -1079,19 +1081,6 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
kfree(cs);
}
- if (spi->chip_select < spi->master->num_chipselect) {
- mcspi_dma = &mcspi->dma_channels[spi->chip_select];
-
- if (mcspi_dma->dma_rx) {
- dma_release_channel(mcspi_dma->dma_rx);
- mcspi_dma->dma_rx = NULL;
- }
- if (mcspi_dma->dma_tx) {
- dma_release_channel(mcspi_dma->dma_tx);
- mcspi_dma->dma_tx = NULL;
- }
- }
-
if (gpio_is_valid(spi->cs_gpio))
gpio_free(spi->cs_gpio);
}
@@ -1302,9 +1291,24 @@ static bool omap2_mcspi_can_dma(struct spi_master *master,
if (spi_controller_is_slave(master))
return true;
+ master->dma_rx = mcspi_dma->dma_rx;
+ master->dma_tx = mcspi_dma->dma_tx;
+
return (xfer->len >= DMA_MIN_BYTES);
}
+static size_t omap2_mcspi_max_xfer_size(struct spi_device *spi)
+{
+ struct omap2_mcspi *mcspi = spi_master_get_devdata(spi->master);
+ struct omap2_mcspi_dma *mcspi_dma =
+ &mcspi->dma_channels[spi->chip_select];
+
+ if (mcspi->max_xfer_len && mcspi_dma->dma_rx)
+ return mcspi->max_xfer_len;
+
+ return SIZE_MAX;
+}
+
static int omap2_mcspi_controller_setup(struct omap2_mcspi *mcspi)
{
struct spi_master *master = mcspi->master;
@@ -1373,6 +1377,11 @@ static struct omap2_mcspi_platform_config omap4_pdata = {
.regs_offset = OMAP4_MCSPI_REG_OFFSET,
};
+static struct omap2_mcspi_platform_config am654_pdata = {
+ .regs_offset = OMAP4_MCSPI_REG_OFFSET,
+ .max_xfer_len = SZ_4K - 1,
+};
+
static const struct of_device_id omap_mcspi_of_match[] = {
{
.compatible = "ti,omap2-mcspi",
@@ -1382,6 +1391,10 @@ static const struct of_device_id omap_mcspi_of_match[] = {
.compatible = "ti,omap4-mcspi",
.data = &omap4_pdata,
},
+ {
+ .compatible = "ti,am654-mcspi",
+ .data = &am654_pdata,
+ },
{ },
};
MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
@@ -1439,6 +1452,10 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
mcspi->pin_dir = pdata->pin_dir;
}
regs_offset = pdata->regs_offset;
+ if (pdata->max_xfer_len) {
+ mcspi->max_xfer_len = pdata->max_xfer_len;
+ master->max_transfer_size = omap2_mcspi_max_xfer_size;
+ }
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
mcspi->base = devm_ioremap_resource(&pdev->dev, r);
@@ -1464,6 +1481,11 @@ static int omap2_mcspi_probe(struct platform_device *pdev)
for (i = 0; i < master->num_chipselect; i++) {
sprintf(mcspi->dma_channels[i].dma_rx_ch_name, "rx%d", i);
sprintf(mcspi->dma_channels[i].dma_tx_ch_name, "tx%d", i);
+
+ status = omap2_mcspi_request_dma(mcspi,
+ &mcspi->dma_channels[i]);
+ if (status == -EPROBE_DEFER)
+ goto free_master;
}
status = platform_get_irq(pdev, 0);
@@ -1501,6 +1523,7 @@ disable_pm:
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
+ omap2_mcspi_release_dma(master);
spi_master_put(master);
return status;
}
@@ -1510,6 +1533,8 @@ static int omap2_mcspi_remove(struct platform_device *pdev)
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ omap2_mcspi_release_dma(master);
+
pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
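The McSPI hunks above move DMA channel allocation out of ->setup() and into probe, where only -EPROBE_DEFER aborts the probe; any other failure simply means the transfer path falls back to PIO. A minimal sketch of that pattern, with hypothetical demo_* names rather than the actual McSPI code:

#include <linux/device.h>
#include <linux/dmaengine.h>

/* One optional DMA pair per chip select; NULL channels mean "use PIO". */
struct demo_dma {
        struct dma_chan *rx;
        struct dma_chan *tx;
};

static int demo_request_dma(struct device *dev, struct demo_dma *d,
                            const char *rx_name, const char *tx_name)
{
        int ret;

        d->rx = dma_request_chan(dev, rx_name);
        if (IS_ERR(d->rx)) {
                ret = PTR_ERR(d->rx);
                d->rx = NULL;
                return ret;
        }

        d->tx = dma_request_chan(dev, tx_name);
        if (IS_ERR(d->tx)) {
                ret = PTR_ERR(d->tx);
                dma_release_channel(d->rx);
                d->rx = NULL;
                d->tx = NULL;
                return ret;
        }
        return 0;
}

/*
 * Probe-side usage: only deferral is fatal, everything else is a
 * silent fallback to PIO.
 *
 *      ret = demo_request_dma(dev, d, "rx0", "tx0");
 *      if (ret == -EPROBE_DEFER)
 *              goto free_master;
 */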
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index 4c7a71f0fb3e..2e318158fca9 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -70,6 +70,10 @@ MODULE_ALIAS("platform:pxa2xx-spi");
#define LPSS_CAPS_CS_EN_SHIFT 9
#define LPSS_CAPS_CS_EN_MASK (0xf << LPSS_CAPS_CS_EN_SHIFT)
+#define LPSS_PRIV_CLOCK_GATE 0x38
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK 0x3
+#define LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON 0x3
+
struct lpss_config {
/* LPSS offset from drv_data->ioaddr */
unsigned offset;
@@ -86,6 +90,8 @@ struct lpss_config {
unsigned cs_sel_shift;
unsigned cs_sel_mask;
unsigned cs_num;
+ /* Quirks */
+ unsigned cs_clk_stays_gated : 1;
};
/* Keep these sorted with enum pxa_ssp_type */
@@ -156,6 +162,7 @@ static const struct lpss_config lpss_platforms[] = {
.tx_threshold_hi = 56,
.cs_sel_shift = 8,
.cs_sel_mask = 3 << 8,
+ .cs_clk_stays_gated = true,
},
};
@@ -383,6 +390,22 @@ static void lpss_ssp_cs_control(struct spi_device *spi, bool enable)
else
value |= LPSS_CS_CONTROL_CS_HIGH;
__lpss_ssp_write_priv(drv_data, config->reg_cs_ctrl, value);
+ if (config->cs_clk_stays_gated) {
+ u32 clkgate;
+
+ /*
+ * Changing CS alone when dynamic clock gating is on won't
+ * actually flip CS at that time. This ruins SPI transfers
+ * that specify delays, or have no data. Briefly force the
+ * clock on so that the CS change actually takes effect.
+ */
+ clkgate = __lpss_ssp_read_priv(drv_data, LPSS_PRIV_CLOCK_GATE);
+ value = (clkgate & ~LPSS_PRIV_CLOCK_GATE_CLK_CTL_MASK) |
+ LPSS_PRIV_CLOCK_GATE_CLK_CTL_FORCE_ON;
+
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, value);
+ __lpss_ssp_write_priv(drv_data, LPSS_PRIV_CLOCK_GATE, clkgate);
+ }
}
static void cs_assert(struct spi_device *spi)
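The quirk above saves the private clock-gate register, forces the clock on so the just-written CS value propagates, then restores the saved (gated) mode. A hedged sketch of that save/force/restore idiom, using generic MMIO accessors and demo register names in place of the driver's priv helpers:

#include <linux/io.h>

#define DEMO_CLOCK_GATE                 0x38
#define DEMO_CLK_CTL_MASK               0x3
#define DEMO_CLK_CTL_FORCE_ON           0x3

static void demo_kick_cs(void __iomem *priv)
{
        u32 saved = readl(priv + DEMO_CLOCK_GATE);

        /* Force the clock on; the pending CS change now takes effect. */
        writel((saved & ~DEMO_CLK_CTL_MASK) | DEMO_CLK_CTL_FORCE_ON,
               priv + DEMO_CLOCK_GATE);
        /* Restore whatever gating mode was configured before. */
        writel(saved, priv + DEMO_CLOCK_GATE);
}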
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index dd3434a407ea..a364b99497e2 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -1217,6 +1217,11 @@ static int spi_qup_suspend(struct device *device)
struct spi_qup *controller = spi_master_get_devdata(master);
int ret;
+ if (pm_runtime_suspended(device)) {
+ ret = spi_qup_pm_resume_runtime(device);
+ if (ret)
+ return ret;
+ }
ret = spi_master_suspend(master);
if (ret)
return ret;
@@ -1225,10 +1230,8 @@ static int spi_qup_suspend(struct device *device)
if (ret)
return ret;
- if (!pm_runtime_suspended(device)) {
- clk_disable_unprepare(controller->cclk);
- clk_disable_unprepare(controller->iclk);
- }
+ clk_disable_unprepare(controller->cclk);
+ clk_disable_unprepare(controller->iclk);
return 0;
}
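The spi-qup change inverts the old logic: instead of conditionally skipping the clock disables when the device was runtime-suspended, system suspend now runtime-resumes the hardware first, so the disables at the end are always balanced. A sketch of that shape (demo names; pm_runtime_force_resume() stands in for the driver's own resume helper):

#include <linux/clk.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

struct demo_ctrl {
        struct clk *cclk;       /* core clock */
        struct clk *iclk;       /* interface clock */
};

static int demo_suspend(struct device *dev)
{
        struct spi_master *master = dev_get_drvdata(dev);
        struct demo_ctrl *ctrl = spi_master_get_devdata(master);
        int ret;

        /* If runtime PM already gated the hardware, bring it back up. */
        if (pm_runtime_suspended(dev)) {
                ret = pm_runtime_force_resume(dev);
                if (ret)
                        return ret;
        }

        ret = spi_master_suspend(master);
        if (ret)
                return ret;

        /* Clocks are known to be on here, so this stays balanced. */
        clk_disable_unprepare(ctrl->cclk);
        clk_disable_unprepare(ctrl->iclk);
        return 0;
}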
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 60c4de4e4485..7412a3042a8d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -401,9 +401,6 @@ static void zynqmp_qspi_chipselect(struct spi_device *qspi, bool is_high)
zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, genfifoentry);
- /* Dummy generic FIFO entry */
- zynqmp_gqspi_write(xqspi, GQSPI_GEN_FIFO_OFST, 0x0);
-
/* Manually start the generic FIFO command */
zynqmp_gqspi_write(xqspi, GQSPI_CONFIG_OFST,
zynqmp_gqspi_read(xqspi, GQSPI_CONFIG_OFST) |
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index 38b4c78df506..755221bc3745 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -2639,7 +2639,7 @@ int spi_register_controller(struct spi_controller *ctlr)
if (ctlr->use_gpio_descriptors) {
status = spi_get_gpio_descs(ctlr);
if (status)
- return status;
+ goto free_bus_id;
/*
* A controller using GPIO descriptors always
* supports SPI_CS_HIGH if need be.
@@ -2649,7 +2649,7 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Legacy code path for GPIOs from DT */
status = of_spi_get_gpio_numbers(ctlr);
if (status)
- return status;
+ goto free_bus_id;
}
}
@@ -2657,17 +2657,14 @@ int spi_register_controller(struct spi_controller *ctlr)
* Even if it's just one always-selected device, there must
* be at least one chipselect.
*/
- if (!ctlr->num_chipselect)
- return -EINVAL;
+ if (!ctlr->num_chipselect) {
+ status = -EINVAL;
+ goto free_bus_id;
+ }
status = device_add(&ctlr->dev);
- if (status < 0) {
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
- }
+ if (status < 0)
+ goto free_bus_id;
dev_dbg(dev, "registered %s %s\n",
spi_controller_is_slave(ctlr) ? "slave" : "master",
dev_name(&ctlr->dev));
@@ -2683,11 +2680,7 @@ int spi_register_controller(struct spi_controller *ctlr)
status = spi_controller_initialize_queue(ctlr);
if (status) {
device_del(&ctlr->dev);
- /* free bus id */
- mutex_lock(&board_lock);
- idr_remove(&spi_master_idr, ctlr->bus_num);
- mutex_unlock(&board_lock);
- goto done;
+ goto free_bus_id;
}
}
/* add statistics */
@@ -2702,7 +2695,12 @@ int spi_register_controller(struct spi_controller *ctlr)
/* Register devices from the device tree and ACPI */
of_register_spi_devices(ctlr);
acpi_register_spi_devices(ctlr);
-done:
+ return status;
+
+free_bus_id:
+ mutex_lock(&board_lock);
+ idr_remove(&spi_master_idr, ctlr->bus_num);
+ mutex_unlock(&board_lock);
return status;
}
EXPORT_SYMBOL_GPL(spi_register_controller);
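The spi.c rework collapses three copies of the "free bus id" cleanup into a single free_bus_id label that every failing path jumps to, and the success path now returns directly instead of falling through a shared done label. The general unwind idiom, sketched with hypothetical names:

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(demo_lock);
static DEFINE_IDR(demo_idr);

struct demo_ctlr { struct device dev; int id; };

static int demo_validate(struct demo_ctlr *c)
{
        return c ? 0 : -EINVAL;         /* hypothetical check */
}

static int demo_register(struct demo_ctlr *ctlr)
{
        int status;

        mutex_lock(&demo_lock);
        ctlr->id = idr_alloc(&demo_idr, ctlr, 0, 0, GFP_KERNEL);
        mutex_unlock(&demo_lock);
        if (ctlr->id < 0)
                return ctlr->id;

        status = demo_validate(ctlr);
        if (status)
                goto free_id;

        status = device_add(&ctlr->dev);
        if (status < 0)
                goto free_id;

        return 0;

free_id:        /* every failure after idr_alloc() unwinds here */
        mutex_lock(&demo_lock);
        idr_remove(&demo_idr, ctlr->id);
        mutex_unlock(&demo_lock);
        return status;
}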
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index 1e217e3e9486..2ab6e782f14c 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -396,6 +396,7 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
else
retval = get_user(tmp, (u32 __user *)arg);
if (retval == 0) {
+ struct spi_controller *ctlr = spi->controller;
u32 save = spi->mode;
if (tmp & ~SPI_MODE_MASK) {
@@ -403,6 +404,10 @@ spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
+ if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
+ ctlr->cs_gpiods[spi->chip_select])
+ tmp |= SPI_CS_HIGH;
+
tmp |= spi->mode & ~SPI_MODE_MASK;
spi->mode = (u16)tmp;
retval = spi_setup(spi);
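When the controller uses GPIO descriptors the core handles CS polarity itself and expects SPI_CS_HIGH to stay set in spi->mode, so a mode written from userspace must have the bit forced back in before spi_setup() runs; that is what the hunk above does. From userspace the round-trip looks like this (hypothetical device node):

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/spi/spidev.h>

int demo_set_mode(void)
{
        uint8_t mode = SPI_MODE_0;
        int fd = open("/dev/spidev0.0", O_RDWR);    /* hypothetical node */

        if (fd < 0)
                return -1;
        /* The kernel may OR SPI_CS_HIGH back in before spi_setup()... */
        if (ioctl(fd, SPI_IOC_WR_MODE, &mode) < 0)
                return -1;
        /* ...which a read-back makes visible to the application. */
        if (ioctl(fd, SPI_IOC_RD_MODE, &mode) < 0)
                return -1;
        return mode;
}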
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index 97acc2ba2912..de844b412110 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -731,6 +731,7 @@ static int qpnpint_irq_domain_translate(struct irq_domain *d,
return 0;
}
+static struct lock_class_key qpnpint_irq_lock_class, qpnpint_irq_request_class;
static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
struct irq_domain *domain, unsigned int virq,
@@ -746,6 +747,9 @@ static void qpnpint_irq_domain_map(struct spmi_pmic_arb *pmic_arb,
else
handler = handle_level_irq;
+
+ irq_set_lockdep_class(virq, &qpnpint_irq_lock_class,
+ &qpnpint_irq_request_class);
irq_domain_set_info(domain, virq, hwirq, &pmic_arb_irqchip, pmic_arb,
handler, NULL, NULL);
}
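Nested IRQ setups like this one, where the PMIC's interrupts are demultiplexed behind a parent interrupt, make lockdep see the same per-IRQ lock class twice and report false recursion; registering a dedicated pair of lock_class_keys for the domain, as above, gives those descriptors their own class. Sketch of the pattern in a domain map path (demo names):

#include <linux/irq.h>
#include <linux/irqdomain.h>

static struct irq_chip demo_irq_chip = { .name = "demo" };

/* Static so lockdep can use their addresses as class identity. */
static struct lock_class_key demo_irq_lock_class, demo_irq_request_class;

static int demo_irq_domain_map(struct irq_domain *d, unsigned int virq,
                               irq_hw_number_t hwirq)
{
        irq_set_lockdep_class(virq, &demo_irq_lock_class,
                              &demo_irq_request_class);
        irq_set_chip_and_handler(virq, &demo_irq_chip, handle_level_irq);
        return 0;
}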
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index d6d605d5cbde..8d8fd5c29349 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,14 +14,6 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
-config ANDROID_VSOC
- tristate "Android Virtual SoC support"
- depends on PCI_MSI
- help
- This option adds support for the Virtual SoC driver needed to boot
- a 'cuttlefish' Android image inside QEmu. The driver interacts with
- a QEmu ivshmem device. If built as a module, it will be called vsoc.
-
source "drivers/staging/android/ion/Kconfig"
endif # if ANDROID
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index 14bd9c6ce10d..3b66cd0b0ec5 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -4,4 +4,3 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_VSOC) += vsoc.o
diff --git a/drivers/staging/android/TODO b/drivers/staging/android/TODO
index 767dd98fd92d..80eccfaf6db5 100644
--- a/drivers/staging/android/TODO
+++ b/drivers/staging/android/TODO
@@ -9,14 +9,5 @@ ion/
- Split /dev/ion up into multiple nodes (e.g. /dev/ion/heap0)
- Better test framework (integration with VGEM was suggested)
-vsoc.c, uapi/vsoc_shm.h
- - The current driver uses the same wait queue for all of the futexes in a
- region. This will cause false wakeups in regions with a large number of
- waiting threads. We should eventually use multiple queues and select the
- queue based on the region.
- - Add debugfs support for examining the permissions of regions.
- - Remove VSOC_WAIT_FOR_INCOMING_INTERRUPT ioctl. This functionality has been
- superseded by the futex and is there for legacy reasons.
-
Please send patches to Greg Kroah-Hartman <[email protected]> and Cc:
Arve Hjønnevåg <[email protected]> and Riley Andrews <[email protected]>
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 5891d0744a76..8044510d8ec6 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -351,8 +351,23 @@ static inline vm_flags_t calc_vm_may_flags(unsigned long prot)
_calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);
}
+static int ashmem_vmfile_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ /* do not allow direct mmap of the ashmem backing shmem file */
+ return -EPERM;
+}
+
+static unsigned long
+ashmem_vmfile_get_unmapped_area(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return current->mm->get_unmapped_area(file, addr, len, pgoff, flags);
+}
+
static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
{
+ static struct file_operations vmfile_fops;
struct ashmem_area *asma = file->private_data;
int ret = 0;
@@ -393,6 +408,19 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
}
vmfile->f_mode |= FMODE_LSEEK;
asma->file = vmfile;
+ /*
+ * Override the vmfile's mmap operation so that it can't be
+ * remapped, which would create a new vma with no asma
+ * permission checks. get_unmapped_area has to be overridden
+ * as well to prevent the VM_BUG_ON check on f_op modification.
+ */
+ if (!vmfile_fops.mmap) {
+ vmfile_fops = *vmfile->f_op;
+ vmfile_fops.mmap = ashmem_vmfile_mmap;
+ vmfile_fops.get_unmapped_area =
+ ashmem_vmfile_get_unmapped_area;
+ }
+ vmfile->f_op = &vmfile_fops;
}
get_file(asma->file);
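The ashmem fix makes a one-time copy of the backing shmem file's file_operations, overrides only the entry points that must be intercepted (mmap to forbid direct mapping, get_unmapped_area to satisfy the mm code's f_op consistency check), and points f_op at the copy. The shadow-fops idiom, sketched generically:

#include <linux/fs.h>

static int demo_blocked_mmap(struct file *file, struct vm_area_struct *vma)
{
        return -EPERM;  /* direct mmap of the backing file is forbidden */
}

static void demo_shadow_fops(struct file *backing)
{
        /* Copied from the first backing file seen, then reused. */
        static struct file_operations shadow;

        if (!shadow.mmap) {
                shadow = *backing->f_op;
                shadow.mmap = demo_blocked_mmap;
        }
        backing->f_op = &shadow;
}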
diff --git a/drivers/staging/android/uapi/vsoc_shm.h b/drivers/staging/android/uapi/vsoc_shm.h
deleted file mode 100644
index 6291fb24efb2..000000000000
--- a/drivers/staging/android/uapi/vsoc_shm.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2017 Google, Inc.
- *
- */
-
-#ifndef _UAPI_LINUX_VSOC_SHM_H
-#define _UAPI_LINUX_VSOC_SHM_H
-
-#include <linux/types.h>
-
-/**
- * A permission is a token that permits a receiver to read and/or write an area
- * of memory within a Vsoc region.
- *
- * An fd_scoped permission grants both read and write access, and can be
- * attached to a file description (see open(2)).
- * Ownership of the area can then be shared by passing a file descriptor
- * among processes.
- *
- * begin_offset and end_offset define the area of memory that is controlled by
- * the permission. owner_offset points to a word, also in shared memory, that
- * controls ownership of the area.
- *
- * ownership of the region expires when the associated file description is
- * released.
- *
- * At most one permission can be attached to each file description.
- *
- * This is useful when implementing HALs like gralloc that scope and pass
- * ownership of shared resources via file descriptors.
- *
- * The caller is responsible for doing any fencing.
- *
- * The calling process will normally identify a currently free area of
- * memory. It will construct a proposed fd_scoped_permission_arg structure:
- *
- * begin_offset and end_offset describe the area being claimed
- *
- * owner_offset points to the location in shared memory that indicates the
- * owner of the area.
- *
- * owned_value is the value that will be stored in owner_offset iff the
- * permission can be granted. It must be different than VSOC_REGION_FREE.
- *
- * Two fd_scoped_permission structures are compatible if they vary only by
- * their owned_value fields.
- *
- * The driver ensures that, for any group of simultaneous callers proposing
- * compatible fd_scoped_permissions, it will accept exactly one of the
- * proposals. The other callers will get a failure with errno of EAGAIN.
- *
- * A process receiving a file descriptor can identify the region being
- * granted using the VSOC_GET_FD_SCOPED_PERMISSION ioctl.
- */
-struct fd_scoped_permission {
- __u32 begin_offset;
- __u32 end_offset;
- __u32 owner_offset;
- __u32 owned_value;
-};
-
-/*
- * This value represents a free area of memory. The driver expects to see this
- * value at owner_offset when creating a permission; otherwise the permission
- * is refused. The driver writes this value back once the permission is no
- * longer needed.
- */
-#define VSOC_REGION_FREE ((__u32)0)
-
-/**
- * ioctl argument for VSOC_CREATE_FD_SCOPE_PERMISSION
- */
-struct fd_scoped_permission_arg {
- struct fd_scoped_permission perm;
- __s32 managed_region_fd;
-};
-
-#define VSOC_NODE_FREE ((__u32)0)
-
-/*
- * Describes a signal table in shared memory. Each non-zero entry in the
- * table indicates that the receiver should signal the futex at the given
- * offset. Offsets are relative to the region, not the shared memory window.
- *
- * interrupt_signalled_offset is used to reliably signal interrupts across the
- * vmm boundary. There are two roles: transmitter and receiver. For example,
- * in the host_to_guest_signal_table the host is the transmitter and the
- * guest is the receiver. The protocol is as follows:
- *
- * 1. The transmitter should convert the offset of the futex to an offset
- * in the signal table [0, (1 << num_nodes_lg2))
- * The transmitter can choose any appropriate hashing algorithm, including
- * hash = futex_offset & ((1 << num_nodes_lg2) - 1)
- *
- * 2. The transmitter should atomically compare and swap futex_offset with 0
- * at hash. There are 3 possible outcomes
- * a. The swap fails because the futex_offset is already in the table.
- * The transmitter should stop.
- * b. Some other offset is in the table. This is a hash collision. The
- * transmitter should move to another table slot and try again. One
- * possible algorithm:
- * hash = (hash + 1) & ((1 << num_nodes_lg2) - 1)
- * c. The swap worked. Continue below.
- *
- * 3. The transmitter atomically swaps 1 with the value at the
- * interrupt_signalled_offset. There are two outcomes:
- * a. The prior value was 1. In this case an interrupt has already been
- * posted. The transmitter is done.
- * b. The prior value was 0, indicating that the receiver may be sleeping.
- * The transmitter will issue an interrupt.
- *
- * 4. On waking the receiver immediately exchanges a 0 with the
- * interrupt_signalled_offset. If it receives a 0 then this is a spurious
- * interrupt. That may occasionally happen in the current protocol, but
- * should be rare.
- *
- * 5. The receiver scans the signal table by atomically exchanging 0 at each
- * location. If a non-zero offset is returned from the exchange the
- * receiver wakes all sleepers at the given offset:
- * futex((int*)(region_base + old_value), FUTEX_WAKE, MAX_INT);
- *
- * 6. The receiver thread then does a conditional wait, waking immediately
- * if the value at interrupt_signalled_offset is non-zero. This catches cases
- * where additional signals were posted while the table was being scanned.
- * On the guest the wait is handled via the VSOC_WAIT_FOR_INCOMING_INTERRUPT
- * ioctl.
- */
-struct vsoc_signal_table_layout {
- /* log_2(Number of signal table entries) */
- __u32 num_nodes_lg2;
- /*
- * Offset to the first signal table entry relative to the start of the
- * region
- */
- __u32 futex_uaddr_table_offset;
- /*
- * Offset to an atomic_t / atomic uint32_t. A non-zero value indicates
- * that one or more offsets are currently posted in the table.
- * semi-unique access to an entry in the table
- */
- __u32 interrupt_signalled_offset;
-};
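The transmitter half of the protocol described above reduces to: hash the futex offset into a slot, claim the slot with a compare-and-swap (advancing on collisions), then set the single "interrupt posted" flag with an exchange and ring the doorbell only on a 0 -> 1 transition. A userspace-flavoured sketch with C11 atomics standing in for the shared-memory atomics (names hypothetical):

#include <stdatomic.h>
#include <stdint.h>

/* Returns 1 if the caller should raise the doorbell interrupt. */
static int demo_post_signal(_Atomic uint32_t *table, unsigned int lg2_nodes,
                            _Atomic uint32_t *signalled, uint32_t futex_off)
{
        uint32_t mask = (1u << lg2_nodes) - 1;
        uint32_t hash = futex_off & mask;
        uint32_t i;

        for (i = 0; i <= mask; i++, hash = (hash + 1) & mask) {
                uint32_t expected = 0;

                if (atomic_compare_exchange_strong(&table[hash], &expected,
                                                   futex_off))
                        break;          /* slot claimed */
                if (expected == futex_off)
                        return 0;       /* already posted; nothing to do */
        }

        /* Interrupt only on the 0 -> 1 transition of the posted flag. */
        return atomic_exchange(signalled, 1) == 0;
}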
-
-#define VSOC_REGION_WHOLE ((__s32)0)
-#define VSOC_DEVICE_NAME_SZ 16
-
-/**
- * Each HAL would (usually) talk to a single device region
- * Multiple entities care about these regions:
- * - The ivshmem_server will populate the regions in shared memory
- * - The guest kernel will read the region, create minor device nodes, and
- * allow interested parties to register for FUTEX_WAKE events in the region
- * - HALs will access via the minor device nodes published by the guest kernel
- * - Host side processes will access the region via the ivshmem_server:
- * 1. Pass name to ivshmem_server at a UNIX socket
- * 2. ivshmem_server will reply with:
- * - host->guest doorbell fd
- * - guest->host doorbell fd
- * - fd for the shared memory region
- * - region offset
- * 3. Start a futex receiver thread on the doorbell fd pointed at the
- * signal_nodes
- */
-struct vsoc_device_region {
- __u16 current_version;
- __u16 min_compatible_version;
- __u32 region_begin_offset;
- __u32 region_end_offset;
- __u32 offset_of_region_data;
- struct vsoc_signal_table_layout guest_to_host_signal_table;
- struct vsoc_signal_table_layout host_to_guest_signal_table;
- /* Name of the device. Must always be terminated with a '\0', so
- * the longest supported device name is 15 characters.
- */
- char device_name[VSOC_DEVICE_NAME_SZ];
- /* There are two ways that permissions to access regions are handled:
- * - When managed_by is VSOC_REGION_WHOLE, any process that can
- * open the device node for the region gains complete access to it.
- * - When managed_by is set, processes that open the region cannot
- * access it. Access to a sub-region must be established by invoking
- * the VSOC_CREATE_FD_SCOPED_PERMISSION ioctl on the region
- * referenced in managed_by, providing a file instance
- * (represented by a fd) opened on this region.
- */
- __u32 managed_by;
-};
-
-/*
- * The vsoc layout descriptor.
- * The first 4K should be reserved for the shm header and region descriptors.
- * The regions should be page aligned.
- */
-
-struct vsoc_shm_layout_descriptor {
- __u16 major_version;
- __u16 minor_version;
-
- /* size of the shm. This may be redundant but nice to have */
- __u32 size;
-
- /* number of shared memory regions */
- __u32 region_count;
-
- /* The offset to the start of region descriptors */
- __u32 vsoc_region_desc_offset;
-};
-
-/*
- * This specifies the current version that should be stored in
- * vsoc_shm_layout_descriptor.major_version and
- * vsoc_shm_layout_descriptor.minor_version.
- * It should be updated only if the vsoc_device_region and
- * vsoc_shm_layout_descriptor structures have changed.
- * Versioning within each region is transferred
- * via the min_compatible_version and current_version fields in
- * vsoc_device_region. The driver does not consult these fields: they are left
- * for the HALs and host processes and will change independently of the layout
- * version.
- */
-#define CURRENT_VSOC_LAYOUT_MAJOR_VERSION 2
-#define CURRENT_VSOC_LAYOUT_MINOR_VERSION 0
-
-#define VSOC_CREATE_FD_SCOPED_PERMISSION \
- _IOW(0xF5, 0, struct fd_scoped_permission)
-#define VSOC_GET_FD_SCOPED_PERMISSION _IOR(0xF5, 1, struct fd_scoped_permission)
-
-/*
- * This is used to signal the host to scan the guest_to_host_signal_table
- * for new futexes to wake. This sends an interrupt if one is not already
- * in flight.
- */
-#define VSOC_MAYBE_SEND_INTERRUPT_TO_HOST _IO(0xF5, 2)
-
-/*
- * When this returns the guest will scan host_to_guest_signal_table to
- * check for new futexes to wake.
- */
-/* TODO(ghartman): Consider moving this to the bottom half */
-#define VSOC_WAIT_FOR_INCOMING_INTERRUPT _IO(0xF5, 3)
-
-/*
- * Guest HALs will use this to retrieve the region description after
- * opening their device node.
- */
-#define VSOC_DESCRIBE_REGION _IOR(0xF5, 4, struct vsoc_device_region)
-
-/*
- * Wake any threads that may be waiting for a host interrupt on this region.
- * This is mostly used during shutdown.
- */
-#define VSOC_SELF_INTERRUPT _IO(0xF5, 5)
-
-/*
- * This is used to signal the host to scan the guest_to_host_signal_table
- * for new futexes to wake. This sends an interrupt unconditionally.
- */
-#define VSOC_SEND_INTERRUPT_TO_HOST _IO(0xF5, 6)
-
-enum wait_types {
- VSOC_WAIT_UNDEFINED = 0,
- VSOC_WAIT_IF_EQUAL = 1,
- VSOC_WAIT_IF_EQUAL_TIMEOUT = 2
-};
-
-/*
- * Wait for a condition to be true
- *
- * Note, this is sized and aligned so the 32 bit and 64 bit layouts are
- * identical.
- */
-struct vsoc_cond_wait {
- /* Input: Offset of the 32 bit word to check */
- __u32 offset;
- /* Input: Value that will be compared with the offset */
- __u32 value;
- /* Input: Monotonic time to wake at in seconds */
- __u64 wake_time_sec;
- /* Input: Monotonic time to wait in nanoseconds */
- __u32 wake_time_nsec;
- /* Input: Type of wait */
- __u32 wait_type;
- /* Output: Number of times the thread woke before returning. */
- __u32 wakes;
- /* Ensure that we're 8-byte aligned and 8 byte length for 32/64 bit
- * compatibility.
- */
- __u32 reserved_1;
-};
-
-#define VSOC_COND_WAIT _IOWR(0xF5, 7, struct vsoc_cond_wait)
-
-/* Wake any local threads waiting at the offset given in arg */
-#define VSOC_COND_WAKE _IO(0xF5, 8)
-
-#endif /* _UAPI_LINUX_VSOC_SHM_H */
diff --git a/drivers/staging/android/vsoc.c b/drivers/staging/android/vsoc.c
deleted file mode 100644
index 1240bb0317d9..000000000000
--- a/drivers/staging/android/vsoc.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * drivers/android/staging/vsoc.c
- *
- * Android Virtual System on a Chip (VSoC) driver
- *
- * Copyright (C) 2017 Google, Inc.
- *
- * Author: [email protected]
- *
- * Based on drivers/char/kvm_ivshmem.c - driver for KVM Inter-VM shared memory
- * Copyright 2009 Cam Macdonell <[email protected]>
- *
- * Based on cirrusfb.c and 8139cp.c:
- * Copyright 1999-2001 Jeff Garzik
- * Copyright 2001-2004 Jeff Garzik
- */
-
-#include <linux/dma-mapping.h>
-#include <linux/freezer.h>
-#include <linux/futex.h>
-#include <linux/init.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/pci.h>
-#include <linux/proc_fs.h>
-#include <linux/sched.h>
-#include <linux/syscalls.h>
-#include <linux/uaccess.h>
-#include <linux/interrupt.h>
-#include <linux/cdev.h>
-#include <linux/file.h>
-#include "uapi/vsoc_shm.h"
-
-#define VSOC_DEV_NAME "vsoc"
-
-/*
- * Description of the ivshmem-doorbell PCI device used by QEmu. These
- * constants follow docs/specs/ivshmem-spec.txt, which can be found in
- * the QEmu repository. This was last reconciled with the version that
- * shipped with QEmu 2.8.
- */
-
-/*
- * These constants are the KVM Inter-VM shared memory device
- * register offsets.
- */
-enum {
- INTR_MASK = 0x00, /* Interrupt Mask */
- INTR_STATUS = 0x04, /* Interrupt Status */
- IV_POSITION = 0x08, /* VM ID */
- DOORBELL = 0x0c, /* Doorbell */
-};
-
-static const int REGISTER_BAR; /* Equal to 0 */
-static const int MAX_REGISTER_BAR_LEN = 0x100;
-/*
- * The MSI-x BAR is not used directly.
- *
- * static const int MSI_X_BAR = 1;
- */
-static const int SHARED_MEMORY_BAR = 2;
-
-struct vsoc_region_data {
- char name[VSOC_DEVICE_NAME_SZ + 1];
- wait_queue_head_t interrupt_wait_queue;
- /* TODO(b/73664181): Use multiple futex wait queues */
- wait_queue_head_t futex_wait_queue;
- /* Flag indicating that an interrupt has been signalled by the host. */
- atomic_t *incoming_signalled;
- /* Flag indicating the guest has signalled the host. */
- atomic_t *outgoing_signalled;
- bool irq_requested;
- bool device_created;
-};
-
-struct vsoc_device {
- /* Kernel virtual address of REGISTER_BAR. */
- void __iomem *regs;
- /* Physical address of SHARED_MEMORY_BAR. */
- phys_addr_t shm_phys_start;
- /* Kernel virtual address of SHARED_MEMORY_BAR. */
- void __iomem *kernel_mapped_shm;
- /* Size of the entire shared memory window in bytes. */
- size_t shm_size;
- /*
- * Pointer to the virtual address of the shared memory layout structure.
- * This is probably identical to kernel_mapped_shm, but saving this
- * here saves a lot of annoying casts.
- */
- struct vsoc_shm_layout_descriptor *layout;
- /*
- * Points to a table of region descriptors in the kernel's virtual
- * address space. Calculated from
- * vsoc_shm_layout_descriptor.vsoc_region_desc_offset
- */
- struct vsoc_device_region *regions;
- /* Head of a list of permissions that have been granted. */
- struct list_head permissions;
- struct pci_dev *dev;
- /* Per-region (and therefore per-interrupt) information. */
- struct vsoc_region_data *regions_data;
- /*
- * Table of msi-x entries. This has to be separated from struct
- * vsoc_region_data because the kernel deals with them as an array.
- */
- struct msix_entry *msix_entries;
- /* Mutex that protects the permission list */
- struct mutex mtx;
- /* Major number assigned by the kernel */
- int major;
- /* Character device assigned by the kernel */
- struct cdev cdev;
- /* Device class assigned by the kernel */
- struct class *class;
- /*
- * Flags that indicate what we've initialized. These are used to do an
- * orderly cleanup of the device.
- */
- bool enabled_device;
- bool requested_regions;
- bool cdev_added;
- bool class_added;
- bool msix_enabled;
-};
-
-static struct vsoc_device vsoc_dev;
-
-/*
- * TODO(ghartman): Add a /sys filesystem entry that summarizes the permissions.
- */
-
-struct fd_scoped_permission_node {
- struct fd_scoped_permission permission;
- struct list_head list;
-};
-
-struct vsoc_private_data {
- struct fd_scoped_permission_node *fd_scoped_permission_node;
-};
-
-static long vsoc_ioctl(struct file *, unsigned int, unsigned long);
-static int vsoc_mmap(struct file *, struct vm_area_struct *);
-static int vsoc_open(struct inode *, struct file *);
-static int vsoc_release(struct inode *, struct file *);
-static ssize_t vsoc_read(struct file *, char __user *, size_t, loff_t *);
-static ssize_t vsoc_write(struct file *, const char __user *, size_t, loff_t *);
-static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin);
-static int
-do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
- struct fd_scoped_permission_node *np,
- struct fd_scoped_permission_arg __user *arg);
-static void
-do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
- struct fd_scoped_permission *perm);
-static long do_vsoc_describe_region(struct file *,
- struct vsoc_device_region __user *);
-static ssize_t vsoc_get_area(struct file *filp, __u32 *perm_off);
-
-/**
- * Validate arguments on entry points to the driver.
- */
-inline int vsoc_validate_inode(struct inode *inode)
-{
- if (iminor(inode) >= vsoc_dev.layout->region_count) {
- dev_err(&vsoc_dev.dev->dev,
- "describe_region: invalid region %d\n", iminor(inode));
- return -ENODEV;
- }
- return 0;
-}
-
-inline int vsoc_validate_filep(struct file *filp)
-{
- int ret = vsoc_validate_inode(file_inode(filp));
-
- if (ret)
- return ret;
- if (!filp->private_data) {
- dev_err(&vsoc_dev.dev->dev,
- "No private data on fd, region %d\n",
- iminor(file_inode(filp)));
- return -EBADFD;
- }
- return 0;
-}
-
-/* Converts from shared memory offset to virtual address */
-static inline void *shm_off_to_virtual_addr(__u32 offset)
-{
- return (void __force *)vsoc_dev.kernel_mapped_shm + offset;
-}
-
-/* Converts from shared memory offset to physical address */
-static inline phys_addr_t shm_off_to_phys_addr(__u32 offset)
-{
- return vsoc_dev.shm_phys_start + offset;
-}
-
-/**
- * Convenience functions to obtain the region from the inode or file.
- * Dangerous to call before validating the inode/file.
- */
-static
-inline struct vsoc_device_region *vsoc_region_from_inode(struct inode *inode)
-{
- return &vsoc_dev.regions[iminor(inode)];
-}
-
-static
-inline struct vsoc_device_region *vsoc_region_from_filep(struct file *inode)
-{
- return vsoc_region_from_inode(file_inode(inode));
-}
-
-static inline uint32_t vsoc_device_region_size(struct vsoc_device_region *r)
-{
- return r->region_end_offset - r->region_begin_offset;
-}
-
-static const struct file_operations vsoc_ops = {
- .owner = THIS_MODULE,
- .open = vsoc_open,
- .mmap = vsoc_mmap,
- .read = vsoc_read,
- .unlocked_ioctl = vsoc_ioctl,
- .compat_ioctl = vsoc_ioctl,
- .write = vsoc_write,
- .llseek = vsoc_lseek,
- .release = vsoc_release,
-};
-
-static struct pci_device_id vsoc_id_table[] = {
- {0x1af4, 0x1110, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
- {0},
-};
-
-MODULE_DEVICE_TABLE(pci, vsoc_id_table);
-
-static void vsoc_remove_device(struct pci_dev *pdev);
-static int vsoc_probe_device(struct pci_dev *pdev,
- const struct pci_device_id *ent);
-
-static struct pci_driver vsoc_pci_driver = {
- .name = "vsoc",
- .id_table = vsoc_id_table,
- .probe = vsoc_probe_device,
- .remove = vsoc_remove_device,
-};
-
-static int
-do_create_fd_scoped_permission(struct vsoc_device_region *region_p,
- struct fd_scoped_permission_node *np,
- struct fd_scoped_permission_arg __user *arg)
-{
- struct file *managed_filp;
- s32 managed_fd;
- atomic_t *owner_ptr = NULL;
- struct vsoc_device_region *managed_region_p;
-
- if (copy_from_user(&np->permission,
- &arg->perm, sizeof(np->permission)) ||
- copy_from_user(&managed_fd,
- &arg->managed_region_fd, sizeof(managed_fd))) {
- return -EFAULT;
- }
- managed_filp = fdget(managed_fd).file;
- /* Check that it's a valid fd, */
- if (!managed_filp || vsoc_validate_filep(managed_filp))
- return -EPERM;
- /* EEXIST if the given fd already has a permission. */
- if (((struct vsoc_private_data *)managed_filp->private_data)->
- fd_scoped_permission_node)
- return -EEXIST;
- managed_region_p = vsoc_region_from_filep(managed_filp);
- /* Check that the provided region is managed by this one */
- if (&vsoc_dev.regions[managed_region_p->managed_by] != region_p)
- return -EPERM;
- /* The area must be well formed and have non-zero size */
- if (np->permission.begin_offset >= np->permission.end_offset)
- return -EINVAL;
- /* The area must fit in the memory window */
- if (np->permission.end_offset >
- vsoc_device_region_size(managed_region_p))
- return -ERANGE;
- /* The area must be in the region data section */
- if (np->permission.begin_offset <
- managed_region_p->offset_of_region_data)
- return -ERANGE;
- /* The area must be page aligned */
- if (!PAGE_ALIGNED(np->permission.begin_offset) ||
- !PAGE_ALIGNED(np->permission.end_offset))
- return -EINVAL;
- /* Owner offset must be naturally aligned in the window */
- if (np->permission.owner_offset &
- (sizeof(np->permission.owner_offset) - 1))
- return -EINVAL;
- /* The owner flag must reside in the owner memory */
- if (np->permission.owner_offset + sizeof(np->permission.owner_offset) >
- vsoc_device_region_size(region_p))
- return -ERANGE;
- /* The owner flag must reside in the data section */
- if (np->permission.owner_offset < region_p->offset_of_region_data)
- return -EINVAL;
- /* The owner value must change to claim the memory */
- if (np->permission.owned_value == VSOC_REGION_FREE)
- return -EINVAL;
- owner_ptr =
- (atomic_t *)shm_off_to_virtual_addr(region_p->region_begin_offset +
- np->permission.owner_offset);
- /* We've already verified that this is in the shared memory window, so
- * it should be safe to write to this address.
- */
- if (atomic_cmpxchg(owner_ptr,
- VSOC_REGION_FREE,
- np->permission.owned_value) != VSOC_REGION_FREE) {
- return -EBUSY;
- }
- ((struct vsoc_private_data *)managed_filp->private_data)->
- fd_scoped_permission_node = np;
- /* The file offset needs to be adjusted if the calling
- * process did any read/write operations on the fd
- * before creating the permission.
- */
- if (managed_filp->f_pos) {
- if (managed_filp->f_pos > np->permission.end_offset) {
- /* If the offset is beyond the permission end, set it
- * to the end.
- */
- managed_filp->f_pos = np->permission.end_offset;
- } else {
- /* If the offset is within the permission interval
- * keep it there otherwise reset it to zero.
- */
- if (managed_filp->f_pos < np->permission.begin_offset) {
- managed_filp->f_pos = 0;
- } else {
- managed_filp->f_pos -=
- np->permission.begin_offset;
- }
- }
- }
- return 0;
-}
-
-static void
-do_destroy_fd_scoped_permission_node(struct vsoc_device_region *owner_region_p,
- struct fd_scoped_permission_node *node)
-{
- if (node) {
- do_destroy_fd_scoped_permission(owner_region_p,
- &node->permission);
- mutex_lock(&vsoc_dev.mtx);
- list_del(&node->list);
- mutex_unlock(&vsoc_dev.mtx);
- kfree(node);
- }
-}
-
-static void
-do_destroy_fd_scoped_permission(struct vsoc_device_region *owner_region_p,
- struct fd_scoped_permission *perm)
-{
- atomic_t *owner_ptr = NULL;
- int prev = 0;
-
- if (!perm)
- return;
- owner_ptr = (atomic_t *)shm_off_to_virtual_addr
- (owner_region_p->region_begin_offset + perm->owner_offset);
- prev = atomic_xchg(owner_ptr, VSOC_REGION_FREE);
- if (prev != perm->owned_value)
- dev_err(&vsoc_dev.dev->dev,
- "%x-%x: owner (%s) %x: expected to be %x was %x",
- perm->begin_offset, perm->end_offset,
- owner_region_p->device_name, perm->owner_offset,
- perm->owned_value, prev);
-}
-
-static long do_vsoc_describe_region(struct file *filp,
- struct vsoc_device_region __user *dest)
-{
- struct vsoc_device_region *region_p;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- region_p = vsoc_region_from_filep(filp);
- if (copy_to_user(dest, region_p, sizeof(*region_p)))
- return -EFAULT;
- return 0;
-}
-
-/**
- * Implements the inner logic of cond_wait. Copies to and from userspace are
- * done in the helper function below.
- */
-static int handle_vsoc_cond_wait(struct file *filp, struct vsoc_cond_wait *arg)
-{
- DEFINE_WAIT(wait);
- u32 region_number = iminor(file_inode(filp));
- struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
- struct hrtimer_sleeper timeout, *to = NULL;
- int ret = 0;
- struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
- atomic_t *address = NULL;
- ktime_t wake_time;
-
- /* Ensure that the offset is aligned */
- if (arg->offset & (sizeof(uint32_t) - 1))
- return -EADDRNOTAVAIL;
- /* Ensure that the offset is within shared memory */
- if (((uint64_t)arg->offset) + region_p->region_begin_offset +
- sizeof(uint32_t) > region_p->region_end_offset)
- return -E2BIG;
- address = shm_off_to_virtual_addr(region_p->region_begin_offset +
- arg->offset);
-
- /* Ensure that the type of wait is valid */
- switch (arg->wait_type) {
- case VSOC_WAIT_IF_EQUAL:
- break;
- case VSOC_WAIT_IF_EQUAL_TIMEOUT:
- to = &timeout;
- break;
- default:
- return -EINVAL;
- }
-
- if (to) {
- /* Copy the user-supplied timespec into the kernel structure.
- * We do things this way to flatten differences between 32 bit
- * and 64 bit timespecs.
- */
- if (arg->wake_time_nsec >= NSEC_PER_SEC)
- return -EINVAL;
- wake_time = ktime_set(arg->wake_time_sec, arg->wake_time_nsec);
-
- hrtimer_init_sleeper_on_stack(to, CLOCK_MONOTONIC,
- HRTIMER_MODE_ABS);
- hrtimer_set_expires_range_ns(&to->timer, wake_time,
- current->timer_slack_ns);
- }
-
- while (1) {
- prepare_to_wait(&data->futex_wait_queue, &wait,
- TASK_INTERRUPTIBLE);
- /*
- * Check the sentinel value after prepare_to_wait. If the value
- * changes after this check the writer will call signal,
- * changing the task state from INTERRUPTIBLE to RUNNING. That
- * will ensure that schedule() will eventually schedule this
- * task.
- */
- if (atomic_read(address) != arg->value) {
- ret = 0;
- break;
- }
- if (to) {
- hrtimer_sleeper_start_expires(to, HRTIMER_MODE_ABS);
- if (likely(to->task))
- freezable_schedule();
- hrtimer_cancel(&to->timer);
- if (!to->task) {
- ret = -ETIMEDOUT;
- break;
- }
- } else {
- freezable_schedule();
- }
- /* Count the number of times that we woke up. This is useful
- * for unit testing.
- */
- ++arg->wakes;
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
- }
- }
- finish_wait(&data->futex_wait_queue, &wait);
- if (to)
- destroy_hrtimer_on_stack(&to->timer);
- return ret;
-}
-
-/**
- * Handles the details of copying from/to userspace to ensure that the copies
- * happen on all of the return paths of cond_wait.
- */
-static int do_vsoc_cond_wait(struct file *filp,
- struct vsoc_cond_wait __user *untrusted_in)
-{
- struct vsoc_cond_wait arg;
- int rval = 0;
-
- if (copy_from_user(&arg, untrusted_in, sizeof(arg)))
- return -EFAULT;
- /* wakes is an out parameter. Initialize it to something sensible. */
- arg.wakes = 0;
- rval = handle_vsoc_cond_wait(filp, &arg);
- if (copy_to_user(untrusted_in, &arg, sizeof(arg)))
- return -EFAULT;
- return rval;
-}
-
-static int do_vsoc_cond_wake(struct file *filp, uint32_t offset)
-{
- struct vsoc_device_region *region_p = vsoc_region_from_filep(filp);
- u32 region_number = iminor(file_inode(filp));
- struct vsoc_region_data *data = vsoc_dev.regions_data + region_number;
- /* Ensure that the offset is aligned */
- if (offset & (sizeof(uint32_t) - 1))
- return -EADDRNOTAVAIL;
- /* Ensure that the offset is within shared memory */
- if (((uint64_t)offset) + region_p->region_begin_offset +
- sizeof(uint32_t) > region_p->region_end_offset)
- return -E2BIG;
- /*
- * TODO(b/73664181): Use multiple futex wait queues.
- * We need to wake every sleeper when the condition changes. Typically
- * only a single thread will be waiting on the condition, but there
- * are exceptions. The worst case is about 10 threads.
- */
- wake_up_interruptible_all(&data->futex_wait_queue);
- return 0;
-}
-
-static long vsoc_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
-{
- int rv = 0;
- struct vsoc_device_region *region_p;
- u32 reg_num;
- struct vsoc_region_data *reg_data;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- region_p = vsoc_region_from_filep(filp);
- reg_num = iminor(file_inode(filp));
- reg_data = vsoc_dev.regions_data + reg_num;
- switch (cmd) {
- case VSOC_CREATE_FD_SCOPED_PERMISSION:
- {
- struct fd_scoped_permission_node *node = NULL;
-
- node = kzalloc(sizeof(*node), GFP_KERNEL);
- /* We can't allocate memory for the permission */
- if (!node)
- return -ENOMEM;
- INIT_LIST_HEAD(&node->list);
- rv = do_create_fd_scoped_permission
- (region_p,
- node,
- (struct fd_scoped_permission_arg __user *)arg);
- if (!rv) {
- mutex_lock(&vsoc_dev.mtx);
- list_add(&node->list, &vsoc_dev.permissions);
- mutex_unlock(&vsoc_dev.mtx);
- } else {
- kfree(node);
- return rv;
- }
- }
- break;
-
- case VSOC_GET_FD_SCOPED_PERMISSION:
- {
- struct fd_scoped_permission_node *node =
- ((struct vsoc_private_data *)filp->private_data)->
- fd_scoped_permission_node;
- if (!node)
- return -ENOENT;
- if (copy_to_user
- ((struct fd_scoped_permission __user *)arg,
- &node->permission, sizeof(node->permission)))
- return -EFAULT;
- }
- break;
-
- case VSOC_MAYBE_SEND_INTERRUPT_TO_HOST:
- if (!atomic_xchg(reg_data->outgoing_signalled, 1)) {
- writel(reg_num, vsoc_dev.regs + DOORBELL);
- return 0;
- } else {
- return -EBUSY;
- }
- break;
-
- case VSOC_SEND_INTERRUPT_TO_HOST:
- writel(reg_num, vsoc_dev.regs + DOORBELL);
- return 0;
- case VSOC_WAIT_FOR_INCOMING_INTERRUPT:
- wait_event_interruptible
- (reg_data->interrupt_wait_queue,
- (atomic_read(reg_data->incoming_signalled) != 0));
- break;
-
- case VSOC_DESCRIBE_REGION:
- return do_vsoc_describe_region
- (filp,
- (struct vsoc_device_region __user *)arg);
-
- case VSOC_SELF_INTERRUPT:
- atomic_set(reg_data->incoming_signalled, 1);
- wake_up_interruptible(&reg_data->interrupt_wait_queue);
- break;
-
- case VSOC_COND_WAIT:
- return do_vsoc_cond_wait(filp,
- (struct vsoc_cond_wait __user *)arg);
- case VSOC_COND_WAKE:
- return do_vsoc_cond_wake(filp, arg);
-
- default:
- return -EINVAL;
- }
- return 0;
-}
-
-static ssize_t vsoc_read(struct file *filp, char __user *buffer, size_t len,
- loff_t *poffset)
-{
- __u32 area_off;
- const void *area_p;
- ssize_t area_len;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- area_len = vsoc_get_area(filp, &area_off);
- area_p = shm_off_to_virtual_addr(area_off);
- area_p += *poffset;
- area_len -= *poffset;
- if (area_len <= 0)
- return 0;
- if (area_len < len)
- len = area_len;
- if (copy_to_user(buffer, area_p, len))
- return -EFAULT;
- *poffset += len;
- return len;
-}
-
-static loff_t vsoc_lseek(struct file *filp, loff_t offset, int origin)
-{
- ssize_t area_len = 0;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- area_len = vsoc_get_area(filp, NULL);
- switch (origin) {
- case SEEK_SET:
- break;
-
- case SEEK_CUR:
- if (offset > 0 && offset + filp->f_pos < 0)
- return -EOVERFLOW;
- offset += filp->f_pos;
- break;
-
- case SEEK_END:
- if (offset > 0 && offset + area_len < 0)
- return -EOVERFLOW;
- offset += area_len;
- break;
-
- case SEEK_DATA:
- if (offset >= area_len)
- return -EINVAL;
- if (offset < 0)
- offset = 0;
- break;
-
- case SEEK_HOLE:
- /* Next hole is always the end of the region, unless offset is
- * beyond that
- */
- if (offset < area_len)
- offset = area_len;
- break;
-
- default:
- return -EINVAL;
- }
-
- if (offset < 0 || offset > area_len)
- return -EINVAL;
- filp->f_pos = offset;
-
- return offset;
-}
-
-static ssize_t vsoc_write(struct file *filp, const char __user *buffer,
- size_t len, loff_t *poffset)
-{
- __u32 area_off;
- void *area_p;
- ssize_t area_len;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- area_len = vsoc_get_area(filp, &area_off);
- area_p = shm_off_to_virtual_addr(area_off);
- area_p += *poffset;
- area_len -= *poffset;
- if (area_len <= 0)
- return 0;
- if (area_len < len)
- len = area_len;
- if (copy_from_user(area_p, buffer, len))
- return -EFAULT;
- *poffset += len;
- return len;
-}
-
-static irqreturn_t vsoc_interrupt(int irq, void *region_data_v)
-{
- struct vsoc_region_data *region_data =
- (struct vsoc_region_data *)region_data_v;
- int reg_num = region_data - vsoc_dev.regions_data;
-
- if (unlikely(!region_data))
- return IRQ_NONE;
-
- if (unlikely(reg_num < 0 ||
- reg_num >= vsoc_dev.layout->region_count)) {
- dev_err(&vsoc_dev.dev->dev,
- "invalid irq @%p reg_num=0x%04x\n",
- region_data, reg_num);
- return IRQ_NONE;
- }
- if (unlikely(vsoc_dev.regions_data + reg_num != region_data)) {
- dev_err(&vsoc_dev.dev->dev,
- "irq not aligned @%p reg_num=0x%04x\n",
- region_data, reg_num);
- return IRQ_NONE;
- }
- wake_up_interruptible(&region_data->interrupt_wait_queue);
- return IRQ_HANDLED;
-}
-
-static int vsoc_probe_device(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- int result;
- int i;
- resource_size_t reg_size;
- dev_t devt;
-
- vsoc_dev.dev = pdev;
- result = pci_enable_device(pdev);
- if (result) {
- dev_err(&pdev->dev,
- "pci_enable_device failed %s: error %d\n",
- pci_name(pdev), result);
- return result;
- }
- vsoc_dev.enabled_device = true;
- result = pci_request_regions(pdev, "vsoc");
- if (result < 0) {
- dev_err(&pdev->dev, "pci_request_regions failed\n");
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
- vsoc_dev.requested_regions = true;
- /* Set up the control registers in BAR 0 */
- reg_size = pci_resource_len(pdev, REGISTER_BAR);
- if (reg_size > MAX_REGISTER_BAR_LEN)
- vsoc_dev.regs =
- pci_iomap(pdev, REGISTER_BAR, MAX_REGISTER_BAR_LEN);
- else
- vsoc_dev.regs = pci_iomap(pdev, REGISTER_BAR, reg_size);
-
- if (!vsoc_dev.regs) {
- dev_err(&pdev->dev,
- "cannot map registers of size %zu\n",
- (size_t)reg_size);
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
-
- /* Map the shared memory in BAR 2 */
- vsoc_dev.shm_phys_start = pci_resource_start(pdev, SHARED_MEMORY_BAR);
- vsoc_dev.shm_size = pci_resource_len(pdev, SHARED_MEMORY_BAR);
-
- dev_info(&pdev->dev, "shared memory @ DMA %pa size=0x%zx\n",
- &vsoc_dev.shm_phys_start, vsoc_dev.shm_size);
- vsoc_dev.kernel_mapped_shm = pci_iomap_wc(pdev, SHARED_MEMORY_BAR, 0);
- if (!vsoc_dev.kernel_mapped_shm) {
- dev_err(&vsoc_dev.dev->dev, "cannot iomap region\n");
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
-
- vsoc_dev.layout = (struct vsoc_shm_layout_descriptor __force *)
- vsoc_dev.kernel_mapped_shm;
- dev_info(&pdev->dev, "major_version: %d\n",
- vsoc_dev.layout->major_version);
- dev_info(&pdev->dev, "minor_version: %d\n",
- vsoc_dev.layout->minor_version);
- dev_info(&pdev->dev, "size: 0x%x\n", vsoc_dev.layout->size);
- dev_info(&pdev->dev, "regions: %d\n", vsoc_dev.layout->region_count);
- if (vsoc_dev.layout->major_version !=
- CURRENT_VSOC_LAYOUT_MAJOR_VERSION) {
- dev_err(&vsoc_dev.dev->dev,
- "driver supports only major_version %d\n",
- CURRENT_VSOC_LAYOUT_MAJOR_VERSION);
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
- result = alloc_chrdev_region(&devt, 0, vsoc_dev.layout->region_count,
- VSOC_DEV_NAME);
- if (result) {
- dev_err(&vsoc_dev.dev->dev, "alloc_chrdev_region failed\n");
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
- vsoc_dev.major = MAJOR(devt);
- cdev_init(&vsoc_dev.cdev, &vsoc_ops);
- vsoc_dev.cdev.owner = THIS_MODULE;
- result = cdev_add(&vsoc_dev.cdev, devt, vsoc_dev.layout->region_count);
- if (result) {
- dev_err(&vsoc_dev.dev->dev, "cdev_add error\n");
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
- vsoc_dev.cdev_added = true;
- vsoc_dev.class = class_create(THIS_MODULE, VSOC_DEV_NAME);
- if (IS_ERR(vsoc_dev.class)) {
- dev_err(&vsoc_dev.dev->dev, "class_create failed\n");
- vsoc_remove_device(pdev);
- return PTR_ERR(vsoc_dev.class);
- }
- vsoc_dev.class_added = true;
- vsoc_dev.regions = (struct vsoc_device_region __force *)
- ((void *)vsoc_dev.layout +
- vsoc_dev.layout->vsoc_region_desc_offset);
- vsoc_dev.msix_entries =
- kcalloc(vsoc_dev.layout->region_count,
- sizeof(vsoc_dev.msix_entries[0]), GFP_KERNEL);
- if (!vsoc_dev.msix_entries) {
- dev_err(&vsoc_dev.dev->dev,
- "unable to allocate msix_entries\n");
- vsoc_remove_device(pdev);
- return -ENOSPC;
- }
- vsoc_dev.regions_data =
- kcalloc(vsoc_dev.layout->region_count,
- sizeof(vsoc_dev.regions_data[0]), GFP_KERNEL);
- if (!vsoc_dev.regions_data) {
- dev_err(&vsoc_dev.dev->dev,
- "unable to allocate regions' data\n");
- vsoc_remove_device(pdev);
- return -ENOSPC;
- }
- for (i = 0; i < vsoc_dev.layout->region_count; ++i)
- vsoc_dev.msix_entries[i].entry = i;
-
- result = pci_enable_msix_exact(vsoc_dev.dev, vsoc_dev.msix_entries,
- vsoc_dev.layout->region_count);
- if (result) {
- dev_info(&pdev->dev, "pci_enable_msix failed: %d\n", result);
- vsoc_remove_device(pdev);
- return -ENOSPC;
- }
- /* Check that all regions are well formed */
- for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
- const struct vsoc_device_region *region = vsoc_dev.regions + i;
-
- if (!PAGE_ALIGNED(region->region_begin_offset) ||
- !PAGE_ALIGNED(region->region_end_offset)) {
- dev_err(&vsoc_dev.dev->dev,
- "region %d not aligned (%x:%x)", i,
- region->region_begin_offset,
- region->region_end_offset);
- vsoc_remove_device(pdev);
- return -EFAULT;
- }
- if (region->region_begin_offset >= region->region_end_offset ||
- region->region_end_offset > vsoc_dev.shm_size) {
- dev_err(&vsoc_dev.dev->dev,
- "region %d offsets are wrong: %x %x %zx",
- i, region->region_begin_offset,
- region->region_end_offset, vsoc_dev.shm_size);
- vsoc_remove_device(pdev);
- return -EFAULT;
- }
- if (region->managed_by >= vsoc_dev.layout->region_count) {
- dev_err(&vsoc_dev.dev->dev,
- "region %d has invalid owner: %u",
- i, region->managed_by);
- vsoc_remove_device(pdev);
- return -EFAULT;
- }
- }
- vsoc_dev.msix_enabled = true;
- for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
- const struct vsoc_device_region *region = vsoc_dev.regions + i;
- size_t name_sz = sizeof(vsoc_dev.regions_data[i].name) - 1;
- const struct vsoc_signal_table_layout *h_to_g_signal_table =
- &region->host_to_guest_signal_table;
- const struct vsoc_signal_table_layout *g_to_h_signal_table =
- &region->guest_to_host_signal_table;
-
- vsoc_dev.regions_data[i].name[name_sz] = '\0';
- memcpy(vsoc_dev.regions_data[i].name, region->device_name,
- name_sz);
- dev_info(&pdev->dev, "region %d name=%s\n",
- i, vsoc_dev.regions_data[i].name);
- init_waitqueue_head
- (&vsoc_dev.regions_data[i].interrupt_wait_queue);
- init_waitqueue_head(&vsoc_dev.regions_data[i].futex_wait_queue);
- vsoc_dev.regions_data[i].incoming_signalled =
- shm_off_to_virtual_addr(region->region_begin_offset) +
- h_to_g_signal_table->interrupt_signalled_offset;
- vsoc_dev.regions_data[i].outgoing_signalled =
- shm_off_to_virtual_addr(region->region_begin_offset) +
- g_to_h_signal_table->interrupt_signalled_offset;
- result = request_irq(vsoc_dev.msix_entries[i].vector,
- vsoc_interrupt, 0,
- vsoc_dev.regions_data[i].name,
- vsoc_dev.regions_data + i);
- if (result) {
- dev_info(&pdev->dev,
- "request_irq failed irq=%d vector=%d\n",
- i, vsoc_dev.msix_entries[i].vector);
- vsoc_remove_device(pdev);
- return -ENOSPC;
- }
- vsoc_dev.regions_data[i].irq_requested = true;
- if (!device_create(vsoc_dev.class, NULL,
- MKDEV(vsoc_dev.major, i),
- NULL, vsoc_dev.regions_data[i].name)) {
- dev_err(&vsoc_dev.dev->dev, "device_create failed\n");
- vsoc_remove_device(pdev);
- return -EBUSY;
- }
- vsoc_dev.regions_data[i].device_created = true;
- }
- return 0;
-}
-
-/*
- * This should undo all of the allocations in the probe function in reverse
- * order.
- *
- * Notes:
- *
- * The device may have been partially initialized, so double check
- * that the allocations happened.
- *
- * This function may be called multiple times, so mark resources as freed
- * as they are deallocated.
- */
-static void vsoc_remove_device(struct pci_dev *pdev)
-{
- int i;
- /*
- * pdev is the first thing to be set on probe and the last thing
- * to be cleared here. If it's NULL then there is no cleanup.
- */
- if (!pdev || !vsoc_dev.dev)
- return;
- dev_info(&pdev->dev, "remove_device\n");
- if (vsoc_dev.regions_data) {
- for (i = 0; i < vsoc_dev.layout->region_count; ++i) {
- if (vsoc_dev.regions_data[i].device_created) {
- device_destroy(vsoc_dev.class,
- MKDEV(vsoc_dev.major, i));
- vsoc_dev.regions_data[i].device_created = false;
- }
- if (vsoc_dev.regions_data[i].irq_requested)
- free_irq(vsoc_dev.msix_entries[i].vector, NULL);
- vsoc_dev.regions_data[i].irq_requested = false;
- }
- kfree(vsoc_dev.regions_data);
- vsoc_dev.regions_data = NULL;
- }
- if (vsoc_dev.msix_enabled) {
- pci_disable_msix(pdev);
- vsoc_dev.msix_enabled = false;
- }
- kfree(vsoc_dev.msix_entries);
- vsoc_dev.msix_entries = NULL;
- vsoc_dev.regions = NULL;
- if (vsoc_dev.class_added) {
- class_destroy(vsoc_dev.class);
- vsoc_dev.class_added = false;
- }
- if (vsoc_dev.cdev_added) {
- cdev_del(&vsoc_dev.cdev);
- vsoc_dev.cdev_added = false;
- }
- if (vsoc_dev.major && vsoc_dev.layout) {
- unregister_chrdev_region(MKDEV(vsoc_dev.major, 0),
- vsoc_dev.layout->region_count);
- vsoc_dev.major = 0;
- }
- vsoc_dev.layout = NULL;
- if (vsoc_dev.kernel_mapped_shm) {
- pci_iounmap(pdev, vsoc_dev.kernel_mapped_shm);
- vsoc_dev.kernel_mapped_shm = NULL;
- }
- if (vsoc_dev.regs) {
- pci_iounmap(pdev, vsoc_dev.regs);
- vsoc_dev.regs = NULL;
- }
- if (vsoc_dev.requested_regions) {
- pci_release_regions(pdev);
- vsoc_dev.requested_regions = false;
- }
- if (vsoc_dev.enabled_device) {
- pci_disable_device(pdev);
- vsoc_dev.enabled_device = false;
- }
- /* Do this last: it indicates that the device is not initialized. */
- vsoc_dev.dev = NULL;
-}
-
-static void __exit vsoc_cleanup_module(void)
-{
- vsoc_remove_device(vsoc_dev.dev);
- pci_unregister_driver(&vsoc_pci_driver);
-}
-
-static int __init vsoc_init_module(void)
-{
- int err = -ENOMEM;
-
- INIT_LIST_HEAD(&vsoc_dev.permissions);
- mutex_init(&vsoc_dev.mtx);
-
- err = pci_register_driver(&vsoc_pci_driver);
- if (err < 0)
- return err;
- return 0;
-}
-
-static int vsoc_open(struct inode *inode, struct file *filp)
-{
- /* Can't use vsoc_validate_filep because filp is still incomplete */
- int ret = vsoc_validate_inode(inode);
-
- if (ret)
- return ret;
- filp->private_data =
- kzalloc(sizeof(struct vsoc_private_data), GFP_KERNEL);
- if (!filp->private_data)
- return -ENOMEM;
- return 0;
-}
-
-static int vsoc_release(struct inode *inode, struct file *filp)
-{
- struct vsoc_private_data *private_data = NULL;
- struct fd_scoped_permission_node *node = NULL;
- struct vsoc_device_region *owner_region_p = NULL;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- private_data = (struct vsoc_private_data *)filp->private_data;
- if (!private_data)
- return 0;
-
- node = private_data->fd_scoped_permission_node;
- if (node) {
- owner_region_p = vsoc_region_from_inode(inode);
- if (owner_region_p->managed_by != VSOC_REGION_WHOLE) {
- owner_region_p =
- &vsoc_dev.regions[owner_region_p->managed_by];
- }
- do_destroy_fd_scoped_permission_node(owner_region_p, node);
- private_data->fd_scoped_permission_node = NULL;
- }
- kfree(private_data);
- filp->private_data = NULL;
-
- return 0;
-}
-
-/*
- * Returns the device relative offset and length of the area specified by the
- * fd scoped permission. If there is no fd scoped permission set, a default
- * permission covering the entire region is assumed, unless the region is owned
- * by another one, in which case the default is a permission with zero size.
- */
-static ssize_t vsoc_get_area(struct file *filp, __u32 *area_offset)
-{
- __u32 off = 0;
- ssize_t length = 0;
- struct vsoc_device_region *region_p;
- struct fd_scoped_permission *perm;
-
- region_p = vsoc_region_from_filep(filp);
- off = region_p->region_begin_offset;
- perm = &((struct vsoc_private_data *)filp->private_data)->
- fd_scoped_permission_node->permission;
- if (perm) {
- off += perm->begin_offset;
- length = perm->end_offset - perm->begin_offset;
- } else if (region_p->managed_by == VSOC_REGION_WHOLE) {
- /* No permission set and the region is not owned by another,
- * default to full region access.
- */
- length = vsoc_device_region_size(region_p);
- } else {
- /* return zero length, access is denied. */
- length = 0;
- }
- if (area_offset)
- *area_offset = off;
- return length;
-}
-
-static int vsoc_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- unsigned long len = vma->vm_end - vma->vm_start;
- __u32 area_off;
- phys_addr_t mem_off;
- ssize_t area_len;
- int retval = vsoc_validate_filep(filp);
-
- if (retval)
- return retval;
- area_len = vsoc_get_area(filp, &area_off);
- /* Add the requested offset */
- area_off += (vma->vm_pgoff << PAGE_SHIFT);
- area_len -= (vma->vm_pgoff << PAGE_SHIFT);
- if (area_len < len)
- return -EINVAL;
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- mem_off = shm_off_to_phys_addr(area_off);
- if (io_remap_pfn_range(vma, vma->vm_start, mem_off >> PAGE_SHIFT,
- len, vma->vm_page_prot))
- return -EAGAIN;
- return 0;
-}
-
-module_init(vsoc_init_module);
-module_exit(vsoc_cleanup_module);
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Greg Hartman <[email protected]>");
-MODULE_DESCRIPTION("VSoC interpretation of QEmu's ivshmem device");
-MODULE_VERSION("1.0");
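
The deleted vsoc_mmap() above is a compact illustration of the checks a device
mmap handler must make before remapping memory: clamp the request against the
window the fd is allowed to see, then remap with non-cached protection. A
minimal sketch of the same shape, where my_get_area() and my_area_phys() are
hypothetical stand-ins for the vsoc-specific lookups:

    /* Minimal sketch, assuming hypothetical helpers: my_get_area() returns
     * the byte length the fd may map, my_area_phys() its physical base. */
    static int my_mmap(struct file *filp, struct vm_area_struct *vma)
    {
            unsigned long len = vma->vm_end - vma->vm_start;
            unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
            ssize_t area_len = my_get_area(filp);

            if (area_len < 0 || off >= area_len || len > area_len - off)
                    return -EINVAL; /* request exceeds the permitted window */

            vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
            if (io_remap_pfn_range(vma, vma->vm_start,
                                   (my_area_phys(filp) + off) >> PAGE_SHIFT,
                                   len, vma->vm_page_prot))
                    return -EAGAIN;
            return 0;
    }
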
diff --git a/drivers/staging/greybus/audio_manager.c b/drivers/staging/greybus/audio_manager.c
index 9b19ea9d3fa1..9a3f7c034ab4 100644
--- a/drivers/staging/greybus/audio_manager.c
+++ b/drivers/staging/greybus/audio_manager.c
@@ -92,8 +92,8 @@ void gb_audio_manager_remove_all(void)
list_for_each_entry_safe(module, next, &modules_list, list) {
list_del(&module->list);
- kobject_put(&module->kobj);
ida_simple_remove(&module_id, module->id);
+ kobject_put(&module->kobj);
}
is_empty = list_empty(&modules_list);
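
The hunk above moves kobject_put() after ida_simple_remove() because the
kobject release callback frees the surrounding module structure, so reading
module->id after the put is a use-after-free. The general rule, as a sketch
(gb_module stands in for the real type):

    /* Sketch: copy out every field you still need, then drop the
     * reference that may free the object, and touch nothing after. */
    static void remove_one(struct gb_module *module)
    {
            int id = module->id;                 /* read before the put */

            list_del(&module->list);
            ida_simple_remove(&module_id, id);
            kobject_put(&module->kobj);          /* may free 'module' */
    }
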
diff --git a/drivers/staging/media/hantro/hantro_drv.c b/drivers/staging/media/hantro/hantro_drv.c
index 97c615a2f057..c98835326135 100644
--- a/drivers/staging/media/hantro/hantro_drv.c
+++ b/drivers/staging/media/hantro/hantro_drv.c
@@ -558,13 +558,13 @@ static int hantro_attach_func(struct hantro_dev *vpu,
goto err_rel_entity1;
/* Connect the three entities */
- ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 1,
+ ret = media_create_pad_link(&func->vdev.entity, 0, &func->proc, 0,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
goto err_rel_entity2;
- ret = media_create_pad_link(&func->proc, 0, &func->sink, 0,
+ ret = media_create_pad_link(&func->proc, 1, &func->sink, 0,
MEDIA_LNK_FL_IMMUTABLE |
MEDIA_LNK_FL_ENABLED);
if (ret)
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 9b6ea86d1dcf..ba53959e1303 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -2009,21 +2009,16 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
struct ieee_param *param;
uint ret = 0;
- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
- ret = -EINVAL;
- goto out;
- }
+ if (!p->pointer || p->length != sizeof(struct ieee_param))
+ return -EINVAL;
param = (struct ieee_param *)rtw_malloc(p->length);
- if (!param) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!param)
+ return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
kfree(param);
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
switch (param->cmd) {
@@ -2054,9 +2049,6 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
ret = -EFAULT;
kfree(param);
-
-out:
-
return ret;
}
@@ -2791,26 +2783,19 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
* so, we just check hw_init_completed
*/
- if (!padapter->hw_init_completed) {
- ret = -EPERM;
- goto out;
- }
+ if (!padapter->hw_init_completed)
+ return -EPERM;
- if (!p->pointer) {
- ret = -EINVAL;
- goto out;
- }
+ if (!p->pointer || p->length != sizeof(struct ieee_param))
+ return -EINVAL;
param = (struct ieee_param *)rtw_malloc(p->length);
- if (!param) {
- ret = -ENOMEM;
- goto out;
- }
+ if (!param)
+ return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
kfree(param);
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
switch (param->cmd) {
@@ -2865,7 +2850,6 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
kfree(param);
-out:
return ret;
}
#endif
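
The two rtl8188eu hunks above flatten the goto-out error handling into direct
returns and tighten the validation to an exact-size check before
copy_from_user(). The resulting shape, condensed into one sketch:

    /* Condensed sketch of the validated copy-in pattern. */
    static int my_wpa_ioctl(struct iw_point *p)
    {
            struct ieee_param *param;
            int ret = 0;

            if (!p->pointer || p->length != sizeof(struct ieee_param))
                    return -EINVAL;  /* reject short and oversized buffers */

            param = kmalloc(p->length, GFP_KERNEL);
            if (!param)
                    return -ENOMEM;

            if (copy_from_user(param, p->pointer, p->length)) {
                    kfree(param);
                    return -EFAULT;
            }
            /* ... dispatch on param->cmd, set ret ... */
            kfree(param);
            return ret;
    }
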
diff --git a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
index b44e902ed338..b6d56cfb0a19 100644
--- a/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
+++ b/drivers/staging/rtl8723bs/hal/rtl8723bs_xmit.c
@@ -476,14 +476,13 @@ int rtl8723bs_xmit_thread(void *context)
s32 ret;
struct adapter *padapter;
struct xmit_priv *pxmitpriv;
- u8 thread_name[20] = "RTWHALXT";
-
+ u8 thread_name[20];
ret = _SUCCESS;
padapter = context;
pxmitpriv = &padapter->xmitpriv;
- rtw_sprintf(thread_name, 20, "%s-"ADPT_FMT, thread_name, ADPT_ARG(padapter));
+ rtw_sprintf(thread_name, 20, "RTWHALXT-" ADPT_FMT, ADPT_ARG(padapter));
thread_enter(thread_name);
DBG_871X("start "FUNC_ADPT_FMT"\n", FUNC_ADPT_ARG(padapter));
diff --git a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
index db6528a01229..9b9038e7deb1 100644
--- a/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8723bs/os_dep/ioctl_linux.c
@@ -3373,21 +3373,16 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
/* down(&ieee->wx_sem); */
- if (p->length < sizeof(struct ieee_param) || !p->pointer) {
- ret = -EINVAL;
- goto out;
- }
+ if (!p->pointer || p->length != sizeof(struct ieee_param))
+ return -EINVAL;
param = rtw_malloc(p->length);
- if (param == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ if (param == NULL)
+ return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
kfree(param);
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
switch (param->cmd) {
@@ -3421,12 +3416,8 @@ static int wpa_supplicant_ioctl(struct net_device *dev, struct iw_point *p)
kfree(param);
-out:
-
/* up(&ieee->wx_sem); */
-
return ret;
-
}
static int rtw_set_encryption(struct net_device *dev, struct ieee_param *param, u32 param_len)
@@ -4200,28 +4191,19 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
* so, we just check hw_init_completed
*/
- if (!padapter->hw_init_completed) {
- ret = -EPERM;
- goto out;
- }
-
+ if (!padapter->hw_init_completed)
+ return -EPERM;
- /* if (p->length < sizeof(struct ieee_param) || !p->pointer) { */
- if (!p->pointer) {
- ret = -EINVAL;
- goto out;
- }
+ if (!p->pointer || p->length != sizeof(*param))
+ return -EINVAL;
param = rtw_malloc(p->length);
- if (param == NULL) {
- ret = -ENOMEM;
- goto out;
- }
+ if (param == NULL)
+ return -ENOMEM;
if (copy_from_user(param, p->pointer, p->length)) {
kfree(param);
- ret = -EFAULT;
- goto out;
+ return -EFAULT;
}
/* DBG_871X("%s, cmd =%d\n", __func__, param->cmd); */
@@ -4321,13 +4303,8 @@ static int rtw_hostapd_ioctl(struct net_device *dev, struct iw_point *p)
if (ret == 0 && copy_to_user(p->pointer, param, p->length))
ret = -EFAULT;
-
kfree(param);
-
-out:
-
return ret;
-
}
static int rtw_wx_set_priv(struct net_device *dev,
diff --git a/drivers/staging/speakup/selection.c b/drivers/staging/speakup/selection.c
index a8b4d0c5ab7e..032f3264fba1 100644
--- a/drivers/staging/speakup/selection.c
+++ b/drivers/staging/speakup/selection.c
@@ -51,9 +51,7 @@ static void __speakup_set_selection(struct work_struct *work)
goto unref;
}
- console_lock();
set_selection_kernel(&sel, tty);
- console_unlock();
unref:
tty_kref_put(tty);
diff --git a/drivers/staging/vt6656/dpc.c b/drivers/staging/vt6656/dpc.c
index 821aae8ca402..a0b60e7d1086 100644
--- a/drivers/staging/vt6656/dpc.c
+++ b/drivers/staging/vt6656/dpc.c
@@ -98,7 +98,7 @@ int vnt_rx_data(struct vnt_private *priv, struct vnt_rcb *ptr_rcb,
vnt_rf_rssi_to_dbm(priv, tail->rssi, &rx_dbm);
- priv->bb_pre_ed_rssi = (u8)rx_dbm + 1;
+ priv->bb_pre_ed_rssi = (u8)-rx_dbm + 1;
priv->current_rssi = priv->bb_pre_ed_rssi;
skb_pull(skb, sizeof(*head));
diff --git a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
index 26de6762b942..081d58abd5ac 100644
--- a/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
+++ b/drivers/staging/wfx/Documentation/devicetree/bindings/net/wireless/siliabs,wfx.txt
@@ -93,5 +93,5 @@ Some properties are recognized either by SPI and SDIO versions:
Must contain 64 hexadecimal digits. Not supported in the current version.
WFx driver also supports `mac-address` and `local-mac-address` as described in
-Documentation/devicetree/binding/net/ethernet.txt
+Documentation/devicetree/bindings/net/ethernet.txt
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index b94ed4e30770..09e55ea0bf5d 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1165,9 +1165,7 @@ int iscsit_setup_scsi_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
hdr->cmdsn, be32_to_cpu(hdr->data_length), payload_length,
conn->cid);
- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
- return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
+ target_get_sess_cmd(&cmd->se_cmd, true);
cmd->sense_reason = transport_lookup_cmd_lun(&cmd->se_cmd,
scsilun_to_int(&hdr->lun));
@@ -2004,9 +2002,7 @@ iscsit_handle_task_mgt_cmd(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
conn->sess->se_sess, 0, DMA_NONE,
TCM_SIMPLE_TAG, cmd->sense_buffer + 2);
- if (target_get_sess_cmd(&cmd->se_cmd, true) < 0)
- return iscsit_add_reject_cmd(cmd,
- ISCSI_REASON_WAITING_FOR_LOGOUT, buf);
+ target_get_sess_cmd(&cmd->se_cmd, true);
/*
* TASK_REASSIGN for ERL=2 / connection stays inside of
@@ -4149,6 +4145,9 @@ int iscsit_close_connection(
iscsit_stop_nopin_response_timer(conn);
iscsit_stop_nopin_timer(conn);
+ if (conn->conn_transport->iscsit_wait_conn)
+ conn->conn_transport->iscsit_wait_conn(conn);
+
/*
* During Connection recovery drop unacknowledged out of order
* commands for this connection, and prepare the other commands
@@ -4231,11 +4230,6 @@ int iscsit_close_connection(
* must wait until they have completed.
*/
iscsit_check_conn_usage_count(conn);
- target_sess_cmd_list_set_waiting(sess->se_sess);
- target_wait_for_sess_cmds(sess->se_sess);
-
- if (conn->conn_transport->iscsit_wait_conn)
- conn->conn_transport->iscsit_wait_conn(conn);
ahash_request_free(conn->conn_tx_hash);
if (conn->conn_rx_hash) {
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index ea482d4b1f00..0ae9e60fc4d5 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -666,6 +666,11 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
target_remove_from_state_list(cmd);
+ /*
+ * Clear struct se_cmd->se_lun before the handoff to FE.
+ */
+ cmd->se_lun = NULL;
+
spin_lock_irqsave(&cmd->t_state_lock, flags);
/*
* Determine if frontend context caller is requesting the stopping of
@@ -693,6 +698,17 @@ static int transport_cmd_check_stop_to_fabric(struct se_cmd *cmd)
return cmd->se_tfo->check_stop_free(cmd);
}
+static void transport_lun_remove_cmd(struct se_cmd *cmd)
+{
+ struct se_lun *lun = cmd->se_lun;
+
+ if (!lun)
+ return;
+
+ if (cmpxchg(&cmd->lun_ref_active, true, false))
+ percpu_ref_put(&lun->lun_ref);
+}
+
static void target_complete_failure_work(struct work_struct *work)
{
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
@@ -783,6 +799,8 @@ static void target_handle_abort(struct se_cmd *cmd)
WARN_ON_ONCE(kref_read(&cmd->cmd_kref) == 0);
+ transport_lun_remove_cmd(cmd);
+
transport_cmd_check_stop_to_fabric(cmd);
}
@@ -1708,6 +1726,7 @@ static void target_complete_tmr_failure(struct work_struct *work)
se_cmd->se_tmr_req->response = TMR_LUN_DOES_NOT_EXIST;
se_cmd->se_tfo->queue_tm_rsp(se_cmd);
+ transport_lun_remove_cmd(se_cmd);
transport_cmd_check_stop_to_fabric(se_cmd);
}
@@ -1898,6 +1917,7 @@ void transport_generic_request_failure(struct se_cmd *cmd,
goto queue_full;
check_stop:
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -2195,6 +2215,7 @@ queue_status:
transport_handle_queue_full(cmd, cmd->se_dev, ret, false);
return;
}
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
}
@@ -2289,6 +2310,7 @@ static void target_complete_ok_work(struct work_struct *work)
if (ret)
goto queue_full;
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
@@ -2314,6 +2336,7 @@ static void target_complete_ok_work(struct work_struct *work)
if (ret)
goto queue_full;
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
@@ -2349,6 +2372,7 @@ queue_rsp:
if (ret)
goto queue_full;
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
}
@@ -2384,6 +2408,7 @@ queue_status:
break;
}
+ transport_lun_remove_cmd(cmd);
transport_cmd_check_stop_to_fabric(cmd);
return;
@@ -2710,6 +2735,9 @@ int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
*/
if (cmd->state_active)
target_remove_from_state_list(cmd);
+
+ if (cmd->se_lun)
+ transport_lun_remove_cmd(cmd);
}
if (aborted)
cmd->free_compl = &compl;
@@ -2781,9 +2809,6 @@ static void target_release_cmd_kref(struct kref *kref)
struct completion *abrt_compl = se_cmd->abrt_compl;
unsigned long flags;
- if (se_cmd->lun_ref_active)
- percpu_ref_put(&se_cmd->se_lun->lun_ref);
-
if (se_sess) {
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
list_del_init(&se_cmd->se_cmd_list);
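
The new transport_lun_remove_cmd() above is reachable from several completion
paths that can race; the cmpxchg() on lun_ref_active guarantees the percpu
reference is dropped exactly once. The pattern in isolation:

    /* Sketch: at most one caller wins the true -> false transition,
     * so the reference is put exactly once no matter who races. */
    static void put_lun_ref_once(struct se_cmd *cmd)
    {
            struct se_lun *lun = cmd->se_lun;

            if (lun && cmpxchg(&cmd->lun_ref_active, true, false))
                    percpu_ref_put(&lun->lun_ref);
    }
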
diff --git a/drivers/tee/amdtee/Kconfig b/drivers/tee/amdtee/Kconfig
index 4e32b6413b41..191f9715fa9a 100644
--- a/drivers/tee/amdtee/Kconfig
+++ b/drivers/tee/amdtee/Kconfig
@@ -3,6 +3,6 @@
config AMDTEE
tristate "AMD-TEE"
default m
- depends on CRYPTO_DEV_SP_PSP
+ depends on CRYPTO_DEV_SP_PSP && CRYPTO_DEV_CCP_DD
help
This implements AMD's Trusted Execution Environment (TEE) driver.
diff --git a/drivers/tee/amdtee/core.c b/drivers/tee/amdtee/core.c
index 6370bb55f512..0026eb6f13ce 100644
--- a/drivers/tee/amdtee/core.c
+++ b/drivers/tee/amdtee/core.c
@@ -212,6 +212,19 @@ unlock:
return rc;
}
+static void destroy_session(struct kref *ref)
+{
+ struct amdtee_session *sess = container_of(ref, struct amdtee_session,
+ refcount);
+
+ /* Unload the TA from TEE */
+ handle_unload_ta(sess->ta_handle);
+ mutex_lock(&session_list_mutex);
+ list_del(&sess->list_node);
+ mutex_unlock(&session_list_mutex);
+ kfree(sess);
+}
+
int amdtee_open_session(struct tee_context *ctx,
struct tee_ioctl_open_session_arg *arg,
struct tee_param *param)
@@ -236,15 +249,13 @@ int amdtee_open_session(struct tee_context *ctx,
/* Load the TA binary into TEE environment */
handle_load_ta(ta, ta_size, arg);
- if (arg->ret == TEEC_SUCCESS) {
- mutex_lock(&session_list_mutex);
- sess = alloc_session(ctxdata, arg->session);
- mutex_unlock(&session_list_mutex);
- }
-
if (arg->ret != TEEC_SUCCESS)
goto out;
+ mutex_lock(&session_list_mutex);
+ sess = alloc_session(ctxdata, arg->session);
+ mutex_unlock(&session_list_mutex);
+
if (!sess) {
rc = -ENOMEM;
goto out;
@@ -259,40 +270,29 @@ int amdtee_open_session(struct tee_context *ctx,
if (i >= TEE_NUM_SESSIONS) {
pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
+ kref_put(&sess->refcount, destroy_session);
rc = -ENOMEM;
goto out;
}
/* Open session with loaded TA */
handle_open_session(arg, &session_info, param);
-
- if (arg->ret == TEEC_SUCCESS) {
- sess->session_info[i] = session_info;
- set_session_id(sess->ta_handle, i, &arg->session);
- } else {
+ if (arg->ret != TEEC_SUCCESS) {
pr_err("open_session failed %d\n", arg->ret);
spin_lock(&sess->lock);
clear_bit(i, sess->sess_mask);
spin_unlock(&sess->lock);
+ kref_put(&sess->refcount, destroy_session);
+ goto out;
}
+
+ sess->session_info[i] = session_info;
+ set_session_id(sess->ta_handle, i, &arg->session);
out:
free_pages((u64)ta, get_order(ta_size));
return rc;
}
-static void destroy_session(struct kref *ref)
-{
- struct amdtee_session *sess = container_of(ref, struct amdtee_session,
- refcount);
-
- /* Unload the TA from TEE */
- handle_unload_ta(sess->ta_handle);
- mutex_lock(&session_list_mutex);
- list_del(&sess->list_node);
- mutex_unlock(&session_list_mutex);
- kfree(sess);
-}
-
int amdtee_close_session(struct tee_context *ctx, u32 session)
{
struct amdtee_context_data *ctxdata = ctx->data;
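
destroy_session() is hoisted above amdtee_open_session() so the new error
paths can kref_put() straight into it. A stripped-down version of the kref
release idiom the hunk relies on, with a hypothetical my_obj type:

    /* Sketch: the release callback runs only when the last reference
     * drops, so all teardown lives in one place. */
    static void destroy_obj(struct kref *ref)
    {
            struct my_obj *obj = container_of(ref, struct my_obj, refcount);

            kfree(obj);
    }

    /* On any failure after the object became reachable: */
    kref_put(&obj->refcount, destroy_obj);
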
diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c
index ad5479f21174..7d6ecc342508 100644
--- a/drivers/thunderbolt/switch.c
+++ b/drivers/thunderbolt/switch.c
@@ -348,6 +348,12 @@ out:
return ret;
}
+static int tb_switch_nvm_no_read(void *priv, unsigned int offset, void *val,
+ size_t bytes)
+{
+ return -EPERM;
+}
+
static int tb_switch_nvm_write(void *priv, unsigned int offset, void *val,
size_t bytes)
{
@@ -393,6 +399,7 @@ static struct nvmem_device *register_nvmem(struct tb_switch *sw, int id,
config.read_only = true;
} else {
config.name = "nvm_non_active";
+ config.reg_read = tb_switch_nvm_no_read;
config.reg_write = tb_switch_nvm_write;
config.root_only = true;
}
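
Registering tb_switch_nvm_no_read() makes the non-active NVM region
explicitly write-only: reads now fail with -EPERM instead of relying on a
NULL reg_read. Sketch of the nvmem wiring, assuming only the config fields
the hunk itself touches:

    static int nvm_no_read(void *priv, unsigned int off, void *val,
                           size_t bytes)
    {
            return -EPERM;                  /* region is write-only */
    }

    /* ... inside the registration path: */
    config.name      = "nvm_non_active";
    config.reg_read  = nvm_no_read;         /* reject reads explicitly */
    config.reg_write = tb_switch_nvm_write;
    config.root_only = true;
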
diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c
index 42345e79920c..c5f0d936b003 100644
--- a/drivers/tty/serdev/core.c
+++ b/drivers/tty/serdev/core.c
@@ -18,6 +18,7 @@
#include <linux/sched.h>
#include <linux/serdev.h>
#include <linux/slab.h>
+#include <linux/platform_data/x86/apple.h>
static bool is_registered;
static DEFINE_IDA(ctrl_ida);
@@ -631,6 +632,15 @@ static int acpi_serdev_check_resources(struct serdev_controller *ctrl,
if (ret)
return ret;
+ /*
+ * Apple machines provide an empty resource template, so on those
+ * machines just look for immediate children with a "baud" property
+ * (from the _DSM method) instead.
+ */
+ if (!lookup.controller_handle && x86_apple_machine &&
+ !acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, NULL))
+ acpi_get_parent(adev->handle, &lookup.controller_handle);
+
/* Make sure controller and ResourceSource handle match */
if (ACPI_HANDLE(ctrl->dev.parent) != lookup.controller_handle)
return -ENODEV;
diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c
index d1cdd2ab8b4c..d367803e2044 100644
--- a/drivers/tty/serdev/serdev-ttyport.c
+++ b/drivers/tty/serdev/serdev-ttyport.c
@@ -265,7 +265,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
struct device *parent,
struct tty_driver *drv, int idx)
{
- const struct tty_port_client_operations *old_ops;
struct serdev_controller *ctrl;
struct serport *serport;
int ret;
@@ -284,7 +283,6 @@ struct device *serdev_tty_port_register(struct tty_port *port,
ctrl->ops = &ctrl_ops;
- old_ops = port->client_ops;
port->client_ops = &client_ops;
port->client_data = ctrl;
@@ -297,7 +295,7 @@ struct device *serdev_tty_port_register(struct tty_port *port,
err_reset_data:
port->client_data = NULL;
- port->client_ops = old_ops;
+ port->client_ops = &tty_port_default_client_ops;
serdev_controller_put(ctrl);
return ERR_PTR(ret);
@@ -312,8 +310,8 @@ int serdev_tty_port_unregister(struct tty_port *port)
return -ENODEV;
serdev_controller_remove(ctrl);
- port->client_ops = NULL;
port->client_data = NULL;
+ port->client_ops = &tty_port_default_client_ops;
serdev_controller_put(ctrl);
return 0;
diff --git a/drivers/tty/serial/8250/8250_aspeed_vuart.c b/drivers/tty/serial/8250/8250_aspeed_vuart.c
index d657aa14c3e4..c33e02cbde93 100644
--- a/drivers/tty/serial/8250/8250_aspeed_vuart.c
+++ b/drivers/tty/serial/8250/8250_aspeed_vuart.c
@@ -446,7 +446,6 @@ static int aspeed_vuart_probe(struct platform_device *pdev)
port.port.line = rc;
port.port.irq = irq_of_parse_and_map(np, 0);
- port.port.irqflags = IRQF_SHARED;
port.port.handle_irq = aspeed_vuart_handle_irq;
port.port.iotype = UPIO_MEM;
port.port.type = PORT_16550A;
diff --git a/drivers/tty/serial/8250/8250_core.c b/drivers/tty/serial/8250/8250_core.c
index 0894a22fd702..f2a33c9082a6 100644
--- a/drivers/tty/serial/8250/8250_core.c
+++ b/drivers/tty/serial/8250/8250_core.c
@@ -174,7 +174,7 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
struct hlist_head *h;
struct hlist_node *n;
struct irq_info *i;
- int ret, irq_flags = up->port.flags & UPF_SHARE_IRQ ? IRQF_SHARED : 0;
+ int ret;
mutex_lock(&hash_mutex);
@@ -209,9 +209,8 @@ static int serial_link_irq_chain(struct uart_8250_port *up)
INIT_LIST_HEAD(&up->list);
i->head = &up->list;
spin_unlock_irq(&i->lock);
- irq_flags |= up->port.irqflags;
ret = request_irq(up->port.irq, serial8250_interrupt,
- irq_flags, up->port.name, i);
+ up->port.irqflags, up->port.name, i);
if (ret < 0)
serial_do_unlink(i, up);
}
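
This hunk, together with the 8250_aspeed_vuart and 8250_of ones nearby, stops
hard-coding IRQF_SHARED; the 8250_port.c hunk further down derives it from
UPF_SHARE_IRQ at startup instead, so request_irq() can pass port.irqflags
as-is. The derivation, in isolation:

    /* Sketch: set the shared-IRQ flag from the port capability bit
     * just before the IRQ is requested, not unconditionally at probe. */
    if (port->irq && (port->flags & UPF_SHARE_IRQ))
            port->irqflags |= IRQF_SHARED;

    ret = request_irq(port->irq, serial8250_interrupt,
                      port->irqflags, port->name, i);
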
diff --git a/drivers/tty/serial/8250/8250_exar.c b/drivers/tty/serial/8250/8250_exar.c
index 91e9b070d36d..d330da76d6b6 100644
--- a/drivers/tty/serial/8250/8250_exar.c
+++ b/drivers/tty/serial/8250/8250_exar.c
@@ -25,6 +25,14 @@
#include "8250.h"
+#define PCI_DEVICE_ID_ACCES_COM_2S 0x1052
+#define PCI_DEVICE_ID_ACCES_COM_4S 0x105d
+#define PCI_DEVICE_ID_ACCES_COM_8S 0x106c
+#define PCI_DEVICE_ID_ACCES_COM232_8 0x10a8
+#define PCI_DEVICE_ID_ACCES_COM_2SM 0x10d2
+#define PCI_DEVICE_ID_ACCES_COM_4SM 0x10db
+#define PCI_DEVICE_ID_ACCES_COM_8SM 0x10ea
+
#define PCI_DEVICE_ID_COMMTECH_4224PCI335 0x0002
#define PCI_DEVICE_ID_COMMTECH_4222PCI335 0x0004
#define PCI_DEVICE_ID_COMMTECH_2324PCI335 0x000a
@@ -677,6 +685,22 @@ static int __maybe_unused exar_resume(struct device *dev)
static SIMPLE_DEV_PM_OPS(exar_pci_pm, exar_suspend, exar_resume);
+static const struct exar8250_board acces_com_2x = {
+ .num_ports = 2,
+ .setup = pci_xr17c154_setup,
+};
+
+static const struct exar8250_board acces_com_4x = {
+ .num_ports = 4,
+ .setup = pci_xr17c154_setup,
+};
+
+static const struct exar8250_board acces_com_8x = {
+ .num_ports = 8,
+ .setup = pci_xr17c154_setup,
+};
+
static const struct exar8250_board pbn_fastcom335_2 = {
.num_ports = 2,
.setup = pci_fastcom335_setup,
@@ -745,6 +769,15 @@ static const struct exar8250_board pbn_exar_XR17V8358 = {
}
static const struct pci_device_id exar_pci_tbl[] = {
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_2S, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_4S, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_8S, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM232_8, acces_com_8x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_2SM, acces_com_2x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_4SM, acces_com_4x),
+ EXAR_DEVICE(ACCESSIO, ACCES_COM_8SM, acces_com_8x),
+
CONNECT_DEVICE(XR17C152, UART_2_232, pbn_connect),
CONNECT_DEVICE(XR17C154, UART_4_232, pbn_connect),
CONNECT_DEVICE(XR17C158, UART_8_232, pbn_connect),
diff --git a/drivers/tty/serial/8250/8250_of.c b/drivers/tty/serial/8250/8250_of.c
index 531ad67395e0..f6687756ec5e 100644
--- a/drivers/tty/serial/8250/8250_of.c
+++ b/drivers/tty/serial/8250/8250_of.c
@@ -202,7 +202,6 @@ static int of_platform_serial_setup(struct platform_device *ofdev,
port->type = type;
port->uartclk = clk;
- port->irqflags |= IRQF_SHARED;
if (of_property_read_bool(np, "no-loopback-test"))
port->flags |= UPF_SKIP_TEST;
diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
index 430e3467aff7..0325f2e53b74 100644
--- a/drivers/tty/serial/8250/8250_port.c
+++ b/drivers/tty/serial/8250/8250_port.c
@@ -2177,6 +2177,10 @@ int serial8250_do_startup(struct uart_port *port)
}
}
+ /* Check if we need to have shared IRQs */
+ if (port->irq && (up->port.flags & UPF_SHARE_IRQ))
+ up->port.irqflags |= IRQF_SHARED;
+
if (port->irq && !(up->port.flags & UPF_NO_THRE_TEST)) {
unsigned char iir1;
/*
diff --git a/drivers/tty/serial/ar933x_uart.c b/drivers/tty/serial/ar933x_uart.c
index 3bdd56a1021b..ea12f10610b6 100644
--- a/drivers/tty/serial/ar933x_uart.c
+++ b/drivers/tty/serial/ar933x_uart.c
@@ -286,6 +286,10 @@ static void ar933x_uart_set_termios(struct uart_port *port,
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
+ /* enable RX and TX ready override */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
+
/* reenable the UART */
ar933x_uart_rmw(up, AR933X_UART_CS_REG,
AR933X_UART_CS_IF_MODE_M << AR933X_UART_CS_IF_MODE_S,
@@ -418,6 +422,10 @@ static int ar933x_uart_startup(struct uart_port *port)
ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
AR933X_UART_CS_HOST_INT_EN);
+ /* enable RX and TX ready override */
+ ar933x_uart_rmw_set(up, AR933X_UART_CS_REG,
+ AR933X_UART_CS_TX_READY_ORIDE | AR933X_UART_CS_RX_READY_ORIDE);
+
/* Enable RX interrupts */
up->ier = AR933X_UART_INT_RX_VALID;
ar933x_uart_write(up, AR933X_UART_INT_EN_REG, up->ier);
diff --git a/drivers/tty/serial/atmel_serial.c b/drivers/tty/serial/atmel_serial.c
index c15c398c88a9..a39c87a7c2e1 100644
--- a/drivers/tty/serial/atmel_serial.c
+++ b/drivers/tty/serial/atmel_serial.c
@@ -570,7 +570,8 @@ static void atmel_stop_tx(struct uart_port *port)
atmel_uart_writel(port, ATMEL_US_IDR, atmel_port->tx_done_mask);
if (atmel_uart_is_half_duplex(port))
- atmel_start_rx(port);
+ if (!atomic_read(&atmel_port->tasklet_shutdown))
+ atmel_start_rx(port);
}
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index 19d5a4cf29a6..d4b81b06e0cb 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1373,6 +1373,7 @@ static struct console cpm_scc_uart_console = {
static int __init cpm_uart_console_init(void)
{
+ cpm_muram_init();
register_console(&cpm_scc_uart_console);
return 0;
}
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 91e2805e6441..c31b8f3db6bf 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -264,6 +264,7 @@ struct lpuart_port {
int rx_dma_rng_buf_len;
unsigned int dma_tx_nents;
wait_queue_head_t dma_wait;
+ bool id_allocated;
};
struct lpuart_soc_data {
@@ -2390,6 +2391,8 @@ static int __init lpuart32_imx_early_console_setup(struct earlycon_device *devic
OF_EARLYCON_DECLARE(lpuart, "fsl,vf610-lpuart", lpuart_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,ls1021a-lpuart", lpuart32_early_console_setup);
OF_EARLYCON_DECLARE(lpuart32, "fsl,imx7ulp-lpuart", lpuart32_imx_early_console_setup);
+EARLYCON_DECLARE(lpuart, lpuart_early_console_setup);
+EARLYCON_DECLARE(lpuart32, lpuart32_early_console_setup);
#define LPUART_CONSOLE (&lpuart_console)
#define LPUART32_CONSOLE (&lpuart32_console)
@@ -2420,19 +2423,6 @@ static int lpuart_probe(struct platform_device *pdev)
if (!sport)
return -ENOMEM;
- ret = of_alias_get_id(np, "serial");
- if (ret < 0) {
- ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
- if (ret < 0) {
- dev_err(&pdev->dev, "port line is full, add device failed\n");
- return ret;
- }
- }
- if (ret >= ARRAY_SIZE(lpuart_ports)) {
- dev_err(&pdev->dev, "serial%d out of range\n", ret);
- return -EINVAL;
- }
- sport->port.line = ret;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
sport->port.membase = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(sport->port.membase))
@@ -2477,9 +2467,25 @@ static int lpuart_probe(struct platform_device *pdev)
}
}
+ ret = of_alias_get_id(np, "serial");
+ if (ret < 0) {
+ ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "port line is full, add device failed\n");
+ return ret;
+ }
+ sport->id_allocated = true;
+ }
+ if (ret >= ARRAY_SIZE(lpuart_ports)) {
+ dev_err(&pdev->dev, "serial%d out of range\n", ret);
+ ret = -EINVAL;
+ goto failed_out_of_range;
+ }
+ sport->port.line = ret;
+
ret = lpuart_enable_clks(sport);
if (ret)
- return ret;
+ goto failed_clock_enable;
sport->port.uartclk = lpuart_get_baud_clk_rate(sport);
lpuart_ports[sport->port.line] = sport;
@@ -2529,6 +2535,10 @@ static int lpuart_probe(struct platform_device *pdev)
failed_attach_port:
failed_irq_request:
lpuart_disable_clks(sport);
+failed_clock_enable:
+failed_out_of_range:
+ if (sport->id_allocated)
+ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
return ret;
}
@@ -2538,7 +2548,8 @@ static int lpuart_remove(struct platform_device *pdev)
uart_remove_one_port(&lpuart_reg, &sport->port);
- ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
+ if (sport->id_allocated)
+ ida_simple_remove(&fsl_lpuart_ida, sport->port.line);
lpuart_disable_clks(sport);
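
The lpuart probe now resolves the port line from the devicetree alias first
and falls back to the IDA only when no alias exists, recording id_allocated
so the error paths and remove() free only IDs the driver really allocated:

    /* Sketch of the alias-first, IDA-fallback allocation. */
    ret = of_alias_get_id(np, "serial");
    if (ret < 0) {
            ret = ida_simple_get(&fsl_lpuart_ida, 0, UART_NR, GFP_KERNEL);
            if (ret < 0)
                    return ret;
            sport->id_allocated = true;  /* only then may cleanup free it */
    }
    sport->port.line = ret;
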
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 0c6c63166250..d337782b3648 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -599,7 +599,7 @@ static void imx_uart_dma_tx(struct imx_port *sport)
sport->tx_bytes = uart_circ_chars_pending(xmit);
- if (xmit->tail < xmit->head) {
+ if (xmit->tail < xmit->head || xmit->head == 0) {
sport->dma_tx_nents = 1;
sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes);
} else {
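
The added xmit->head == 0 case covers a circular buffer whose pending data
ends exactly at the top of the buffer. Worked example: with UART_XMIT_SIZE of
4096, tail at 4000 and head wrapped to 0, the 96 pending bytes form one
contiguous segment from tail to the buffer end, yet tail < head is false, so
the old test wrongly took the two-segment scatterlist path and built a second
entry of length zero.
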
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index c12a12556339..4e9a590712cb 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -851,7 +851,7 @@ static int mvebu_uart_probe(struct platform_device *pdev)
port->membase = devm_ioremap_resource(&pdev->dev, reg);
if (IS_ERR(port->membase))
- return -PTR_ERR(port->membase);
+ return PTR_ERR(port->membase);
mvuart = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_uart),
GFP_KERNEL);
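
devm_ioremap_resource() returns an ERR_PTR-encoded negative errno, and
PTR_ERR() already recovers that negative value; the extra minus sign turned
the error into a positive number that no caller expects. The idiom:

    base = devm_ioremap_resource(&pdev->dev, res);
    if (IS_ERR(base))
            return PTR_ERR(base);   /* already negative, e.g. -ENOMEM */
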
diff --git a/drivers/tty/serial/qcom_geni_serial.c b/drivers/tty/serial/qcom_geni_serial.c
index 191abb18fc2a..0bd1684cabb3 100644
--- a/drivers/tty/serial/qcom_geni_serial.c
+++ b/drivers/tty/serial/qcom_geni_serial.c
@@ -129,6 +129,7 @@ static int handle_rx_console(struct uart_port *uport, u32 bytes, bool drop);
static int handle_rx_uart(struct uart_port *uport, u32 bytes, bool drop);
static unsigned int qcom_geni_serial_tx_empty(struct uart_port *port);
static void qcom_geni_serial_stop_rx(struct uart_port *uport);
+static void qcom_geni_serial_handle_rx(struct uart_port *uport, bool drop);
static const unsigned long root_freq[] = {7372800, 14745600, 19200000, 29491200,
32000000, 48000000, 64000000, 80000000,
@@ -599,7 +600,7 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
u32 irq_en;
u32 status;
struct qcom_geni_serial_port *port = to_dev_port(uport, uport);
- u32 irq_clear = S_CMD_DONE_EN;
+ u32 s_irq_status;
irq_en = readl(uport->membase + SE_GENI_S_IRQ_EN);
irq_en &= ~(S_RX_FIFO_WATERMARK_EN | S_RX_FIFO_LAST_EN);
@@ -615,10 +616,19 @@ static void qcom_geni_serial_stop_rx(struct uart_port *uport)
return;
geni_se_cancel_s_cmd(&port->se);
- qcom_geni_serial_poll_bit(uport, SE_GENI_S_CMD_CTRL_REG,
- S_GENI_CMD_CANCEL, false);
+ qcom_geni_serial_poll_bit(uport, SE_GENI_S_IRQ_STATUS,
+ S_CMD_CANCEL_EN, true);
+ /*
+ * If a timeout occurs, the secondary engine remains active
+ * and the abort sequence is executed.
+ */
+ s_irq_status = readl(uport->membase + SE_GENI_S_IRQ_STATUS);
+ /* Flush the Rx buffer */
+ if (s_irq_status & S_RX_FIFO_LAST_EN)
+ qcom_geni_serial_handle_rx(uport, true);
+ writel(s_irq_status, uport->membase + SE_GENI_S_IRQ_CLEAR);
+
status = readl(uport->membase + SE_GENI_STATUS);
- writel(irq_clear, uport->membase + SE_GENI_S_IRQ_CLEAR);
if (status & S_GENI_CMD_ACTIVE)
qcom_geni_serial_abort_rx(uport);
}
diff --git a/drivers/tty/serial/serial-tegra.c b/drivers/tty/serial/serial-tegra.c
index 33034b852a51..8de8bac9c6c7 100644
--- a/drivers/tty/serial/serial-tegra.c
+++ b/drivers/tty/serial/serial-tegra.c
@@ -692,11 +692,22 @@ static void tegra_uart_copy_rx_to_tty(struct tegra_uart_port *tup,
count, DMA_TO_DEVICE);
}
+static void do_handle_rx_pio(struct tegra_uart_port *tup)
+{
+ struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
+ struct tty_port *port = &tup->uport.state->port;
+
+ tegra_uart_handle_rx_pio(tup, port);
+ if (tty) {
+ tty_flip_buffer_push(port);
+ tty_kref_put(tty);
+ }
+}
+
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
unsigned int residue)
{
struct tty_port *port = &tup->uport.state->port;
- struct tty_struct *tty = tty_port_tty_get(port);
unsigned int count;
async_tx_ack(tup->rx_dma_desc);
@@ -705,11 +716,7 @@ static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
/* If we are here, DMA is stopped */
tegra_uart_copy_rx_to_tty(tup, port, count);
- tegra_uart_handle_rx_pio(tup, port);
- if (tty) {
- tty_flip_buffer_push(port);
- tty_kref_put(tty);
- }
+ do_handle_rx_pio(tup);
}
static void tegra_uart_rx_dma_complete(void *args)
@@ -749,8 +756,10 @@ static void tegra_uart_terminate_rx_dma(struct tegra_uart_port *tup)
{
struct dma_tx_state state;
- if (!tup->rx_dma_active)
+ if (!tup->rx_dma_active) {
+ do_handle_rx_pio(tup);
return;
+ }
dmaengine_terminate_all(tup->rx_dma_chan);
dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
@@ -816,18 +825,6 @@ static void tegra_uart_handle_modem_signal_change(struct uart_port *u)
uart_handle_cts_change(&tup->uport, msr & UART_MSR_CTS);
}
-static void do_handle_rx_pio(struct tegra_uart_port *tup)
-{
- struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
- struct tty_port *port = &tup->uport.state->port;
-
- tegra_uart_handle_rx_pio(tup, port);
- if (tty) {
- tty_flip_buffer_push(port);
- tty_kref_put(tty);
- }
-}
-
static irqreturn_t tegra_uart_isr(int irq, void *data)
{
struct tegra_uart_port *tup = data;
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index 044c3cbdcfa4..ea80bf872f54 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -52,10 +52,11 @@ static void tty_port_default_wakeup(struct tty_port *port)
}
}
-static const struct tty_port_client_operations default_client_ops = {
+const struct tty_port_client_operations tty_port_default_client_ops = {
.receive_buf = tty_port_default_receive_buf,
.write_wakeup = tty_port_default_wakeup,
};
+EXPORT_SYMBOL_GPL(tty_port_default_client_ops);
void tty_port_init(struct tty_port *port)
{
@@ -68,7 +69,7 @@ void tty_port_init(struct tty_port *port)
spin_lock_init(&port->lock);
port->close_delay = (50 * HZ) / 100;
port->closing_wait = (3000 * HZ) / 100;
- port->client_ops = &default_client_ops;
+ port->client_ops = &tty_port_default_client_ops;
kref_init(&port->kref);
}
EXPORT_SYMBOL(tty_port_init);
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index 78732feaf65b..d7d2e4b844bc 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -16,6 +16,7 @@
#include <linux/tty.h>
#include <linux/sched.h>
#include <linux/mm.h>
+#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/types.h>
@@ -29,6 +30,8 @@
#include <linux/console.h>
#include <linux/tty_flip.h>
+#include <linux/sched/signal.h>
+
/* Don't take this from <ctype.h>: 011-015 on the screen aren't spaces */
#define isspace(c) ((c) == ' ')
@@ -43,6 +46,7 @@ static volatile int sel_start = -1; /* cleared by clear_selection */
static int sel_end;
static int sel_buffer_lth;
static char *sel_buffer;
+static DEFINE_MUTEX(sel_lock);
/* clear_selection, highlight and highlight_pointer can be called
from interrupt (via scrollback/front) */
@@ -177,14 +181,14 @@ int set_selection_user(const struct tiocl_selection __user *sel,
return set_selection_kernel(&v, tty);
}
-int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+static int __set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
{
struct vc_data *vc = vc_cons[fg_console].d;
int new_sel_start, new_sel_end, spc;
char *bp, *obp;
int i, ps, pe, multiplier;
u32 c;
- int mode;
+ int mode, ret = 0;
poke_blanked_console();
@@ -332,7 +336,21 @@ int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
}
}
sel_buffer_lth = bp - sel_buffer;
- return 0;
+
+ return ret;
+}
+
+int set_selection_kernel(struct tiocl_selection *v, struct tty_struct *tty)
+{
+ int ret;
+
+ mutex_lock(&sel_lock);
+ console_lock();
+ ret = __set_selection_kernel(v, tty);
+ console_unlock();
+ mutex_unlock(&sel_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(set_selection_kernel);
@@ -350,6 +368,7 @@ int paste_selection(struct tty_struct *tty)
unsigned int count;
struct tty_ldisc *ld;
DECLARE_WAITQUEUE(wait, current);
+ int ret = 0;
console_lock();
poke_blanked_console();
@@ -361,10 +380,17 @@ int paste_selection(struct tty_struct *tty)
tty_buffer_lock_exclusive(&vc->port);
add_wait_queue(&vc->paste_wait, &wait);
+ mutex_lock(&sel_lock);
while (sel_buffer && sel_buffer_lth > pasted) {
set_current_state(TASK_INTERRUPTIBLE);
+ if (signal_pending(current)) {
+ ret = -EINTR;
+ break;
+ }
if (tty_throttled(tty)) {
+ mutex_unlock(&sel_lock);
schedule();
+ mutex_lock(&sel_lock);
continue;
}
__set_current_state(TASK_RUNNING);
@@ -373,11 +399,12 @@ int paste_selection(struct tty_struct *tty)
count);
pasted += count;
}
+ mutex_unlock(&sel_lock);
remove_wait_queue(&vc->paste_wait, &wait);
__set_current_state(TASK_RUNNING);
tty_buffer_unlock_exclusive(&vc->port);
tty_ldisc_deref(ld);
- return 0;
+ return ret;
}
EXPORT_SYMBOL_GPL(paste_selection);
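
The paste loop above must not sleep while holding sel_lock (the selection
writer takes it too), so the mutex is dropped around schedule() and retaken
before the loop condition is re-read, and a signal check makes the sleep
interruptible. The skeleton, with hypothetical have_work()/must_wait()
predicates standing in for the sel_buffer and throttle tests:

    /* Sketch: sleeping inside a mutex-protected loop. */
    mutex_lock(&sel_lock);
    while (have_work()) {                    /* condition re-read each pass */
            set_current_state(TASK_INTERRUPTIBLE);
            if (signal_pending(current)) {
                    ret = -EINTR;
                    break;
            }
            if (must_wait()) {
                    mutex_unlock(&sel_lock); /* never schedule() locked */
                    schedule();
                    mutex_lock(&sel_lock);
                    continue;
            }
            __set_current_state(TASK_RUNNING);
            /* ... consume data ... */
    }
    mutex_unlock(&sel_lock);
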
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 35d21cdb60d0..15d27698054a 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -936,10 +936,21 @@ static void flush_scrollback(struct vc_data *vc)
WARN_CONSOLE_UNLOCKED();
set_origin(vc);
- if (vc->vc_sw->con_flush_scrollback)
+ if (vc->vc_sw->con_flush_scrollback) {
vc->vc_sw->con_flush_scrollback(vc);
- else
+ } else if (con_is_visible(vc)) {
+ /*
+ * When no con_flush_scrollback method is provided then the
+ * legacy way for flushing the scrollback buffer is to use
+ * a side effect of the con_switch method. We do it only on
+ * the foreground console as background consoles have no
+ * scrollback buffers in that case and we obviously don't
+ * want to switch to them.
+ */
+ hide_cursor(vc);
vc->vc_sw->con_switch(vc);
+ set_cursor(vc);
+ }
}
/*
@@ -3035,10 +3046,8 @@ int tioclinux(struct tty_struct *tty, unsigned long arg)
switch (type)
{
case TIOCL_SETSEL:
- console_lock();
ret = set_selection_user((struct tiocl_selection
__user *)(p+1), tty);
- console_unlock();
break;
case TIOCL_PASTESEL:
ret = paste_selection(tty);
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index 8b0ed139592f..ee6c91ef1f6c 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -876,15 +876,20 @@ int vt_ioctl(struct tty_struct *tty,
return -EINVAL;
for (i = 0; i < MAX_NR_CONSOLES; i++) {
+ struct vc_data *vcp;
+
if (!vc_cons[i].d)
continue;
console_lock();
- if (v.v_vlin)
- vc_cons[i].d->vc_scan_lines = v.v_vlin;
- if (v.v_clin)
- vc_cons[i].d->vc_font.height = v.v_clin;
- vc_cons[i].d->vc_resize_user = 1;
- vc_resize(vc_cons[i].d, v.v_cols, v.v_rows);
+ vcp = vc_cons[i].d;
+ if (vcp) {
+ if (v.v_vlin)
+ vcp->vc_scan_lines = v.v_vlin;
+ if (v.v_clin)
+ vcp->vc_font.height = v.v_clin;
+ vcp->vc_resize_user = 1;
+ vc_resize(vcp, v.v_cols, v.v_rows);
+ }
console_unlock();
}
break;
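
The vt_ioctl resize hunk re-reads vc_cons[i].d after console_lock() because
the first, unlocked test is only a fast-path skip: a racing deallocation can
clear the pointer before the lock is taken. In isolation:

    /* Sketch: check-then-recheck-under-lock. */
    if (!vc_cons[i].d)          /* unlocked: fast-path skip only */
            continue;
    console_lock();
    vcp = vc_cons[i].d;         /* re-read under the lock */
    if (vcp) {
            /* vcp is safe to dereference here */
    }
    console_unlock();
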
diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c
index 736b0c6e27fe..3574dbb09366 100644
--- a/drivers/usb/cdns3/gadget.c
+++ b/drivers/usb/cdns3/gadget.c
@@ -2550,7 +2550,7 @@ found:
/* Update ring only if removed request is on pending_req_list list */
if (req_on_hw_ring) {
link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma +
- (priv_req->start_trb * TRB_SIZE));
+ ((priv_req->end_trb + 1) * TRB_SIZE));
link_trb->control = (link_trb->control & TRB_CYCLE) |
TRB_TYPE(TRB_LINK) | TRB_CHAIN;
@@ -2595,11 +2595,21 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
{
struct cdns3_device *priv_dev = priv_ep->cdns3_dev;
struct usb_request *request;
+ struct cdns3_request *priv_req;
+ struct cdns3_trb *trb = NULL;
int ret;
int val;
trace_cdns3_halt(priv_ep, 0, 0);
+ request = cdns3_next_request(&priv_ep->pending_req_list);
+ if (request) {
+ priv_req = to_cdns3_request(request);
+ trb = priv_req->trb;
+ if (trb)
+ trb->control = trb->control ^ TRB_CYCLE;
+ }
+
writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd);
/* wait for EPRST cleared */
@@ -2610,10 +2620,11 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep)
priv_ep->flags &= ~(EP_STALLED | EP_STALL_PENDING);
- request = cdns3_next_request(&priv_ep->pending_req_list);
-
- if (request)
+ if (request) {
+ if (trb)
+ trb->control = trb->control ^ TRB_CYCLE;
cdns3_rearm_transfer(priv_ep, 1);
+ }
cdns3_start_all_request(priv_dev, priv_ep);
return ret;
diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c
index 26bc05e48d8a..b7918f695434 100644
--- a/drivers/usb/core/config.c
+++ b/drivers/usb/core/config.c
@@ -256,6 +256,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
struct usb_host_interface *ifp, int num_ep,
unsigned char *buffer, int size)
{
+ struct usb_device *udev = to_usb_device(ddev);
unsigned char *buffer0 = buffer;
struct usb_endpoint_descriptor *d;
struct usb_host_endpoint *endpoint;
@@ -297,6 +298,16 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
goto skip_to_next_endpoint_or_interface_descriptor;
}
+ /* Ignore blacklisted endpoints */
+ if (udev->quirks & USB_QUIRK_ENDPOINT_BLACKLIST) {
+ if (usb_endpoint_is_blacklisted(udev, ifp, d)) {
+ dev_warn(ddev, "config %d interface %d altsetting %d has a blacklisted endpoint with address 0x%X, skipping\n",
+ cfgno, inum, asnum,
+ d->bEndpointAddress);
+ goto skip_to_next_endpoint_or_interface_descriptor;
+ }
+ }
+
endpoint = &ifp->endpoint[ifp->desc.bNumEndpoints];
++ifp->desc.bNumEndpoints;
@@ -311,7 +322,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
j = 255;
if (usb_endpoint_xfer_int(d)) {
i = 1;
- switch (to_usb_device(ddev)->speed) {
+ switch (udev->speed) {
case USB_SPEED_SUPER_PLUS:
case USB_SPEED_SUPER:
case USB_SPEED_HIGH:
@@ -332,8 +343,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
/*
* This quirk fixes bIntervals reported in ms.
*/
- if (to_usb_device(ddev)->quirks &
- USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
+ if (udev->quirks & USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval) + 3, i, j);
i = j = n;
}
@@ -341,8 +351,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* This quirk fixes bIntervals reported in
* linear microframes.
*/
- if (to_usb_device(ddev)->quirks &
- USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) {
+ if (udev->quirks & USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL) {
n = clamp(fls(d->bInterval), i, j);
i = j = n;
}
@@ -359,7 +368,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
} else if (usb_endpoint_xfer_isoc(d)) {
i = 1;
j = 16;
- switch (to_usb_device(ddev)->speed) {
+ switch (udev->speed) {
case USB_SPEED_HIGH:
n = 7; /* 8 ms = 2^(7-1) uframes */
break;
@@ -381,8 +390,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* explicitly forbidden by the USB spec. In an attempt to make
* them usable, we will try treating them as Interrupt endpoints.
*/
- if (to_usb_device(ddev)->speed == USB_SPEED_LOW &&
- usb_endpoint_xfer_bulk(d)) {
+ if (udev->speed == USB_SPEED_LOW && usb_endpoint_xfer_bulk(d)) {
dev_warn(ddev, "config %d interface %d altsetting %d "
"endpoint 0x%X is Bulk; changing to Interrupt\n",
cfgno, inum, asnum, d->bEndpointAddress);
@@ -406,7 +414,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
/* Find the highest legal maxpacket size for this endpoint */
i = 0; /* additional transactions per microframe */
- switch (to_usb_device(ddev)->speed) {
+ switch (udev->speed) {
case USB_SPEED_LOW:
maxpacket_maxes = low_speed_maxpacket_maxes;
break;
@@ -442,8 +450,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
* maxpacket sizes other than 512. High speed HCDs may not
* be able to handle that particular bug, so let's warn...
*/
- if (to_usb_device(ddev)->speed == USB_SPEED_HIGH
- && usb_endpoint_xfer_bulk(d)) {
+ if (udev->speed == USB_SPEED_HIGH && usb_endpoint_xfer_bulk(d)) {
if (maxp != 512)
dev_warn(ddev, "config %d interface %d altsetting %d "
"bulk endpoint 0x%X has invalid maxpacket %d\n",
@@ -452,7 +459,7 @@ static int usb_parse_endpoint(struct device *ddev, int cfgno,
}
/* Parse a possible SuperSpeed endpoint companion descriptor */
- if (to_usb_device(ddev)->speed >= USB_SPEED_SUPER)
+ if (udev->speed >= USB_SPEED_SUPER)
usb_parse_ss_endpoint_companion(ddev, cfgno,
inum, asnum, endpoint, buffer, size);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 3405b146edc9..54cd8ef795ec 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -38,7 +38,9 @@
#include "otg_whitelist.h"
#define USB_VENDOR_GENESYS_LOGIC 0x05e3
+#define USB_VENDOR_SMSC 0x0424
#define HUB_QUIRK_CHECK_PORT_AUTOSUSPEND 0x01
+#define HUB_QUIRK_DISABLE_AUTOSUSPEND 0x02
#define USB_TP_TRANSMISSION_DELAY 40 /* ns */
#define USB_TP_TRANSMISSION_DELAY_MAX 65535 /* ns */
@@ -986,13 +988,17 @@ int usb_remove_device(struct usb_device *udev)
{
struct usb_hub *hub;
struct usb_interface *intf;
+ int ret;
if (!udev->parent) /* Can't remove a root hub */
return -EINVAL;
hub = usb_hub_to_struct_hub(udev->parent);
intf = to_usb_interface(hub->intfdev);
- usb_autopm_get_interface(intf);
+ ret = usb_autopm_get_interface(intf);
+ if (ret < 0)
+ return ret;
+
set_bit(udev->portnum, hub->removed_bits);
hub_port_logical_disconnect(hub, udev->portnum);
usb_autopm_put_interface(intf);
@@ -1217,11 +1223,6 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
#ifdef CONFIG_PM
udev->reset_resume = 1;
#endif
- /* Don't set the change_bits when the device
- * was powered off.
- */
- if (test_bit(port1, hub->power_bits))
- set_bit(port1, hub->change_bits);
} else {
/* The power session is gone; tell hub_wq */
@@ -1731,6 +1732,10 @@ static void hub_disconnect(struct usb_interface *intf)
kfree(hub->buffer);
pm_suspend_ignore_children(&intf->dev, false);
+
+ if (hub->quirk_disable_autosuspend)
+ usb_autopm_put_interface(intf);
+
kref_put(&hub->kref, hub_release);
}
@@ -1863,6 +1868,11 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (id->driver_info & HUB_QUIRK_CHECK_PORT_AUTOSUSPEND)
hub->quirk_check_port_auto_suspend = 1;
+ if (id->driver_info & HUB_QUIRK_DISABLE_AUTOSUSPEND) {
+ hub->quirk_disable_autosuspend = 1;
+ usb_autopm_get_interface_no_resume(intf);
+ }
+
if (hub_configure(hub, &desc->endpoint[0].desc) >= 0)
return 0;
@@ -5599,6 +5609,10 @@ out_hdev_lock:
}
static const struct usb_device_id hub_id_table[] = {
+ { .match_flags = USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_INT_CLASS,
+ .idVendor = USB_VENDOR_SMSC,
+ .bInterfaceClass = USB_CLASS_HUB,
+ .driver_info = HUB_QUIRK_DISABLE_AUTOSUSPEND},
{ .match_flags = USB_DEVICE_ID_MATCH_VENDOR
| USB_DEVICE_ID_MATCH_INT_CLASS,
.idVendor = USB_VENDOR_GENESYS_LOGIC,
diff --git a/drivers/usb/core/hub.h b/drivers/usb/core/hub.h
index a9e24e4b8df1..a97dd1ba964e 100644
--- a/drivers/usb/core/hub.h
+++ b/drivers/usb/core/hub.h
@@ -61,6 +61,7 @@ struct usb_hub {
unsigned quiescing:1;
unsigned disconnected:1;
unsigned in_reset:1;
+ unsigned quirk_disable_autosuspend:1;
unsigned quirk_check_port_auto_suspend:1;
diff --git a/drivers/usb/core/port.c b/drivers/usb/core/port.c
index bbbb35fa639f..235a7c645503 100644
--- a/drivers/usb/core/port.c
+++ b/drivers/usb/core/port.c
@@ -213,7 +213,10 @@ static int usb_port_runtime_resume(struct device *dev)
if (!port_dev->is_superspeed && peer)
pm_runtime_get_sync(&peer->dev);
- usb_autopm_get_interface(intf);
+ retval = usb_autopm_get_interface(intf);
+ if (retval < 0)
+ return retval;
+
retval = usb_hub_set_port_power(hdev, hub, port1, true);
msleep(hub_power_on_good_delay(hub));
if (udev && !retval) {
@@ -266,7 +269,10 @@ static int usb_port_runtime_suspend(struct device *dev)
if (usb_port_block_power_off)
return -EBUSY;
- usb_autopm_get_interface(intf);
+ retval = usb_autopm_get_interface(intf);
+ if (retval < 0)
+ return retval;
+
retval = usb_hub_set_port_power(hdev, hub, port1, false);
usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
if (!port_dev->is_superspeed)
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 6b6413073584..2dac3e7cdd97 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -231,6 +231,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* Logitech PTZ Pro Camera */
{ USB_DEVICE(0x046d, 0x0853), .driver_info = USB_QUIRK_DELAY_INIT },
+ /* Logitech Screen Share */
+ { USB_DEVICE(0x046d, 0x086c), .driver_info = USB_QUIRK_NO_LPM },
+
/* Logitech Quickcam Fusion */
{ USB_DEVICE(0x046d, 0x08c1), .driver_info = USB_QUIRK_RESET_RESUME },
@@ -354,6 +357,10 @@ static const struct usb_device_id usb_quirk_list[] = {
{ USB_DEVICE(0x0904, 0x6103), .driver_info =
USB_QUIRK_LINEAR_FRAME_INTR_BINTERVAL },
+ /* Sound Devices USBPre2 */
+ { USB_DEVICE(0x0926, 0x0202), .driver_info =
+ USB_QUIRK_ENDPOINT_BLACKLIST },
+
/* Keytouch QWERTY Panel keyboard */
{ USB_DEVICE(0x0926, 0x3333), .driver_info =
USB_QUIRK_CONFIG_INTF_STRINGS },
@@ -445,6 +452,9 @@ static const struct usb_device_id usb_quirk_list[] = {
/* INTEL VALUE SSD */
{ USB_DEVICE(0x8086, 0xf1a5), .driver_info = USB_QUIRK_RESET_RESUME },
+ /* novation SoundControl XL */
+ { USB_DEVICE(0x1235, 0x0061), .driver_info = USB_QUIRK_RESET_RESUME },
+
{ } /* terminating entry must be last */
};
@@ -472,6 +482,39 @@ static const struct usb_device_id usb_amd_resume_quirk_list[] = {
{ } /* terminating entry must be last */
};
+/*
+ * Entries for blacklisted endpoints that should be ignored when parsing
+ * configuration descriptors.
+ *
+ * Matched for devices with USB_QUIRK_ENDPOINT_BLACKLIST.
+ */
+static const struct usb_device_id usb_endpoint_blacklist[] = {
+ { USB_DEVICE_INTERFACE_NUMBER(0x0926, 0x0202, 1), .driver_info = 0x85 },
+ { }
+};
+
+bool usb_endpoint_is_blacklisted(struct usb_device *udev,
+ struct usb_host_interface *intf,
+ struct usb_endpoint_descriptor *epd)
+{
+ const struct usb_device_id *id;
+ unsigned int address;
+
+ for (id = usb_endpoint_blacklist; id->match_flags; ++id) {
+ if (!usb_match_device(udev, id))
+ continue;
+
+ if (!usb_match_one_id_intf(udev, intf, id))
+ continue;
+
+ address = id->driver_info;
+ if (address == epd->bEndpointAddress)
+ return true;
+ }
+
+ return false;
+}
+
static bool usb_match_any_interface(struct usb_device *udev,
const struct usb_device_id *id)
{
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index cf4783cf661a..3ad0ee57e859 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -37,6 +37,9 @@ extern void usb_authorize_interface(struct usb_interface *);
extern void usb_detect_quirks(struct usb_device *udev);
extern void usb_detect_interface_quirks(struct usb_device *udev);
extern void usb_release_quirk_list(void);
+extern bool usb_endpoint_is_blacklisted(struct usb_device *udev,
+ struct usb_host_interface *intf,
+ struct usb_endpoint_descriptor *epd);
extern int usb_remove_device(struct usb_device *udev);
extern int usb_get_device_descriptor(struct usb_device *dev,
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 88f7d6d4ff2d..92ed32ec1607 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1083,11 +1083,6 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg,
else
packets = 1; /* send one packet if length is zero. */
- if (hs_ep->isochronous && length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
- dev_err(hsotg->dev, "req length > maxpacket*mc\n");
- return;
- }
-
if (dir_in && index != 0)
if (hs_ep->isochronous)
epsize = DXEPTSIZ_MC(packets);
@@ -1391,6 +1386,13 @@ static int dwc2_hsotg_ep_queue(struct usb_ep *ep, struct usb_request *req,
req->actual = 0;
req->status = -EINPROGRESS;
+ /* Don't queue ISOC request if length greater than mps*mc */
+ if (hs_ep->isochronous &&
+ req->length > (hs_ep->mc * hs_ep->ep.maxpacket)) {
+ dev_err(hs->dev, "req length > maxpacket*mc\n");
+ return -EINVAL;
+ }
+
/* In DDMA mode for ISOC's don't queue request if length greater
* than descriptor limits.
*/
@@ -1632,6 +1634,7 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
struct dwc2_hsotg_ep *ep0 = hsotg->eps_out[0];
struct dwc2_hsotg_ep *ep;
__le16 reply;
+ u16 status;
int ret;
dev_dbg(hsotg->dev, "%s: USB_REQ_GET_STATUS\n", __func__);
@@ -1643,11 +1646,10 @@ static int dwc2_hsotg_process_req_status(struct dwc2_hsotg *hsotg,
switch (ctrl->bRequestType & USB_RECIP_MASK) {
case USB_RECIP_DEVICE:
- /*
- * bit 0 => self powered
- * bit 1 => remote wakeup
- */
- reply = cpu_to_le16(0);
+ status = 1 << USB_DEVICE_SELF_POWERED;
+ status |= hsotg->remote_wakeup_allowed <<
+ USB_DEVICE_REMOTE_WAKEUP;
+ reply = cpu_to_le16(status);
break;
case USB_RECIP_INTERFACE:
@@ -1758,7 +1760,10 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
case USB_RECIP_DEVICE:
switch (wValue) {
case USB_DEVICE_REMOTE_WAKEUP:
- hsotg->remote_wakeup_allowed = 1;
+ if (set)
+ hsotg->remote_wakeup_allowed = 1;
+ else
+ hsotg->remote_wakeup_allowed = 0;
break;
case USB_DEVICE_TEST_MODE:
@@ -1768,16 +1773,17 @@ static int dwc2_hsotg_process_req_feature(struct dwc2_hsotg *hsotg,
return -EINVAL;
hsotg->test_mode = wIndex >> 8;
- ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
- if (ret) {
- dev_err(hsotg->dev,
- "%s: failed to send reply\n", __func__);
- return ret;
- }
break;
default:
return -ENOENT;
}
+
+ ret = dwc2_hsotg_send_reply(hsotg, ep0, NULL, 0);
+ if (ret) {
+ dev_err(hsotg->dev,
+ "%s: failed to send reply\n", __func__);
+ return ret;
+ }
break;
case USB_RECIP_ENDPOINT:
diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h
index e56beb9d1e36..4a13ceaf4093 100644
--- a/drivers/usb/dwc3/debug.h
+++ b/drivers/usb/dwc3/debug.h
@@ -256,86 +256,77 @@ static inline const char *dwc3_ep_event_string(char *str, size_t size,
u8 epnum = event->endpoint_number;
size_t len;
int status;
- int ret;
- ret = snprintf(str, size, "ep%d%s: ", epnum >> 1,
+ len = scnprintf(str, size, "ep%d%s: ", epnum >> 1,
(epnum & 1) ? "in" : "out");
- if (ret < 0)
- return "UNKNOWN";
status = event->status;
switch (event->endpoint_event) {
case DWC3_DEPEVT_XFERCOMPLETE:
- len = strlen(str);
- snprintf(str + len, size - len, "Transfer Complete (%c%c%c)",
+ len += scnprintf(str + len, size - len,
+ "Transfer Complete (%c%c%c)",
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
status & DEPEVT_STATUS_LST ? 'L' : 'l');
- len = strlen(str);
-
if (epnum <= 1)
- snprintf(str + len, size - len, " [%s]",
+ scnprintf(str + len, size - len, " [%s]",
dwc3_ep0_state_string(ep0state));
break;
case DWC3_DEPEVT_XFERINPROGRESS:
- len = strlen(str);
-
- snprintf(str + len, size - len, "Transfer In Progress [%d] (%c%c%c)",
+ scnprintf(str + len, size - len,
+ "Transfer In Progress [%d] (%c%c%c)",
event->parameters,
status & DEPEVT_STATUS_SHORT ? 'S' : 's',
status & DEPEVT_STATUS_IOC ? 'I' : 'i',
status & DEPEVT_STATUS_LST ? 'M' : 'm');
break;
case DWC3_DEPEVT_XFERNOTREADY:
- len = strlen(str);
-
- snprintf(str + len, size - len, "Transfer Not Ready [%d]%s",
+ len += scnprintf(str + len, size - len,
+ "Transfer Not Ready [%d]%s",
event->parameters,
status & DEPEVT_STATUS_TRANSFER_ACTIVE ?
" (Active)" : " (Not Active)");
- len = strlen(str);
-
/* Control Endpoints */
if (epnum <= 1) {
int phase = DEPEVT_STATUS_CONTROL_PHASE(event->status);
switch (phase) {
case DEPEVT_STATUS_CONTROL_DATA:
- snprintf(str + ret, size - ret,
+ scnprintf(str + len, size - len,
" [Data Phase]");
break;
case DEPEVT_STATUS_CONTROL_STATUS:
- snprintf(str + ret, size - ret,
+ scnprintf(str + len, size - len,
" [Status Phase]");
}
}
break;
case DWC3_DEPEVT_RXTXFIFOEVT:
- snprintf(str + ret, size - ret, "FIFO");
+ scnprintf(str + len, size - len, "FIFO");
break;
case DWC3_DEPEVT_STREAMEVT:
status = event->status;
switch (status) {
case DEPEVT_STREAMEVT_FOUND:
- snprintf(str + ret, size - ret, " Stream %d Found",
+ scnprintf(str + len, size - len, " Stream %d Found",
event->parameters);
break;
case DEPEVT_STREAMEVT_NOTFOUND:
default:
- snprintf(str + ret, size - ret, " Stream Not Found");
+ scnprintf(str + len, size - len, " Stream Not Found");
break;
}
break;
case DWC3_DEPEVT_EPCMDCMPLT:
- snprintf(str + ret, size - ret, "Endpoint Command Complete");
+ scnprintf(str + len, size - len, "Endpoint Command Complete");
break;
default:
- snprintf(str, size, "UNKNOWN");
+ scnprintf(str + len, size - len, "UNKNOWN");
}
return str;
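
The debug.h rewrite works because snprintf() returns the length that would
have been written had the buffer been big enough, so chaining on its return
value can push the write offset past the end of the buffer; scnprintf()
returns the bytes actually written and therefore accumulates safely:

    /* Sketch: safe incremental formatting with scnprintf(). */
    size_t len = 0;

    len += scnprintf(buf + len, size - len, "ep%d%s: ", num, dir);
    len += scnprintf(buf + len, size - len, "status %#x", status);
    /* len can never exceed size - 1, so buf + len stays in bounds */
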
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 1b8014ab0b25..1e00bf2d65a2 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1071,7 +1071,14 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep,
unsigned int rem = length % maxp;
unsigned chain = true;
- if (sg_is_last(s))
+ /*
+ * The IOMMU driver may coalesce sgs that share a page boundary
+ * into one before handing the list to the USB driver, so the
+ * number of sgs mapped is not always equal to the number of
+ * sgs passed. Mark the chain bit false if this is the last
+ * mapped sg.
+ */
+ if (i == remaining - 1)
chain = false;
if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) {
@@ -2429,7 +2436,8 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep,
if (event->status & DEPEVT_STATUS_SHORT && !chain)
return 1;
- if (event->status & DEPEVT_STATUS_IOC)
+ if ((trb->ctrl & DWC3_TRB_CTRL_IOC) ||
+ (trb->ctrl & DWC3_TRB_CTRL_LST))
return 1;
return 0;
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index 3b4f67000315..223f72d4d9ed 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -437,12 +437,14 @@ static u8 encode_bMaxPower(enum usb_device_speed speed,
val = CONFIG_USB_GADGET_VBUS_DRAW;
if (!val)
return 0;
- switch (speed) {
- case USB_SPEED_SUPER:
- return DIV_ROUND_UP(val, 8);
- default:
- return DIV_ROUND_UP(val, 2);
- }
+ if (speed < USB_SPEED_SUPER)
+ return min(val, 500U) / 2;
+ else
+ /*
+ * USB 3.x supports up to 900mA, but since 900 isn't divisible
+ * by 8, the integral division effectively caps it at 896mA.
+ */
+ return min(val, 900U) / 8;
}
static int config_buf(struct usb_configuration *config,
@@ -854,6 +856,10 @@ static int set_config(struct usb_composite_dev *cdev,
/* when we return, be sure our power usage is valid */
power = c->MaxPower ? c->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
+ if (gadget->speed < USB_SPEED_SUPER)
+ power = min(power, 500U);
+ else
+ power = min(power, 900U);
done:
usb_gadget_vbus_draw(gadget, power);
if (result >= 0 && cdev->delayed_status)
@@ -2280,7 +2286,7 @@ void composite_resume(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
struct usb_function *f;
- u16 maxpower;
+ unsigned maxpower;
/* REVISIT: should we have config level
* suspend/resume callbacks?
@@ -2294,10 +2300,14 @@ void composite_resume(struct usb_gadget *gadget)
f->resume(f);
}
- maxpower = cdev->config->MaxPower;
+ maxpower = cdev->config->MaxPower ?
+ cdev->config->MaxPower : CONFIG_USB_GADGET_VBUS_DRAW;
+ if (gadget->speed < USB_SPEED_SUPER)
+ maxpower = min(maxpower, 500U);
+ else
+ maxpower = min(maxpower, 900U);
- usb_gadget_vbus_draw(gadget, maxpower ?
- maxpower : CONFIG_USB_GADGET_VBUS_DRAW);
+ usb_gadget_vbus_draw(gadget, maxpower);
}
cdev->suspended = 0;
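The encoding above follows the descriptor's units: bMaxPower counts 2 mA units below SuperSpeed (500 mA ceiling) and 8 mA units at SuperSpeed and above (900 mA ceiling). A standalone sketch of the arithmetic, with the sample currents chosen purely for illustration:

#include <stdio.h>

static unsigned int encode_bmaxpower(unsigned int ma, int superspeed)
{
	if (!superspeed)
		return (ma > 500 ? 500 : ma) / 2;	/* 2 mA units, 500 mA cap */
	return (ma > 900 ? 900 : ma) / 8;		/* 8 mA units, 900 mA cap */
}

int main(void)
{
	/* 500 mA at high speed encodes as 250; 900 mA at SuperSpeed as
	 * 112, i.e. 112 * 8 = 896 mA -- the rounding noted above. */
	printf("HS 500 mA -> %u\n", encode_bmaxpower(500, 0));
	printf("SS 900 mA -> %u\n", encode_bmaxpower(900, 1));
	return 0;
}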
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 6171d28331e6..571917677d35 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -1162,18 +1162,19 @@ static int ffs_aio_cancel(struct kiocb *kiocb)
{
struct ffs_io_data *io_data = kiocb->private;
struct ffs_epfile *epfile = kiocb->ki_filp->private_data;
+ unsigned long flags;
int value;
ENTER();
- spin_lock_irq(&epfile->ffs->eps_lock);
+ spin_lock_irqsave(&epfile->ffs->eps_lock, flags);
if (likely(io_data && io_data->ep && io_data->req))
value = usb_ep_dequeue(io_data->ep, io_data->req);
else
value = -EINVAL;
- spin_unlock_irq(&epfile->ffs->eps_lock);
+ spin_unlock_irqrestore(&epfile->ffs->eps_lock, flags);
return value;
}
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index 6d956f190f5a..e6d32c536781 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -361,7 +361,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
ep = audio_dev->out_ep;
prm = &uac->c_prm;
config_ep_by_speed(gadget, &audio_dev->func, ep);
- req_len = prm->max_psize;
+ req_len = ep->maxpacket;
prm->ep_enabled = true;
usb_ep_enable(ep);
@@ -379,7 +379,7 @@ int u_audio_start_capture(struct g_audio *audio_dev)
req->context = &prm->ureq[i];
req->length = req_len;
req->complete = u_audio_iso_complete;
- req->buf = prm->rbuf + i * prm->max_psize;
+ req->buf = prm->rbuf + i * ep->maxpacket;
}
if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
@@ -430,9 +430,9 @@ int u_audio_start_playback(struct g_audio *audio_dev)
uac->p_pktsize = min_t(unsigned int,
uac->p_framesize *
(params->p_srate / uac->p_interval),
- prm->max_psize);
+ ep->maxpacket);
- if (uac->p_pktsize < prm->max_psize)
+ if (uac->p_pktsize < ep->maxpacket)
uac->p_pktsize_residue = uac->p_framesize *
(params->p_srate % uac->p_interval);
else
@@ -457,7 +457,7 @@ int u_audio_start_playback(struct g_audio *audio_dev)
req->context = &prm->ureq[i];
req->length = req_len;
req->complete = u_audio_iso_complete;
- req->buf = prm->rbuf + i * prm->max_psize;
+ req->buf = prm->rbuf + i * ep->maxpacket;
}
if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
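For the playback sizing above, a standalone model of the packet-size math: frame size times frames-per-packet, clamped to the endpoint's wMaxPacketSize, plus a residue term for rates that don't divide the interval evenly. The 44.1 kHz / 16-bit stereo / 1 ms figures are assumed example parameters, not values from the patch:

#include <stdio.h>

int main(void)
{
	unsigned int framesize = 4;	/* 16-bit stereo: 2 ch * 2 bytes */
	unsigned int srate = 44100;	/* frames per second */
	unsigned int interval = 1000;	/* packets per second (1 ms) */
	unsigned int maxpacket = 512;	/* endpoint wMaxPacketSize */
	unsigned int pktsize, residue = 0;

	pktsize = framesize * (srate / interval);
	if (pktsize > maxpacket)
		pktsize = maxpacket;

	/* Leftover frames per second the integer division dropped. */
	if (pktsize < maxpacket)
		residue = framesize * (srate % interval);

	printf("pktsize=%u residue=%u\n", pktsize, residue); /* 176, 400 */
	return 0;
}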
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index f986e5c55974..8167d379e115 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -561,8 +561,10 @@ static int gs_start_io(struct gs_port *port)
port->n_read = 0;
started = gs_start_rx(port);
- /* unblock any pending writes into our circular buffer */
if (started) {
+ gs_start_tx(port);
+ /* Unblock any pending writes into our circular buffer, in
+ * case gs_start_tx() didn't wake the writer. */
tty_wakeup(port->port.tty);
} else {
gs_free_requests(ep, head, &port->read_allocated);
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index 29d8e5f8bb58..b1cfc8279c3d 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1399,7 +1399,6 @@ err:
/**
* xudc_stop - stops the device.
* @gadget: pointer to the usb gadget structure
- * @driver: pointer to usb gadget driver structure
*
* Return: zero always
*/
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 7a3a29e5e9d2..af92b2576fe9 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -55,6 +55,7 @@ static u8 usb_bos_descriptor [] = {
static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
u16 wLength)
{
+ struct xhci_port_cap *port_cap = NULL;
int i, ssa_count;
u32 temp;
u16 desc_size, ssp_cap_size, ssa_size = 0;
@@ -64,16 +65,24 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
ssp_cap_size = sizeof(usb_bos_descriptor) - desc_size;
/* does xhci support USB 3.1 Enhanced SuperSpeed */
- if (xhci->usb3_rhub.min_rev >= 0x01) {
+ for (i = 0; i < xhci->num_port_caps; i++) {
+ if (xhci->port_caps[i].maj_rev == 0x03 &&
+ xhci->port_caps[i].min_rev >= 0x01) {
+ usb3_1 = true;
+ port_cap = &xhci->port_caps[i];
+ break;
+ }
+ }
+
+ if (usb3_1) {
/* does xhci provide a PSI table for SSA speed attributes? */
- if (xhci->usb3_rhub.psi_count) {
+ if (port_cap->psi_count) {
/* two SSA entries for each unique PSI ID, RX and TX */
- ssa_count = xhci->usb3_rhub.psi_uid_count * 2;
+ ssa_count = port_cap->psi_uid_count * 2;
ssa_size = ssa_count * sizeof(u32);
ssp_cap_size -= 16; /* skip copying the default SSA */
}
desc_size += ssp_cap_size;
- usb3_1 = true;
}
memcpy(buf, &usb_bos_descriptor, min(desc_size, wLength));
@@ -99,7 +108,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
}
/* If PSI table exists, add the custom speed attributes from it */
- if (usb3_1 && xhci->usb3_rhub.psi_count) {
+ if (usb3_1 && port_cap->psi_count) {
u32 ssp_cap_base, bm_attrib, psi, psi_mant, psi_exp;
int offset;
@@ -111,7 +120,7 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
/* attribute count SSAC bits 4:0 and ID count SSIC bits 8:5 */
bm_attrib = (ssa_count - 1) & 0x1f;
- bm_attrib |= (xhci->usb3_rhub.psi_uid_count - 1) << 5;
+ bm_attrib |= (port_cap->psi_uid_count - 1) << 5;
put_unaligned_le32(bm_attrib, &buf[ssp_cap_base + 4]);
if (wLength < desc_size + ssa_size)
@@ -124,8 +133,8 @@ static int xhci_create_usb3_bos_desc(struct xhci_hcd *xhci, char *buf,
* USB 3.1 requires two SSA entries (RX and TX) for every link
*/
offset = desc_size;
- for (i = 0; i < xhci->usb3_rhub.psi_count; i++) {
- psi = xhci->usb3_rhub.psi[i];
+ for (i = 0; i < port_cap->psi_count; i++) {
+ psi = port_cap->psi[i];
psi &= ~USB_SSP_SUBLINK_SPEED_RSVD;
psi_exp = XHCI_EXT_PORT_PSIE(psi);
psi_mant = XHCI_EXT_PORT_PSIM(psi);
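The SSP capability math above packs the SSA count into bits 4:0 of bmAttributes and the unique-ID count into bits 8:5, with two SSA entries (one RX, one TX) per unique PSI ID. A standalone sketch, assuming three unique PSI IDs as an example value:

#include <stdio.h>

int main(void)
{
	unsigned int psi_uid_count = 3;		/* assumed example value */
	unsigned int ssa_count = psi_uid_count * 2; /* one RX + one TX each */

	unsigned int bm_attrib = (ssa_count - 1) & 0x1f; /* SSAC, bits 4:0 */
	bm_attrib |= (psi_uid_count - 1) << 5;		 /* SSIC, bits 8:5 */

	printf("bmAttributes = 0x%x\n", bm_attrib);	 /* 0x45 */
	return 0;
}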
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3b1388fa2f36..884c601bfa15 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1475,9 +1475,15 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
/* Allow 3 retries for everything but isoc, set CErr = 3 */
if (!usb_endpoint_xfer_isoc(&ep->desc))
err_count = 3;
- /* Some devices get this wrong */
- if (usb_endpoint_xfer_bulk(&ep->desc) && udev->speed == USB_SPEED_HIGH)
- max_packet = 512;
+ /* HS bulk max packet should be 512, FS bulk supports 8, 16, 32 or 64 */
+ if (usb_endpoint_xfer_bulk(&ep->desc)) {
+ if (udev->speed == USB_SPEED_HIGH)
+ max_packet = 512;
+ if (udev->speed == USB_SPEED_FULL) {
+ max_packet = rounddown_pow_of_two(max_packet);
+ max_packet = clamp_val(max_packet, 8, 64);
+ }
+ }
/* xHCI 1.0 and 1.1 indicates that ctrl ep avg TRB Length should be 8 */
if (usb_endpoint_xfer_control(&ep->desc) && xhci->hci_version >= 0x100)
avg_trb_len = 8;
@@ -1909,17 +1915,17 @@ no_bw:
xhci->usb3_rhub.num_ports = 0;
xhci->num_active_eps = 0;
kfree(xhci->usb2_rhub.ports);
- kfree(xhci->usb2_rhub.psi);
kfree(xhci->usb3_rhub.ports);
- kfree(xhci->usb3_rhub.psi);
kfree(xhci->hw_ports);
kfree(xhci->rh_bw);
kfree(xhci->ext_caps);
+ for (i = 0; i < xhci->num_port_caps; i++)
+ kfree(xhci->port_caps[i].psi);
+ kfree(xhci->port_caps);
+ xhci->num_port_caps = 0;
xhci->usb2_rhub.ports = NULL;
- xhci->usb2_rhub.psi = NULL;
xhci->usb3_rhub.ports = NULL;
- xhci->usb3_rhub.psi = NULL;
xhci->hw_ports = NULL;
xhci->rh_bw = NULL;
xhci->ext_caps = NULL;
@@ -2120,6 +2126,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
u8 major_revision, minor_revision;
struct xhci_hub *rhub;
struct device *dev = xhci_to_hcd(xhci)->self.sysdev;
+ struct xhci_port_cap *port_cap;
temp = readl(addr);
major_revision = XHCI_EXT_PORT_MAJOR(temp);
@@ -2154,31 +2161,39 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
/* WTF? "Valid values are ‘1’ to MaxPorts" */
return;
- rhub->psi_count = XHCI_EXT_PORT_PSIC(temp);
- if (rhub->psi_count) {
- rhub->psi = kcalloc_node(rhub->psi_count, sizeof(*rhub->psi),
- GFP_KERNEL, dev_to_node(dev));
- if (!rhub->psi)
- rhub->psi_count = 0;
+ port_cap = &xhci->port_caps[xhci->num_port_caps++];
+ if (xhci->num_port_caps > max_caps)
+ return;
+
+ port_cap->maj_rev = major_revision;
+ port_cap->min_rev = minor_revision;
+ port_cap->psi_count = XHCI_EXT_PORT_PSIC(temp);
- rhub->psi_uid_count++;
- for (i = 0; i < rhub->psi_count; i++) {
- rhub->psi[i] = readl(addr + 4 + i);
+ if (port_cap->psi_count) {
+ port_cap->psi = kcalloc_node(port_cap->psi_count,
+ sizeof(*port_cap->psi),
+ GFP_KERNEL, dev_to_node(dev));
+ if (!port_cap->psi)
+ port_cap->psi_count = 0;
+
+ port_cap->psi_uid_count++;
+ for (i = 0; i < port_cap->psi_count; i++) {
+ port_cap->psi[i] = readl(addr + 4 + i);
/* count unique ID values, two consecutive entries can
* have the same ID if link is asymmetric
*/
- if (i && (XHCI_EXT_PORT_PSIV(rhub->psi[i]) !=
- XHCI_EXT_PORT_PSIV(rhub->psi[i - 1])))
- rhub->psi_uid_count++;
+ if (i && (XHCI_EXT_PORT_PSIV(port_cap->psi[i]) !=
+ XHCI_EXT_PORT_PSIV(port_cap->psi[i - 1])))
+ port_cap->psi_uid_count++;
xhci_dbg(xhci, "PSIV:%d PSIE:%d PLT:%d PFD:%d LP:%d PSIM:%d\n",
- XHCI_EXT_PORT_PSIV(rhub->psi[i]),
- XHCI_EXT_PORT_PSIE(rhub->psi[i]),
- XHCI_EXT_PORT_PLT(rhub->psi[i]),
- XHCI_EXT_PORT_PFD(rhub->psi[i]),
- XHCI_EXT_PORT_LP(rhub->psi[i]),
- XHCI_EXT_PORT_PSIM(rhub->psi[i]));
+ XHCI_EXT_PORT_PSIV(port_cap->psi[i]),
+ XHCI_EXT_PORT_PSIE(port_cap->psi[i]),
+ XHCI_EXT_PORT_PLT(port_cap->psi[i]),
+ XHCI_EXT_PORT_PFD(port_cap->psi[i]),
+ XHCI_EXT_PORT_LP(port_cap->psi[i]),
+ XHCI_EXT_PORT_PSIM(port_cap->psi[i]));
}
}
/* cache usb2 port capabilities */
@@ -2213,6 +2228,7 @@ static void xhci_add_in_port(struct xhci_hcd *xhci, unsigned int num_ports,
continue;
}
hw_port->rhub = rhub;
+ hw_port->port_cap = port_cap;
rhub->num_ports++;
}
/* FIXME: Should we disable ports not in the Extended Capabilities? */
@@ -2303,6 +2319,11 @@ static int xhci_setup_port_arrays(struct xhci_hcd *xhci, gfp_t flags)
if (!xhci->ext_caps)
return -ENOMEM;
+ xhci->port_caps = kcalloc_node(cap_count, sizeof(*xhci->port_caps),
+ flags, dev_to_node(dev));
+ if (!xhci->port_caps)
+ return -ENOMEM;
+
offset = cap_start;
while (offset) {
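The full-speed bulk fix above forces wMaxPacketSize to a legal value: FS bulk endpoints may only use 8, 16, 32 or 64 bytes, so a bogus reported value is rounded down to a power of two and clamped. A standalone sketch of that normalization, with rounddown_pow_of_two() reimplemented here for userspace and the sample inputs made up:

#include <stdio.h>

static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	/* Values a buggy device might report, normalized to 8..64. */
	unsigned int samples[] = { 9, 33, 64, 1023, 5 };

	for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned int mp = rounddown_pow_of_two(samples[i]);

		if (mp < 8)
			mp = 8;
		else if (mp > 64)
			mp = 64;
		printf("%u -> %u\n", samples[i], mp); /* 8, 32, 64, 64, 8 */
	}
	return 0;
}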
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4917c5b033fa..5e9b537df631 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -49,6 +49,7 @@
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_XHCI 0x15ec
#define PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_XHCI 0x15f0
#define PCI_DEVICE_ID_INTEL_ICE_LAKE_XHCI 0x8a13
+#define PCI_DEVICE_ID_INTEL_CML_XHCI 0xa3af
#define PCI_DEVICE_ID_AMD_PROMONTORYA_4 0x43b9
#define PCI_DEVICE_ID_AMD_PROMONTORYA_3 0x43ba
@@ -187,7 +188,8 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
- pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI)) {
+ pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_CML_XHCI)) {
xhci->quirks |= XHCI_PME_STUCK_QUIRK;
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
@@ -302,6 +304,9 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
if (!usb_hcd_is_primary_hcd(hcd))
return 0;
+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
+ xhci_pme_acpi_rtd3_enable(pdev);
+
xhci_dbg(xhci, "Got SBRN %u\n", (unsigned int) xhci->sbrn);
/* Find any debug ports */
@@ -359,9 +364,6 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
HCC_MAX_PSA(xhci->hcc_params) >= 4)
xhci->shared_hcd->can_do_streams = 1;
- if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
- xhci_pme_acpi_rtd3_enable(dev);
-
/* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
pm_runtime_put_noidle(&dev->dev);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 13d8838cd552..3ecee10fdcdc 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1702,12 +1702,20 @@ struct xhci_bus_state {
* Intel Lynx Point LP xHCI host.
*/
#define XHCI_MAX_REXIT_TIMEOUT_MS 20
+struct xhci_port_cap {
+ u32 *psi; /* array of protocol speed ID entries */
+ u8 psi_count;
+ u8 psi_uid_count;
+ u8 maj_rev;
+ u8 min_rev;
+};
struct xhci_port {
__le32 __iomem *addr;
int hw_portnum;
int hcd_portnum;
struct xhci_hub *rhub;
+ struct xhci_port_cap *port_cap;
};
struct xhci_hub {
@@ -1719,9 +1727,6 @@ struct xhci_hub {
/* supported protocol extended capability values */
u8 maj_rev;
u8 min_rev;
- u32 *psi; /* array of protocol speed ID entries */
- u8 psi_count;
- u8 psi_uid_count;
};
/* There is one xhci_hcd structure per controller */
@@ -1880,6 +1885,9 @@ struct xhci_hcd {
/* cached usb2 extended protocol capabilities */
u32 *ext_caps;
unsigned int num_ext_caps;
+ /* cached extended protocol port capabilities */
+ struct xhci_port_cap *port_caps;
+ unsigned int num_port_caps;
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
diff --git a/drivers/usb/misc/iowarrior.c b/drivers/usb/misc/iowarrior.c
index dce44fbf031f..dce20301e367 100644
--- a/drivers/usb/misc/iowarrior.c
+++ b/drivers/usb/misc/iowarrior.c
@@ -33,6 +33,14 @@
#define USB_DEVICE_ID_CODEMERCS_IOWPV2 0x1512
/* full speed iowarrior */
#define USB_DEVICE_ID_CODEMERCS_IOW56 0x1503
+/* fuller speed iowarrior */
+#define USB_DEVICE_ID_CODEMERCS_IOW28 0x1504
+#define USB_DEVICE_ID_CODEMERCS_IOW28L 0x1505
+#define USB_DEVICE_ID_CODEMERCS_IOW100 0x1506
+
+/* OEMed devices */
+#define USB_DEVICE_ID_CODEMERCS_IOW24SAG 0x158a
+#define USB_DEVICE_ID_CODEMERCS_IOW56AM 0x158b
/* Get a minor range for your devices from the usb maintainer */
#ifdef CONFIG_USB_DYNAMIC_MINORS
@@ -133,6 +141,11 @@ static const struct usb_device_id iowarrior_ids[] = {
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV1)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOWPV2)},
{USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW24SAG)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW56AM)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW28L)},
+ {USB_DEVICE(USB_VENDOR_ID_CODEMERCS, USB_DEVICE_ID_CODEMERCS_IOW100)},
{} /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, iowarrior_ids);
@@ -357,6 +370,7 @@ static ssize_t iowarrior_write(struct file *file,
}
switch (dev->product_id) {
case USB_DEVICE_ID_CODEMERCS_IOW24:
+ case USB_DEVICE_ID_CODEMERCS_IOW24SAG:
case USB_DEVICE_ID_CODEMERCS_IOWPV1:
case USB_DEVICE_ID_CODEMERCS_IOWPV2:
case USB_DEVICE_ID_CODEMERCS_IOW40:
@@ -371,6 +385,10 @@ static ssize_t iowarrior_write(struct file *file,
goto exit;
break;
case USB_DEVICE_ID_CODEMERCS_IOW56:
+ case USB_DEVICE_ID_CODEMERCS_IOW56AM:
+ case USB_DEVICE_ID_CODEMERCS_IOW28:
+ case USB_DEVICE_ID_CODEMERCS_IOW28L:
+ case USB_DEVICE_ID_CODEMERCS_IOW100:
/* The IOW56 uses asynchronous IO and more urbs */
if (atomic_read(&dev->write_busy) == MAX_WRITES_IN_FLIGHT) {
/* Wait until we are below the limit for submitted urbs */
@@ -493,6 +511,7 @@ static long iowarrior_ioctl(struct file *file, unsigned int cmd,
switch (cmd) {
case IOW_WRITE:
if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24 ||
+ dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW24SAG ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV1 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOWPV2 ||
dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW40) {
@@ -767,7 +786,11 @@ static int iowarrior_probe(struct usb_interface *interface,
goto error;
}
- if (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) {
+ if ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)) {
res = usb_find_last_int_out_endpoint(iface_desc,
&dev->int_out_endpoint);
if (res) {
@@ -780,7 +803,11 @@ static int iowarrior_probe(struct usb_interface *interface,
/* we have to check the report_size often, so remember it in the endianness suitable for our machine */
dev->report_size = usb_endpoint_maxp(dev->int_in_endpoint);
if ((dev->interface->cur_altsetting->desc.bInterfaceNumber == 0) &&
- (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56))
+ ((dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW56AM) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW28L) ||
+ (dev->product_id == USB_DEVICE_ID_CODEMERCS_IOW100)))
/* IOWarrior56 has wMaxPacketSize different from report size */
dev->report_size = 7;
diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c
index 10c9e7f6273e..29fe5771c21b 100644
--- a/drivers/usb/misc/usb251xb.c
+++ b/drivers/usb/misc/usb251xb.c
@@ -424,10 +424,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
return err;
}
- hub->vdd = devm_regulator_get(dev, "vdd");
- if (IS_ERR(hub->vdd))
- return PTR_ERR(hub->vdd);
-
if (of_property_read_u16_array(np, "vendor-id", &hub->vendor_id, 1))
hub->vendor_id = USB251XB_DEF_VENDOR_ID;
@@ -640,6 +636,13 @@ static int usb251xb_get_ofdata(struct usb251xb *hub,
}
#endif /* CONFIG_OF */
+static void usb251xb_regulator_disable_action(void *data)
+{
+ struct usb251xb *hub = data;
+
+ regulator_disable(hub->vdd);
+}
+
static int usb251xb_probe(struct usb251xb *hub)
{
struct device *dev = hub->dev;
@@ -676,10 +679,19 @@ static int usb251xb_probe(struct usb251xb *hub)
if (err)
return err;
+ hub->vdd = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(hub->vdd))
+ return PTR_ERR(hub->vdd);
+
err = regulator_enable(hub->vdd);
if (err)
return err;
+ err = devm_add_action_or_reset(dev,
+ usb251xb_regulator_disable_action, hub);
+ if (err)
+ return err;
+
err = usb251xb_connect(hub);
if (err) {
dev_err(dev, "Failed to connect hub (%d)\n", err);
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 037e8eee737d..6153cc35aba0 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -969,6 +969,10 @@ static int utmi_phy_probe(struct tegra_usb_phy *tegra_phy,
return -ENXIO;
}
+ /*
+ * Note that UTMI pad registers are shared by all PHYs, therefore
+ * devm_platform_ioremap_resource() can't be used here.
+ */
tegra_phy->pad_regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!tegra_phy->pad_regs) {
@@ -1087,6 +1091,10 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
return -ENXIO;
}
+ /*
+ * Note that PHY and USB controller are using shared registers,
+ * therefore devm_platform_ioremap_resource() can't be used here.
+ */
tegra_phy->regs = devm_ioremap(&pdev->dev, res->start,
resource_size(res));
if (!tegra_phy->regs) {
diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c
index d3f420f3a083..c5ecdcd51ffc 100644
--- a/drivers/usb/serial/ch341.c
+++ b/drivers/usb/serial/ch341.c
@@ -205,6 +205,16 @@ static int ch341_get_divisor(speed_t speed)
16 * speed - 16 * CH341_CLKRATE / (clk_div * (div + 1)))
div++;
+ /*
+ * Prefer the lower base clock (fact = 0) if the divisor is even.
+ *
+ * Note that this makes the receiver more tolerant to errors.
+ */
+ if (fact == 1 && div % 2 == 0) {
+ div /= 2;
+ fact = 0;
+ }
+
return (0x100 - div) << 8 | fact << 2 | ps;
}
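A simplified model of why the halving above is safe (the driver's actual prescaler and register math differ; the clock and divisor here are assumed example values): fact = 1 doubles the effective base clock, so an even divisor at the doubled clock gives the same rate as half that divisor at the base clock, and the lower clock leaves the receiver more timing margin:

#include <stdio.h>

int main(void)
{
	unsigned int clkrate = 48000000; /* assumed base clock, for scale */
	unsigned int div = 26;		 /* example divisor, even */
	unsigned int fact = 1;		 /* 1 = doubled base clock */

	/* rate ~ (fact ? 2 : 1) * clkrate / div: both settings match. */
	printf("fact=1 div=%u -> %u\n", div, 2 * clkrate / div);
	if (fact == 1 && div % 2 == 0) {
		div /= 2;
		fact = 0;
	}
	printf("fact=%u div=%u -> %u\n", fact, div, clkrate / div);
	return 0;
}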
diff --git a/drivers/usb/serial/ir-usb.c b/drivers/usb/serial/ir-usb.c
index 79d0586e2b33..172261a908d8 100644
--- a/drivers/usb/serial/ir-usb.c
+++ b/drivers/usb/serial/ir-usb.c
@@ -448,7 +448,7 @@ static void ir_set_termios(struct tty_struct *tty,
usb_sndbulkpipe(udev, port->bulk_out_endpointAddress),
transfer_buffer, 1, &actual_length, 5000);
if (ret || actual_length != 1) {
- if (actual_length != 1)
+ if (!ret)
ret = -EIO;
dev_err(&port->dev, "failed to change line speed: %d\n", ret);
}
diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
index 95bba3ba6ac6..3670fda02c34 100644
--- a/drivers/usb/storage/uas.c
+++ b/drivers/usb/storage/uas.c
@@ -45,6 +45,7 @@ struct uas_dev_info {
struct scsi_cmnd *cmnd[MAX_CMNDS];
spinlock_t lock;
struct work_struct work;
+ struct work_struct scan_work; /* for async scanning */
};
enum {
@@ -114,6 +115,17 @@ out:
spin_unlock_irqrestore(&devinfo->lock, flags);
}
+static void uas_scan_work(struct work_struct *work)
+{
+ struct uas_dev_info *devinfo =
+ container_of(work, struct uas_dev_info, scan_work);
+ struct Scsi_Host *shost = usb_get_intfdata(devinfo->intf);
+
+ dev_dbg(&devinfo->intf->dev, "starting scan\n");
+ scsi_scan_host(shost);
+ dev_dbg(&devinfo->intf->dev, "scan complete\n");
+}
+
static void uas_add_work(struct uas_cmd_info *cmdinfo)
{
struct scsi_pointer *scp = (void *)cmdinfo;
@@ -982,6 +994,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
init_usb_anchor(&devinfo->data_urbs);
spin_lock_init(&devinfo->lock);
INIT_WORK(&devinfo->work, uas_do_work);
+ INIT_WORK(&devinfo->scan_work, uas_scan_work);
result = uas_configure_endpoints(devinfo);
if (result)
@@ -998,7 +1011,9 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id)
if (result)
goto free_streams;
- scsi_scan_host(shost);
+ /* Kick off the work item for async SCSI-device scanning */
+ schedule_work(&devinfo->scan_work);
+
return result;
free_streams:
@@ -1166,6 +1181,12 @@ static void uas_disconnect(struct usb_interface *intf)
usb_kill_anchored_urbs(&devinfo->data_urbs);
uas_zap_pending(devinfo, DID_NO_CONNECT);
+ /*
+ * Prevent SCSI scanning (if it hasn't started yet)
+ * or wait for the SCSI-scanning routine to stop.
+ */
+ cancel_work_sync(&devinfo->scan_work);
+
scsi_remove_host(shost);
uas_free_streams(devinfo);
scsi_host_put(shost);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1cd9b6305b06..1880f3e13f57 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1258,6 +1258,12 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
USB_SC_RBC, USB_PR_BULK, NULL,
0 ),
+UNUSUAL_DEV(0x090c, 0x1000, 0x1100, 0x1100,
+ "Samsung",
+ "Flash Drive FIT",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_MAX_SECTORS_64),
+
/* aeb */
UNUSUAL_DEV( 0x090c, 0x1132, 0x0000, 0xffff,
"Feiya",
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index e158159671fa..18e205eeb9af 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -1414,10 +1414,6 @@ static int vhost_net_release(struct inode *inode, struct file *f)
static struct socket *get_raw_socket(int fd)
{
- struct {
- struct sockaddr_ll sa;
- char buf[MAX_ADDR_LEN];
- } uaddr;
int r;
struct socket *sock = sockfd_lookup(fd, &r);
@@ -1430,11 +1426,7 @@ static struct socket *get_raw_socket(int fd)
goto err;
}
- r = sock->ops->getname(sock, (struct sockaddr *)&uaddr.sa, 0);
- if (r < 0)
- goto err;
-
- if (uaddr.sa.sll_family != AF_PACKET) {
+ if (sock->sk->sk_family != AF_PACKET) {
r = -EPFNOSUPPORT;
goto err;
}
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 403707a3e503..7d22d7377606 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -9,7 +9,7 @@ menu "Backlight & LCD device support"
# LCD
#
config LCD_CLASS_DEVICE
- tristate "Lowlevel LCD controls"
+ tristate "Lowlevel LCD controls"
help
This framework adds support for low-level control of LCD.
Some framebuffer devices connect to platform-specific LCD modules
@@ -141,10 +141,10 @@ endif # LCD_CLASS_DEVICE
# Backlight
#
config BACKLIGHT_CLASS_DEVICE
- tristate "Lowlevel Backlight controls"
+ tristate "Lowlevel Backlight controls"
help
This framework adds support for low-level control of the LCD
- backlight. This includes support for brightness and power.
+ backlight. This includes support for brightness and power.
To have support for your specific LCD panel you will have to
select the proper drivers which depend on this option.
@@ -272,7 +272,7 @@ config BACKLIGHT_APPLE
tristate "Apple Backlight Driver"
depends on X86 && ACPI
help
- If you have an Intel-based Apple say Y to enable a driver for its
+ If you have an Intel-based Apple, say Y to enable a driver for its
backlight.
config BACKLIGHT_TOSA
@@ -456,6 +456,13 @@ config BACKLIGHT_RAVE_SP
help
Support for backlight control on RAVE SP device.
+config BACKLIGHT_LED
+ tristate "Generic LED based Backlight Driver"
+ depends on LEDS_CLASS && OF
+ help
+ If you have an LCD backlight adjustable by an LED class driver,
+ say Y to enable this driver.
+
endif # BACKLIGHT_CLASS_DEVICE
endmenu
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 6f8777037c37..0c1a1524627a 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -57,3 +57,4 @@ obj-$(CONFIG_BACKLIGHT_TPS65217) += tps65217_bl.o
obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
obj-$(CONFIG_BACKLIGHT_ARCXCNN) += arcxcnn_bl.o
obj-$(CONFIG_BACKLIGHT_RAVE_SP) += rave-sp-backlight.o
+obj-$(CONFIG_BACKLIGHT_LED) += led_bl.o
diff --git a/drivers/video/backlight/led_bl.c b/drivers/video/backlight/led_bl.c
new file mode 100644
index 000000000000..3f66549997c8
--- /dev/null
+++ b/drivers/video/backlight/led_bl.c
@@ -0,0 +1,260 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2015-2019 Texas Instruments Incorporated - http://www.ti.com/
+ * Author: Tomi Valkeinen <[email protected]>
+ *
+ * Based on pwm_bl.c
+ */
+
+#include <linux/backlight.h>
+#include <linux/leds.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+
+struct led_bl_data {
+ struct device *dev;
+ struct backlight_device *bl_dev;
+ struct led_classdev **leds;
+ bool enabled;
+ int nb_leds;
+ unsigned int *levels;
+ unsigned int default_brightness;
+ unsigned int max_brightness;
+};
+
+static void led_bl_set_brightness(struct led_bl_data *priv, int level)
+{
+ int i;
+ int bkl_brightness;
+
+ if (priv->levels)
+ bkl_brightness = priv->levels[level];
+ else
+ bkl_brightness = level;
+
+ for (i = 0; i < priv->nb_leds; i++)
+ led_set_brightness(priv->leds[i], bkl_brightness);
+
+ priv->enabled = true;
+}
+
+static void led_bl_power_off(struct led_bl_data *priv)
+{
+ int i;
+
+ if (!priv->enabled)
+ return;
+
+ for (i = 0; i < priv->nb_leds; i++)
+ led_set_brightness(priv->leds[i], LED_OFF);
+
+ priv->enabled = false;
+}
+
+static int led_bl_update_status(struct backlight_device *bl)
+{
+ struct led_bl_data *priv = bl_get_data(bl);
+ int brightness = bl->props.brightness;
+
+ if (bl->props.power != FB_BLANK_UNBLANK ||
+ bl->props.fb_blank != FB_BLANK_UNBLANK ||
+ bl->props.state & BL_CORE_FBBLANK)
+ brightness = 0;
+
+ if (brightness > 0)
+ led_bl_set_brightness(priv, brightness);
+ else
+ led_bl_power_off(priv);
+
+ return 0;
+}
+
+static const struct backlight_ops led_bl_ops = {
+ .update_status = led_bl_update_status,
+};
+
+static int led_bl_get_leds(struct device *dev,
+ struct led_bl_data *priv)
+{
+ int i, nb_leds, ret;
+ struct device_node *node = dev->of_node;
+ struct led_classdev **leds;
+ unsigned int max_brightness;
+ unsigned int default_brightness;
+
+ ret = of_count_phandle_with_args(node, "leds", NULL);
+ if (ret < 0) {
+ dev_err(dev, "Unable to get led count\n");
+ return -EINVAL;
+ }
+
+ nb_leds = ret;
+ if (nb_leds < 1) {
+ dev_err(dev, "At least one LED must be specified!\n");
+ return -EINVAL;
+ }
+
+ leds = devm_kzalloc(dev, sizeof(struct led_classdev *) * nb_leds,
+ GFP_KERNEL);
+ if (!leds)
+ return -ENOMEM;
+
+ for (i = 0; i < nb_leds; i++) {
+ leds[i] = devm_of_led_get(dev, i);
+ if (IS_ERR(leds[i]))
+ return PTR_ERR(leds[i]);
+ }
+
+ /* check that the LEDs all have the same brightness range */
+ max_brightness = leds[0]->max_brightness;
+ for (i = 1; i < nb_leds; i++) {
+ if (max_brightness != leds[i]->max_brightness) {
+ dev_err(dev, "LEDs must have identical ranges\n");
+ return -EINVAL;
+ }
+ }
+
+ /* get the default brightness from the first LED from the list */
+ default_brightness = leds[0]->brightness;
+
+ priv->nb_leds = nb_leds;
+ priv->leds = leds;
+ priv->max_brightness = max_brightness;
+ priv->default_brightness = default_brightness;
+
+ return 0;
+}
+
+static int led_bl_parse_levels(struct device *dev,
+ struct led_bl_data *priv)
+{
+ struct device_node *node = dev->of_node;
+ int num_levels;
+ u32 value;
+ int ret;
+
+ if (!node)
+ return -ENODEV;
+
+ num_levels = of_property_count_u32_elems(node, "brightness-levels");
+ if (num_levels > 1) {
+ int i;
+ unsigned int db;
+ u32 *levels = NULL;
+
+ levels = devm_kzalloc(dev, sizeof(u32) * num_levels,
+ GFP_KERNEL);
+ if (!levels)
+ return -ENOMEM;
+
+ ret = of_property_read_u32_array(node, "brightness-levels",
+ levels,
+ num_levels);
+ if (ret < 0)
+ return ret;
+
+ /*
+ * Try to map actual LED brightness to backlight brightness
+ * level
+ */
+ db = priv->default_brightness;
+ for (i = 0 ; i < num_levels; i++) {
+ if ((i && db > levels[i-1]) && db <= levels[i])
+ break;
+ }
+ priv->default_brightness = i;
+ priv->max_brightness = num_levels - 1;
+ priv->levels = levels;
+ } else if (num_levels >= 0)
+ dev_warn(dev, "Not enough levels defined\n");
+
+ ret = of_property_read_u32(node, "default-brightness-level", &value);
+ if (!ret && value <= priv->max_brightness)
+ priv->default_brightness = value;
+ else if (!ret && value > priv->max_brightness)
+ dev_warn(dev, "Invalid default brightness. Ignoring it\n");
+
+ return 0;
+}
+
+static int led_bl_probe(struct platform_device *pdev)
+{
+ struct backlight_properties props;
+ struct led_bl_data *priv;
+ int ret, i;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, priv);
+
+ priv->dev = &pdev->dev;
+
+ ret = led_bl_get_leds(&pdev->dev, priv);
+ if (ret)
+ return ret;
+
+ ret = led_bl_parse_levels(&pdev->dev, priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Failed to parse DT data\n");
+ return ret;
+ }
+
+ memset(&props, 0, sizeof(struct backlight_properties));
+ props.type = BACKLIGHT_RAW;
+ props.max_brightness = priv->max_brightness;
+ props.brightness = priv->default_brightness;
+ props.power = (priv->default_brightness > 0) ? FB_BLANK_POWERDOWN :
+ FB_BLANK_UNBLANK;
+ priv->bl_dev = backlight_device_register(dev_name(&pdev->dev),
+ &pdev->dev, priv, &led_bl_ops, &props);
+ if (IS_ERR(priv->bl_dev)) {
+ dev_err(&pdev->dev, "Failed to register backlight\n");
+ return PTR_ERR(priv->bl_dev);
+ }
+
+ for (i = 0; i < priv->nb_leds; i++)
+ led_sysfs_disable(priv->leds[i]);
+
+ backlight_update_status(priv->bl_dev);
+
+ return 0;
+}
+
+static int led_bl_remove(struct platform_device *pdev)
+{
+ struct led_bl_data *priv = platform_get_drvdata(pdev);
+ struct backlight_device *bl = priv->bl_dev;
+ int i;
+
+ backlight_device_unregister(bl);
+
+ led_bl_power_off(priv);
+ for (i = 0; i < priv->nb_leds; i++)
+ led_sysfs_enable(priv->leds[i]);
+
+ return 0;
+}
+
+static const struct of_device_id led_bl_of_match[] = {
+ { .compatible = "led-backlight" },
+ { }
+};
+
+MODULE_DEVICE_TABLE(of, led_bl_of_match);
+
+static struct platform_driver led_bl_driver = {
+ .driver = {
+ .name = "led-backlight",
+ .of_match_table = of_match_ptr(led_bl_of_match),
+ },
+ .probe = led_bl_probe,
+ .remove = led_bl_remove,
+};
+
+module_platform_driver(led_bl_driver);
+
+MODULE_DESCRIPTION("LED based Backlight Driver");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS("platform:led-backlight");
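led_bl_parse_levels() above maps the first LED's current brightness onto an index into the optional brightness-levels table. A standalone sketch of that lookup, using an assumed table and default brightness (not values from the driver or any DT):

#include <stdio.h>

int main(void)
{
	/* assumed DT table and LED state, for illustration only */
	unsigned int levels[] = { 0, 4, 8, 16, 32, 64, 128, 255 };
	int num_levels = 8;
	unsigned int db = 20;	/* LED brightness found at probe time */
	int i;

	/* Pick the first level i with levels[i-1] < db <= levels[i]. */
	for (i = 0; i < num_levels; i++) {
		if (i && db > levels[i - 1] && db <= levels[i])
			break;
	}
	if (i == num_levels)
		i = num_levels - 1;	/* clamp if db exceeds the table */

	printf("default backlight level = %d (maps to %u)\n",
	       i, levels[i]);		/* level 4 -> 32 */
	return 0;
}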
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index 70c10ea1c38b..3c01b0d2414f 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -27,7 +27,7 @@ config VGACON_SOFT_SCROLLBACK
depends on VGA_CONSOLE
default n
help
- The scrollback buffer of the standard VGA console is located in
+ The scrollback buffer of the standard VGA console is located in
the VGA RAM. The size of this RAM is fixed and is quite small.
If you require a larger scrollback buffer, this can be placed in
System RAM which is dynamically allocated during initialization.
@@ -84,36 +84,36 @@ config MDA_CONSOLE
If unsure, say N.
config SGI_NEWPORT_CONSOLE
- tristate "SGI Newport Console support"
+ tristate "SGI Newport Console support"
depends on SGI_IP22 && HAS_IOMEM
- select FONT_SUPPORT
- help
- Say Y here if you want the console on the Newport aka XL graphics
- card of your Indy. Most people say Y here.
+ select FONT_SUPPORT
+ help
+ Say Y here if you want the console on the Newport aka XL graphics
+ card of your Indy. Most people say Y here.
config DUMMY_CONSOLE
bool
default y
config DUMMY_CONSOLE_COLUMNS
- int "Initial number of console screen columns"
- depends on DUMMY_CONSOLE && !ARM
- default 160 if PARISC
- default 80
- help
- On PA-RISC, the default value is 160, which should fit a 1280x1024
- monitor.
- Select 80 if you use a 640x480 resolution by default.
+ int "Initial number of console screen columns"
+ depends on DUMMY_CONSOLE && !ARM
+ default 160 if PARISC
+ default 80
+ help
+ On PA-RISC, the default value is 160, which should fit a 1280x1024
+ monitor.
+ Select 80 if you use a 640x480 resolution by default.
config DUMMY_CONSOLE_ROWS
- int "Initial number of console screen rows"
- depends on DUMMY_CONSOLE && !ARM
- default 64 if PARISC
- default 25
- help
- On PA-RISC, the default value is 64, which should fit a 1280x1024
- monitor.
- Select 25 if you use a 640x480 resolution by default.
+ int "Initial number of console screen rows"
+ depends on DUMMY_CONSOLE && !ARM
+ default 64 if PARISC
+ default 25
+ help
+ On PA-RISC, the default value is 64, which should fit a 1280x1024
+ monitor.
+ Select 25 if you use a 640x480 resolution by default.
config FRAMEBUFFER_CONSOLE
bool "Framebuffer Console support"
@@ -129,11 +129,11 @@ config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY
depends on FRAMEBUFFER_CONSOLE
default n
---help---
- If this option is selected, the framebuffer console will
- automatically select the primary display device (if the architecture
+ If this option is selected, the framebuffer console will
+ automatically select the primary display device (if the architecture
supports this feature). Otherwise, the framebuffer console will
- always select the first framebuffer driver that is loaded. The latter
- is the default behavior.
+ always select the first framebuffer driver that is loaded. The latter
+ is the default behavior.
You can always override the automatic selection of the primary device
by using the fbcon=map: boot option.
@@ -144,11 +144,11 @@ config FRAMEBUFFER_CONSOLE_ROTATION
bool "Framebuffer Console Rotation"
depends on FRAMEBUFFER_CONSOLE
help
- Enable display rotation for the framebuffer console. This is done
- in software and may be significantly slower than a normally oriented
- display. Note that the rotation is done at the console level only
- such that other users of the framebuffer will remain normally
- oriented.
+ Enable display rotation for the framebuffer console. This is done
+ in software and may be significantly slower than a normally oriented
+ display. Note that the rotation is done at the console level only
+ such that other users of the framebuffer will remain normally
+ oriented.
config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
bool "Framebuffer Console Deferred Takeover"
@@ -162,14 +162,14 @@ config FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER
black screen as soon as fbcon loads.
config STI_CONSOLE
- bool "STI text console"
+ bool "STI text console"
depends on PARISC && HAS_IOMEM
- select FONT_SUPPORT
- default y
- help
- The STI console is the builtin display/keyboard on HP-PARISC
- machines. Say Y here to build support for it into your kernel.
- The alternative is to use your primary serial port as a console.
+ select FONT_SUPPORT
+ default y
+ help
+ The STI console is the builtin display/keyboard on HP-PARISC
+ machines. Say Y here to build support for it into your kernel.
+ The alternative is to use your primary serial port as a console.
endmenu
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index de7b8382aba9..998b0de1812f 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -1316,6 +1316,9 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
static int vgacon_resize(struct vc_data *c, unsigned int width,
unsigned int height, unsigned int user)
{
+ if ((width << 1) * height > vga_vram_size)
+ return -EINVAL;
+
if (width % 2 || width > screen_info.orig_video_cols ||
height > (screen_info.orig_video_lines * vga_default_font_height)/
c->vc_font.height)
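The new guard above rejects resizes that would overrun the VGA text RAM: each character cell occupies two bytes (glyph plus attribute), so the required size is width * 2 * height. A standalone sketch of the check, with an assumed 32 KiB of VGA RAM and a made-up geometry:

#include <stdio.h>

int main(void)
{
	unsigned long vga_vram_size = 32768; /* assumed VGA text RAM size */
	unsigned int width = 160, height = 120;

	/* Two bytes per cell: character byte + attribute byte. */
	if ((width << 1) * height > vga_vram_size)
		printf("%ux%u rejected (needs %u bytes)\n",
		       width, height, (width << 1) * height);
	else
		printf("%ux%u fits\n", width, height);
	return 0;
}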
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index f65991a67af2..91b0a719d221 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -435,7 +435,7 @@ config FB_FM2
config FB_ARC
tristate "Arc Monochrome LCD board support"
- depends on FB && X86
+ depends on FB && (X86 || COMPILE_TEST)
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
@@ -1639,7 +1639,7 @@ config FB_VT8500
config FB_WM8505
bool "Wondermedia WM8xxx-series frame buffer support"
- depends on (FB = y) && ARM && ARCH_VT8500
+ depends on (FB = y) && HAS_IOMEM && (ARCH_VT8500 || COMPILE_TEST)
select FB_SYS_FILLRECT if (!FB_WMT_GE_ROPS)
select FB_SYS_COPYAREA if (!FB_WMT_GE_ROPS)
select FB_SYS_IMAGEBLIT
@@ -1827,7 +1827,7 @@ config FB_FSL_DIU
config FB_W100
tristate "W100 frame buffer support"
- depends on FB && ARCH_PXA
+ depends on FB && HAS_IOMEM && (ARCH_PXA || COMPILE_TEST)
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
select FB_CFB_IMAGEBLIT
@@ -1844,7 +1844,8 @@ config FB_W100
config FB_SH_MOBILE_LCDC
tristate "SuperH Mobile LCDC framebuffer support"
- depends on FB && (SUPERH || ARCH_RENESAS) && HAVE_CLK
+ depends on FB && HAVE_CLK && HAS_IOMEM
+ depends on SUPERH || ARCH_RENESAS || COMPILE_TEST
select FB_SYS_FILLRECT
select FB_SYS_COPYAREA
select FB_SYS_IMAGEBLIT
diff --git a/drivers/video/fbdev/aty/mach64_gx.c b/drivers/video/fbdev/aty/mach64_gx.c
index 27cb65fa2ba2..9c37e28fb78b 100644
--- a/drivers/video/fbdev/aty/mach64_gx.c
+++ b/drivers/video/fbdev/aty/mach64_gx.c
@@ -618,14 +618,13 @@ static int aty_var_to_pll_8398(const struct fb_info *info, u32 vclk_per,
u32 mhz100; /* in 0.01 MHz */
u32 program_bits;
/* u32 post_divider; */
- u32 mach64MinFreq, mach64MaxFreq, mach64RefFreq;
+ u32 mach64MinFreq, mach64MaxFreq;
u16 m, n, k = 0, save_m, save_n, twoToKth;
/* Calculate the programming word */
mhz100 = 100000000 / vclk_per;
mach64MinFreq = MIN_FREQ_2595;
mach64MaxFreq = MAX_FREQ_2595;
- mach64RefFreq = REF_FREQ_2595; /* 14.32 MHz */
save_m = 0;
save_n = 0;
diff --git a/drivers/video/fbdev/aty/radeon_base.c b/drivers/video/fbdev/aty/radeon_base.c
index 3af00e3b965e..e116a3f9ad56 100644
--- a/drivers/video/fbdev/aty/radeon_base.c
+++ b/drivers/video/fbdev/aty/radeon_base.c
@@ -849,12 +849,6 @@ static int radeonfb_check_var (struct fb_var_screeninfo *var, struct fb_info *in
case 9 ... 16:
v.bits_per_pixel = 16;
break;
- case 17 ... 24:
-#if 0 /* Doesn't seem to work */
- v.bits_per_pixel = 24;
- break;
-#endif
- return -EINVAL;
case 25 ... 32:
v.bits_per_pixel = 32;
break;
@@ -1650,14 +1644,14 @@ static int radeonfb_set_par(struct fb_info *info)
struct fb_var_screeninfo *mode = &info->var;
struct radeon_regs *newmode;
int hTotal, vTotal, hSyncStart, hSyncEnd,
- hSyncPol, vSyncStart, vSyncEnd, vSyncPol, cSync;
+ vSyncStart, vSyncEnd;
u8 hsync_adj_tab[] = {0, 0x12, 9, 9, 6, 5};
u8 hsync_fudge_fp[] = {2, 2, 0, 0, 5, 5};
u32 sync, h_sync_pol, v_sync_pol, dotClock, pixClock;
int i, freq;
int format = 0;
int nopllcalc = 0;
- int hsync_start, hsync_fudge, bytpp, hsync_wid, vsync_wid;
+ int hsync_start, hsync_fudge, hsync_wid, vsync_wid;
int primary_mon = PRIMARY_MONITOR(rinfo);
int depth = var_to_depth(mode);
int use_rmx = 0;
@@ -1730,13 +1724,7 @@ static int radeonfb_set_par(struct fb_info *info)
else if (vsync_wid > 0x1f) /* max */
vsync_wid = 0x1f;
- hSyncPol = mode->sync & FB_SYNC_HOR_HIGH_ACT ? 0 : 1;
- vSyncPol = mode->sync & FB_SYNC_VERT_HIGH_ACT ? 0 : 1;
-
- cSync = mode->sync & FB_SYNC_COMP_HIGH_ACT ? (1 << 4) : 0;
-
format = radeon_get_dstbpp(depth);
- bytpp = mode->bits_per_pixel >> 3;
if ((primary_mon == MT_DFP) || (primary_mon == MT_LCD))
hsync_fudge = hsync_fudge_fp[format-1];
@@ -2548,16 +2536,6 @@ static void radeonfb_pci_unregister(struct pci_dev *pdev)
if (rinfo->mon2_EDID)
sysfs_remove_bin_file(&rinfo->pdev->dev.kobj, &edid2_attr);
-#if 0
- /* restore original state
- *
- * Doesn't quite work yet, I suspect if we come from a legacy
- * VGA mode (or worse, text mode), we need to do some VGA black
- * magic here that I know nothing about. --BenH
- */
- radeon_write_mode (rinfo, &rinfo->init_state, 1);
- #endif
-
del_timer_sync(&rinfo->lvds_timer);
arch_phys_wc_del(rinfo->wc_cookie);
unregister_framebuffer(info);
diff --git a/drivers/video/fbdev/cg14.c b/drivers/video/fbdev/cg14.c
index a620b51cf7d0..6a745eb46ca1 100644
--- a/drivers/video/fbdev/cg14.c
+++ b/drivers/video/fbdev/cg14.c
@@ -509,8 +509,7 @@ static int cg14_probe(struct platform_device *op)
if (!par->regs || !par->clut || !par->cursor || !info->screen_base)
goto out_unmap_regs;
- is_8mb = (((op->resource[1].end - op->resource[1].start) + 1) ==
- (8 * 1024 * 1024));
+ is_8mb = (resource_size(&op->resource[1]) == (8 * 1024 * 1024));
BUILD_BUG_ON(sizeof(par->mmap_map) != sizeof(__cg14_mmap_map));
diff --git a/drivers/video/fbdev/core/Makefile b/drivers/video/fbdev/core/Makefile
index 37710316a680..26cbc965497c 100644
--- a/drivers/video/fbdev/core/Makefile
+++ b/drivers/video/fbdev/core/Makefile
@@ -16,7 +16,6 @@ fb-y += fbcon_rotate.o fbcon_cw.o fbcon_ud.o \
fbcon_ccw.o
endif
endif
-fb-objs := $(fb-y)
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c
index bb6ae995c2e5..28335788e76e 100644
--- a/drivers/video/fbdev/core/fbcon.c
+++ b/drivers/video/fbdev/core/fbcon.c
@@ -873,7 +873,7 @@ static int set_con2fb_map(int unit, int newidx, int user)
int oldidx = con2fb_map[unit];
struct fb_info *info = registered_fb[newidx];
struct fb_info *oldinfo = NULL;
- int found, err = 0;
+ int found, err = 0;
WARN_CONSOLE_UNLOCKED();
@@ -895,31 +895,30 @@ static int set_con2fb_map(int unit, int newidx, int user)
con2fb_map[unit] = newidx;
if (!err && !found)
- err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
-
+ err = con2fb_acquire_newinfo(vc, info, unit, oldidx);
/*
* If old fb is not mapped to any of the consoles,
* fbcon should release it.
*/
- if (!err && oldinfo && !search_fb_in_map(oldidx))
- err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
- found);
+ if (!err && oldinfo && !search_fb_in_map(oldidx))
+ err = con2fb_release_oldinfo(vc, oldinfo, info, unit, oldidx,
+ found);
- if (!err) {
- int show_logo = (fg_console == 0 && !user &&
- logo_shown != FBCON_LOGO_DONTSHOW);
+ if (!err) {
+ int show_logo = (fg_console == 0 && !user &&
+ logo_shown != FBCON_LOGO_DONTSHOW);
- if (!found)
- fbcon_add_cursor_timer(info);
- con2fb_map_boot[unit] = newidx;
- con2fb_init_display(vc, info, unit, show_logo);
+ if (!found)
+ fbcon_add_cursor_timer(info);
+ con2fb_map_boot[unit] = newidx;
+ con2fb_init_display(vc, info, unit, show_logo);
}
if (!search_fb_in_map(info_idx))
info_idx = newidx;
- return err;
+ return err;
}
/*
diff --git a/drivers/video/fbdev/core/fbmem.c b/drivers/video/fbdev/core/fbmem.c
index d04554959ea7..30e73ec4ad5c 100644
--- a/drivers/video/fbdev/core/fbmem.c
+++ b/drivers/video/fbdev/core/fbmem.c
@@ -663,20 +663,20 @@ int fb_prepare_logo(struct fb_info *info, int rotate)
fb_logo.depth = 1;
- if (fb_logo.depth > 4 && depth > 4) {
- switch (info->fix.visual) {
- case FB_VISUAL_TRUECOLOR:
- fb_logo.needs_truepalette = 1;
- break;
- case FB_VISUAL_DIRECTCOLOR:
- fb_logo.needs_directpalette = 1;
- fb_logo.needs_cmapreset = 1;
- break;
- case FB_VISUAL_PSEUDOCOLOR:
- fb_logo.needs_cmapreset = 1;
- break;
- }
- }
+ if (fb_logo.depth > 4 && depth > 4) {
+ switch (info->fix.visual) {
+ case FB_VISUAL_TRUECOLOR:
+ fb_logo.needs_truepalette = 1;
+ break;
+ case FB_VISUAL_DIRECTCOLOR:
+ fb_logo.needs_directpalette = 1;
+ fb_logo.needs_cmapreset = 1;
+ break;
+ case FB_VISUAL_PSEUDOCOLOR:
+ fb_logo.needs_cmapreset = 1;
+ break;
+ }
+ }
height = fb_logo.logo->height;
if (fb_center_logo)
@@ -1065,19 +1065,19 @@ fb_blank(struct fb_info *info, int blank)
struct fb_event event;
int ret = -EINVAL;
- if (blank > FB_BLANK_POWERDOWN)
- blank = FB_BLANK_POWERDOWN;
+ if (blank > FB_BLANK_POWERDOWN)
+ blank = FB_BLANK_POWERDOWN;
event.info = info;
event.data = &blank;
if (info->fbops->fb_blank)
- ret = info->fbops->fb_blank(blank, info);
+ ret = info->fbops->fb_blank(blank, info);
if (!ret)
fb_notifier_call_chain(FB_EVENT_BLANK, &event);
- return ret;
+ return ret;
}
EXPORT_SYMBOL(fb_blank);
@@ -1115,7 +1115,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
break;
case FBIOGET_FSCREENINFO:
lock_fb_info(info);
- fix = info->fix;
+ memcpy(&fix, &info->fix, sizeof(fix));
if (info->flags & FBINFO_HIDE_SMEM_START)
fix.smem_start = 0;
unlock_fb_info(info);
diff --git a/drivers/video/fbdev/hyperv_fb.c b/drivers/video/fbdev/hyperv_fb.c
index f47d50e560c0..e4c3c8b65da4 100644
--- a/drivers/video/fbdev/hyperv_fb.c
+++ b/drivers/video/fbdev/hyperv_fb.c
@@ -594,8 +594,8 @@ static int synthvid_get_supported_resolution(struct hv_device *hdev)
t = wait_for_completion_timeout(&par->wait, VSP_TIMEOUT);
if (!t) {
pr_err("Time out on waiting resolution response\n");
- ret = -ETIMEDOUT;
- goto out;
+ ret = -ETIMEDOUT;
+ goto out;
}
if (msg->resolution_resp.resolution_count == 0) {
diff --git a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
index 0aeeaa10708b..9fde0e3b69ec 100644
--- a/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
+++ b/drivers/video/fbdev/kyro/STG4000OverlayDevice.c
@@ -331,7 +331,7 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
u32 ulScale;
u32 ulLeft, ulRight;
u32 ulSrcLeft, ulSrcRight;
- u32 ulScaleLeft, ulScaleRight;
+ u32 ulScaleLeft;
u32 ulhDecim;
u32 ulsVal;
u32 ulVertDecFactor;
@@ -470,7 +470,6 @@ int SetOverlayViewPort(volatile STG4000REG __iomem *pSTGReg,
* round down the pixel pos to the nearest 8 pixels.
*/
ulScaleLeft = ulSrcLeft;
- ulScaleRight = ulSrcRight;
/* shift fxscale until it is in the range of the scaler */
ulhDecim = 0;
diff --git a/drivers/video/fbdev/matrox/matroxfb_base.c b/drivers/video/fbdev/matrox/matroxfb_base.c
index 36cc718b96ae..570439b32655 100644
--- a/drivers/video/fbdev/matrox/matroxfb_base.c
+++ b/drivers/video/fbdev/matrox/matroxfb_base.c
@@ -1376,6 +1376,12 @@ static struct video_board vbG200 = {
.accelID = FB_ACCEL_MATROX_MGAG200,
.lowlevel = &matrox_G100
};
+static struct video_board vbG200eW = {
+ .maxvram = 0x800000,
+ .maxdisplayable = 0x800000,
+ .accelID = FB_ACCEL_MATROX_MGAG200,
+ .lowlevel = &matrox_G100
+};
/* from doc it looks like that accelerator can draw only to low 16MB :-( Direct accesses & displaying are OK for
whole 32MB */
static struct video_board vbG400 = {
@@ -1494,6 +1500,13 @@ static struct board {
MGA_G200,
&vbG200,
"MGA-G200 (PCI)"},
+ {PCI_VENDOR_ID_MATROX, 0x0532, 0xFF,
+ 0, 0,
+ DEVF_G200,
+ 250000,
+ MGA_G200,
+ &vbG200eW,
+ "MGA-G200eW (PCI)"},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP, 0xFF,
PCI_SS_VENDOR_ID_MATROX, PCI_SS_ID_MATROX_GENERIC,
DEVF_G200,
@@ -2136,6 +2149,8 @@ static const struct pci_device_id matroxfb_devices[] = {
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_PCI,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+ {PCI_VENDOR_ID_MATROX, 0x0532,
+ PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G200_AGP,
PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{PCI_VENDOR_ID_MATROX, PCI_DEVICE_ID_MATROX_G400,
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
index 335d4983dc52..167585a889d3 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.h
@@ -1406,7 +1406,7 @@ struct mmphw_ctrl {
/* paths */
int path_num;
- struct mmphw_path_plat path_plats[0];
+ struct mmphw_path_plat path_plats[];
};
static inline int overlay_is_vid(struct mmp_overlay *overlay)
diff --git a/drivers/video/fbdev/nvidia/nvidia.c b/drivers/video/fbdev/nvidia/nvidia.c
index c583c018304d..c24de9107958 100644
--- a/drivers/video/fbdev/nvidia/nvidia.c
+++ b/drivers/video/fbdev/nvidia/nvidia.c
@@ -168,27 +168,26 @@ static int nvidia_panel_tweak(struct nvidia_par *par,
{
int tweak = 0;
- if (par->paneltweak) {
- tweak = par->paneltweak;
- } else {
- /* begin flat panel hacks */
- /* This is unfortunate, but some chips need this register
- tweaked or else you get artifacts where adjacent pixels are
- swapped. There are no hard rules for what to set here so all
- we can do is experiment and apply hacks. */
-
- if(((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
- /* At least one NV34 laptop needs this workaround. */
- tweak = -1;
- }
-
- if((par->Chipset & 0xfff0) == 0x0310) {
- tweak = 1;
- }
- /* end flat panel hacks */
- }
-
- return tweak;
+ if (par->paneltweak) {
+ tweak = par->paneltweak;
+ } else {
+ /* Begin flat panel hacks.
+ * This is unfortunate, but some chips need this register
+ * tweaked or else you get artifacts where adjacent pixels are
+ * swapped. There are no hard rules for what to set here so all
+ * we can do is experiment and apply hacks.
+ */
+ if (((par->Chipset & 0xffff) == 0x0328) && (state->bpp == 32)) {
+ /* At least one NV34 laptop needs this workaround. */
+ tweak = -1;
+ }
+
+ if ((par->Chipset & 0xfff0) == 0x0310)
+ tweak = 1;
+ /* end flat panel hacks */
+ }
+
+ return tweak;
}
static void nvidia_screen_off(struct nvidia_par *par, int on)
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 8dfa9158ba78..836e7b1639ce 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1154,16 +1154,12 @@ static int _setcolreg(struct fb_info *fbi, u_int regno, u_int red, u_int green,
r = fbdev->ctrl->setcolreg(regno, red, green, blue,
transp, update_hw_pal);
*/
- /* Fallthrough */
r = -EINVAL;
break;
case OMAPFB_COLOR_RGB565:
case OMAPFB_COLOR_RGB444:
case OMAPFB_COLOR_RGB24P:
case OMAPFB_COLOR_RGB24U:
- if (r != 0)
- break;
-
if (regno < 16) {
u32 pal;
pal = ((red >> (16 - var->red.length)) <<
diff --git a/drivers/video/fbdev/pxa168fb.c b/drivers/video/fbdev/pxa168fb.c
index 9b9ec1468347..aef8a3042590 100644
--- a/drivers/video/fbdev/pxa168fb.c
+++ b/drivers/video/fbdev/pxa168fb.c
@@ -769,7 +769,7 @@ failed_free_fbmem:
dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, fbi->fb_start_dma);
failed_free_info:
- kfree(info);
+ framebuffer_release(info);
dev_err(&pdev->dev, "frame buffer device init failed with %d\n", ret);
return ret;
@@ -779,7 +779,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
{
struct pxa168fb_info *fbi = platform_get_drvdata(pdev);
struct fb_info *info;
- int irq;
unsigned int data;
if (!fbi)
@@ -799,8 +798,6 @@ static int pxa168fb_remove(struct platform_device *pdev)
if (info->cmap.len)
fb_dealloc_cmap(&info->cmap);
- irq = platform_get_irq(pdev, 0);
-
dma_free_wc(fbi->dev, info->fix.smem_len,
info->screen_base, info->fix.smem_start);
diff --git a/drivers/video/fbdev/s1d13xxxfb.c b/drivers/video/fbdev/s1d13xxxfb.c
index 8048499e398d..eaea8c373753 100644
--- a/drivers/video/fbdev/s1d13xxxfb.c
+++ b/drivers/video/fbdev/s1d13xxxfb.c
@@ -746,9 +746,9 @@ s1d13xxxfb_remove(struct platform_device *pdev)
}
release_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1);
+ resource_size(&pdev->resource[0]));
release_mem_region(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1);
+ resource_size(&pdev->resource[1]));
return 0;
}
@@ -788,14 +788,14 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
}
if (!request_mem_region(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1, "s1d13xxxfb mem")) {
+ resource_size(&pdev->resource[0]), "s1d13xxxfb mem")) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto bail;
}
if (!request_mem_region(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1, "s1d13xxxfb regs")) {
+ resource_size(&pdev->resource[1]), "s1d13xxxfb regs")) {
dev_dbg(&pdev->dev, "request_mem_region failed\n");
ret = -EBUSY;
goto bail;
@@ -810,7 +810,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, info);
default_par = info->par;
default_par->regs = ioremap(pdev->resource[1].start,
- pdev->resource[1].end - pdev->resource[1].start +1);
+ resource_size(&pdev->resource[1]));
if (!default_par->regs) {
printk(KERN_ERR PFX "unable to map registers\n");
ret = -ENOMEM;
@@ -819,7 +819,7 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
info->pseudo_palette = default_par->pseudo_palette;
info->screen_base = ioremap(pdev->resource[0].start,
- pdev->resource[0].end - pdev->resource[0].start +1);
+ resource_size(&pdev->resource[0]));
if (!info->screen_base) {
printk(KERN_ERR PFX "unable to map framebuffer\n");
@@ -857,9 +857,9 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
info->fix = s1d13xxxfb_fix;
info->fix.mmio_start = pdev->resource[1].start;
- info->fix.mmio_len = pdev->resource[1].end - pdev->resource[1].start + 1;
+ info->fix.mmio_len = resource_size(&pdev->resource[1]);
info->fix.smem_start = pdev->resource[0].start;
- info->fix.smem_len = pdev->resource[0].end - pdev->resource[0].start + 1;
+ info->fix.smem_len = resource_size(&pdev->resource[0]);
printk(KERN_INFO PFX "regs mapped at 0x%p, fb %d KiB mapped at 0x%p\n",
default_par->regs, info->fix.smem_len / 1024, info->screen_base);
diff --git a/drivers/video/fbdev/sa1100fb.c b/drivers/video/fbdev/sa1100fb.c
index 5bb653db0cec..2d285cc384cf 100644
--- a/drivers/video/fbdev/sa1100fb.c
+++ b/drivers/video/fbdev/sa1100fb.c
@@ -1053,7 +1053,7 @@ static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
}
/* Fake monspecs to fill in fbinfo structure */
-static struct fb_monspecs monspecs = {
+static const struct fb_monspecs monspecs = {
.hfmin = 30000,
.hfmax = 70000,
.vfmin = 50,
diff --git a/drivers/video/fbdev/sh_mobile_lcdcfb.c b/drivers/video/fbdev/sh_mobile_lcdcfb.c
index 4ea6f932b334..8a27d12e6ea8 100644
--- a/drivers/video/fbdev/sh_mobile_lcdcfb.c
+++ b/drivers/video/fbdev/sh_mobile_lcdcfb.c
@@ -1572,7 +1572,7 @@ sh_mobile_lcdc_overlay_fb_init(struct sh_mobile_lcdc_overlay *ovl)
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &sh_mobile_lcdc_overlay_ops;
info->device = priv->dev;
- info->screen_base = ovl->fb_mem;
+ info->screen_buffer = ovl->fb_mem;
info->par = ovl;
/* Initialize fixed screen information. Restrict pan to 2 lines steps
@@ -2056,7 +2056,7 @@ sh_mobile_lcdc_channel_fb_init(struct sh_mobile_lcdc_chan *ch,
info->flags = FBINFO_FLAG_DEFAULT;
info->fbops = &sh_mobile_lcdc_ops;
info->device = priv->dev;
- info->screen_base = ch->fb_mem;
+ info->screen_buffer = ch->fb_mem;
info->pseudo_palette = &ch->pseudo_palette;
info->par = ch;
diff --git a/drivers/video/fbdev/ssd1307fb.c b/drivers/video/fbdev/ssd1307fb.c
index 142535267fec..12fa1050f3eb 100644
--- a/drivers/video/fbdev/ssd1307fb.c
+++ b/drivers/video/fbdev/ssd1307fb.c
@@ -89,7 +89,7 @@ struct ssd1307fb_par {
struct ssd1307fb_array {
u8 type;
- u8 data[0];
+ u8 data[];
};
static const struct fb_fix_screeninfo ssd1307fb_fix = {
@@ -791,6 +791,8 @@ static int ssd1307fb_remove(struct i2c_client *client)
pwm_disable(par->pwm);
pwm_put(par->pwm);
}
+ if (par->vbat_reg)
+ regulator_disable(par->vbat_reg);
fb_deferred_io_cleanup(info);
__free_pages(__va(info->fix.smem_start), get_order(info->fix.smem_len));
framebuffer_release(info);
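Two separate fixes land in this ssd1307fb group: the zero-length array u8 data[0] becomes a C99 flexible array member u8 data[] (the same conversion appears below for xen-pcibk's irq_name), and the remove path gains a regulator_disable() to balance the enable performed at probe time. As a minimal sketch of how a structure ending in a flexible array member is typically allocated, using the overflow-checked struct_size() helper from <linux/overflow.h>; len and the marker value are illustrative here:

struct ssd1307fb_array *array;

/* sizeof(*array) + len * sizeof(array->data[0]), with overflow checking */
array = kzalloc(struct_size(array, data, len), GFP_KERNEL);
if (!array)
	return NULL;
array->type = type;	/* e.g. a data-vs-command marker byte */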
diff --git a/drivers/video/fbdev/w100fb.c b/drivers/video/fbdev/w100fb.c
index ad26cbffbc6f..2d6e2738b792 100644
--- a/drivers/video/fbdev/w100fb.c
+++ b/drivers/video/fbdev/w100fb.c
@@ -61,9 +61,9 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq);
#define BITS_PER_PIXEL 16
/* Remapped addresses for base cfg, memmapped regs and the frame buffer itself */
-static void *remapped_base;
-static void *remapped_regs;
-static void *remapped_fbuf;
+static void __iomem *remapped_base;
+static void __iomem *remapped_regs;
+static void __iomem *remapped_fbuf;
#define REMAPPED_FB_LEN 0x15ffff
@@ -635,7 +635,7 @@ static int w100fb_resume(struct platform_device *dev)
#endif
-int w100fb_probe(struct platform_device *pdev)
+static int w100fb_probe(struct platform_device *pdev)
{
int err = -EIO;
struct w100fb_mach_info *inf;
@@ -807,10 +807,11 @@ static int w100fb_remove(struct platform_device *pdev)
static void w100_soft_reset(void)
{
- u16 val = readw((u16 *) remapped_base + cfgSTATUS);
- writew(val | 0x08, (u16 *) remapped_base + cfgSTATUS);
+ u16 val = readw((u16 __iomem *)remapped_base + cfgSTATUS);
+
+ writew(val | 0x08, (u16 __iomem *)remapped_base + cfgSTATUS);
udelay(100);
- writew(0x00, (u16 *) remapped_base + cfgSTATUS);
+ writew(0x00, (u16 __iomem *)remapped_base + cfgSTATUS);
udelay(100);
}
@@ -1022,7 +1023,8 @@ struct w100_pll_info *w100_get_xtal_table(unsigned int freq)
return pll_entry->pll_table;
pll_entry++;
} while (pll_entry->xtal_freq);
- return 0;
+
+ return NULL;
}
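The w100fb changes are sparse-annotation fixes: pointers returned by ioremap() live in the __iomem address space and must be accessed through readw()/writew() and friends, and NULL (not 0) is the idiomatic failure value for a pointer return. A minimal sketch of the access pattern; CFG_STATUS is a placeholder register offset:

void __iomem *base = ioremap(phys_addr, len);
u16 status;

if (!base)
	return -ENOMEM;
status = readw(base + CFG_STATUS);
writew(status | 0x08, base + CFG_STATUS);
iounmap(base);

Dereferencing base directly would draw a sparse address-space warning, which is exactly what the added __iomem annotations enable.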
diff --git a/drivers/video/fbdev/wm8505fb.c b/drivers/video/fbdev/wm8505fb.c
index b656eff58c23..8f4d674fa0d0 100644
--- a/drivers/video/fbdev/wm8505fb.c
+++ b/drivers/video/fbdev/wm8505fb.c
@@ -339,7 +339,7 @@ static int wm8505fb_probe(struct platform_device *pdev)
fbi->fb.fix.smem_start = fb_mem_phys;
fbi->fb.fix.smem_len = fb_mem_len;
- fbi->fb.screen_base = fb_mem_virt;
+ fbi->fb.screen_buffer = fb_mem_virt;
fbi->fb.screen_size = fb_mem_len;
fbi->contrast = 0x10;
diff --git a/drivers/video/hdmi.c b/drivers/video/hdmi.c
index 9c82e2a0a411..856a8c4e84a2 100644
--- a/drivers/video/hdmi.c
+++ b/drivers/video/hdmi.c
@@ -53,18 +53,14 @@ static void hdmi_infoframe_set_checksum(void *buffer, size_t size)
/**
* hdmi_avi_infoframe_init() - initialize an HDMI AVI infoframe
* @frame: HDMI AVI infoframe
- *
- * Returns 0 on success or a negative error code on failure.
*/
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame)
{
memset(frame, 0, sizeof(*frame));
frame->type = HDMI_INFOFRAME_TYPE_AVI;
frame->version = 2;
frame->length = HDMI_AVI_INFOFRAME_SIZE;
-
- return 0;
}
EXPORT_SYMBOL(hdmi_avi_infoframe_init);
@@ -1553,7 +1549,6 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
const void *buffer, size_t size)
{
const u8 *ptr = buffer;
- int ret;
if (size < HDMI_INFOFRAME_SIZE(AVI))
return -EINVAL;
@@ -1566,9 +1561,7 @@ static int hdmi_avi_infoframe_unpack(struct hdmi_avi_infoframe *frame,
if (hdmi_infoframe_checksum(buffer, HDMI_INFOFRAME_SIZE(AVI)) != 0)
return -EINVAL;
- ret = hdmi_avi_infoframe_init(frame);
- if (ret)
- return ret;
+ hdmi_avi_infoframe_init(frame);
ptr += HDMI_INFOFRAME_HEADER_SIZE;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index cec868f8db3f..9ea2b43d4b01 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -207,6 +207,7 @@ config DA9063_WATCHDOG
config DA9062_WATCHDOG
tristate "Dialog DA9062/61 Watchdog"
depends on MFD_DA9062 || COMPILE_TEST
+ depends on I2C
select WATCHDOG_CORE
help
Support for the watchdog in the DA9062 and DA9061 PMICs.
@@ -841,6 +842,7 @@ config MEDIATEK_WATCHDOG
tristate "Mediatek SoCs watchdog support"
depends on ARCH_MEDIATEK || COMPILE_TEST
select WATCHDOG_CORE
+ select RESET_CONTROLLER
help
Say Y here to include support for the watchdog timer
in Mediatek SoCs.
diff --git a/drivers/watchdog/da9062_wdt.c b/drivers/watchdog/da9062_wdt.c
index 47eefe072b40..0ad15d55071c 100644
--- a/drivers/watchdog/da9062_wdt.c
+++ b/drivers/watchdog/da9062_wdt.c
@@ -16,6 +16,7 @@
#include <linux/jiffies.h>
#include <linux/mfd/da9062/registers.h>
#include <linux/mfd/da9062/core.h>
+#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/of.h>
@@ -31,6 +32,7 @@ static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
struct da9062_watchdog {
struct da9062 *hw;
struct watchdog_device wdtdev;
+ bool use_sw_pm;
};
static unsigned int da9062_wdt_timeout_to_sel(unsigned int secs)
@@ -95,13 +97,6 @@ static int da9062_wdt_stop(struct watchdog_device *wdd)
struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
int ret;
- ret = da9062_reset_watchdog_timer(wdt);
- if (ret) {
- dev_err(wdt->hw->dev, "Failed to ping the watchdog (err = %d)\n",
- ret);
- return ret;
- }
-
ret = regmap_update_bits(wdt->hw->regmap,
DA9062AA_CONTROL_D,
DA9062AA_TWDSCALE_MASK,
@@ -200,6 +195,8 @@ static int da9062_wdt_probe(struct platform_device *pdev)
if (!wdt)
return -ENOMEM;
+ wdt->use_sw_pm = device_property_present(dev, "dlg,use-sw-pm");
+
wdt->hw = chip;
wdt->wdtdev.info = &da9062_watchdog_info;
@@ -226,6 +223,10 @@ static int da9062_wdt_probe(struct platform_device *pdev)
static int __maybe_unused da9062_wdt_suspend(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
+ struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+
+ if (!wdt->use_sw_pm)
+ return 0;
if (watchdog_active(wdd))
return da9062_wdt_stop(wdd);
@@ -236,6 +237,10 @@ static int __maybe_unused da9062_wdt_suspend(struct device *dev)
static int __maybe_unused da9062_wdt_resume(struct device *dev)
{
struct watchdog_device *wdd = dev_get_drvdata(dev);
+ struct da9062_watchdog *wdt = watchdog_get_drvdata(wdd);
+
+ if (!wdt->use_sw_pm)
+ return 0;
if (watchdog_active(wdd))
return da9062_wdt_start(wdd);
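Two behavioural changes ride together in this driver: stop() no longer pings the watchdog before clearing TWDSCALE, and the suspend/resume hooks become no-ops unless the firmware node carries the "dlg,use-sw-pm" property. By default, then, the hardware watchdog stays armed across suspend and the driver leaves it alone; only when the platform explicitly opts in to software power management does the driver stop the timer on suspend and rearm it on resume.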
diff --git a/drivers/watchdog/wdat_wdt.c b/drivers/watchdog/wdat_wdt.c
index b069349b52f5..3065dd670a18 100644
--- a/drivers/watchdog/wdat_wdt.c
+++ b/drivers/watchdog/wdat_wdt.c
@@ -54,6 +54,13 @@ module_param(nowayout, bool, 0);
MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
__MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
+#define WDAT_DEFAULT_TIMEOUT 30
+
+static int timeout = WDAT_DEFAULT_TIMEOUT;
+module_param(timeout, int, 0);
+MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds (default="
+ __MODULE_STRING(WDAT_DEFAULT_TIMEOUT) ")");
+
static int wdat_wdt_read(struct wdat_wdt *wdat,
const struct wdat_instruction *instr, u32 *value)
{
@@ -389,7 +396,7 @@ static int wdat_wdt_probe(struct platform_device *pdev)
memset(&r, 0, sizeof(r));
r.start = gas->address;
- r.end = r.start + gas->access_width - 1;
+ r.end = r.start + ACPI_ACCESS_BYTE_WIDTH(gas->access_width) - 1;
if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
r.flags = IORESOURCE_MEM;
} else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
@@ -438,6 +445,22 @@ static int wdat_wdt_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, wdat);
+ /*
+ * Set initial timeout so that userspace has time to configure the
+ * watchdog properly after it has opened the device. In some cases
+ * the BIOS default is too short and causes immediate reboot.
+ */
+ if (timeout * 1000 < wdat->wdd.min_hw_heartbeat_ms ||
+ timeout * 1000 > wdat->wdd.max_hw_heartbeat_ms) {
+ dev_warn(dev, "Invalid timeout %d given, using %d\n",
+ timeout, WDAT_DEFAULT_TIMEOUT);
+ timeout = WDAT_DEFAULT_TIMEOUT;
+ }
+
+ ret = wdat_wdt_set_timeout(&wdat->wdd, timeout);
+ if (ret)
+ return ret;
+
watchdog_set_nowayout(&wdat->wdd, nowayout);
return devm_watchdog_register_device(dev, &wdat->wdd);
}
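Note the units in the new bounds check: the timeout module parameter is in seconds, while min_hw_heartbeat_ms and max_hw_heartbeat_ms (populated from the WDAT table) are in milliseconds, hence the timeout * 1000 comparisons. For instance, if the table reports a 1-600 second window, timeout=30 passes as 30000 ms, while timeout=0 or timeout=1000 is rejected and replaced, with a warning, by the 30-second WDAT_DEFAULT_TIMEOUT before wdat_wdt_set_timeout() programs the hardware.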
diff --git a/drivers/xen/preempt.c b/drivers/xen/preempt.c
index 70650b248de5..17240c5325a3 100644
--- a/drivers/xen/preempt.c
+++ b/drivers/xen/preempt.c
@@ -33,7 +33,9 @@ asmlinkage __visible void xen_maybe_preempt_hcall(void)
* cpu.
*/
__this_cpu_write(xen_in_preemptible_hcall, false);
- _cond_resched();
+ local_irq_enable();
+ cond_resched();
+ local_irq_disable();
__this_cpu_write(xen_in_preemptible_hcall, true);
}
}
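The point of this fix is execution context: xen_maybe_preempt_hcall() runs with interrupts disabled, and invoking a reschedule primitive in that state is invalid, so interrupts are enabled around the call to give the scheduler a legitimate preemption point before the expected IRQ-off state is restored. Switching from _cond_resched() to cond_resched() also moves the code off a scheduler-internal helper onto the public API.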
diff --git a/drivers/xen/xen-pciback/pciback.h b/drivers/xen/xen-pciback/pciback.h
index ce1077e32466..7c95516a860f 100644
--- a/drivers/xen/xen-pciback/pciback.h
+++ b/drivers/xen/xen-pciback/pciback.h
@@ -52,7 +52,7 @@ struct xen_pcibk_dev_data {
unsigned int ack_intr:1; /* .. and ACK-ing */
unsigned long handled;
unsigned int irq; /* Saved in case device transitions to MSI/MSI-X */
- char irq_name[0]; /* xen-pcibk[000:04:00.0] */
+ char irq_name[]; /* xen-pcibk[000:04:00.0] */
};
/* Used by XenBus and xen_pcibk_ops.c */
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index d239fc3c5e3d..eb5151fc8efa 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -313,6 +313,8 @@ static int process_msg(void)
req->msg.type = state.msg.type;
req->msg.len = state.msg.len;
req->body = state.body;
+ /* write body, then update state */
+ virt_wmb();
req->state = xb_req_state_got_reply;
req->cb(req);
} else
@@ -395,6 +397,8 @@ static int process_writes(void)
if (state.req->state == xb_req_state_aborted)
kfree(state.req);
else {
+ /* write err, then update state */
+ virt_wmb();
state.req->state = xb_req_state_got_reply;
wake_up(&state.req->wq);
}
diff --git a/drivers/xen/xenbus/xenbus_probe.c b/drivers/xen/xenbus/xenbus_probe.c
index 66975da4f3b6..8c4d05b687b7 100644
--- a/drivers/xen/xenbus/xenbus_probe.c
+++ b/drivers/xen/xenbus/xenbus_probe.c
@@ -239,9 +239,9 @@ int xenbus_dev_probe(struct device *_dev)
goto fail;
}
- spin_lock(&dev->reclaim_lock);
+ down(&dev->reclaim_sem);
err = drv->probe(dev, id);
- spin_unlock(&dev->reclaim_lock);
+ up(&dev->reclaim_sem);
if (err)
goto fail_put;
@@ -271,9 +271,9 @@ int xenbus_dev_remove(struct device *_dev)
free_otherend_watch(dev);
if (drv->remove) {
- spin_lock(&dev->reclaim_lock);
+ down(&dev->reclaim_sem);
drv->remove(dev);
- spin_unlock(&dev->reclaim_lock);
+ up(&dev->reclaim_sem);
}
module_put(drv->driver.owner);
@@ -473,7 +473,7 @@ int xenbus_probe_node(struct xen_bus_type *bus,
goto fail;
dev_set_name(&xendev->dev, "%s", devname);
- spin_lock_init(&xendev->reclaim_lock);
+ sema_init(&xendev->reclaim_sem, 1);
/* Register with generic device framework. */
err = device_register(&xendev->dev);
diff --git a/drivers/xen/xenbus/xenbus_probe_backend.c b/drivers/xen/xenbus/xenbus_probe_backend.c
index 791f6fe01e91..9b2fbe69bccc 100644
--- a/drivers/xen/xenbus/xenbus_probe_backend.c
+++ b/drivers/xen/xenbus/xenbus_probe_backend.c
@@ -45,6 +45,7 @@
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/export.h>
+#include <linux/semaphore.h>
#include <asm/page.h>
#include <asm/pgtable.h>
@@ -257,10 +258,10 @@ static int backend_reclaim_memory(struct device *dev, void *data)
drv = to_xenbus_driver(dev->driver);
if (drv && drv->reclaim_memory) {
xdev = to_xenbus_device(dev);
- if (!spin_trylock(&xdev->reclaim_lock))
+ if (down_trylock(&xdev->reclaim_sem))
return 0;
drv->reclaim_memory(xdev);
- spin_unlock(&xdev->reclaim_lock);
+ up(&xdev->reclaim_sem);
}
return 0;
}
diff --git a/drivers/xen/xenbus/xenbus_xs.c b/drivers/xen/xenbus/xenbus_xs.c
index ddc18da61834..3a06eb699f33 100644
--- a/drivers/xen/xenbus/xenbus_xs.c
+++ b/drivers/xen/xenbus/xenbus_xs.c
@@ -191,8 +191,11 @@ static bool xenbus_ok(void)
static bool test_reply(struct xb_req_data *req)
{
- if (req->state == xb_req_state_got_reply || !xenbus_ok())
+ if (req->state == xb_req_state_got_reply || !xenbus_ok()) {
+ /* read req->state before all other fields */
+ virt_rmb();
return true;
+ }
/* Make sure to reread req->state each time. */
barrier();
@@ -202,7 +205,7 @@ static bool test_reply(struct xb_req_data *req)
static void *read_reply(struct xb_req_data *req)
{
- while (req->state != xb_req_state_got_reply) {
+ do {
wait_event(req->wq, test_reply(req));
if (!xenbus_ok())
@@ -216,7 +219,7 @@ static void *read_reply(struct xb_req_data *req)
if (req->err)
return ERR_PTR(req->err);
- }
+ } while (req->state != xb_req_state_got_reply);
return req->body;
}
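The xenbus changes in this group form one barrier pairing split across two files: the writers in xenbus_comms.c publish req->body or req->err and only then set req->state (virt_wmb()), while the reader checks req->state first and only then touches the other fields (virt_rmb()). A condensed sketch of the pairing, where use() stands in for whatever the caller does with the reply:

/* writer side (process_msg / process_writes) */
req->body = state.body;
virt_wmb();			/* publish body before state */
req->state = xb_req_state_got_reply;

/* reader side (test_reply / read_reply) */
if (req->state == xb_req_state_got_reply) {
	virt_rmb();		/* order the state load before the body load */
	use(req->body);
}

The do/while conversion in read_reply() matters for the same reason: it guarantees at least one pass through test_reply(), so the virt_rmb() is always executed before req->body is read, even when the reply had already arrived by the time read_reply() was called.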
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 7fa9bb79ad08..c6c9a6a8e6c8 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3164,6 +3164,7 @@ int __cold open_ctree(struct super_block *sb,
/* do not make disk changes in broken FS or nologreplay is given */
if (btrfs_super_log_root(disk_super) != 0 &&
!btrfs_test_opt(fs_info, NOLOGREPLAY)) {
+ btrfs_info(fs_info, "start tree-log replay");
ret = btrfs_replay_log(fs_info, fs_devices);
if (ret) {
err = ret;
@@ -3199,6 +3200,7 @@ int __cold open_ctree(struct super_block *sb,
if (IS_ERR(fs_info->fs_root)) {
err = PTR_ERR(fs_info->fs_root);
btrfs_warn(fs_info, "failed to read fs tree: %d", err);
+ fs_info->fs_root = NULL;
goto fail_qgroup;
}
@@ -4275,6 +4277,7 @@ static int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
cond_resched();
spin_lock(&delayed_refs->lock);
}
+ btrfs_qgroup_destroy_extent_records(trans);
spin_unlock(&delayed_refs->lock);
@@ -4500,7 +4503,6 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
wake_up(&fs_info->transaction_wait);
btrfs_destroy_delayed_inodes(fs_info);
- btrfs_assert_delayed_root_empty(fs_info);
btrfs_destroy_marked_extents(fs_info, &cur_trans->dirty_pages,
EXTENT_DIRTY);
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 0163fdd59f8f..a7bc66121330 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4430,6 +4430,8 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
ret = alloc_reserved_file_extent(trans, 0, root_objectid, 0, owner,
offset, ins, 1);
+ if (ret)
+ btrfs_pin_extent(fs_info, ins->objectid, ins->offset, 1);
btrfs_put_block_group(block_group);
return ret;
}
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c
index 6f417ff68980..bd6229fb2b6f 100644
--- a/fs/btrfs/extent_map.c
+++ b/fs/btrfs/extent_map.c
@@ -237,6 +237,17 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
struct extent_map *merge = NULL;
struct rb_node *rb;
+ /*
+ * We can't modify an extent map that is in the tree and that is being
+	 * used by another task, as it can cause that other task to see it in
+	 * an inconsistent state during the merging. We always have 1 reference for
+ * the tree and 1 for this task (which is unpinning the extent map or
+ * clearing the logging flag), so anything > 2 means it's being used by
+ * other tasks too.
+ */
+ if (refcount_read(&em->refs) > 2)
+ return;
+
if (em->start != 0) {
rb = rb_prev(&em->rb_node);
if (rb)
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5b3ec93ff911..27076ebadb36 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -4085,6 +4085,8 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
u64 bytes_deleted = 0;
bool be_nice = false;
bool should_throttle = false;
+ const u64 lock_start = ALIGN_DOWN(new_size, fs_info->sectorsize);
+ struct extent_state *cached_state = NULL;
BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY);
@@ -4101,6 +4103,10 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
return -ENOMEM;
path->reada = READA_BACK;
+ if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
+ lock_extent_bits(&BTRFS_I(inode)->io_tree, lock_start, (u64)-1,
+ &cached_state);
+
/*
* We want to drop from the next block forward in case this new size is
* not block aligned since we will be keeping the last block of the
@@ -4137,7 +4143,6 @@ search_again:
goto out;
}
- path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret < 0)
goto out;
@@ -4289,7 +4294,6 @@ delete:
root == fs_info->tree_root)) {
struct btrfs_ref ref = { 0 };
- btrfs_set_path_blocking(path);
bytes_deleted += extent_num_bytes;
btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
@@ -4365,6 +4369,8 @@ out:
if (!ret && last_size > new_size)
last_size = new_size;
btrfs_ordered_update_i_size(inode, last_size, NULL);
+ unlock_extent_cached(&BTRFS_I(inode)->io_tree, lock_start,
+ (u64)-1, &cached_state);
}
btrfs_free_path(path);
@@ -7777,6 +7783,7 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
{
struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
struct btrfs_io_bio *orig_io_bio = btrfs_io_bio(dip->orig_bio);
+ u16 csum_size;
blk_status_t ret;
/*
@@ -7796,7 +7803,8 @@ static inline blk_status_t btrfs_lookup_and_bind_dio_csum(struct inode *inode,
file_offset -= dip->logical_offset;
file_offset >>= inode->i_sb->s_blocksize_bits;
- io_bio->csum = (u8 *)(((u32 *)orig_io_bio->csum) + file_offset);
+ csum_size = btrfs_super_csum_size(btrfs_sb(inode->i_sb)->super_copy);
+ io_bio->csum = orig_io_bio->csum + csum_size * file_offset;
return 0;
}
@@ -9818,6 +9826,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
struct btrfs_root *root = BTRFS_I(inode)->root;
struct btrfs_key ins;
u64 cur_offset = start;
+ u64 clear_offset = start;
u64 i_size;
u64 cur_bytes;
u64 last_alloc = (u64)-1;
@@ -9852,6 +9861,15 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
btrfs_end_transaction(trans);
break;
}
+
+ /*
+ * We've reserved this space, and thus converted it from
+	 * ->bytes_may_use to ->bytes_reserved. For any error that happens
+	 * from here on out, we only need to clear our reservation
+ * for the remaining unreserved area, so advance our
+ * clear_offset by our extent size.
+ */
+ clear_offset += ins.offset;
btrfs_dec_block_group_reservations(fs_info, ins.objectid);
last_alloc = ins.offset;
@@ -9931,9 +9949,9 @@ next:
if (own_trans)
btrfs_end_transaction(trans);
}
- if (cur_offset < end)
- btrfs_free_reserved_data_space(inode, NULL, cur_offset,
- end - cur_offset + 1);
+ if (clear_offset < end)
+ btrfs_free_reserved_data_space(inode, NULL, clear_offset,
+ end - clear_offset + 1);
return ret;
}
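The distinction between the two offsets deserves spelling out: clear_offset advances as soon as a reservation is consumed (converted from ->bytes_may_use to ->bytes_reserved), while cur_offset advances only once the extent item is fully inserted. If an error hits between those two points, freeing from cur_offset would release space that is no longer accounted in ->bytes_may_use and underflow the counter; freeing from clear_offset releases only the tail of the range that is still merely reserved.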
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index ecb9fb6a6fe0..a65f189a5b94 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -679,10 +679,15 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
}
btrfs_start_ordered_extent(inode, ordered, 1);
end = ordered->file_offset;
+ /*
+	 * If the ordered extent had an error, save the error but don't
+ * exit without waiting first for all other ordered extents in
+ * the range to complete.
+ */
if (test_bit(BTRFS_ORDERED_IOERR, &ordered->flags))
ret = -EIO;
btrfs_put_ordered_extent(ordered);
- if (ret || end == 0 || end == start)
+ if (end == 0 || end == start)
break;
end--;
}
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index 98d9a50352d6..ff1870ff3474 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -4002,3 +4002,16 @@ out:
}
return ret;
}
+
+void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans)
+{
+ struct btrfs_qgroup_extent_record *entry;
+ struct btrfs_qgroup_extent_record *next;
+ struct rb_root *root;
+
+ root = &trans->delayed_refs.dirty_extent_root;
+ rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
+ ulist_free(entry->old_roots);
+ kfree(entry);
+ }
+}
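rbtree_postorder_for_each_entry_safe() is the natural iterator for teardown: post-order visits children before their parent, so every node can be freed as it is visited without the walk touching freed memory, and no per-node rb_erase() rebalancing is paid. A self-contained sketch of the idiom with a hypothetical record type:

struct record {
	struct rb_node node;
	void *payload;
};

static void destroy_all(struct rb_root *root)
{
	struct record *entry, *next;

	rbtree_postorder_for_each_entry_safe(entry, next, root, node) {
		kfree(entry->payload);
		kfree(entry);
	}
	*root = RB_ROOT;	/* the old root now points at freed nodes */
}

Any caller that keeps the rb_root around afterwards must reinitialize it this way, since the iterator leaves the root referencing freed memory.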
diff --git a/fs/btrfs/qgroup.h b/fs/btrfs/qgroup.h
index 236f12224d52..1bc654459469 100644
--- a/fs/btrfs/qgroup.h
+++ b/fs/btrfs/qgroup.h
@@ -414,5 +414,6 @@ int btrfs_qgroup_add_swapped_blocks(struct btrfs_trans_handle *trans,
u64 last_snapshot);
int btrfs_qgroup_trace_subtree_after_cow(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct extent_buffer *eb);
+void btrfs_qgroup_destroy_extent_records(struct btrfs_transaction *trans);
#endif
diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
index b57f3618e58e..454a1015d026 100644
--- a/fs/btrfs/ref-verify.c
+++ b/fs/btrfs/ref-verify.c
@@ -744,6 +744,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
*/
be = add_block_entry(fs_info, bytenr, num_bytes, ref_root);
if (IS_ERR(be)) {
+ kfree(ref);
kfree(ra);
ret = PTR_ERR(be);
goto out;
@@ -757,6 +758,8 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
"re-allocated a block that still has references to it!");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
+ kfree(ref);
+ kfree(ra);
goto out_unlock;
}
@@ -819,6 +822,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
"dropping a ref for a existing root that doesn't have a ref on the block");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
+ kfree(ref);
kfree(ra);
goto out_unlock;
}
@@ -834,6 +838,7 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
"attempting to add another ref for an existing ref on a tree block");
dump_block_entry(fs_info, be);
dump_ref_action(fs_info, ra);
+ kfree(ref);
kfree(ra);
goto out_unlock;
}
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0616a5434793..67c63858812a 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -1834,6 +1834,8 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
}
if (btrfs_super_log_root(fs_info->super_copy) != 0) {
+ btrfs_warn(fs_info,
+ "mount required to replay tree-log, cannot remount read-write");
ret = -EINVAL;
goto restore;
}
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index 7436422194da..3c10e78924d0 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -901,6 +901,12 @@ static int addrm_unknown_feature_attrs(struct btrfs_fs_info *fs_info, bool add)
static void __btrfs_sysfs_remove_fsid(struct btrfs_fs_devices *fs_devs)
{
+ if (fs_devs->devinfo_kobj) {
+ kobject_del(fs_devs->devinfo_kobj);
+ kobject_put(fs_devs->devinfo_kobj);
+ fs_devs->devinfo_kobj = NULL;
+ }
+
if (fs_devs->devices_kobj) {
kobject_del(fs_devs->devices_kobj);
kobject_put(fs_devs->devices_kobj);
@@ -1289,7 +1295,7 @@ int btrfs_sysfs_add_device_link(struct btrfs_fs_devices *fs_devices,
init_completion(&dev->kobj_unregister);
error = kobject_init_and_add(&dev->devid_kobj, &devid_ktype,
- fs_devices->devices_kobj, "%llu",
+ fs_devices->devinfo_kobj, "%llu",
dev->devid);
if (error) {
kobject_put(&dev->devid_kobj);
@@ -1369,6 +1375,15 @@ int btrfs_sysfs_add_fsid(struct btrfs_fs_devices *fs_devs)
return -ENOMEM;
}
+ fs_devs->devinfo_kobj = kobject_create_and_add("devinfo",
+ &fs_devs->fsid_kobj);
+ if (!fs_devs->devinfo_kobj) {
+ btrfs_err(fs_devs->fs_info,
+ "failed to init sysfs devinfo kobject");
+ btrfs_sysfs_remove_fsid(fs_devs);
+ return -ENOMEM;
+ }
+
return 0;
}
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 33dcc88b428a..beb6c69cd1e5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -121,6 +121,8 @@ void btrfs_put_transaction(struct btrfs_transaction *transaction)
BUG_ON(!list_empty(&transaction->list));
WARN_ON(!RB_EMPTY_ROOT(
&transaction->delayed_refs.href_root.rb_root));
+ WARN_ON(!RB_EMPTY_ROOT(
+ &transaction->delayed_refs.dirty_extent_root));
if (transaction->delayed_refs.pending_csums)
btrfs_err(transaction->fs_info,
"pending csums is %llu",
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index 409f4816fb89..f01552a0785e 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -258,6 +258,7 @@ struct btrfs_fs_devices {
/* sysfs kobjects */
struct kobject fsid_kobj;
struct kobject *devices_kobj;
+ struct kobject *devinfo_kobj;
struct completion kobj_unregister;
};
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index c3b8e8e0bf17..7e0190b1f821 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -1418,6 +1418,7 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
struct ceph_cap_flush *prealloc_cf;
ssize_t count, written = 0;
int err, want, got;
+ bool direct_lock = false;
loff_t pos;
loff_t limit = max(i_size_read(inode), fsc->max_file_size);
@@ -1428,8 +1429,11 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (!prealloc_cf)
return -ENOMEM;
+ if ((iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT)
+ direct_lock = true;
+
retry_snap:
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_start_io_direct(inode);
else
ceph_start_io_write(inode);
@@ -1519,14 +1523,15 @@ retry_snap:
/* we might need to revert back to that point */
data = *from;
- if (iocb->ki_flags & IOCB_DIRECT) {
+ if (iocb->ki_flags & IOCB_DIRECT)
written = ceph_direct_read_write(iocb, &data, snapc,
&prealloc_cf);
- ceph_end_io_direct(inode);
- } else {
+ else
written = ceph_sync_write(iocb, &data, pos, snapc);
+ if (direct_lock)
+ ceph_end_io_direct(inode);
+ else
ceph_end_io_write(inode);
- }
if (written > 0)
iov_iter_advance(from, written);
ceph_put_snap_context(snapc);
@@ -1577,7 +1582,7 @@ retry_snap:
goto out_unlocked;
out:
- if (iocb->ki_flags & IOCB_DIRECT)
+ if (direct_lock)
ceph_end_io_direct(inode);
else
ceph_end_io_write(inode);
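The mask test that drives all of this is worth a second look: (iocb->ki_flags & (IOCB_DIRECT | IOCB_APPEND)) == IOCB_DIRECT selects exactly the direct-and-not-append case. IOCB_DIRECT alone matches; IOCB_DIRECT | IOCB_APPEND does not; IOCB_APPEND alone does not. The upshot is that appending direct writes now take the exclusive write lock rather than the shared direct lock, and caching the decision in direct_lock once, before retry_snap, keeps every lock/unlock pair symmetric across the retry loop and all exit paths.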
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index 1d9f083b8a11..c7f150686a53 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -203,6 +203,26 @@ struct ceph_parse_opts_ctx {
};
/*
+ * Remove adjacent slashes and then the trailing slash, unless it is
+ * the only remaining character.
+ *
+ * E.g. "//dir1////dir2///" --> "/dir1/dir2", "///" --> "/".
+ */
+static void canonicalize_path(char *path)
+{
+ int i, j = 0;
+
+ for (i = 0; path[i] != '\0'; i++) {
+ if (path[i] != '/' || j < 1 || path[j - 1] != '/')
+ path[j++] = path[i];
+ }
+
+ if (j > 1 && path[j - 1] == '/')
+ j--;
+ path[j] = '\0';
+}
+
+/*
* Parse the source parameter. Distinguish the server list from the path.
*
* The source will look like:
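Stepping back to canonicalize_path(): the write index j only accepts a '/' when the previously kept character was not one, and the final check trims a trailing slash unless it is the entire string. The examples from its comment, as a usage sketch:

char a[] = "//dir1////dir2///";
char b[] = "///";

canonicalize_path(a);	/* a is now "/dir1/dir2" */
canonicalize_path(b);	/* b is now "/" */

Because the result is never longer than the input, the rewrite is safely done in place, which is what lets the mount code keep a single kstrdup()'d server_path.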
@@ -224,15 +244,16 @@ static int ceph_parse_source(struct fs_parameter *param, struct fs_context *fc)
dev_name_end = strchr(dev_name, '/');
if (dev_name_end) {
- kfree(fsopt->server_path);
-
/*
* The server_path will include the whole chars from userland
* including the leading '/'.
*/
+ kfree(fsopt->server_path);
fsopt->server_path = kstrdup(dev_name_end, GFP_KERNEL);
if (!fsopt->server_path)
return -ENOMEM;
+
+ canonicalize_path(fsopt->server_path);
} else {
dev_name_end = dev_name + strlen(dev_name);
}
@@ -456,73 +477,6 @@ static int strcmp_null(const char *s1, const char *s2)
return strcmp(s1, s2);
}
-/**
- * path_remove_extra_slash - Remove the extra slashes in the server path
- * @server_path: the server path and could be NULL
- *
- * Return NULL if the path is NULL or only consists of "/", or a string
- * without any extra slashes including the leading slash(es) and the
- * slash(es) at the end of the server path, such as:
- * "//dir1////dir2///" --> "dir1/dir2"
- */
-static char *path_remove_extra_slash(const char *server_path)
-{
- const char *path = server_path;
- const char *cur, *end;
- char *buf, *p;
- int len;
-
- /* if the server path is omitted */
- if (!path)
- return NULL;
-
- /* remove all the leading slashes */
- while (*path == '/')
- path++;
-
- /* if the server path only consists of slashes */
- if (*path == '\0')
- return NULL;
-
- len = strlen(path);
-
- buf = kmalloc(len + 1, GFP_KERNEL);
- if (!buf)
- return ERR_PTR(-ENOMEM);
-
- end = path + len;
- p = buf;
- do {
- cur = strchr(path, '/');
- if (!cur)
- cur = end;
-
- len = cur - path;
-
- /* including one '/' */
- if (cur != end)
- len += 1;
-
- memcpy(p, path, len);
- p += len;
-
- while (cur <= end && *cur == '/')
- cur++;
- path = cur;
- } while (path < end);
-
- *p = '\0';
-
- /*
- * remove the last slash if there has and just to make sure that
- * we will get something like "dir1/dir2"
- */
- if (*(--p) == '/')
- *p = '\0';
-
- return buf;
-}
-
static int compare_mount_options(struct ceph_mount_options *new_fsopt,
struct ceph_options *new_opt,
struct ceph_fs_client *fsc)
@@ -530,7 +484,6 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
struct ceph_mount_options *fsopt1 = new_fsopt;
struct ceph_mount_options *fsopt2 = fsc->mount_options;
int ofs = offsetof(struct ceph_mount_options, snapdir_name);
- char *p1, *p2;
int ret;
ret = memcmp(fsopt1, fsopt2, ofs);
@@ -540,21 +493,12 @@ static int compare_mount_options(struct ceph_mount_options *new_fsopt,
ret = strcmp_null(fsopt1->snapdir_name, fsopt2->snapdir_name);
if (ret)
return ret;
+
ret = strcmp_null(fsopt1->mds_namespace, fsopt2->mds_namespace);
if (ret)
return ret;
- p1 = path_remove_extra_slash(fsopt1->server_path);
- if (IS_ERR(p1))
- return PTR_ERR(p1);
- p2 = path_remove_extra_slash(fsopt2->server_path);
- if (IS_ERR(p2)) {
- kfree(p1);
- return PTR_ERR(p2);
- }
- ret = strcmp_null(p1, p2);
- kfree(p1);
- kfree(p2);
+ ret = strcmp_null(fsopt1->server_path, fsopt2->server_path);
if (ret)
return ret;
@@ -957,7 +901,9 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
mutex_lock(&fsc->client->mount_mutex);
if (!fsc->sb->s_root) {
- const char *path, *p;
+ const char *path = fsc->mount_options->server_path ?
+ fsc->mount_options->server_path + 1 : "";
+
err = __ceph_open_session(fsc->client, started);
if (err < 0)
goto out;
@@ -969,22 +915,11 @@ static struct dentry *ceph_real_mount(struct ceph_fs_client *fsc,
goto out;
}
- p = path_remove_extra_slash(fsc->mount_options->server_path);
- if (IS_ERR(p)) {
- err = PTR_ERR(p);
- goto out;
- }
- /* if the server path is omitted or just consists of '/' */
- if (!p)
- path = "";
- else
- path = p;
dout("mount opening path '%s'\n", path);
ceph_fs_debugfs_init(fsc);
root = open_root_dentry(fsc, path, started);
- kfree(p);
if (IS_ERR(root)) {
err = PTR_ERR(root);
goto out;
@@ -1097,10 +1032,6 @@ static int ceph_get_tree(struct fs_context *fc)
if (!fc->source)
return invalfc(fc, "No source");
-#ifdef CONFIG_CEPH_FS_POSIX_ACL
- fc->sb_flags |= SB_POSIXACL;
-#endif
-
/* create client (which we may/may not use) */
fsc = create_fs_client(pctx->opts, pctx->copts);
pctx->opts = NULL;
@@ -1223,6 +1154,10 @@ static int ceph_init_fs_context(struct fs_context *fc)
fsopt->max_readdir_bytes = CEPH_MAX_READDIR_BYTES_DEFAULT;
fsopt->congestion_kb = default_congestion_kb();
+#ifdef CONFIG_CEPH_FS_POSIX_ACL
+ fc->sb_flags |= SB_POSIXACL;
+#endif
+
fc->fs_private = pctx;
fc->ops = &ceph_context_ops;
return 0;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 1e456a9011bb..037cdfb2ad4f 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -91,7 +91,7 @@ struct ceph_mount_options {
char *snapdir_name; /* default ".snap" */
char *mds_namespace; /* default NULL */
- char *server_path; /* default "/" */
+ char *server_path; /* default NULL (means "/") */
char *fscache_uniq; /* default NULL */
};
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 606f26d862dc..cc3ada12848d 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -324,6 +324,8 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
if (full_path == NULL)
goto cdda_exit;
+ convert_delimiter(full_path, '\\');
+
cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
if (!cifs_sb_master_tlink(cifs_sb)) {
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c
index 440828afcdde..716574aab3b6 100644
--- a/fs/cifs/cifsacl.c
+++ b/fs/cifs/cifsacl.c
@@ -601,7 +601,7 @@ static void access_flags_to_mode(__le32 ace_flags, int type, umode_t *pmode,
((flags & FILE_EXEC_RIGHTS) == FILE_EXEC_RIGHTS))
*pmode |= (S_IXUGO & (*pbits_to_set));
- cifs_dbg(NOISY, "access flags 0x%x mode now 0x%x\n", flags, *pmode);
+ cifs_dbg(NOISY, "access flags 0x%x mode now %04o\n", flags, *pmode);
return;
}
@@ -630,7 +630,7 @@ static void mode_to_access_flags(umode_t mode, umode_t bits_to_use,
if (mode & S_IXUGO)
*pace_flags |= SET_FILE_EXEC_RIGHTS;
- cifs_dbg(NOISY, "mode: 0x%x, access flags now 0x%x\n",
+ cifs_dbg(NOISY, "mode: %04o, access flags now 0x%x\n",
mode, *pace_flags);
return;
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index febab27cd838..fa77fe5258b0 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -414,7 +414,7 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
seq_puts(s, "ntlm");
break;
case Kerberos:
- seq_printf(s, "krb5,cruid=%u", from_kuid_munged(&init_user_ns,ses->cred_uid));
+ seq_puts(s, "krb5");
break;
case RawNTLMSSP:
seq_puts(s, "ntlmssp");
@@ -427,6 +427,10 @@ cifs_show_security(struct seq_file *s, struct cifs_ses *ses)
if (ses->sign)
seq_puts(s, "i");
+
+ if (ses->sectype == Kerberos)
+ seq_printf(s, ",cruid=%u",
+ from_kuid_munged(&init_user_ns, ses->cred_uid));
}
static void
@@ -526,6 +530,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
if (tcon->seal)
seq_puts(s, ",seal");
+ else if (tcon->ses->server->ignore_signature)
+ seq_puts(s, ",signloosely");
if (tcon->nocase)
seq_puts(s, ",nocase");
if (tcon->local_lease)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index de82cfa44b1a..0d956360e984 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -1281,6 +1281,7 @@ struct cifs_fid {
__u64 volatile_fid; /* volatile file id for smb2 */
__u8 lease_key[SMB2_LEASE_KEY_SIZE]; /* lease key for smb2 */
__u8 create_guid[16];
+ __u32 access;
struct cifs_pending_open *pending_open;
unsigned int epoch;
#ifdef CONFIG_CIFS_DEBUG2
@@ -1741,6 +1742,12 @@ static inline bool is_retryable_error(int error)
return false;
}
+
+/* cifs_get_writable_file() flags */
+#define FIND_WR_ANY 0
+#define FIND_WR_FSUID_ONLY 1
+#define FIND_WR_WITH_DELETE 2
+
#define MID_FREE 0
#define MID_REQUEST_ALLOCATED 1
#define MID_REQUEST_SUBMITTED 2
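The new FIND_WR_* constants replace the old bool fsuid_only parameter with a small flags word: callers combine them with bitwise OR, and cifs_get_writable_file() tests them with AND, as the file.c hunks below show (flags & FIND_WR_FSUID_ONLY, flags & FIND_WR_WITH_DELETE). The practical payoff is FIND_WR_WITH_DELETE: a rename needs a handle whose open carried DELETE access, which the smb2inode.c hunk requests as cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile).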
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 89eaaf46d1ca..e5cb681ec138 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -134,11 +134,12 @@ extern bool backup_cred(struct cifs_sb_info *);
extern bool is_size_safe_to_change(struct cifsInodeInfo *, __u64 eof);
extern void cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset,
unsigned int bytes_written);
-extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, bool);
+extern struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *, int);
extern int cifs_get_writable_file(struct cifsInodeInfo *cifs_inode,
- bool fsuid_only,
+ int flags,
struct cifsFileInfo **ret_file);
extern int cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ int flags,
struct cifsFileInfo **ret_file);
extern struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *, bool);
extern int cifs_get_readable_path(struct cifs_tcon *tcon, const char *name,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 3c89569e7210..6f6fb3606a5d 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -1492,6 +1492,7 @@ openRetry:
*oplock = rsp->OplockLevel;
/* cifs fid stays in le */
oparms->fid->netfid = rsp->Fid;
+ oparms->fid->access = desired_access;
/* Let caller know file was created so we can set the mode. */
/* Do we care about the CreateAction in any other cases? */
@@ -2115,7 +2116,7 @@ cifs_writev_requeue(struct cifs_writedata *wdata)
wdata2->tailsz = tailsz;
wdata2->bytes = cur_len;
- rc = cifs_get_writable_file(CIFS_I(inode), false,
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY,
&wdata2->cfile);
if (!wdata2->cfile) {
cifs_dbg(VFS, "No writable handle to retry writepages rc=%d\n",
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index a941ac7a659d..4804d1df8c1c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -4151,7 +4151,7 @@ int cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
cifs_sb->mnt_gid = pvolume_info->linux_gid;
cifs_sb->mnt_file_mode = pvolume_info->file_mode;
cifs_sb->mnt_dir_mode = pvolume_info->dir_mode;
- cifs_dbg(FYI, "file mode: 0x%hx dir mode: 0x%hx\n",
+ cifs_dbg(FYI, "file mode: %04ho dir mode: %04ho\n",
cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode);
cifs_sb->actimeo = pvolume_info->actimeo;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index bc9516ab4b34..3b942ecdd4be 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -1958,7 +1958,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
/* Return -EBADF if no handle is found and general rc otherwise */
int
-cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
+cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, int flags,
struct cifsFileInfo **ret_file)
{
struct cifsFileInfo *open_file, *inv_file = NULL;
@@ -1966,7 +1966,8 @@ cifs_get_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only,
bool any_available = false;
int rc = -EBADF;
unsigned int refind = 0;
-
+ bool fsuid_only = flags & FIND_WR_FSUID_ONLY;
+ bool with_delete = flags & FIND_WR_WITH_DELETE;
*ret_file = NULL;
/*
@@ -1998,6 +1999,8 @@ refind_writable:
continue;
if (fsuid_only && !uid_eq(open_file->uid, current_fsuid()))
continue;
+ if (with_delete && !(open_file->fid.access & DELETE))
+ continue;
if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
if (!open_file->invalidHandle) {
/* found a good writable file */
@@ -2045,12 +2048,12 @@ refind_writable:
}
struct cifsFileInfo *
-find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
+find_writable_file(struct cifsInodeInfo *cifs_inode, int flags)
{
struct cifsFileInfo *cfile;
int rc;
- rc = cifs_get_writable_file(cifs_inode, fsuid_only, &cfile);
+ rc = cifs_get_writable_file(cifs_inode, flags, &cfile);
if (rc)
cifs_dbg(FYI, "couldn't find writable handle rc=%d", rc);
@@ -2059,6 +2062,7 @@ find_writable_file(struct cifsInodeInfo *cifs_inode, bool fsuid_only)
int
cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
+ int flags,
struct cifsFileInfo **ret_file)
{
struct list_head *tmp;
@@ -2085,7 +2089,7 @@ cifs_get_writable_path(struct cifs_tcon *tcon, const char *name,
kfree(full_path);
cinode = CIFS_I(d_inode(cfile->dentry));
spin_unlock(&tcon->open_file_lock);
- return cifs_get_writable_file(cinode, 0, ret_file);
+ return cifs_get_writable_file(cinode, flags, ret_file);
}
spin_unlock(&tcon->open_file_lock);
@@ -2162,7 +2166,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to)
if (mapping->host->i_size - offset < (loff_t)to)
to = (unsigned)(mapping->host->i_size - offset);
- rc = cifs_get_writable_file(CIFS_I(mapping->host), false, &open_file);
+ rc = cifs_get_writable_file(CIFS_I(mapping->host), FIND_WR_ANY,
+ &open_file);
if (!rc) {
bytes_written = cifs_write(open_file, open_file->pid,
write_data, to - from, &offset);
@@ -2355,7 +2360,7 @@ retry:
if (cfile)
cifsFileInfo_put(cfile);
- rc = cifs_get_writable_file(CIFS_I(inode), false, &cfile);
+ rc = cifs_get_writable_file(CIFS_I(inode), FIND_WR_ANY, &cfile);
/* in case of an error store it to return later */
if (rc)
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 9ba623b601ec..1e8a4b1579db 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -653,8 +653,8 @@ cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info,
*/
if ((fattr->cf_nlink < 1) && !tcon->unix_ext &&
!info->DeletePending) {
- cifs_dbg(1, "bogus file nlink value %u\n",
- fattr->cf_nlink);
+ cifs_dbg(VFS, "bogus file nlink value %u\n",
+ fattr->cf_nlink);
fattr->cf_flags |= CIFS_FATTR_UNKNOWN_NLINK;
}
}
@@ -1648,7 +1648,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, umode_t mode)
struct TCP_Server_Info *server;
char *full_path;
- cifs_dbg(FYI, "In cifs_mkdir, mode = 0x%hx inode = 0x%p\n",
+ cifs_dbg(FYI, "In cifs_mkdir, mode = %04ho inode = 0x%p\n",
mode, inode);
cifs_sb = CIFS_SB(inode->i_sb);
@@ -2073,6 +2073,7 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
struct inode *inode = d_inode(dentry);
struct super_block *sb = dentry->d_sb;
char *full_path = NULL;
+ int count = 0;
if (inode == NULL)
return -ENOENT;
@@ -2094,15 +2095,18 @@ int cifs_revalidate_dentry_attr(struct dentry *dentry)
full_path, inode, inode->i_count.counter,
dentry, cifs_get_time(dentry), jiffies);
+again:
if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext)
rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid);
else
rc = cifs_get_inode_info(&inode, full_path, NULL, sb,
xid, NULL);
-
+ if (rc == -EAGAIN && count++ < 10)
+ goto again;
out:
kfree(full_path);
free_xid(xid);
+
return rc;
}
@@ -2278,7 +2282,7 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs,
* writebehind data than the SMB timeout for the SetPathInfo
* request would allow
*/
- open_file = find_writable_file(cifsInode, true);
+ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
if (open_file) {
tcon = tlink_tcon(open_file->tlink);
server = tcon->ses->server;
@@ -2428,7 +2432,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
args->ctime = NO_CHANGE_64;
args->device = 0;
- open_file = find_writable_file(cifsInode, true);
+ open_file = find_writable_file(cifsInode, FIND_WR_FSUID_ONLY);
if (open_file) {
u16 nfid = open_file->fid.netfid;
u32 npid = open_file->pid;
@@ -2531,7 +2535,7 @@ cifs_setattr_nounix(struct dentry *direntry, struct iattr *attrs)
rc = 0;
if (attrs->ia_valid & ATTR_MTIME) {
- rc = cifs_get_writable_file(cifsInode, false, &wfile);
+ rc = cifs_get_writable_file(cifsInode, FIND_WR_ANY, &wfile);
if (!rc) {
tcon = tlink_tcon(wfile->tlink);
rc = tcon->ses->server->ops->flush(xid, tcon, &wfile->fid);
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index eb994e313c6a..b130efaf8feb 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -766,7 +766,7 @@ smb_set_file_info(struct inode *inode, const char *full_path,
struct cifs_tcon *tcon;
/* if the file is already open for write, just use that fileid */
- open_file = find_writable_file(cinode, true);
+ open_file = find_writable_file(cinode, FIND_WR_FSUID_ONLY);
if (open_file) {
fid.netfid = open_file->fid.netfid;
netpid = open_file->pid;
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 1cf207564ff9..a8c301ae00ed 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -521,7 +521,7 @@ smb2_mkdir_setinfo(struct inode *inode, const char *name,
cifs_i = CIFS_I(inode);
dosattrs = cifs_i->cifsAttrs | ATTR_READONLY;
data.Attributes = cpu_to_le32(dosattrs);
- cifs_get_writable_path(tcon, name, &cfile);
+ cifs_get_writable_path(tcon, name, FIND_WR_ANY, &cfile);
tmprc = smb2_compound_op(xid, tcon, cifs_sb, name,
FILE_WRITE_ATTRIBUTES, FILE_CREATE,
CREATE_NOT_FILE, ACL_NO_MODE,
@@ -577,7 +577,7 @@ smb2_rename_path(const unsigned int xid, struct cifs_tcon *tcon,
{
struct cifsFileInfo *cfile;
- cifs_get_writable_path(tcon, from_name, &cfile);
+ cifs_get_writable_path(tcon, from_name, FIND_WR_WITH_DELETE, &cfile);
return smb2_set_path_attr(xid, tcon, from_name, to_name,
cifs_sb, DELETE, SMB2_OP_RENAME, cfile);
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index baa825f4cec0..c31e84ee3c39 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -1116,7 +1116,8 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
void *data[1];
struct smb2_file_full_ea_info *ea = NULL;
struct kvec close_iov[1];
- int rc;
+ struct smb2_query_info_rsp *rsp;
+ int rc, used_len = 0;
if (smb3_encryption_required(tcon))
flags |= CIFS_TRANSFORM_REQ;
@@ -1139,6 +1140,38 @@ smb2_set_ea(const unsigned int xid, struct cifs_tcon *tcon,
cifs_sb);
if (rc == -ENODATA)
goto sea_exit;
+ } else {
+		/* If we are adding an attribute we should first check
+ * if there will be enough space available to store
+ * the new EA. If not we should not add it since we
+ * would not be able to even read the EAs back.
+ */
+ rc = smb2_query_info_compound(xid, tcon, utf16_path,
+ FILE_READ_EA,
+ FILE_FULL_EA_INFORMATION,
+ SMB2_O_INFO_FILE,
+ CIFSMaxBufSize -
+ MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE,
+ &rsp_iov[1], &resp_buftype[1], cifs_sb);
+ if (rc == 0) {
+ rsp = (struct smb2_query_info_rsp *)rsp_iov[1].iov_base;
+ used_len = le32_to_cpu(rsp->OutputBufferLength);
+ }
+ free_rsp_buf(resp_buftype[1], rsp_iov[1].iov_base);
+ resp_buftype[1] = CIFS_NO_BUFFER;
+ memset(&rsp_iov[1], 0, sizeof(rsp_iov[1]));
+ rc = 0;
+
+ /* Use a fudge factor of 256 bytes in case we collide
+ * with a different set_EAs command.
+ */
+			if (CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE -
+ MAX_SMB2_CLOSE_RESPONSE_SIZE - 256 <
+ used_len + ea_name_len + ea_value_len + 1) {
+ rc = -ENOSPC;
+ goto sea_exit;
+ }
}
}
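Rough arithmetic behind the new -ENOSPC check: the compounded create + query + close can return at most CIFSMaxBufSize - MAX_SMB2_CREATE_RESPONSE_SIZE - MAX_SMB2_CLOSE_RESPONSE_SIZE bytes of EA data, so a new EA is refused when used_len + ea_name_len + ea_value_len + 1 would exceed that budget minus the 256-byte fudge factor. With CIFSMaxBufSize at its default of 16 KiB, this keeps the full EA list readable back through the same compound path that smb2_query_info_compound() used to measure used_len.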
@@ -1331,6 +1364,7 @@ smb2_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock)
cfile->fid.persistent_fid = fid->persistent_fid;
cfile->fid.volatile_fid = fid->volatile_fid;
+ cfile->fid.access = fid->access;
#ifdef CONFIG_CIFS_DEBUG2
cfile->fid.mid = fid->mid;
#endif /* CIFS_DEBUG2 */
@@ -3294,7 +3328,7 @@ static loff_t smb3_llseek(struct file *file, struct cifs_tcon *tcon, loff_t offs
* some servers (Windows2016) will not reflect recent writes in
* QUERY_ALLOCATED_RANGES until SMB2_flush is called.
*/
- wrcfile = find_writable_file(cifsi, false);
+ wrcfile = find_writable_file(cifsi, FIND_WR_ANY);
if (wrcfile) {
filemap_write_and_wait(inode->i_mapping);
smb2_flush_file(xid, tcon, &wrcfile->fid);
@@ -4795,6 +4829,7 @@ struct smb_version_operations smb21_operations = {
.wp_retry_size = smb2_wp_retry_size,
.dir_needs_close = smb2_dir_needs_close,
.enum_snapshots = smb3_enum_snapshots,
+ .notify = smb3_notify,
.get_dfs_refer = smb2_get_dfs_refer,
.select_sectype = smb2_select_sectype,
#ifdef CONFIG_CIFS_XATTR
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 1234f9ccab03..28c0be5e69b7 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2771,6 +2771,7 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
atomic_inc(&tcon->num_remote_opens);
oparms->fid->persistent_fid = rsp->PersistentFileId;
oparms->fid->volatile_fid = rsp->VolatileFileId;
+ oparms->fid->access = oparms->desired_access;
#ifdef CONFIG_CIFS_DEBUG2
oparms->fid->mid = le64_to_cpu(rsp->sync_hdr.MessageId);
#endif /* CIFS_DEBUG2 */
diff --git a/fs/dax.c b/fs/dax.c
index 1f1f0201cad1..35da144375a0 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -937,12 +937,11 @@ static int dax_writeback_one(struct xa_state *xas, struct dax_device *dax_dev,
* on persistent storage prior to completion of the operation.
*/
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
XA_STATE(xas, &mapping->i_pages, wbc->range_start >> PAGE_SHIFT);
struct inode *inode = mapping->host;
pgoff_t end_index = wbc->range_end >> PAGE_SHIFT;
- struct dax_device *dax_dev;
void *entry;
int ret = 0;
unsigned int scanned = 0;
@@ -953,10 +952,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
return 0;
- dax_dev = dax_get_by_host(bdev->bd_disk->disk_name);
- if (!dax_dev)
- return -EIO;
-
trace_dax_writeback_range(inode, xas.xa_index, end_index);
tag_pages_for_writeback(mapping, xas.xa_index, end_index);
@@ -977,7 +972,6 @@ int dax_writeback_mapping_range(struct address_space *mapping,
xas_lock_irq(&xas);
}
xas_unlock_irq(&xas);
- put_dax(dax_dev);
trace_dax_writeback_range_done(inode, xas.xa_index, end_index);
return ret;
}
@@ -1207,6 +1201,9 @@ dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
lockdep_assert_held(&inode->i_rwsem);
}
+ if (iocb->ki_flags & IOCB_NOWAIT)
+ flags |= IOMAP_NOWAIT;
+
while (iov_iter_count(iter)) {
ret = iomap_apply(inode, pos, iov_iter_count(iter), flags, ops,
iter, dax_iomap_actor);
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 634b09d18b77..db987b5110a9 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -1090,21 +1090,12 @@ static const struct file_operations fops_regset32 = {
* This function creates a file in debugfs with the given name that reports
* the names and values of a set of 32-bit registers. If the @mode variable
* is so set it can be read from. Writing is not supported.
- *
- * This function will return a pointer to a dentry if it succeeds. This
- * pointer must be passed to the debugfs_remove() function when the file is
- * to be removed (no automatic cleanup happens if your module is unloaded,
- * you are responsible here.) If an error occurs, ERR_PTR(-ERROR) will be
- * returned.
- *
- * If debugfs is not enabled in the kernel, the value ERR_PTR(-ENODEV) will
- * be returned.
*/
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset)
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return debugfs_create_file(name, mode, parent, regset, &fops_regset32);
+ debugfs_create_file(name, mode, parent, regset, &fops_regset32);
}
EXPORT_SYMBOL_GPL(debugfs_create_regset32);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index db1ef144c63a..2c449aed1b92 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -311,8 +311,10 @@ static int crypt_scatterlist(struct ecryptfs_crypt_stat *crypt_stat,
struct extent_crypt_result ecr;
int rc = 0;
- BUG_ON(!crypt_stat || !crypt_stat->tfm
- || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED));
+ if (!crypt_stat || !crypt_stat->tfm
+ || !(crypt_stat->flags & ECRYPTFS_STRUCT_INITIALIZED))
+ return -EINVAL;
+
if (unlikely(ecryptfs_verbosity > 0)) {
ecryptfs_printk(KERN_DEBUG, "Key size [%zd]; key:\n",
crypt_stat->key_size);
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 1c1a56be7ea2..e6ac78c62ca4 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -8,7 +8,7 @@
* Copyright (C) 2004-2008 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Trevor S. Highland <[email protected]>
- * Tyler Hicks <[email protected]>
+ * Tyler Hicks <[email protected]>
*/
#ifndef ECRYPTFS_KERNEL_H
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c
index 7d326aa0308e..af3eb02bbca1 100644
--- a/fs/ecryptfs/keystore.c
+++ b/fs/ecryptfs/keystore.c
@@ -1304,7 +1304,7 @@ parse_tag_1_packet(struct ecryptfs_crypt_stat *crypt_stat,
printk(KERN_WARNING "Tag 1 packet contains key larger "
"than ECRYPTFS_MAX_ENCRYPTED_KEY_BYTES\n");
rc = -EINVAL;
- goto out;
+ goto out_free;
}
memcpy((*new_auth_tok)->session_key.encrypted_key,
&data[(*packet_size)], (body_size - (ECRYPTFS_SIG_SIZE + 2)));
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index b8a7ce379ffe..e63259fdef28 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -7,7 +7,7 @@
* Copyright (C) 2004-2007 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
* Michael C. Thompson <[email protected]>
- * Tyler Hicks <[email protected]>
+ * Tyler Hicks <[email protected]>
*/
#include <linux/dcache.h>
diff --git a/fs/ecryptfs/messaging.c b/fs/ecryptfs/messaging.c
index d668e60b85b5..8646ba76def3 100644
--- a/fs/ecryptfs/messaging.c
+++ b/fs/ecryptfs/messaging.c
@@ -4,7 +4,7 @@
*
* Copyright (C) 2004-2008 International Business Machines Corp.
* Author(s): Michael A. Halcrow <[email protected]>
- * Tyler Hicks <[email protected]>
+ * Tyler Hicks <[email protected]>
*/
#include <linux/sched.h>
#include <linux/slab.h>
@@ -379,6 +379,7 @@ int __init ecryptfs_init_messaging(void)
* ecryptfs_message_buf_len),
GFP_KERNEL);
if (!ecryptfs_msg_ctx_arr) {
+ kfree(ecryptfs_daemon_hash);
rc = -ENOMEM;
goto out;
}
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 119667e65890..c885cf7d724b 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -960,8 +960,9 @@ ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
static int
ext2_dax_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
- return dax_writeback_mapping_range(mapping,
- mapping->host->i_sb->s_bdev, wbc);
+ struct ext2_sb_info *sbi = EXT2_SB(mapping->host->i_sb);
+
+ return dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
}
const struct address_space_operations ext2_aops = {
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 5f993a411251..8fd0b3cdab4c 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -270,6 +270,7 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
ext4_group_t ngroups = ext4_get_groups_count(sb);
struct ext4_group_desc *desc;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct buffer_head *bh_p;
if (block_group >= ngroups) {
ext4_error(sb, "block_group >= groups_count - block_group = %u,"
@@ -280,7 +281,14 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
group_desc = block_group >> EXT4_DESC_PER_BLOCK_BITS(sb);
offset = block_group & (EXT4_DESC_PER_BLOCK(sb) - 1);
- if (!sbi->s_group_desc[group_desc]) {
+ bh_p = sbi_array_rcu_deref(sbi, s_group_desc, group_desc);
+ /*
+ * sbi_array_rcu_deref returns with rcu unlocked, this is ok since
+ * the pointer being dereferenced won't be dereferenced again. By
+ * looking at the usage in add_new_gdb() the value isn't modified,
+ * just the pointer, and so it remains valid.
+ */
+ if (!bh_p) {
ext4_error(sb, "Group descriptor not loaded - "
"block_group = %u, group_desc = %u, desc = %u",
block_group, group_desc, offset);
@@ -288,10 +296,10 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
}
desc = (struct ext4_group_desc *)(
- (__u8 *)sbi->s_group_desc[group_desc]->b_data +
+ (__u8 *)bh_p->b_data +
offset * EXT4_DESC_SIZE(sb));
if (bh)
- *bh = sbi->s_group_desc[group_desc];
+ *bh = bh_p;
return desc;
}
diff --git a/fs/ext4/block_validity.c b/fs/ext4/block_validity.c
index 1ee04e76bbe0..0a734ffb4310 100644
--- a/fs/ext4/block_validity.c
+++ b/fs/ext4/block_validity.c
@@ -207,6 +207,7 @@ static int ext4_protect_reserved_inode(struct super_block *sb,
return PTR_ERR(inode);
num = (inode->i_size + sb->s_blocksize - 1) >> sb->s_blocksize_bits;
while (i < num) {
+ cond_resched();
map.m_lblk = i;
map.m_len = num - i;
n = ext4_map_blocks(NULL, inode, &map, 0);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 1f340743c9a8..9aa1f75409b0 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -129,12 +129,14 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
if (err != ERR_BAD_DX_DIR) {
return err;
}
- /*
- * We don't set the inode dirty flag since it's not
- * critical that it get flushed back to the disk.
- */
- ext4_clear_inode_flag(file_inode(file),
- EXT4_INODE_INDEX);
+ /* Can we just clear INDEX flag to ignore htree information? */
+ if (!ext4_has_metadata_csum(sb)) {
+ /*
+ * We don't set the inode dirty flag since it's not
+ * critical that it gets flushed back to the disk.
+ */
+ ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+ }
}
if (ext4_has_inline_data(inode)) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 9a2ee2428ecc..61b37a052052 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1400,7 +1400,7 @@ struct ext4_sb_info {
loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
struct buffer_head * s_sbh; /* Buffer containing the super block */
struct ext4_super_block *s_es; /* Pointer to the super block in the buffer */
- struct buffer_head **s_group_desc;
+ struct buffer_head * __rcu *s_group_desc;
unsigned int s_mount_opt;
unsigned int s_mount_opt2;
unsigned int s_mount_flags;
@@ -1462,7 +1462,7 @@ struct ext4_sb_info {
#endif
/* for buddy allocator */
- struct ext4_group_info ***s_group_info;
+ struct ext4_group_info ** __rcu *s_group_info;
struct inode *s_buddy_cache;
spinlock_t s_md_lock;
unsigned short *s_mb_offsets;
@@ -1512,7 +1512,7 @@ struct ext4_sb_info {
unsigned int s_extent_max_zeroout_kb;
unsigned int s_log_groups_per_flex;
- struct flex_groups *s_flex_groups;
+ struct flex_groups * __rcu *s_flex_groups;
ext4_group_t s_flex_groups_allocated;
/* workqueue for reserved extent conversions (buffered io) */
@@ -1552,8 +1552,11 @@ struct ext4_sb_info {
struct ratelimit_state s_warning_ratelimit_state;
struct ratelimit_state s_msg_ratelimit_state;
- /* Barrier between changing inodes' journal flags and writepages ops. */
- struct percpu_rw_semaphore s_journal_flag_rwsem;
+ /*
+ * Barrier between writepages ops and changing any inode's JOURNAL_DATA
+ * or EXTENTS flag.
+ */
+ struct percpu_rw_semaphore s_writepages_rwsem;
struct dax_device *s_daxdev;
#ifdef CONFIG_EXT4_DEBUG
unsigned long s_simulate_fail;
@@ -1577,6 +1580,23 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
}
/*
+ * Returns: sbi->field[index]
+ * Used to access an array element from the following sbi fields which require
+ * rcu protection to avoid dereferencing an invalid pointer due to reassignment
+ * - s_group_desc
+ * - s_group_info
+ * - s_flex_groups
+ */
+#define sbi_array_rcu_deref(sbi, field, index) \
+({ \
+ typeof(*((sbi)->field)) _v; \
+ rcu_read_lock(); \
+ _v = ((typeof(_v)*)rcu_dereference((sbi)->field))[index]; \
+ rcu_read_unlock(); \
+ _v; \
+})
+
+/*
* Simulate_fail codes
*/
#define EXT4_SIM_BBITMAP_EIO 1
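The macro copies a single element out under rcu_read_lock(); its counterpart is the resize path, which publishes a grown array with rcu_assign_pointer() and retires the old one through the new ext4_kvfree_array_rcu() only after a grace period. A hedged sketch of that update side, with illustrative variable names:

/* grow s_flex_groups from old_n to new_n entries (sketch) */
struct flex_groups **new_groups, **old_groups;

new_groups = kvzalloc(new_n * sizeof(*new_groups), GFP_KERNEL);
if (!new_groups)
	return -ENOMEM;
rcu_read_lock();
old_groups = rcu_dereference(sbi->s_flex_groups);
memcpy(new_groups, old_groups, old_n * sizeof(*new_groups));
rcu_read_unlock();
rcu_assign_pointer(sbi->s_flex_groups, new_groups);
ext4_kvfree_array_rcu(old_groups);	/* kvfree after a grace period */

A reader racing with the swap sees either the old array (still valid until the grace period expires) or the new one; the element it copies out stays usable either way, which is the property the comment in balloc.c relies on.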
@@ -2544,8 +2564,11 @@ void ext4_insert_dentry(struct inode *inode,
struct ext4_filename *fname);
static inline void ext4_update_dx_flag(struct inode *inode)
{
- if (!ext4_has_feature_dir_index(inode->i_sb))
+ if (!ext4_has_feature_dir_index(inode->i_sb)) {
+ /* ext4_iget() should have caught this... */
+ WARN_ON_ONCE(ext4_has_feature_metadata_csum(inode->i_sb));
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
+ }
}
static const unsigned char ext4_filetype_table[] = {
DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
@@ -2727,6 +2750,7 @@ extern int ext4_generic_delete_entry(handle_t *handle,
extern bool ext4_empty_dir(struct inode *inode);
/* resize.c */
+extern void ext4_kvfree_array_rcu(void *to_free);
extern int ext4_group_add(struct super_block *sb,
struct ext4_new_group_data *input);
extern int ext4_group_extend(struct super_block *sb,
@@ -2973,13 +2997,13 @@ static inline
struct ext4_group_info *ext4_get_group_info(struct super_block *sb,
ext4_group_t group)
{
- struct ext4_group_info ***grp_info;
+ struct ext4_group_info **grp_info;
long indexv, indexh;
BUG_ON(group >= EXT4_SB(sb)->s_groups_count);
- grp_info = EXT4_SB(sb)->s_group_info;
indexv = group >> (EXT4_DESC_PER_BLOCK_BITS(sb));
indexh = group & ((EXT4_DESC_PER_BLOCK(sb)) - 1);
- return grp_info[indexv][indexh];
+ grp_info = sbi_array_rcu_deref(EXT4_SB(sb), s_group_info, indexv);
+ return grp_info[indexh];
}
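The reworked ext4_get_group_info() splits the group number into a row index into the RCU-protected top-level array and a column index within that row. A worked instance of the index arithmetic, assuming a 4KiB block size (128 descriptors per block, i.e. 7 bits):

    #include <assert.h>

    #define DESC_PER_BLOCK_BITS 7  /* assumed: 4KiB blocks */
    #define DESC_PER_BLOCK (1u << DESC_PER_BLOCK_BITS)

    int main(void)
    {
        unsigned group  = 300;
        unsigned indexv = group >> DESC_PER_BLOCK_BITS;  /* row 2   */
        unsigned indexh = group & (DESC_PER_BLOCK - 1);  /* slot 44 */

        assert(indexv == 2 && indexh == 44);
        return 0;
    }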
/*
@@ -3029,7 +3053,7 @@ static inline void ext4_update_i_disksize(struct inode *inode, loff_t newsize)
!inode_is_locked(inode));
down_write(&EXT4_I(inode)->i_data_sem);
if (newsize > EXT4_I(inode)->i_disksize)
- EXT4_I(inode)->i_disksize = newsize;
+ WRITE_ONCE(EXT4_I(inode)->i_disksize, newsize);
up_write(&EXT4_I(inode)->i_data_sem);
}
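WRITE_ONCE() here pairs with the READ_ONCE() added to the disksize check in inode.c below: the writer still updates under i_data_sem, while the lockless reader gets a single, untorn load. A compilable userspace analogue of the pairing, using C11 relaxed atomics (the struct and names are stand-ins, not the kernel types):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct inode_info {
        pthread_mutex_t data_sem;    /* stands in for i_data_sem */
        _Atomic long long disksize;  /* stands in for i_disksize */
    };

    static void update_disksize(struct inode_info *ei, long long newsize)
    {
        pthread_mutex_lock(&ei->data_sem);
        if (newsize > atomic_load_explicit(&ei->disksize, memory_order_relaxed))
            atomic_store_explicit(&ei->disksize, newsize, memory_order_relaxed);
        pthread_mutex_unlock(&ei->data_sem);
    }

    /* Lockless fast path: one relaxed load, as READ_ONCE() in the patch. */
    static long long peek_disksize(struct inode_info *ei)
    {
        return atomic_load_explicit(&ei->disksize, memory_order_relaxed);
    }

    int main(void)
    {
        struct inode_info ei = { PTHREAD_MUTEX_INITIALIZER, 0 };

        update_disksize(&ei, 4096);
        printf("%lld\n", peek_disksize(&ei));  /* 4096 */
        return 0;
    }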
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index c66e8f9451a2..f95ee99091e4 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -328,11 +328,13 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (sbi->s_log_groups_per_flex) {
- ext4_group_t f = ext4_flex_group(sbi, block_group);
+ struct flex_groups *fg;
- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+ fg = sbi_array_rcu_deref(sbi, s_flex_groups,
+ ext4_flex_group(sbi, block_group));
+ atomic_inc(&fg->free_inodes);
if (is_directory)
- atomic_dec(&sbi->s_flex_groups[f].used_dirs);
+ atomic_dec(&fg->used_dirs);
}
BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
@@ -368,12 +370,13 @@ static void get_orlov_stats(struct super_block *sb, ext4_group_t g,
int flex_size, struct orlov_stats *stats)
{
struct ext4_group_desc *desc;
- struct flex_groups *flex_group = EXT4_SB(sb)->s_flex_groups;
if (flex_size > 1) {
- stats->free_inodes = atomic_read(&flex_group[g].free_inodes);
- stats->free_clusters = atomic64_read(&flex_group[g].free_clusters);
- stats->used_dirs = atomic_read(&flex_group[g].used_dirs);
+ struct flex_groups *fg = sbi_array_rcu_deref(EXT4_SB(sb),
+ s_flex_groups, g);
+ stats->free_inodes = atomic_read(&fg->free_inodes);
+ stats->free_clusters = atomic64_read(&fg->free_clusters);
+ stats->used_dirs = atomic_read(&fg->used_dirs);
return;
}
@@ -1054,7 +1057,8 @@ got:
if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, group);
- atomic_inc(&sbi->s_flex_groups[f].used_dirs);
+ atomic_inc(&sbi_array_rcu_deref(sbi, s_flex_groups,
+ f)->used_dirs);
}
}
if (ext4_has_group_desc_csum(sb)) {
@@ -1077,7 +1081,8 @@ got:
if (sbi->s_log_groups_per_flex) {
flex_group = ext4_flex_group(sbi, group);
- atomic_dec(&sbi->s_flex_groups[flex_group].free_inodes);
+ atomic_dec(&sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_inodes);
}
inode->i_ino = ino + group * EXT4_INODES_PER_GROUP(sb);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3313168b680f..fa0ff78dc033 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -2465,7 +2465,7 @@ update_disksize:
* truncate are avoided by checking i_size under i_data_sem.
*/
disksize = ((loff_t)mpd->first_page) << PAGE_SHIFT;
- if (disksize > EXT4_I(inode)->i_disksize) {
+ if (disksize > READ_ONCE(EXT4_I(inode)->i_disksize)) {
int err2;
loff_t i_size;
@@ -2628,7 +2628,7 @@ static int ext4_writepages(struct address_space *mapping,
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
- percpu_down_read(&sbi->s_journal_flag_rwsem);
+ percpu_down_read(&sbi->s_writepages_rwsem);
trace_ext4_writepages(inode, wbc);
/*
@@ -2849,7 +2849,7 @@ unplug:
out_writepages:
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
- percpu_up_read(&sbi->s_journal_flag_rwsem);
+ percpu_up_read(&sbi->s_writepages_rwsem);
return ret;
}
@@ -2864,13 +2864,13 @@ static int ext4_dax_writepages(struct address_space *mapping,
if (unlikely(ext4_forced_shutdown(EXT4_SB(inode->i_sb))))
return -EIO;
- percpu_down_read(&sbi->s_journal_flag_rwsem);
+ percpu_down_read(&sbi->s_writepages_rwsem);
trace_ext4_writepages(inode, wbc);
- ret = dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev, wbc);
+ ret = dax_writeback_mapping_range(mapping, sbi->s_daxdev, wbc);
trace_ext4_writepages_result(inode, wbc, ret,
nr_to_write - wbc->nr_to_write);
- percpu_up_read(&sbi->s_journal_flag_rwsem);
+ percpu_up_read(&sbi->s_writepages_rwsem);
return ret;
}
@@ -4644,6 +4644,18 @@ struct inode *__ext4_iget(struct super_block *sb, unsigned long ino,
ret = -EFSCORRUPTED;
goto bad_inode;
}
+ /*
+ * If dir_index is not enabled but there's a dir with the INDEX flag
+ * set, we'd normally treat the htree data as empty space. But with
+ * metadata checksumming that would corrupt the checksums, so forbid it.
+ */
+ if (!ext4_has_feature_dir_index(sb) && ext4_has_metadata_csum(sb) &&
+ ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) {
+ ext4_error_inode(inode, function, line, 0,
+ "iget: Dir with htree data on filesystem without dir_index feature.");
+ ret = -EFSCORRUPTED;
+ goto bad_inode;
+ }
ei->i_disksize = inode->i_size;
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
@@ -5849,7 +5861,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
}
}
- percpu_down_write(&sbi->s_journal_flag_rwsem);
+ percpu_down_write(&sbi->s_writepages_rwsem);
jbd2_journal_lock_updates(journal);
/*
@@ -5866,7 +5878,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
err = jbd2_journal_flush(journal);
if (err < 0) {
jbd2_journal_unlock_updates(journal);
- percpu_up_write(&sbi->s_journal_flag_rwsem);
+ percpu_up_write(&sbi->s_writepages_rwsem);
return err;
}
ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
@@ -5874,7 +5886,7 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
ext4_set_aops(inode);
jbd2_journal_unlock_updates(journal);
- percpu_up_write(&sbi->s_journal_flag_rwsem);
+ percpu_up_write(&sbi->s_writepages_rwsem);
if (val)
up_write(&EXT4_I(inode)->i_mmap_sem);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index f64838187559..51a78eb65f3c 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2356,7 +2356,7 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
unsigned size;
- struct ext4_group_info ***new_groupinfo;
+ struct ext4_group_info ***old_groupinfo, ***new_groupinfo;
size = (ngroups + EXT4_DESC_PER_BLOCK(sb) - 1) >>
EXT4_DESC_PER_BLOCK_BITS(sb);
@@ -2369,13 +2369,16 @@ int ext4_mb_alloc_groupinfo(struct super_block *sb, ext4_group_t ngroups)
ext4_msg(sb, KERN_ERR, "can't allocate buddy meta group");
return -ENOMEM;
}
- if (sbi->s_group_info) {
- memcpy(new_groupinfo, sbi->s_group_info,
+ rcu_read_lock();
+ old_groupinfo = rcu_dereference(sbi->s_group_info);
+ if (old_groupinfo)
+ memcpy(new_groupinfo, old_groupinfo,
sbi->s_group_info_size * sizeof(*sbi->s_group_info));
- kvfree(sbi->s_group_info);
- }
- sbi->s_group_info = new_groupinfo;
+ rcu_read_unlock();
+ rcu_assign_pointer(sbi->s_group_info, new_groupinfo);
sbi->s_group_info_size = size / sizeof(*sbi->s_group_info);
+ if (old_groupinfo)
+ ext4_kvfree_array_rcu(old_groupinfo);
ext4_debug("allocated s_groupinfo array for %d meta_bg's\n",
sbi->s_group_info_size);
return 0;
@@ -2387,6 +2390,7 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
{
int i;
int metalen = 0;
+ int idx = group >> EXT4_DESC_PER_BLOCK_BITS(sb);
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_info **meta_group_info;
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2405,12 +2409,12 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
"for a buddy group");
goto exit_meta_group_info;
}
- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] =
- meta_group_info;
+ rcu_read_lock();
+ rcu_dereference(sbi->s_group_info)[idx] = meta_group_info;
+ rcu_read_unlock();
}
- meta_group_info =
- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)];
+ meta_group_info = sbi_array_rcu_deref(sbi, s_group_info, idx);
i = group & (EXT4_DESC_PER_BLOCK(sb) - 1);
meta_group_info[i] = kmem_cache_zalloc(cachep, GFP_NOFS);
@@ -2458,8 +2462,13 @@ int ext4_mb_add_groupinfo(struct super_block *sb, ext4_group_t group,
exit_group_info:
/* If a meta_group_info table has been allocated, release it now */
if (group % EXT4_DESC_PER_BLOCK(sb) == 0) {
- kfree(sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)]);
- sbi->s_group_info[group >> EXT4_DESC_PER_BLOCK_BITS(sb)] = NULL;
+ struct ext4_group_info ***group_info;
+
+ rcu_read_lock();
+ group_info = rcu_dereference(sbi->s_group_info);
+ kfree(group_info[idx]);
+ group_info[idx] = NULL;
+ rcu_read_unlock();
}
exit_meta_group_info:
return -ENOMEM;
@@ -2472,6 +2481,7 @@ static int ext4_mb_init_backend(struct super_block *sb)
struct ext4_sb_info *sbi = EXT4_SB(sb);
int err;
struct ext4_group_desc *desc;
+ struct ext4_group_info ***group_info;
struct kmem_cache *cachep;
err = ext4_mb_alloc_groupinfo(sb, ngroups);
@@ -2507,11 +2517,16 @@ err_freebuddy:
while (i-- > 0)
kmem_cache_free(cachep, ext4_get_group_info(sb, i));
i = sbi->s_group_info_size;
+ rcu_read_lock();
+ group_info = rcu_dereference(sbi->s_group_info);
while (i-- > 0)
- kfree(sbi->s_group_info[i]);
+ kfree(group_info[i]);
+ rcu_read_unlock();
iput(sbi->s_buddy_cache);
err_freesgi:
- kvfree(sbi->s_group_info);
+ rcu_read_lock();
+ kvfree(rcu_dereference(sbi->s_group_info));
+ rcu_read_unlock();
return -ENOMEM;
}
@@ -2700,7 +2715,7 @@ int ext4_mb_release(struct super_block *sb)
ext4_group_t ngroups = ext4_get_groups_count(sb);
ext4_group_t i;
int num_meta_group_infos;
- struct ext4_group_info *grinfo;
+ struct ext4_group_info *grinfo, ***group_info;
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
@@ -2719,9 +2734,12 @@ int ext4_mb_release(struct super_block *sb)
num_meta_group_infos = (ngroups +
EXT4_DESC_PER_BLOCK(sb) - 1) >>
EXT4_DESC_PER_BLOCK_BITS(sb);
+ rcu_read_lock();
+ group_info = rcu_dereference(sbi->s_group_info);
for (i = 0; i < num_meta_group_infos; i++)
- kfree(sbi->s_group_info[i]);
- kvfree(sbi->s_group_info);
+ kfree(group_info[i]);
+ kvfree(group_info);
+ rcu_read_unlock();
}
kfree(sbi->s_mb_offsets);
kfree(sbi->s_mb_maxs);
@@ -3020,7 +3038,8 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
ext4_group_t flex_group = ext4_flex_group(sbi,
ac->ac_b_ex.fe_group);
atomic64_sub(ac->ac_b_ex.fe_len,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
}
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
@@ -4918,7 +4937,8 @@ do_more:
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic64_add(count_clusters,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
}
/*
@@ -5075,7 +5095,8 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi, block_group);
atomic64_add(clusters_freed,
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &sbi_array_rcu_deref(sbi, s_flex_groups,
+ flex_group)->free_clusters);
}
ext4_mb_unload_buddy(&e4b);
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index 89725fa42573..fb6520f37135 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -407,6 +407,7 @@ static int free_ext_block(handle_t *handle, struct inode *inode)
int ext4_ext_migrate(struct inode *inode)
{
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
handle_t *handle;
int retval = 0, i;
__le32 *i_data;
@@ -431,6 +432,8 @@ int ext4_ext_migrate(struct inode *inode)
*/
return retval;
+ percpu_down_write(&sbi->s_writepages_rwsem);
+
/*
* Worst case we can touch the allocation bitmaps, a bgd
* block, and a block to link in the orphan list. We do need
@@ -441,7 +444,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(handle)) {
retval = PTR_ERR(handle);
- return retval;
+ goto out_unlock;
}
goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
@@ -452,7 +455,7 @@ int ext4_ext_migrate(struct inode *inode)
if (IS_ERR(tmp_inode)) {
retval = PTR_ERR(tmp_inode);
ext4_journal_stop(handle);
- return retval;
+ goto out_unlock;
}
i_size_write(tmp_inode, i_size_read(inode));
/*
@@ -494,7 +497,7 @@ int ext4_ext_migrate(struct inode *inode)
*/
ext4_orphan_del(NULL, tmp_inode);
retval = PTR_ERR(handle);
- goto out;
+ goto out_tmp_inode;
}
ei = EXT4_I(inode);
@@ -576,10 +579,11 @@ err_out:
ext4_ext_tree_init(handle, tmp_inode);
out_stop:
ext4_journal_stop(handle);
-out:
+out_tmp_inode:
unlock_new_inode(tmp_inode);
iput(tmp_inode);
-
+out_unlock:
+ percpu_up_write(&sbi->s_writepages_rwsem);
return retval;
}
@@ -589,7 +593,8 @@ out:
int ext4_ind_migrate(struct inode *inode)
{
struct ext4_extent_header *eh;
- struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ struct ext4_super_block *es = sbi->s_es;
struct ext4_inode_info *ei = EXT4_I(inode);
struct ext4_extent *ex;
unsigned int i, len;
@@ -613,9 +618,13 @@ int ext4_ind_migrate(struct inode *inode)
if (test_opt(inode->i_sb, DELALLOC))
ext4_alloc_da_blocks(inode);
+ percpu_down_write(&sbi->s_writepages_rwsem);
+
handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
- if (IS_ERR(handle))
- return PTR_ERR(handle);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ goto out_unlock;
+ }
down_write(&EXT4_I(inode)->i_data_sem);
ret = ext4_ext_check_inode(inode);
@@ -650,5 +659,7 @@ int ext4_ind_migrate(struct inode *inode)
errout:
ext4_journal_stop(handle);
up_write(&EXT4_I(inode)->i_data_sem);
+out_unlock:
+ percpu_up_write(&sbi->s_writepages_rwsem);
return ret;
}
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 1c44b1a32001..87f7551c5132 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -120,10 +120,10 @@ void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
{
__ext4_warning(sb, function, line, "%s", msg);
__ext4_warning(sb, function, line,
- "MMP failure info: last update time: %llu, last update "
- "node: %s, last update device: %s",
- (long long unsigned int) le64_to_cpu(mmp->mmp_time),
- mmp->mmp_nodename, mmp->mmp_bdevname);
+ "MMP failure info: last update time: %llu, last update node: %.*s, last update device: %.*s",
+ (unsigned long long)le64_to_cpu(mmp->mmp_time),
+ (int)sizeof(mmp->mmp_nodename), mmp->mmp_nodename,
+ (int)sizeof(mmp->mmp_bdevname), mmp->mmp_bdevname);
}
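mmp_nodename and mmp_bdevname are fixed-size on-disk fields with no guarantee of NUL termination, so a plain %s could read past them; %.*s with the field size as precision bounds the read. A small demonstration of the idiom:

    #include <stdio.h>

    int main(void)
    {
        /* A fixed-size field with no terminating NUL, as in mmp_struct. */
        char bdevname[8] = { 's', 'd', 'a', '1', 'X', 'X', 'X', 'X' };

        /* "%.*s" prints at most sizeof(bdevname) bytes and stops early
         * at a NUL if one exists, so it never overreads the field. */
        printf("%.*s\n", (int)sizeof(bdevname), bdevname);
        return 0;
    }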
/*
@@ -154,6 +154,7 @@ static int kmmpd(void *data)
mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval,
EXT4_MMP_MIN_CHECK_INTERVAL);
mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval);
+ BUILD_BUG_ON(sizeof(mmp->mmp_bdevname) < BDEVNAME_SIZE);
bdevname(bh->b_bdev, mmp->mmp_bdevname);
memcpy(mmp->mmp_nodename, init_utsname()->nodename,
@@ -379,7 +380,8 @@ skip:
/*
* Start a kernel thread to update the MMP block periodically.
*/
- EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s",
+ EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%.*s",
+ (int)sizeof(mmp->mmp_bdevname),
bdevname(bh->b_bdev,
mmp->mmp_bdevname));
if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) {
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 129d2ebae00d..b05ea72f38fd 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1511,6 +1511,7 @@ restart:
/*
* We deal with the read-ahead logic here.
*/
+ cond_resched();
if (ra_ptr >= ra_max) {
/* Refill the readahead buffer */
ra_ptr = 0;
@@ -2213,6 +2214,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
retval = ext4_dx_add_entry(handle, &fname, dir, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
goto out;
+ /* Can we just ignore htree data? */
+ if (ext4_has_metadata_csum(sb)) {
+ EXT4_ERROR_INODE(dir,
+ "Directory has corrupted htree index.");
+ retval = -EFSCORRUPTED;
+ goto out;
+ }
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 86a2500ed292..a50b51270ea9 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -17,6 +17,33 @@
#include "ext4_jbd2.h"
+struct ext4_rcu_ptr {
+ struct rcu_head rcu;
+ void *ptr;
+};
+
+static void ext4_rcu_ptr_callback(struct rcu_head *head)
+{
+ struct ext4_rcu_ptr *ptr;
+
+ ptr = container_of(head, struct ext4_rcu_ptr, rcu);
+ kvfree(ptr->ptr);
+ kfree(ptr);
+}
+
+void ext4_kvfree_array_rcu(void *to_free)
+{
+ struct ext4_rcu_ptr *ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
+
+ if (ptr) {
+ ptr->ptr = to_free;
+ call_rcu(&ptr->rcu, ext4_rcu_ptr_callback);
+ return;
+ }
+ synchronize_rcu();
+ kvfree(to_free);
+}
+
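ext4_kvfree_array_rcu() defers the kvfree() until an RCU grace period has elapsed, and degrades to a blocking synchronize_rcu() only when even the small bookkeeping allocation fails. A sketch of the same pattern written against liburcu, whose userspace API is assumed here to mirror the kernel's call_rcu()/synchronize_rcu() (build with -lurcu; reader threads would additionally need rcu_register_thread()):

    #include <urcu.h>    /* assumed: liburcu's call_rcu()/synchronize_rcu() */
    #include <stdlib.h>

    struct rcu_blob {
        struct rcu_head rcu;
        void *ptr;
    };

    static void rcu_blob_free(struct rcu_head *head)
    {
        struct rcu_blob *b = caa_container_of(head, struct rcu_blob, rcu);

        free(b->ptr);
        free(b);
    }

    /* Retire @to_free once all pre-existing RCU readers are done. */
    static void free_array_rcu(void *to_free)
    {
        struct rcu_blob *b = calloc(1, sizeof(*b));

        if (b) {
            b->ptr = to_free;
            call_rcu(&b->rcu, rcu_blob_free);
            return;
        }
        /* No memory for the callback: block for a grace period instead. */
        synchronize_rcu();
        free(to_free);
    }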
int ext4_resize_begin(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
@@ -542,8 +569,8 @@ static int setup_new_flex_group_blocks(struct super_block *sb,
brelse(gdb);
goto out;
}
- memcpy(gdb->b_data, sbi->s_group_desc[j]->b_data,
- gdb->b_size);
+ memcpy(gdb->b_data, sbi_array_rcu_deref(sbi,
+ s_group_desc, j)->b_data, gdb->b_size);
set_buffer_uptodate(gdb);
err = ext4_handle_dirty_metadata(handle, NULL, gdb);
@@ -860,13 +887,15 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
}
brelse(dind);
- o_group_desc = EXT4_SB(sb)->s_group_desc;
+ rcu_read_lock();
+ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+ rcu_read_unlock();
n_group_desc[gdb_num] = gdb_bh;
- EXT4_SB(sb)->s_group_desc = n_group_desc;
+ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
EXT4_SB(sb)->s_gdb_count++;
- kvfree(o_group_desc);
+ ext4_kvfree_array_rcu(o_group_desc);
le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
err = ext4_handle_dirty_super(handle, sb);
@@ -909,9 +938,11 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
return err;
}
- o_group_desc = EXT4_SB(sb)->s_group_desc;
+ rcu_read_lock();
+ o_group_desc = rcu_dereference(EXT4_SB(sb)->s_group_desc);
memcpy(n_group_desc, o_group_desc,
EXT4_SB(sb)->s_gdb_count * sizeof(struct buffer_head *));
+ rcu_read_unlock();
n_group_desc[gdb_num] = gdb_bh;
BUFFER_TRACE(gdb_bh, "get_write_access");
@@ -922,9 +953,9 @@ static int add_new_gdb_meta_bg(struct super_block *sb,
return err;
}
- EXT4_SB(sb)->s_group_desc = n_group_desc;
+ rcu_assign_pointer(EXT4_SB(sb)->s_group_desc, n_group_desc);
EXT4_SB(sb)->s_gdb_count++;
- kvfree(o_group_desc);
+ ext4_kvfree_array_rcu(o_group_desc);
return err;
}
@@ -1188,7 +1219,8 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
* use non-sparse filesystems anymore. This is already checked above.
*/
if (gdb_off) {
- gdb_bh = sbi->s_group_desc[gdb_num];
+ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
+ gdb_num);
BUFFER_TRACE(gdb_bh, "get_write_access");
err = ext4_journal_get_write_access(handle, gdb_bh);
@@ -1270,7 +1302,7 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
/*
* get_write_access() has been called on gdb_bh by ext4_add_new_desc().
*/
- gdb_bh = sbi->s_group_desc[gdb_num];
+ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc, gdb_num);
/* Update group descriptor block for new group */
gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
@@ -1398,11 +1430,14 @@ static void ext4_update_super(struct super_block *sb,
percpu_counter_read(&sbi->s_freeclusters_counter));
if (ext4_has_feature_flex_bg(sb) && sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
+ struct flex_groups *fg;
+
flex_group = ext4_flex_group(sbi, group_data[0].group);
+ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
atomic64_add(EXT4_NUM_B2C(sbi, free_blocks),
- &sbi->s_flex_groups[flex_group].free_clusters);
+ &fg->free_clusters);
atomic_add(EXT4_INODES_PER_GROUP(sb) * flex_gd->count,
- &sbi->s_flex_groups[flex_group].free_inodes);
+ &fg->free_inodes);
}
/*
@@ -1497,7 +1532,8 @@ exit_journal:
for (; gdb_num <= gdb_num_end; gdb_num++) {
struct buffer_head *gdb_bh;
- gdb_bh = sbi->s_group_desc[gdb_num];
+ gdb_bh = sbi_array_rcu_deref(sbi, s_group_desc,
+ gdb_num);
if (old_gdb == gdb_bh->b_blocknr)
continue;
update_backups(sb, gdb_bh->b_blocknr, gdb_bh->b_data,
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 8434217549b3..0c7c4adb664e 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1014,6 +1014,8 @@ static void ext4_put_super(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_super_block *es = sbi->s_es;
+ struct buffer_head **group_desc;
+ struct flex_groups **flex_groups;
int aborted = 0;
int i, err;
@@ -1046,15 +1048,23 @@ static void ext4_put_super(struct super_block *sb)
if (!sb_rdonly(sb))
ext4_commit_super(sb, 1);
+ rcu_read_lock();
+ group_desc = rcu_dereference(sbi->s_group_desc);
for (i = 0; i < sbi->s_gdb_count; i++)
- brelse(sbi->s_group_desc[i]);
- kvfree(sbi->s_group_desc);
- kvfree(sbi->s_flex_groups);
+ brelse(group_desc[i]);
+ kvfree(group_desc);
+ flex_groups = rcu_dereference(sbi->s_flex_groups);
+ if (flex_groups) {
+ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+ kvfree(flex_groups[i]);
+ kvfree(flex_groups);
+ }
+ rcu_read_unlock();
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
#ifdef CONFIG_QUOTA
for (i = 0; i < EXT4_MAXQUOTAS; i++)
kfree(get_qf_name(sb, sbi, i));
@@ -2380,8 +2390,8 @@ done:
int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
- struct flex_groups *new_groups;
- int size;
+ struct flex_groups **old_groups, **new_groups;
+ int size, i, j;
if (!sbi->s_log_groups_per_flex)
return 0;
@@ -2390,22 +2400,37 @@ int ext4_alloc_flex_bg_array(struct super_block *sb, ext4_group_t ngroup)
if (size <= sbi->s_flex_groups_allocated)
return 0;
- size = roundup_pow_of_two(size * sizeof(struct flex_groups));
- new_groups = kvzalloc(size, GFP_KERNEL);
+ new_groups = kvzalloc(roundup_pow_of_two(size *
+ sizeof(*sbi->s_flex_groups)), GFP_KERNEL);
if (!new_groups) {
- ext4_msg(sb, KERN_ERR, "not enough memory for %d flex groups",
- size / (int) sizeof(struct flex_groups));
+ ext4_msg(sb, KERN_ERR,
+ "not enough memory for %d flex group pointers", size);
return -ENOMEM;
}
-
- if (sbi->s_flex_groups) {
- memcpy(new_groups, sbi->s_flex_groups,
- (sbi->s_flex_groups_allocated *
- sizeof(struct flex_groups)));
- kvfree(sbi->s_flex_groups);
+ for (i = sbi->s_flex_groups_allocated; i < size; i++) {
+ new_groups[i] = kvzalloc(roundup_pow_of_two(
+ sizeof(struct flex_groups)),
+ GFP_KERNEL);
+ if (!new_groups[i]) {
+ for (j = sbi->s_flex_groups_allocated; j < i; j++)
+ kvfree(new_groups[j]);
+ kvfree(new_groups);
+ ext4_msg(sb, KERN_ERR,
+ "not enough memory for %d flex groups", size);
+ return -ENOMEM;
+ }
}
- sbi->s_flex_groups = new_groups;
- sbi->s_flex_groups_allocated = size / sizeof(struct flex_groups);
+ rcu_read_lock();
+ old_groups = rcu_dereference(sbi->s_flex_groups);
+ if (old_groups)
+ memcpy(new_groups, old_groups,
+ (sbi->s_flex_groups_allocated *
+ sizeof(struct flex_groups *)));
+ rcu_read_unlock();
+ rcu_assign_pointer(sbi->s_flex_groups, new_groups);
+ sbi->s_flex_groups_allocated = size;
+ if (old_groups)
+ ext4_kvfree_array_rcu(old_groups);
return 0;
}
@@ -2413,6 +2438,7 @@ static int ext4_fill_flex_info(struct super_block *sb)
{
struct ext4_sb_info *sbi = EXT4_SB(sb);
struct ext4_group_desc *gdp = NULL;
+ struct flex_groups *fg;
ext4_group_t flex_group;
int i, err;
@@ -2430,12 +2456,11 @@ static int ext4_fill_flex_info(struct super_block *sb)
gdp = ext4_get_group_desc(sb, i, NULL);
flex_group = ext4_flex_group(sbi, i);
- atomic_add(ext4_free_inodes_count(sb, gdp),
- &sbi->s_flex_groups[flex_group].free_inodes);
+ fg = sbi_array_rcu_deref(sbi, s_flex_groups, flex_group);
+ atomic_add(ext4_free_inodes_count(sb, gdp), &fg->free_inodes);
atomic64_add(ext4_free_group_clusters(sb, gdp),
- &sbi->s_flex_groups[flex_group].free_clusters);
- atomic_add(ext4_used_dirs_count(sb, gdp),
- &sbi->s_flex_groups[flex_group].used_dirs);
+ &fg->free_clusters);
+ atomic_add(ext4_used_dirs_count(sb, gdp), &fg->used_dirs);
}
return 1;
@@ -3009,17 +3034,11 @@ static int ext4_feature_set_ok(struct super_block *sb, int readonly)
return 0;
}
-#ifndef CONFIG_QUOTA
- if (ext4_has_feature_quota(sb) && !readonly) {
+#if !IS_ENABLED(CONFIG_QUOTA) || !IS_ENABLED(CONFIG_QFMT_V2)
+ if (!readonly && (ext4_has_feature_quota(sb) ||
+ ext4_has_feature_project(sb))) {
ext4_msg(sb, KERN_ERR,
- "Filesystem with quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
- return 0;
- }
- if (ext4_has_feature_project(sb) && !readonly) {
- ext4_msg(sb, KERN_ERR,
- "Filesystem with project quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
+ "The kernel was not built with CONFIG_QUOTA and CONFIG_QFMT_V2");
return 0;
}
#endif /* CONFIG_QUOTA */
@@ -3640,9 +3659,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
{
struct dax_device *dax_dev = fs_dax_get_by_bdev(sb->s_bdev);
char *orig_data = kstrdup(data, GFP_KERNEL);
- struct buffer_head *bh;
+ struct buffer_head *bh, **group_desc;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
+ struct flex_groups **flex_groups;
ext4_fsblk_t block;
ext4_fsblk_t sb_block = get_sb_block(&data);
ext4_fsblk_t logical_sb_block;
@@ -3814,6 +3834,15 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
*/
sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT;
+ blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
+ if (blocksize < EXT4_MIN_BLOCK_SIZE ||
+ blocksize > EXT4_MAX_BLOCK_SIZE) {
+ ext4_msg(sb, KERN_ERR,
+ "Unsupported filesystem blocksize %d (%d log_block_size)",
+ blocksize, le32_to_cpu(es->s_log_block_size));
+ goto failed_mount;
+ }
+
if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
@@ -3831,6 +3860,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
ext4_msg(sb, KERN_ERR,
"unsupported inode size: %d",
sbi->s_inode_size);
+ ext4_msg(sb, KERN_ERR, "blocksize: %d", blocksize);
goto failed_mount;
}
/*
@@ -4033,14 +4063,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
if (!ext4_feature_set_ok(sb, (sb_rdonly(sb))))
goto failed_mount;
- blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
- if (blocksize < EXT4_MIN_BLOCK_SIZE ||
- blocksize > EXT4_MAX_BLOCK_SIZE) {
- ext4_msg(sb, KERN_ERR,
- "Unsupported filesystem blocksize %d (%d log_block_size)",
- blocksize, le32_to_cpu(es->s_log_block_size));
- goto failed_mount;
- }
if (le32_to_cpu(es->s_log_block_size) >
(EXT4_MAX_BLOCK_LOG_SIZE - EXT4_MIN_BLOCK_LOG_SIZE)) {
ext4_msg(sb, KERN_ERR,
@@ -4294,9 +4316,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
goto failed_mount;
}
}
- sbi->s_group_desc = kvmalloc_array(db_count,
- sizeof(struct buffer_head *),
- GFP_KERNEL);
+ rcu_assign_pointer(sbi->s_group_desc,
+ kvmalloc_array(db_count,
+ sizeof(struct buffer_head *),
+ GFP_KERNEL));
if (sbi->s_group_desc == NULL) {
ext4_msg(sb, KERN_ERR, "not enough memory");
ret = -ENOMEM;
@@ -4312,14 +4335,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
}
for (i = 0; i < db_count; i++) {
+ struct buffer_head *bh;
+
block = descriptor_loc(sb, logical_sb_block, i);
- sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
- if (!sbi->s_group_desc[i]) {
+ bh = sb_bread_unmovable(sb, block);
+ if (!bh) {
ext4_msg(sb, KERN_ERR,
"can't read group descriptor %d", i);
db_count = i;
goto failed_mount2;
}
+ rcu_read_lock();
+ rcu_dereference(sbi->s_group_desc)[i] = bh;
+ rcu_read_unlock();
}
sbi->s_gdb_count = db_count;
if (!ext4_check_descriptors(sb, logical_sb_block, &first_not_zeroed)) {
@@ -4598,7 +4626,7 @@ no_journal:
err = percpu_counter_init(&sbi->s_dirtyclusters_counter, 0,
GFP_KERNEL);
if (!err)
- err = percpu_init_rwsem(&sbi->s_journal_flag_rwsem);
+ err = percpu_init_rwsem(&sbi->s_writepages_rwsem);
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
@@ -4686,13 +4714,19 @@ failed_mount7:
ext4_unregister_li_request(sb);
failed_mount6:
ext4_mb_release(sb);
- if (sbi->s_flex_groups)
- kvfree(sbi->s_flex_groups);
+ rcu_read_lock();
+ flex_groups = rcu_dereference(sbi->s_flex_groups);
+ if (flex_groups) {
+ for (i = 0; i < sbi->s_flex_groups_allocated; i++)
+ kvfree(flex_groups[i]);
+ kvfree(flex_groups);
+ }
+ rcu_read_unlock();
percpu_counter_destroy(&sbi->s_freeclusters_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
- percpu_free_rwsem(&sbi->s_journal_flag_rwsem);
+ percpu_free_rwsem(&sbi->s_writepages_rwsem);
failed_mount5:
ext4_ext_release(sb);
ext4_release_system_zone(sb);
@@ -4721,9 +4755,12 @@ failed_mount3:
if (sbi->s_mmp_tsk)
kthread_stop(sbi->s_mmp_tsk);
failed_mount2:
+ rcu_read_lock();
+ group_desc = rcu_dereference(sbi->s_group_desc);
for (i = 0; i < db_count; i++)
- brelse(sbi->s_group_desc[i]);
- kvfree(sbi->s_group_desc);
+ brelse(group_desc[i]);
+ kvfree(group_desc);
+ rcu_read_unlock();
failed_mount:
if (sbi->s_chksum_driver)
crypto_free_shash(sbi->s_chksum_driver);
@@ -5585,10 +5622,7 @@ static int ext4_statfs_project(struct super_block *sb,
return PTR_ERR(dquot);
spin_lock(&dquot->dq_dqb_lock);
- limit = 0;
- if (dquot->dq_dqb.dqb_bsoftlimit &&
- (!limit || dquot->dq_dqb.dqb_bsoftlimit < limit))
- limit = dquot->dq_dqb.dqb_bsoftlimit;
+ limit = dquot->dq_dqb.dqb_bsoftlimit;
if (dquot->dq_dqb.dqb_bhardlimit &&
(!limit || dquot->dq_dqb.dqb_bhardlimit < limit))
limit = dquot->dq_dqb.dqb_bhardlimit;
@@ -5603,10 +5637,7 @@ static int ext4_statfs_project(struct super_block *sb,
(buf->f_blocks - curblock) : 0;
}
- limit = 0;
- if (dquot->dq_dqb.dqb_isoftlimit &&
- (!limit || dquot->dq_dqb.dqb_isoftlimit < limit))
- limit = dquot->dq_dqb.dqb_isoftlimit;
+ limit = dquot->dq_dqb.dqb_isoftlimit;
if (dquot->dq_dqb.dqb_ihardlimit &&
(!limit || dquot->dq_dqb.dqb_ihardlimit < limit))
limit = dquot->dq_dqb.dqb_ihardlimit;
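With limit initialized straight from the soft limit, the surviving check reduces to taking the smaller of the soft and hard limits while treating 0 as unlimited, which the old "!limit" dance obscured. The same reduction, compilable:

    #include <stdio.h>

    /* Effective cap: the smaller of soft and hard, with 0 = "no limit",
     * as in ext4_statfs_project() after this cleanup. */
    static unsigned long long effective_limit(unsigned long long soft,
                                              unsigned long long hard)
    {
        unsigned long long limit = soft;

        if (hard && (!limit || hard < limit))
            limit = hard;
        return limit;
    }

    int main(void)
    {
        printf("%llu\n", effective_limit(100, 200)); /* 100: soft wins */
        printf("%llu\n", effective_limit(0, 200));   /* 200: hard only */
        printf("%llu\n", effective_limit(100, 0));   /* 100: soft only */
        return 0;
    }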
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 594b05ae16c9..71946da84388 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -750,6 +750,13 @@ static struct inode *fat_alloc_inode(struct super_block *sb)
return NULL;
init_rwsem(&ei->truncate_lock);
+ /* Zeroing to allow iput() even on a partially initialized inode. */
+ ei->mmu_private = 0;
+ ei->i_start = 0;
+ ei->i_logstart = 0;
+ ei->i_attrs = 0;
+ ei->i_pos = 0;
+
return &ei->vfs_inode;
}
@@ -1374,16 +1381,6 @@ out:
return 0;
}
-static void fat_dummy_inode_init(struct inode *inode)
-{
- /* Initialize this dummy inode to work as no-op. */
- MSDOS_I(inode)->mmu_private = 0;
- MSDOS_I(inode)->i_start = 0;
- MSDOS_I(inode)->i_logstart = 0;
- MSDOS_I(inode)->i_attrs = 0;
- MSDOS_I(inode)->i_pos = 0;
-}
-
static int fat_read_root(struct inode *inode)
{
struct msdos_sb_info *sbi = MSDOS_SB(inode->i_sb);
@@ -1844,13 +1841,11 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
fat_inode = new_inode(sb);
if (!fat_inode)
goto out_fail;
- fat_dummy_inode_init(fat_inode);
sbi->fat_inode = fat_inode;
fsinfo_inode = new_inode(sb);
if (!fsinfo_inode)
goto out_fail;
- fat_dummy_inode_init(fsinfo_inode);
fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
sbi->fsinfo_inode = fsinfo_inode;
insert_inode_hash(fsinfo_inode);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index 9bc167562ee8..2e4c0fa2074b 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -735,8 +735,9 @@ static void send_sigio_to_task(struct task_struct *p,
return;
switch (signum) {
- kernel_siginfo_t si;
- default:
+ default: {
+ kernel_siginfo_t si;
+
/* Queue a rt signal with the appropriate fd as its
value. We use SI_SIGIO as the source, not
SI_KERNEL, since kernel signals always get
@@ -769,6 +770,7 @@ static void send_sigio_to_task(struct task_struct *p,
si.si_fd = fd;
if (!do_send_sig_info(signum, &si, p, type))
break;
+ }
/* fall-through - fall back on the old plain SIGIO signal */
case 0:
do_send_sig_info(SIGIO, SEND_SIG_PRIV, p, type);
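The declaration of si originally sat between the switch's opening brace and its first label, a spot control flow never reaches to initialize; the fix gives the variable its own block under default:. A minimal compilable sketch of the corrected scoping (values are made up):

    #include <stdio.h>

    int main(void)
    {
        int signum = 7;

        switch (signum) {
        default: {
            /* The declaration now lives inside the case's own block, so
             * it is in scope only on the path that initializes it. */
            int si = signum * 2;

            printf("queued rt signal payload %d\n", si);
            break;
        }
        case 0:
            puts("plain SIGIO");
            break;
        }
        return 0;
    }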
diff --git a/fs/io-wq.c b/fs/io-wq.c
index cb60a42b9fdf..5cef075c0b37 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/rculist_nulls.h>
+#include <linux/fs_struct.h>
#include "io-wq.h"
@@ -59,6 +60,7 @@ struct io_worker {
const struct cred *cur_creds;
const struct cred *saved_creds;
struct files_struct *restore_files;
+ struct fs_struct *restore_fs;
};
#if BITS_PER_LONG == 64
@@ -151,6 +153,9 @@ static bool __io_worker_unuse(struct io_wqe *wqe, struct io_worker *worker)
task_unlock(current);
}
+ if (current->fs != worker->restore_fs)
+ current->fs = worker->restore_fs;
+
/*
* If we have an active mm, we need to drop the wq lock before unusing
* it. If we do, return true and let the caller retry the idle loop.
@@ -311,6 +316,7 @@ static void io_worker_start(struct io_wqe *wqe, struct io_worker *worker)
worker->flags |= (IO_WORKER_F_UP | IO_WORKER_F_RUNNING);
worker->restore_files = current->files;
+ worker->restore_fs = current->fs;
io_wqe_inc_running(wqe, worker);
}
@@ -481,6 +487,8 @@ next:
current->files = work->files;
task_unlock(current);
}
+ if (work->fs && current->fs != work->fs)
+ current->fs = work->fs;
if (work->mm != worker->mm)
io_wq_switch_mm(worker, work);
if (worker->cur_creds != work->creds)
@@ -494,7 +502,7 @@ next:
if (worker->mm)
work->flags |= IO_WQ_WORK_HAS_MM;
- if (wq->get_work && !(work->flags & IO_WQ_WORK_INTERNAL)) {
+ if (wq->get_work) {
put_work = work;
wq->get_work(work);
}
@@ -527,42 +535,23 @@ next:
} while (1);
}
-static inline void io_worker_spin_for_work(struct io_wqe *wqe)
-{
- int i = 0;
-
- while (++i < 1000) {
- if (io_wqe_run_queue(wqe))
- break;
- if (need_resched())
- break;
- cpu_relax();
- }
-}
-
static int io_wqe_worker(void *data)
{
struct io_worker *worker = data;
struct io_wqe *wqe = worker->wqe;
struct io_wq *wq = wqe->wq;
- bool did_work;
io_worker_start(wqe, worker);
- did_work = false;
while (!test_bit(IO_WQ_BIT_EXIT, &wq->state)) {
set_current_state(TASK_INTERRUPTIBLE);
loop:
- if (did_work)
- io_worker_spin_for_work(wqe);
spin_lock_irq(&wqe->lock);
if (io_wqe_run_queue(wqe)) {
__set_current_state(TASK_RUNNING);
io_worker_handle_work(worker);
- did_work = true;
goto loop;
}
- did_work = false;
/* drops the lock on success, retry */
if (__io_worker_idle(wqe, worker)) {
__release(&wqe->lock);
@@ -691,11 +680,16 @@ static int io_wq_manager(void *data)
/* create fixed workers */
refcount_set(&wq->refs, workers_to_create);
for_each_node(node) {
+ if (!node_online(node))
+ continue;
if (!create_io_worker(wq, wq->wqes[node], IO_WQ_ACCT_BOUND))
goto err;
workers_to_create--;
}
+ while (workers_to_create--)
+ refcount_dec(&wq->refs);
+
complete(&wq->done);
while (!kthread_should_stop()) {
@@ -703,6 +697,9 @@ static int io_wq_manager(void *data)
struct io_wqe *wqe = wq->wqes[node];
bool fork_worker[2] = { false, false };
+ if (!node_online(node))
+ continue;
+
spin_lock_irq(&wqe->lock);
if (io_wqe_need_worker(wqe, IO_WQ_ACCT_BOUND))
fork_worker[IO_WQ_ACCT_BOUND] = true;
@@ -750,6 +747,17 @@ static bool io_wq_can_queue(struct io_wqe *wqe, struct io_wqe_acct *acct,
return true;
}
+static void io_run_cancel(struct io_wq_work *work)
+{
+ do {
+ struct io_wq_work *old_work = work;
+
+ work->flags |= IO_WQ_WORK_CANCEL;
+ work->func(&work);
+ work = (work == old_work) ? NULL : work;
+ } while (work);
+}
+
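io_run_cancel() centralizes the cancel path: it marks the work cancelled, runs it, and keeps going if the handler swapped in follow-up work through the double pointer, until no new work comes back. A self-contained sketch of that loop (types and names are stand-ins for the io-wq ones):

    #include <stdio.h>

    struct work {
        void (*func)(struct work **);
        unsigned flags;
    };

    static void done(struct work **w)
    {
        (void)w;
        puts("cancelled tail");
    }

    static struct work second = { .func = done };

    static void first_fn(struct work **w)
    {
        puts("cancelled head, chaining follow-up work");
        *w = &second;  /* hand back more work via *workptr reassignment */
    }

    /* Mirror of io_run_cancel(): loop while the handler returns new work. */
    static void run_cancel(struct work *work)
    {
        do {
            struct work *old_work = work;

            work->flags |= 1u;  /* stand-in for IO_WQ_WORK_CANCEL */
            work->func(&work);
            work = (work == old_work) ? NULL : work;
        } while (work);
    }

    int main(void)
    {
        struct work head = { .func = first_fn };

        run_cancel(&head);
        return 0;
    }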
static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
{
struct io_wqe_acct *acct = io_work_get_acct(wqe, work);
@@ -763,8 +771,7 @@ static void io_wqe_enqueue(struct io_wqe *wqe, struct io_wq_work *work)
* It's close enough to not be an issue, fork() has the same delay.
*/
if (unlikely(!io_wq_can_queue(wqe, acct, work))) {
- work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ io_run_cancel(work);
return;
}
@@ -821,7 +828,9 @@ static bool io_wq_for_each_worker(struct io_wqe *wqe,
list_for_each_entry_rcu(worker, &wqe->all_list, all_list) {
if (io_worker_get(worker)) {
- ret = func(worker, data);
+ /* no task if node is/was offline */
+ if (worker->task)
+ ret = func(worker, data);
io_worker_release(worker);
if (ret)
break;
@@ -901,8 +910,7 @@ static enum io_wq_cancel io_wqe_cancel_cb_work(struct io_wqe *wqe,
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
- work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ io_run_cancel(work);
return IO_WQ_CANCEL_OK;
}
@@ -929,17 +937,19 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
return ret;
}
+struct work_match {
+ bool (*fn)(struct io_wq_work *, void *data);
+ void *data;
+};
+
static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
{
- struct io_wq_work *work = data;
+ struct work_match *match = data;
unsigned long flags;
bool ret = false;
- if (worker->cur_work != work)
- return false;
-
spin_lock_irqsave(&worker->lock, flags);
- if (worker->cur_work == work &&
+ if (match->fn(worker->cur_work, match->data) &&
!(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL)) {
send_sig(SIGINT, worker->task, 1);
ret = true;
@@ -950,15 +960,13 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
}
static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
- struct io_wq_work *cwork)
+ struct work_match *match)
{
struct io_wq_work_node *node, *prev;
struct io_wq_work *work;
unsigned long flags;
bool found = false;
- cwork->flags |= IO_WQ_WORK_CANCEL;
-
/*
* First check pending list, if we're lucky we can just remove it
* from there. CANCEL_OK means that the work is returned as-new,
@@ -968,7 +976,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
wq_list_for_each(node, prev, &wqe->work_list) {
work = container_of(node, struct io_wq_work, list);
- if (work == cwork) {
+ if (match->fn(work, match->data)) {
wq_node_del(&wqe->work_list, node, prev);
found = true;
break;
@@ -977,8 +985,7 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
spin_unlock_irqrestore(&wqe->lock, flags);
if (found) {
- work->flags |= IO_WQ_WORK_CANCEL;
- work->func(&work);
+ io_run_cancel(work);
return IO_WQ_CANCEL_OK;
}
@@ -989,20 +996,31 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
* completion will run normally in this case.
*/
rcu_read_lock();
- found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, cwork);
+ found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
rcu_read_unlock();
return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
}
+static bool io_wq_work_match(struct io_wq_work *work, void *data)
+{
+ return work == data;
+}
+
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
{
+ struct work_match match = {
+ .fn = io_wq_work_match,
+ .data = cwork
+ };
enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
int node;
+ cwork->flags |= IO_WQ_WORK_CANCEL;
+
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
- ret = io_wqe_cancel_work(wqe, cwork);
+ ret = io_wqe_cancel_work(wqe, &match);
if (ret != IO_WQ_CANCEL_NOTFOUND)
break;
}
@@ -1010,38 +1028,33 @@ enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
return ret;
}
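Both cancel paths now funnel through a work_match, so scanning the pending list and signalling running workers take one predicate instead of a hard-coded pointer comparison; io_wq_cancel_pid() below is just a second predicate. A compilable sketch of the callback-matcher shape (the queue here is a flat array, purely for illustration):

    #include <stdbool.h>
    #include <stdio.h>

    struct work { int task_pid; };

    struct work_match {
        bool (*fn)(struct work *, void *);
        void *data;
    };

    static bool match_ptr(struct work *w, void *data)
    {
        return w == data;
    }

    static bool match_pid(struct work *w, void *data)
    {
        return w && w->task_pid == (int)(long)data;
    }

    /* One generic scan serves cancel-by-work and cancel-by-pid alike. */
    static struct work *find_match(struct work *list, int n,
                                   struct work_match *match)
    {
        for (int i = 0; i < n; i++)
            if (match->fn(&list[i], match->data))
                return &list[i];
        return NULL;
    }

    int main(void)
    {
        struct work q[3] = { { 10 }, { 20 }, { 30 } };
        struct work_match by_pid = { match_pid, (void *)(long)20 };
        struct work_match by_ptr = { match_ptr, &q[2] };

        printf("%d\n", find_match(q, 3, &by_pid)->task_pid); /* 20 */
        printf("%d\n", find_match(q, 3, &by_ptr)->task_pid); /* 30 */
        return 0;
    }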
-struct io_wq_flush_data {
- struct io_wq_work work;
- struct completion done;
-};
-
-static void io_wq_flush_func(struct io_wq_work **workptr)
+static bool io_wq_pid_match(struct io_wq_work *work, void *data)
{
- struct io_wq_work *work = *workptr;
- struct io_wq_flush_data *data;
+ pid_t pid = (pid_t) (unsigned long) data;
- data = container_of(work, struct io_wq_flush_data, work);
- complete(&data->done);
+ if (work)
+ return work->task_pid == pid;
+ return false;
}
-/*
- * Doesn't wait for previously queued work to finish. When this completes,
- * it just means that previously queued work was started.
- */
-void io_wq_flush(struct io_wq *wq)
+enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
{
- struct io_wq_flush_data data;
+ struct work_match match = {
+ .fn = io_wq_pid_match,
+ .data = (void *) (unsigned long) pid
+ };
+ enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
int node;
for_each_node(node) {
struct io_wqe *wqe = wq->wqes[node];
- init_completion(&data.done);
- INIT_IO_WORK(&data.work, io_wq_flush_func);
- data.work.flags |= IO_WQ_WORK_INTERNAL;
- io_wqe_enqueue(wqe, &data.work);
- wait_for_completion(&data.done);
+ ret = io_wqe_cancel_work(wqe, &match);
+ if (ret != IO_WQ_CANCEL_NOTFOUND)
+ break;
}
+
+ return ret;
}
struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
@@ -1067,12 +1080,15 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
for_each_node(node) {
struct io_wqe *wqe;
+ int alloc_node = node;
- wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, node);
+ if (!node_online(alloc_node))
+ alloc_node = NUMA_NO_NODE;
+ wqe = kzalloc_node(sizeof(struct io_wqe), GFP_KERNEL, alloc_node);
if (!wqe)
goto err;
wq->wqes[node] = wqe;
- wqe->node = node;
+ wqe->node = alloc_node;
wqe->acct[IO_WQ_ACCT_BOUND].max_workers = bounded;
atomic_set(&wqe->acct[IO_WQ_ACCT_BOUND].nr_running, 0);
if (wq->user) {
@@ -1080,7 +1096,6 @@ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
task_rlimit(current, RLIMIT_NPROC);
}
atomic_set(&wqe->acct[IO_WQ_ACCT_UNBOUND].nr_running, 0);
- wqe->node = node;
wqe->wq = wq;
spin_lock_init(&wqe->lock);
INIT_WQ_LIST(&wqe->work_list);
diff --git a/fs/io-wq.h b/fs/io-wq.h
index 50b3378febf2..e5e15f2c93ec 100644
--- a/fs/io-wq.h
+++ b/fs/io-wq.h
@@ -8,7 +8,6 @@ enum {
IO_WQ_WORK_HAS_MM = 2,
IO_WQ_WORK_HASHED = 4,
IO_WQ_WORK_UNBOUND = 32,
- IO_WQ_WORK_INTERNAL = 64,
IO_WQ_WORK_CB = 128,
IO_WQ_WORK_NO_CANCEL = 256,
IO_WQ_WORK_CONCURRENT = 512,
@@ -74,18 +73,15 @@ struct io_wq_work {
struct files_struct *files;
struct mm_struct *mm;
const struct cred *creds;
+ struct fs_struct *fs;
unsigned flags;
+ pid_t task_pid;
};
-#define INIT_IO_WORK(work, _func) \
- do { \
- (work)->list.next = NULL; \
- (work)->func = _func; \
- (work)->flags = 0; \
- (work)->files = NULL; \
- (work)->mm = NULL; \
- (work)->creds = NULL; \
- } while (0) \
+#define INIT_IO_WORK(work, _func) \
+ do { \
+ *(work) = (struct io_wq_work){ .func = _func }; \
+ } while (0) \
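Rewriting INIT_IO_WORK() as a compound-literal assignment zeroes every member instead of hand-listing them, so fields added later, such as ->fs and ->task_pid in this very series, start cleared without touching the macro again. A compilable illustration:

    #include <stdio.h>
    #include <string.h>

    struct io_work {
        void (*func)(struct io_work *);
        void *mm, *creds, *fs;  /* later additions get covered for free */
        unsigned flags;
        int task_pid;
    };

    #define INIT_WORK(work, _func)                        \
        do {                                              \
            *(work) = (struct io_work){ .func = _func };  \
        } while (0)

    static void handler(struct io_work *w) { (void)w; }

    int main(void)
    {
        struct io_work w;

        memset(&w, 0xff, sizeof(w));  /* start from garbage */
        INIT_WORK(&w, handler);
        /* Every member except .func is now zero, including any field
         * added to the struct after the macro was written. */
        printf("flags=%u fs=%p\n", w.flags, w.fs);
        return 0;
    }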
typedef void (get_work_fn)(struct io_wq_work *);
typedef void (put_work_fn)(struct io_wq_work *);
@@ -103,10 +99,10 @@ void io_wq_destroy(struct io_wq *wq);
void io_wq_enqueue(struct io_wq *wq, struct io_wq_work *work);
void io_wq_enqueue_hashed(struct io_wq *wq, struct io_wq_work *work, void *val);
-void io_wq_flush(struct io_wq *wq);
void io_wq_cancel_all(struct io_wq *wq);
enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
diff --git a/fs/io_uring.c b/fs/io_uring.c
index 77f22c3da30f..c06082bb039a 100644
--- a/fs/io_uring.c
+++ b/fs/io_uring.c
@@ -75,6 +75,7 @@
#include <linux/fsnotify.h>
#include <linux/fadvise.h>
#include <linux/eventpoll.h>
+#include <linux/fs_struct.h>
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>
@@ -182,19 +183,15 @@ struct fixed_file_table {
struct file **files;
};
-enum {
- FFD_F_ATOMIC,
-};
-
struct fixed_file_data {
struct fixed_file_table *table;
struct io_ring_ctx *ctx;
struct percpu_ref refs;
struct llist_head put_llist;
- unsigned long state;
struct work_struct ref_work;
struct completion done;
+ struct rcu_head rcu;
};
struct io_ring_ctx {
@@ -204,11 +201,11 @@ struct io_ring_ctx {
struct {
unsigned int flags;
- int compat: 1;
- int account_mem: 1;
- int cq_overflow_flushed: 1;
- int drain_next: 1;
- int eventfd_async: 1;
+ unsigned int compat: 1;
+ unsigned int account_mem: 1;
+ unsigned int cq_overflow_flushed: 1;
+ unsigned int drain_next: 1;
+ unsigned int eventfd_async: 1;
/*
* Ring buffer of indices into array of io_uring_sqe, which is
@@ -441,6 +438,7 @@ struct io_async_msghdr {
struct iovec *iov;
struct sockaddr __user *uaddr;
struct msghdr msg;
+ struct sockaddr_storage addr;
};
struct io_async_rw {
@@ -450,17 +448,12 @@ struct io_async_rw {
ssize_t size;
};
-struct io_async_open {
- struct filename *filename;
-};
-
struct io_async_ctx {
union {
struct io_async_rw rw;
struct io_async_msghdr msg;
struct io_async_connect connect;
struct io_timeout_data timeout;
- struct io_async_open open;
};
};
@@ -483,6 +476,8 @@ enum {
REQ_F_MUST_PUNT_BIT,
REQ_F_TIMEOUT_NOSEQ_BIT,
REQ_F_COMP_LOCKED_BIT,
+ REQ_F_NEED_CLEANUP_BIT,
+ REQ_F_OVERFLOW_BIT,
};
enum {
@@ -521,6 +516,10 @@ enum {
REQ_F_TIMEOUT_NOSEQ = BIT(REQ_F_TIMEOUT_NOSEQ_BIT),
/* completion under lock */
REQ_F_COMP_LOCKED = BIT(REQ_F_COMP_LOCKED_BIT),
+ /* needs cleanup */
+ REQ_F_NEED_CLEANUP = BIT(REQ_F_NEED_CLEANUP_BIT),
+ /* in overflow list */
+ REQ_F_OVERFLOW = BIT(REQ_F_OVERFLOW_BIT),
};
/*
@@ -553,7 +552,6 @@ struct io_kiocb {
* llist_node is only used for poll deferred completions
*/
struct llist_node llist_node;
- bool has_user;
bool in_async;
bool needs_fixed_file;
u8 opcode;
@@ -614,6 +612,8 @@ struct io_op_def {
unsigned not_supported : 1;
/* needs file table */
unsigned file_table : 1;
+ /* needs ->fs */
+ unsigned needs_fs : 1;
};
static const struct io_op_def io_op_defs[] = {
@@ -656,12 +656,14 @@ static const struct io_op_def io_op_defs[] = {
.needs_mm = 1,
.needs_file = 1,
.unbound_nonreg_file = 1,
+ .needs_fs = 1,
},
[IORING_OP_RECVMSG] = {
.async_ctx = 1,
.needs_mm = 1,
.needs_file = 1,
.unbound_nonreg_file = 1,
+ .needs_fs = 1,
},
[IORING_OP_TIMEOUT] = {
.async_ctx = 1,
@@ -692,6 +694,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.fd_non_neg = 1,
.file_table = 1,
+ .needs_fs = 1,
},
[IORING_OP_CLOSE] = {
.needs_file = 1,
@@ -705,6 +708,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_mm = 1,
.needs_file = 1,
.fd_non_neg = 1,
+ .needs_fs = 1,
},
[IORING_OP_READ] = {
.needs_mm = 1,
@@ -736,6 +740,7 @@ static const struct io_op_def io_op_defs[] = {
.needs_file = 1,
.fd_non_neg = 1,
.file_table = 1,
+ .needs_fs = 1,
},
[IORING_OP_EPOLL_CTL] = {
.unbound_nonreg_file = 1,
@@ -754,6 +759,7 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
unsigned nr_args);
static int io_grab_files(struct io_kiocb *req);
static void io_ring_file_ref_flush(struct fixed_file_data *data);
+static void io_cleanup_req(struct io_kiocb *req);
static struct kmem_cache *req_cachep;
@@ -909,6 +915,18 @@ static inline void io_req_work_grab_env(struct io_kiocb *req,
}
if (!req->work.creds)
req->work.creds = get_current_cred();
+ if (!req->work.fs && def->needs_fs) {
+ spin_lock(&current->fs->lock);
+ if (!current->fs->in_exec) {
+ req->work.fs = current->fs;
+ req->work.fs->users++;
+ } else {
+ req->work.flags |= IO_WQ_WORK_CANCEL;
+ }
+ spin_unlock(&current->fs->lock);
+ }
+ if (!req->work.task_pid)
+ req->work.task_pid = task_pid_vnr(current);
}
static inline void io_req_work_drop_env(struct io_kiocb *req)
@@ -921,6 +939,16 @@ static inline void io_req_work_drop_env(struct io_kiocb *req)
put_cred(req->work.creds);
req->work.creds = NULL;
}
+ if (req->work.fs) {
+ struct fs_struct *fs = req->work.fs;
+
+ spin_lock(&req->work.fs->lock);
+ if (--fs->users)
+ fs = NULL;
+ spin_unlock(&req->work.fs->lock);
+ if (fs)
+ free_fs_struct(fs);
+ }
}
static inline bool io_prep_async_work(struct io_kiocb *req,
@@ -972,6 +1000,7 @@ static void io_kill_timeout(struct io_kiocb *req)
if (ret != -1) {
atomic_inc(&req->ctx->cq_timeouts);
list_del_init(&req->list);
+ req->flags |= REQ_F_COMP_LOCKED;
io_cqring_fill_event(req, 0);
io_put_req(req);
}
@@ -1074,6 +1103,7 @@ static bool io_cqring_overflow_flush(struct io_ring_ctx *ctx, bool force)
req = list_first_entry(&ctx->cq_overflow_list, struct io_kiocb,
list);
list_move(&req->list, &list);
+ req->flags &= ~REQ_F_OVERFLOW;
if (cqe) {
WRITE_ONCE(cqe->user_data, req->user_data);
WRITE_ONCE(cqe->res, req->result);
@@ -1126,6 +1156,7 @@ static void io_cqring_fill_event(struct io_kiocb *req, long res)
set_bit(0, &ctx->sq_check_overflow);
set_bit(0, &ctx->cq_check_overflow);
}
+ req->flags |= REQ_F_OVERFLOW;
refcount_inc(&req->refs);
req->result = res;
list_add_tail(&req->list, &ctx->cq_overflow_list);
@@ -1226,6 +1257,9 @@ static void __io_req_aux_free(struct io_kiocb *req)
{
struct io_ring_ctx *ctx = req->ctx;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ io_cleanup_req(req);
+
kfree(req->io);
if (req->file) {
if (req->flags & REQ_F_FIXED_FILE)
@@ -1446,10 +1480,10 @@ static void io_free_req(struct io_kiocb *req)
__attribute__((nonnull))
static void io_put_req_find_next(struct io_kiocb *req, struct io_kiocb **nxtptr)
{
- io_req_find_next(req, nxtptr);
-
- if (refcount_dec_and_test(&req->refs))
+ if (refcount_dec_and_test(&req->refs)) {
+ io_req_find_next(req, nxtptr);
__io_free_req(req);
+ }
}
static void io_put_req(struct io_kiocb *req)
@@ -1635,11 +1669,17 @@ static void io_iopoll_reap_events(struct io_ring_ctx *ctx)
mutex_unlock(&ctx->uring_lock);
}
-static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
+static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
+ long min)
{
int iters = 0, ret = 0;
+ /*
+ * We disallow the app entering submit/complete with polling, but we
+ * still need to lock the ring to prevent racing with polled issue
+ * that got punted to a workqueue.
+ */
+ mutex_lock(&ctx->uring_lock);
do {
int tmin = 0;
@@ -1675,21 +1715,6 @@ static int __io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
ret = 0;
} while (min && !*nr_events && !need_resched());
- return ret;
-}
-
-static int io_iopoll_check(struct io_ring_ctx *ctx, unsigned *nr_events,
- long min)
-{
- int ret;
-
- /*
- * We disallow the app entering submit/complete with polling, but we
- * still need to lock the ring to prevent racing with polled issue
- * that got punted to a workqueue.
- */
- mutex_lock(&ctx->uring_lock);
- ret = __io_iopoll_check(ctx, nr_events, min);
mutex_unlock(&ctx->uring_lock);
return ret;
}
@@ -1793,6 +1818,10 @@ static void io_iopoll_req_issued(struct io_kiocb *req)
list_add(&req->list, &ctx->poll_list);
else
list_add_tail(&req->list, &ctx->poll_list);
+
+ if ((ctx->flags & IORING_SETUP_SQPOLL) &&
+ wq_has_sleeper(&ctx->sqo_wait))
+ wake_up(&ctx->sqo_wait);
}
static void io_file_put(struct io_submit_state *state)
@@ -2043,7 +2072,7 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
ssize_t ret;
ret = import_single_range(rw, buf, sqe_len, *iovec, iter);
*iovec = NULL;
- return ret;
+ return ret < 0 ? ret : sqe_len;
}
if (req->io) {
@@ -2056,9 +2085,6 @@ static ssize_t io_import_iovec(int rw, struct io_kiocb *req,
return iorw->size;
}
- if (!req->has_user)
- return -EFAULT;
-
#ifdef CONFIG_COMPAT
if (req->ctx->compat)
return compat_import_iovec(rw, buf, sqe_len, UIO_FASTIOV,
@@ -2137,6 +2163,8 @@ static void io_req_map_rw(struct io_kiocb *req, ssize_t io_size,
req->io->rw.iov = req->io->rw.fast_iov;
memcpy(req->io->rw.iov, fast_iov,
sizeof(struct iovec) * iter->nr_segs);
+ } else {
+ req->flags |= REQ_F_NEED_CLEANUP;
}
}
@@ -2148,17 +2176,6 @@ static int io_alloc_async_ctx(struct io_kiocb *req)
return req->io == NULL;
}
-static void io_rw_async(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct iovec *iov = NULL;
-
- if (req->io->rw.iov != req->io->rw.fast_iov)
- iov = req->io->rw.iov;
- io_wq_submit_work(workptr);
- kfree(iov);
-}
-
static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
struct iovec *iovec, struct iovec *fast_iov,
struct iov_iter *iter)
@@ -2171,7 +2188,6 @@ static int io_setup_async_rw(struct io_kiocb *req, ssize_t io_size,
io_req_map_rw(req, io_size, iovec, fast_iov, iter);
}
- req->work.func = io_rw_async;
return 0;
}
@@ -2189,7 +2205,8 @@ static int io_read_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(!(req->file->f_mode & FMODE_READ)))
return -EBADF;
- if (!req->io)
+ /* either don't need iovec imported or already have it */
+ if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
io = req->io;
@@ -2258,8 +2275,8 @@ copy_iov:
}
}
out_free:
- if (!io_wq_current_is_worker())
- kfree(iovec);
+ kfree(iovec);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
return ret;
}
@@ -2277,7 +2294,8 @@ static int io_write_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe,
if (unlikely(!(req->file->f_mode & FMODE_WRITE)))
return -EBADF;
- if (!req->io)
+ /* either don't need iovec imported or already have it */
+ if (!req->io || req->flags & REQ_F_NEED_CLEANUP)
return 0;
io = req->io;
@@ -2352,6 +2370,12 @@ static int io_write(struct io_kiocb *req, struct io_kiocb **nxt,
ret2 = call_write_iter(req->file, kiocb, &iter);
else
ret2 = loop_rw_iter(WRITE, req->file, kiocb, &iter);
+ /*
+ * Raw bdev writes will return -EOPNOTSUPP for IOCB_NOWAIT. Just
+ * retry them without IOCB_NOWAIT.
+ */
+ if (ret2 == -EOPNOTSUPP && (kiocb->ki_flags & IOCB_NOWAIT))
+ ret2 = -EAGAIN;
if (!force_nonblock || ret2 != -EAGAIN) {
kiocb_done(kiocb, ret2, nxt, req->in_async);
} else {
@@ -2364,8 +2388,8 @@ copy_iov:
}
}
out_free:
- if (!io_wq_current_is_worker())
- kfree(iovec);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+ kfree(iovec);
return ret;
}
@@ -2485,6 +2509,9 @@ static void io_fallocate_finish(struct io_wq_work **workptr)
struct io_kiocb *nxt = NULL;
int ret;
+ if (io_req_cancelled(req))
+ return;
+
ret = vfs_fallocate(req->file, req->sync.mode, req->sync.off,
req->sync.len);
if (ret < 0)
@@ -2534,6 +2561,10 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
req->open.dfd = READ_ONCE(sqe->fd);
req->open.how.mode = READ_ONCE(sqe->len);
@@ -2547,6 +2578,7 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return ret;
}
+ req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
@@ -2559,6 +2591,10 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
req->open.dfd = READ_ONCE(sqe->fd);
fname = u64_to_user_ptr(READ_ONCE(sqe->addr));
@@ -2583,6 +2619,7 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return ret;
}
+ req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
@@ -2614,6 +2651,7 @@ static int io_openat2(struct io_kiocb *req, struct io_kiocb **nxt,
}
err:
putname(req->open.filename);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
@@ -2754,6 +2792,10 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
if (sqe->ioprio || sqe->buf_index)
return -EINVAL;
+ if (sqe->flags & IOSQE_FIXED_FILE)
+ return -EBADF;
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
req->open.dfd = READ_ONCE(sqe->fd);
req->open.mask = READ_ONCE(sqe->len);
@@ -2771,6 +2813,7 @@ static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return ret;
}
+ req->flags |= REQ_F_NEED_CLEANUP;
return 0;
}
@@ -2808,6 +2851,7 @@ retry:
ret = cp_statx(&stat, ctx->buffer);
err:
putname(ctx->filename);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
if (ret < 0)
req_set_fail_links(req);
io_cqring_add_event(req, ret);
@@ -2827,7 +2871,7 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
sqe->rw_flags || sqe->buf_index)
return -EINVAL;
if (sqe->flags & IOSQE_FIXED_FILE)
- return -EINVAL;
+ return -EBADF;
req->close.fd = READ_ONCE(sqe->fd);
if (req->file->f_op == &io_uring_fops ||
@@ -2837,24 +2881,26 @@ static int io_close_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return 0;
}
+/* only called when __close_fd_get_file() is done */
+static void __io_close_finish(struct io_kiocb *req, struct io_kiocb **nxt)
+{
+ int ret;
+
+ ret = filp_close(req->close.put_file, req->work.files);
+ if (ret < 0)
+ req_set_fail_links(req);
+ io_cqring_add_event(req, ret);
+ fput(req->close.put_file);
+ io_put_req_find_next(req, nxt);
+}
+
static void io_close_finish(struct io_wq_work **workptr)
{
struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
struct io_kiocb *nxt = NULL;
- /* Invoked with files, we need to do the close */
- if (req->work.files) {
- int ret;
-
- ret = filp_close(req->close.put_file, req->work.files);
- if (ret < 0)
- req_set_fail_links(req);
- io_cqring_add_event(req, ret);
- }
-
- fput(req->close.put_file);
-
- io_put_req_find_next(req, &nxt);
+ /* not cancellable, don't do io_req_cancelled() */
+ __io_close_finish(req, &nxt);
if (nxt)
io_wq_assign_next(workptr, nxt);
}
@@ -2877,22 +2923,8 @@ static int io_close(struct io_kiocb *req, struct io_kiocb **nxt,
* No ->flush(), safely close from here and just punt the
* fput() to async context.
*/
- ret = filp_close(req->close.put_file, current->files);
-
- if (ret < 0)
- req_set_fail_links(req);
- io_cqring_add_event(req, ret);
-
- if (io_wq_current_is_worker()) {
- struct io_wq_work *old_work, *work;
-
- old_work = work = &req->work;
- io_close_finish(&work);
- if (work && work != old_work)
- *nxt = container_of(work, struct io_kiocb, work);
- return 0;
- }
-
+ __io_close_finish(req, nxt);
+ return 0;
eagain:
req->work.func = io_close_finish;
/*
@@ -2960,35 +2992,34 @@ static int io_sync_file_range(struct io_kiocb *req, struct io_kiocb **nxt,
return 0;
}
-#if defined(CONFIG_NET)
-static void io_sendrecv_async(struct io_wq_work **workptr)
-{
- struct io_kiocb *req = container_of(*workptr, struct io_kiocb, work);
- struct iovec *iov = NULL;
-
- if (req->io->rw.iov != req->io->rw.fast_iov)
- iov = req->io->msg.iov;
- io_wq_submit_work(workptr);
- kfree(iov);
-}
-#endif
-
static int io_sendmsg_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
#if defined(CONFIG_NET)
struct io_sr_msg *sr = &req->sr_msg;
struct io_async_ctx *io = req->io;
+ int ret;
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ sr->msg_flags |= MSG_CMSG_COMPAT;
+#endif
+
if (!io || req->opcode == IORING_OP_SEND)
return 0;
+ /* iovec is already imported */
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
io->msg.iov = io->msg.fast_iov;
- return sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+ ret = sendmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
&io->msg.iov);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
#else
return -EOPNOTSUPP;
#endif
@@ -3008,12 +3039,11 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_async_ctx io;
- struct sockaddr_storage addr;
unsigned flags;
if (req->io) {
kmsg = &req->io->msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &req->io->msg.addr;
/* if iov is set, it's allocated already */
if (!kmsg->iov)
kmsg->iov = kmsg->fast_iov;
@@ -3022,7 +3052,7 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
struct io_sr_msg *sr = &req->sr_msg;
kmsg = &io.msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &io.msg.addr;
io.msg.iov = io.msg.fast_iov;
ret = sendmsg_copy_msghdr(&io.msg.msg, sr->msg,
@@ -3041,18 +3071,22 @@ static int io_sendmsg(struct io_kiocb *req, struct io_kiocb **nxt,
if (force_nonblock && ret == -EAGAIN) {
if (req->io)
return -EAGAIN;
- if (io_alloc_async_ctx(req))
+ if (io_alloc_async_ctx(req)) {
+ if (kmsg->iov != kmsg->fast_iov)
+ kfree(kmsg->iov);
return -ENOMEM;
+ }
+ req->flags |= REQ_F_NEED_CLEANUP;
memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
- req->work.func = io_sendrecv_async;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
}
- if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
+ if (kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
@@ -3120,17 +3154,29 @@ static int io_recvmsg_prep(struct io_kiocb *req,
#if defined(CONFIG_NET)
struct io_sr_msg *sr = &req->sr_msg;
struct io_async_ctx *io = req->io;
+ int ret;
sr->msg_flags = READ_ONCE(sqe->msg_flags);
sr->msg = u64_to_user_ptr(READ_ONCE(sqe->addr));
sr->len = READ_ONCE(sqe->len);
+#ifdef CONFIG_COMPAT
+ if (req->ctx->compat)
+ sr->msg_flags |= MSG_CMSG_COMPAT;
+#endif
+
if (!io || req->opcode == IORING_OP_RECV)
return 0;
+ /* iovec is already imported */
+ if (req->flags & REQ_F_NEED_CLEANUP)
+ return 0;
io->msg.iov = io->msg.fast_iov;
- return recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
+ ret = recvmsg_copy_msghdr(&io->msg.msg, sr->msg, sr->msg_flags,
&io->msg.uaddr, &io->msg.iov);
+ if (!ret)
+ req->flags |= REQ_F_NEED_CLEANUP;
+ return ret;
#else
return -EOPNOTSUPP;
#endif
@@ -3150,12 +3196,11 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
sock = sock_from_file(req->file, &ret);
if (sock) {
struct io_async_ctx io;
- struct sockaddr_storage addr;
unsigned flags;
if (req->io) {
kmsg = &req->io->msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &req->io->msg.addr;
/* if iov is set, it's allocated already */
if (!kmsg->iov)
kmsg->iov = kmsg->fast_iov;
@@ -3164,7 +3209,7 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
struct io_sr_msg *sr = &req->sr_msg;
kmsg = &io.msg;
- kmsg->msg.msg_name = &addr;
+ kmsg->msg.msg_name = &io.msg.addr;
io.msg.iov = io.msg.fast_iov;
ret = recvmsg_copy_msghdr(&io.msg.msg, sr->msg,
@@ -3185,18 +3230,22 @@ static int io_recvmsg(struct io_kiocb *req, struct io_kiocb **nxt,
if (force_nonblock && ret == -EAGAIN) {
if (req->io)
return -EAGAIN;
- if (io_alloc_async_ctx(req))
+ if (io_alloc_async_ctx(req)) {
+ if (kmsg->iov != kmsg->fast_iov)
+ kfree(kmsg->iov);
return -ENOMEM;
+ }
memcpy(&req->io->msg, &io.msg, sizeof(io.msg));
- req->work.func = io_sendrecv_async;
+ req->flags |= REQ_F_NEED_CLEANUP;
return -EAGAIN;
}
if (ret == -ERESTARTSYS)
ret = -EINTR;
}
- if (!io_wq_current_is_worker() && kmsg && kmsg->iov != kmsg->fast_iov)
+ if (kmsg && kmsg->iov != kmsg->fast_iov)
kfree(kmsg->iov);
+ req->flags &= ~REQ_F_NEED_CLEANUP;
io_cqring_add_event(req, ret);
if (ret < 0)
req_set_fail_links(req);
@@ -4207,6 +4256,35 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
return -EIOCBQUEUED;
}
+static void io_cleanup_req(struct io_kiocb *req)
+{
+ struct io_async_ctx *io = req->io;
+
+ switch (req->opcode) {
+ case IORING_OP_READV:
+ case IORING_OP_READ_FIXED:
+ case IORING_OP_READ:
+ case IORING_OP_WRITEV:
+ case IORING_OP_WRITE_FIXED:
+ case IORING_OP_WRITE:
+ if (io->rw.iov != io->rw.fast_iov)
+ kfree(io->rw.iov);
+ break;
+ case IORING_OP_SENDMSG:
+ case IORING_OP_RECVMSG:
+ if (io->msg.iov != io->msg.fast_iov)
+ kfree(io->msg.iov);
+ break;
+ case IORING_OP_OPENAT:
+ case IORING_OP_OPENAT2:
+ case IORING_OP_STATX:
+ putname(req->open.filename);
+ break;
+ }
+
+ req->flags &= ~REQ_F_NEED_CLEANUP;
+}
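The prep/cleanup changes above all follow one pattern: prep routines record what they allocated by setting REQ_F_NEED_CLEANUP, a single io_cleanup_req()-style switch releases it, and the flag makes a repeated prep call a no-op. A minimal standalone sketch of that pattern, using hypothetical types rather than the real io_uring structures:

#include <stdlib.h>

#define REQ_F_NEED_CLEANUP	(1U << 0)

struct request {
	unsigned int flags;
	void *resource;		/* stand-in for iovec/filename/etc. */
};

static int prep(struct request *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP)
		return 0;			/* already prepared; don't re-import or leak */
	req->resource = malloc(64);
	if (!req->resource)
		return -1;
	req->flags |= REQ_F_NEED_CLEANUP;	/* remember there is state to free */
	return 0;
}

static void cleanup(struct request *req)
{
	if (req->flags & REQ_F_NEED_CLEANUP) {
		free(req->resource);
		req->flags &= ~REQ_F_NEED_CLEANUP;
	}
}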
+
static int io_issue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_kiocb **nxt, bool force_nonblock)
{
@@ -4446,7 +4524,6 @@ static void io_wq_submit_work(struct io_wq_work **workptr)
}
if (!ret) {
- req->has_user = (work->flags & IO_WQ_WORK_HAS_MM) != 0;
req->in_async = true;
do {
ret = io_issue_sqe(req, NULL, &nxt, false);
@@ -4479,7 +4556,7 @@ static int io_req_needs_file(struct io_kiocb *req, int fd)
{
if (!io_op_defs[req->opcode].needs_file)
return 0;
- if (fd == -1 && io_op_defs[req->opcode].fd_non_neg)
+ if ((fd == -1 || fd == AT_FDCWD) && io_op_defs[req->opcode].fd_non_neg)
return 0;
return 1;
}
@@ -4639,11 +4716,21 @@ static void __io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
{
struct io_kiocb *linked_timeout;
struct io_kiocb *nxt = NULL;
+ const struct cred *old_creds = NULL;
int ret;
again:
linked_timeout = io_prep_linked_timeout(req);
+ if (req->work.creds && req->work.creds != current_cred()) {
+ if (old_creds)
+ revert_creds(old_creds);
+ if (old_creds == req->work.creds)
+ old_creds = NULL; /* restored original creds */
+ else
+ old_creds = override_creds(req->work.creds);
+ }
+
ret = io_issue_sqe(req, sqe, &nxt, true);
/*
@@ -4669,7 +4756,7 @@ punt:
err:
/* drop submission reference */
- io_put_req(req);
+ io_put_req_find_next(req, &nxt);
if (linked_timeout) {
if (!ret)
@@ -4693,6 +4780,8 @@ done_req:
goto punt;
goto again;
}
+ if (old_creds)
+ revert_creds(old_creds);
}
static void io_queue_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe)
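For reference, a condensed sketch of the override/revert pairing the hunk above introduces: override_creds() hands back the previous credentials, which must be passed to revert_creds() once the work is done (do_work() is a hypothetical placeholder):

static void run_with_creds(struct io_kiocb *req)
{
	const struct cred *old_creds = NULL;

	if (req->work.creds && req->work.creds != current_cred())
		old_creds = override_creds(req->work.creds);	/* returns creds to restore */

	do_work(req);						/* hypothetical request work */

	if (old_creds)
		revert_creds(old_creds);			/* restore the caller's creds */
}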
@@ -4737,7 +4826,6 @@ static inline void io_queue_link_head(struct io_kiocb *req)
static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
struct io_submit_state *state, struct io_kiocb **link)
{
- const struct cred *old_creds = NULL;
struct io_ring_ctx *ctx = req->ctx;
unsigned int sqe_flags;
int ret, id;
@@ -4752,14 +4840,12 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
id = READ_ONCE(sqe->personality);
if (id) {
- const struct cred *personality_creds;
-
- personality_creds = idr_find(&ctx->personality_idr, id);
- if (unlikely(!personality_creds)) {
+ req->work.creds = idr_find(&ctx->personality_idr, id);
+ if (unlikely(!req->work.creds)) {
ret = -EINVAL;
goto err_req;
}
- old_creds = override_creds(personality_creds);
+ get_cred(req->work.creds);
}
/* same numerical values with corresponding REQ_F_*, safe to copy */
@@ -4771,8 +4857,6 @@ static bool io_submit_sqe(struct io_kiocb *req, const struct io_uring_sqe *sqe,
err_req:
io_cqring_add_event(req, ret);
io_double_put_req(req);
- if (old_creds)
- revert_creds(old_creds);
return false;
}
@@ -4833,8 +4917,6 @@ err_req:
}
}
- if (old_creds)
- revert_creds(old_creds);
return true;
}
@@ -4950,6 +5032,7 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
for (i = 0; i < nr; i++) {
const struct io_uring_sqe *sqe;
struct io_kiocb *req;
+ int err;
req = io_get_req(ctx, statep);
if (unlikely(!req)) {
@@ -4966,20 +5049,23 @@ static int io_submit_sqes(struct io_ring_ctx *ctx, unsigned int nr,
submitted++;
if (unlikely(req->opcode >= IORING_OP_LAST)) {
- io_cqring_add_event(req, -EINVAL);
+ err = -EINVAL;
+fail_req:
+ io_cqring_add_event(req, err);
io_double_put_req(req);
break;
}
if (io_op_defs[req->opcode].needs_mm && !*mm) {
mm_fault = mm_fault || !mmget_not_zero(ctx->sqo_mm);
- if (!mm_fault) {
- use_mm(ctx->sqo_mm);
- *mm = ctx->sqo_mm;
+ if (unlikely(mm_fault)) {
+ err = -EFAULT;
+ goto fail_req;
}
+ use_mm(ctx->sqo_mm);
+ *mm = ctx->sqo_mm;
}
- req->has_user = *mm != NULL;
req->in_async = async;
req->needs_fixed_file = async;
trace_io_uring_submit_sqe(ctx, req->opcode, req->user_data,
@@ -5011,9 +5097,8 @@ static int io_sq_thread(void *data)
const struct cred *old_cred;
mm_segment_t old_fs;
DEFINE_WAIT(wait);
- unsigned inflight;
unsigned long timeout;
- int ret;
+ int ret = 0;
complete(&ctx->completions[1]);
@@ -5021,39 +5106,19 @@ static int io_sq_thread(void *data)
set_fs(USER_DS);
old_cred = override_creds(ctx->creds);
- ret = timeout = inflight = 0;
+ timeout = jiffies + ctx->sq_thread_idle;
while (!kthread_should_park()) {
unsigned int to_submit;
- if (inflight) {
+ if (!list_empty(&ctx->poll_list)) {
unsigned nr_events = 0;
- if (ctx->flags & IORING_SETUP_IOPOLL) {
- /*
- * inflight is the count of the maximum possible
- * entries we submitted, but it can be smaller
- * if we dropped some of them. If we don't have
- * poll entries available, then we know that we
- * have nothing left to poll for. Reset the
- * inflight count to zero in that case.
- */
- mutex_lock(&ctx->uring_lock);
- if (!list_empty(&ctx->poll_list))
- __io_iopoll_check(ctx, &nr_events, 0);
- else
- inflight = 0;
- mutex_unlock(&ctx->uring_lock);
- } else {
- /*
- * Normal IO, just pretend everything completed.
- * We don't have to poll completions for that.
- */
- nr_events = inflight;
- }
-
- inflight -= nr_events;
- if (!inflight)
+ mutex_lock(&ctx->uring_lock);
+ if (!list_empty(&ctx->poll_list))
+ io_iopoll_getevents(ctx, &nr_events, 0);
+ else
timeout = jiffies + ctx->sq_thread_idle;
+ mutex_unlock(&ctx->uring_lock);
}
to_submit = io_sqring_entries(ctx);
@@ -5064,34 +5129,47 @@ static int io_sq_thread(void *data)
*/
if (!to_submit || ret == -EBUSY) {
/*
+ * Drop cur_mm before scheduling, we can't hold it for
+ * long periods (or over schedule()). Do this before
+ * adding ourselves to the waitqueue, as the unuse/drop
+ * may sleep.
+ */
+ if (cur_mm) {
+ unuse_mm(cur_mm);
+ mmput(cur_mm);
+ cur_mm = NULL;
+ }
+
+ /*
* We're polling. If we're within the defined idle
* period, then let us spin without work before going
* to sleep. The exception is if we got EBUSY doing
* more IO, we should wait for the application to
* reap events and wake us up.
*/
- if (inflight ||
+ if (!list_empty(&ctx->poll_list) ||
(!time_after(jiffies, timeout) && ret != -EBUSY &&
!percpu_ref_is_dying(&ctx->refs))) {
cond_resched();
continue;
}
+ prepare_to_wait(&ctx->sqo_wait, &wait,
+ TASK_INTERRUPTIBLE);
+
/*
- * Drop cur_mm before scheduling, we can't hold it for
- * long periods (or over schedule()). Do this before
- * adding ourselves to the waitqueue, as the unuse/drop
- * may sleep.
+			 * While doing polled IO, before going to sleep we need to
+			 * check whether new reqs were added to poll_list: reqs may
+			 * have been punted to the io worker and will only be added
+			 * to poll_list later, hence check poll_list again.
*/
- if (cur_mm) {
- unuse_mm(cur_mm);
- mmput(cur_mm);
- cur_mm = NULL;
+ if ((ctx->flags & IORING_SETUP_IOPOLL) &&
+ !list_empty_careful(&ctx->poll_list)) {
+ finish_wait(&ctx->sqo_wait, &wait);
+ continue;
}
- prepare_to_wait(&ctx->sqo_wait, &wait,
- TASK_INTERRUPTIBLE);
-
/* Tell userspace we may need a wakeup call */
ctx->rings->sq_flags |= IORING_SQ_NEED_WAKEUP;
/* make sure to read SQ tail after writing flags */
@@ -5119,8 +5197,7 @@ static int io_sq_thread(void *data)
mutex_lock(&ctx->uring_lock);
ret = io_submit_sqes(ctx, to_submit, NULL, -1, &cur_mm, true);
mutex_unlock(&ctx->uring_lock);
- if (ret > 0)
- inflight += ret;
+ timeout = jiffies + ctx->sq_thread_idle;
}
set_fs(old_fs);
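The poll_list re-check before sleeping is an instance of the standard prepare_to_wait() idiom: once the task is on the waitqueue, a wakeup that races with the final condition check is not lost. A condensed sketch of the idiom (condition() is a hypothetical placeholder for "new work arrived"):

for (;;) {
	prepare_to_wait(&wq, &wait, TASK_INTERRUPTIBLE);
	if (condition())	/* re-check after queueing ourselves */
		break;
	schedule();		/* a wakeup after prepare_to_wait() wakes us here */
}
finish_wait(&wq, &wait);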
@@ -5254,6 +5331,26 @@ static void io_file_ref_kill(struct percpu_ref *ref)
complete(&data->done);
}
+static void __io_file_ref_exit_and_free(struct rcu_head *rcu)
+{
+ struct fixed_file_data *data = container_of(rcu, struct fixed_file_data,
+ rcu);
+ percpu_ref_exit(&data->refs);
+ kfree(data);
+}
+
+static void io_file_ref_exit_and_free(struct rcu_head *rcu)
+{
+ /*
+ * We need to order our exit+free call against the potentially
+ * existing call_rcu() for switching to atomic. One way to do that
+ * is to have this rcu callback queue the final put and free, as we
+ * could otherwise have a pre-existing atomic switch complete _after_
+ * the free callback we queued.
+ */
+ call_rcu(rcu, __io_file_ref_exit_and_free);
+}
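A minimal sketch of the two-stage call_rcu() trick used above, with a hypothetical my_obj structure: queueing the real free from inside the first callback pushes it a full grace period behind any callback that was already pending, so a pre-existing atomic-switch callback cannot run after the free.

static void my_obj_free(struct rcu_head *rcu)
{
	kfree(container_of(rcu, struct my_obj, rcu));
}

static void my_obj_free_deferred(struct rcu_head *rcu)
{
	/* re-queue: runs one grace period after anything already queued */
	call_rcu(rcu, my_obj_free);
}

/* callers start teardown with: call_rcu(&obj->rcu, my_obj_free_deferred); */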
+
static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
{
struct fixed_file_data *data = ctx->file_data;
@@ -5266,14 +5363,13 @@ static int io_sqe_files_unregister(struct io_ring_ctx *ctx)
flush_work(&data->ref_work);
wait_for_completion(&data->done);
io_ring_file_ref_flush(data);
- percpu_ref_exit(&data->refs);
__io_sqe_files_unregister(ctx);
nr_tables = DIV_ROUND_UP(ctx->nr_user_files, IORING_MAX_FILES_TABLE);
for (i = 0; i < nr_tables; i++)
kfree(data->table[i].files);
kfree(data->table);
- kfree(data);
+ call_rcu(&data->rcu, io_file_ref_exit_and_free);
ctx->file_data = NULL;
ctx->nr_user_files = 0;
return 0;
@@ -5525,7 +5621,6 @@ static void io_ring_file_ref_switch(struct work_struct *work)
data = container_of(work, struct fixed_file_data, ref_work);
io_ring_file_ref_flush(data);
- percpu_ref_get(&data->refs);
percpu_ref_switch_to_percpu(&data->refs);
}
@@ -5701,8 +5796,13 @@ static void io_atomic_switch(struct percpu_ref *ref)
{
struct fixed_file_data *data;
+ /*
+ * Juggle reference to ensure we hit zero, if needed, so we can
+ * switch back to percpu mode
+ */
data = container_of(ref, struct fixed_file_data, refs);
- clear_bit(FFD_F_ATOMIC, &data->state);
+ percpu_ref_put(&data->refs);
+ percpu_ref_get(&data->refs);
}
static bool io_queue_file_removal(struct fixed_file_data *data,
@@ -5725,11 +5825,7 @@ static bool io_queue_file_removal(struct fixed_file_data *data,
llist_add(&pfile->llist, &data->put_llist);
if (pfile == &pfile_stack) {
- if (!test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
- percpu_ref_put(&data->refs);
- percpu_ref_switch_to_atomic(&data->refs,
- io_atomic_switch);
- }
+ percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
wait_for_completion(&done);
flush_work(&data->ref_work);
return false;
@@ -5803,10 +5899,8 @@ static int __io_sqe_files_update(struct io_ring_ctx *ctx,
up->offset++;
}
- if (ref_switch && !test_and_set_bit(FFD_F_ATOMIC, &data->state)) {
- percpu_ref_put(&data->refs);
+ if (ref_switch)
percpu_ref_switch_to_atomic(&data->refs, io_atomic_switch);
- }
return done ? done : err;
}
@@ -6264,6 +6358,7 @@ static void io_ring_ctx_free(struct io_ring_ctx *ctx)
io_sqe_buffer_unregister(ctx);
io_sqe_files_unregister(ctx);
io_eventfd_unregister(ctx);
+ idr_destroy(&ctx->personality_idr);
#if defined(CONFIG_UNIX)
if (ctx->ring_sock) {
@@ -6301,7 +6396,7 @@ static __poll_t io_uring_poll(struct file *file, poll_table *wait)
if (READ_ONCE(ctx->rings->sq.tail) - ctx->cached_sq_head !=
ctx->rings->sq_ring_entries)
mask |= EPOLLOUT | EPOLLWRNORM;
- if (READ_ONCE(ctx->rings->cq.head) != ctx->cached_cq_tail)
+ if (io_cqring_events(ctx, false))
mask |= EPOLLIN | EPOLLRDNORM;
return mask;
@@ -6393,6 +6488,29 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
if (!cancel_req)
break;
+ if (cancel_req->flags & REQ_F_OVERFLOW) {
+ spin_lock_irq(&ctx->completion_lock);
+ list_del(&cancel_req->list);
+ cancel_req->flags &= ~REQ_F_OVERFLOW;
+ if (list_empty(&ctx->cq_overflow_list)) {
+ clear_bit(0, &ctx->sq_check_overflow);
+ clear_bit(0, &ctx->cq_check_overflow);
+ }
+ spin_unlock_irq(&ctx->completion_lock);
+
+ WRITE_ONCE(ctx->rings->cq_overflow,
+ atomic_inc_return(&ctx->cached_cq_overflow));
+
+ /*
+ * Put inflight ref and overflow ref. If that's
+ * all we had, then we're done with this request.
+ */
+ if (refcount_sub_and_test(2, &cancel_req->refs)) {
+ io_put_req(cancel_req);
+ continue;
+ }
+ }
+
io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
io_put_req(cancel_req);
schedule();
@@ -6405,6 +6523,13 @@ static int io_uring_flush(struct file *file, void *data)
struct io_ring_ctx *ctx = file->private_data;
io_uring_cancel_files(ctx, data);
+
+ /*
+ * If the task is going away, cancel work it may have pending
+ */
+ if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+ io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
+
return 0;
}
@@ -6547,6 +6672,7 @@ out_fput:
return submitted ? submitted : ret;
}
+#ifdef CONFIG_PROC_FS
static int io_uring_show_cred(int id, void *p, void *data)
{
const struct cred *cred = p;
@@ -6620,6 +6746,7 @@ static void io_uring_show_fdinfo(struct seq_file *m, struct file *f)
percpu_ref_put(&ctx->refs);
}
}
+#endif
static const struct file_operations io_uring_fops = {
.release = io_uring_release,
@@ -6631,7 +6758,9 @@ static const struct file_operations io_uring_fops = {
#endif
.poll = io_uring_poll,
.fasync = io_uring_fasync,
+#ifdef CONFIG_PROC_FS
.show_fdinfo = io_uring_show_fdinfo,
+#endif
};
static int io_allocate_scq_urings(struct io_ring_ctx *ctx,
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 2494095e0340..27373f5792a4 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -976,29 +976,33 @@ restart_loop:
* it. */
/*
- * A buffer which has been freed while still being journaled by
- * a previous transaction.
- */
- if (buffer_freed(bh)) {
+	 * A buffer which has been freed while still being journaled by
+	 * a previous transaction is refiled to BJ_Forget of the running
+	 * transaction. If the just committed transaction contains an
+	 * "add to orphan" operation, we can completely invalidate the
+	 * buffer now. We are rather thorough in that since the buffer
+	 * may still be accessible when blocksize < pagesize and it is
+	 * attached to the last partial page.
+ */
+ if (buffer_freed(bh) && !jh->b_next_transaction) {
+ struct address_space *mapping;
+
+ clear_buffer_freed(bh);
+ clear_buffer_jbddirty(bh);
+
/*
- * If the running transaction is the one containing
- * "add to orphan" operation (b_next_transaction !=
- * NULL), we have to wait for that transaction to
- * commit before we can really get rid of the buffer.
- * So just clear b_modified to not confuse transaction
- * credit accounting and refile the buffer to
- * BJ_Forget of the running transaction. If the just
- * committed transaction contains "add to orphan"
- * operation, we can completely invalidate the buffer
- * now. We are rather through in that since the
- * buffer may be still accessible when blocksize <
- * pagesize and it is attached to the last partial
- * page.
+ * Block device buffers need to stay mapped all the
+ * time, so it is enough to clear buffer_jbddirty and
+ * buffer_freed bits. For the file mapping buffers (i.e.
+ * journalled data) we need to unmap buffer and clear
+ * more bits. We also need to be careful about the check
+ * because the data page mapping can get cleared under
+			 * our hands; in that case we need not clear more bits
+ * because the page and buffers will be freed and can
+ * never be reused once we are done with them.
*/
- jh->b_modified = 0;
- if (!jh->b_next_transaction) {
- clear_buffer_freed(bh);
- clear_buffer_jbddirty(bh);
+ mapping = READ_ONCE(bh->b_page->mapping);
+ if (mapping && !sb_is_blkdev_sb(mapping->host->i_sb)) {
clear_buffer_mapped(bh);
clear_buffer_new(bh);
clear_buffer_req(bh);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index e77a5a0b4e46..3dccc23cf010 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -936,8 +936,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
char *frozen_buffer = NULL;
unsigned long start_lock, time_lock;
- if (is_handle_aborted(handle))
- return -EROFS;
journal = transaction->t_journal;
jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy);
@@ -1152,8 +1150,8 @@ static bool jbd2_write_access_granted(handle_t *handle, struct buffer_head *bh,
/* For undo access buffer must have data copied */
if (undo && !jh->b_committed_data)
goto out;
- if (jh->b_transaction != handle->h_transaction &&
- jh->b_next_transaction != handle->h_transaction)
+ if (READ_ONCE(jh->b_transaction) != handle->h_transaction &&
+ READ_ONCE(jh->b_next_transaction) != handle->h_transaction)
goto out;
/*
* There are two reasons for the barrier here:
@@ -1189,6 +1187,9 @@ int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
int rc;
+ if (is_handle_aborted(handle))
+ return -EROFS;
+
if (jbd2_write_access_granted(handle, bh, false))
return 0;
@@ -1326,6 +1327,9 @@ int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh)
struct journal_head *jh;
char *committed_data = NULL;
+ if (is_handle_aborted(handle))
+ return -EROFS;
+
if (jbd2_write_access_granted(handle, bh, true))
return 0;
@@ -2329,14 +2333,16 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh,
return -EBUSY;
}
/*
- * OK, buffer won't be reachable after truncate. We just set
- * j_next_transaction to the running transaction (if there is
- * one) and mark buffer as freed so that commit code knows it
- * should clear dirty bits when it is done with the buffer.
+ * OK, buffer won't be reachable after truncate. We just clear
+ * b_modified to not confuse transaction credit accounting, and
+	 * set b_next_transaction to the running transaction (if there
+ * is one) and mark buffer as freed so that commit code knows
+ * it should clear dirty bits when it is done with the buffer.
*/
set_buffer_freed(bh);
if (journal->j_running_transaction && buffer_jbddirty(bh))
jh->b_next_transaction = journal->j_running_transaction;
+ jh->b_modified = 0;
spin_unlock(&journal->j_list_lock);
spin_unlock(&jh->b_state_lock);
write_unlock(&journal->j_state_lock);
@@ -2563,8 +2569,8 @@ bool __jbd2_journal_refile_buffer(struct journal_head *jh)
* our jh reference and thus __jbd2_journal_file_buffer() must not
* take a new one.
*/
- jh->b_transaction = jh->b_next_transaction;
- jh->b_next_transaction = NULL;
+ WRITE_ONCE(jh->b_transaction, jh->b_next_transaction);
+ WRITE_ONCE(jh->b_next_transaction, NULL);
if (buffer_freed(bh))
jlist = BJ_Forget;
else if (jh->b_modified)
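The READ_ONCE()/WRITE_ONCE() additions above pair a locked writer with lockless readers; the markings stop the compiler from tearing, fusing, or re-fetching the accesses, so a reader sees either the old or the new value. The shape of the pairing (txn and new_transaction are illustrative locals):

/* writer, under the lock that serializes updates */
WRITE_ONCE(jh->b_transaction, new_transaction);

/* lockless reader, e.g. jbd2_write_access_granted() */
txn = READ_ONCE(jh->b_transaction);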
diff --git a/fs/locks.c b/fs/locks.c
index 44b6da032842..426b55d333d5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -753,20 +753,6 @@ int locks_delete_block(struct file_lock *waiter)
{
int status = -ENOENT;
- /*
- * If fl_blocker is NULL, it won't be set again as this thread
- * "owns" the lock and is the only one that might try to claim
- * the lock. So it is safe to test fl_blocker locklessly.
- * Also if fl_blocker is NULL, this waiter is not listed on
- * fl_blocked_requests for some lock, so no other request can
- * be added to the list of fl_blocked_requests for this
- * request. So if fl_blocker is NULL, it is safe to
- * locklessly check if fl_blocked_requests is empty. If both
- * of these checks succeed, there is no need to take the lock.
- */
- if (waiter->fl_blocker == NULL &&
- list_empty(&waiter->fl_blocked_requests))
- return status;
spin_lock(&blocked_lock_lock);
if (waiter->fl_blocker)
status = 0;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 4a841071d8a7..1865322de142 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -42,13 +42,27 @@ static void nfs_mark_delegation_revoked(struct nfs_delegation *delegation)
if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
delegation->stateid.type = NFS4_INVALID_STATEID_TYPE;
atomic_long_dec(&nfs_active_delegations);
+ if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
+ nfs_clear_verifier_delegated(delegation->inode);
}
}
+static struct nfs_delegation *nfs_get_delegation(struct nfs_delegation *delegation)
+{
+ refcount_inc(&delegation->refcount);
+ return delegation;
+}
+
+static void nfs_put_delegation(struct nfs_delegation *delegation)
+{
+ if (refcount_dec_and_test(&delegation->refcount))
+ __nfs_free_delegation(delegation);
+}
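A hedged sketch of the lifetime rule the new refcount enforces: pin the delegation while it is still protected by the lock, and drop the reference once the out-of-lock work is done (lookup_locked() and do_work() are hypothetical placeholders):

struct nfs_delegation *d;

spin_lock(&lock);
d = lookup_locked();
if (d)
	d = nfs_get_delegation(d);	/* pin before dropping the lock */
spin_unlock(&lock);

if (d) {
	do_work(d);
	nfs_put_delegation(d);		/* matches nfs_get_delegation() above */
}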
+
static void nfs_free_delegation(struct nfs_delegation *delegation)
{
nfs_mark_delegation_revoked(delegation);
- __nfs_free_delegation(delegation);
+ nfs_put_delegation(delegation);
}
/**
@@ -241,13 +255,18 @@ void nfs_inode_reclaim_delegation(struct inode *inode, const struct cred *cred,
static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *delegation, int issync)
{
+ const struct cred *cred;
int res = 0;
- if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
- res = nfs4_proc_delegreturn(inode,
- delegation->cred,
+ if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {
+ spin_lock(&delegation->lock);
+ cred = get_cred(delegation->cred);
+ spin_unlock(&delegation->lock);
+ res = nfs4_proc_delegreturn(inode, cred,
&delegation->stateid,
issync);
+ put_cred(cred);
+ }
return res;
}
@@ -273,9 +292,13 @@ nfs_start_delegation_return_locked(struct nfs_inode *nfsi)
if (delegation == NULL)
goto out;
spin_lock(&delegation->lock);
- if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags))
- ret = delegation;
+ if (!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
+ }
spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(&nfsi->vfs_inode);
out:
return ret;
}
@@ -393,6 +416,7 @@ int nfs_inode_set_delegation(struct inode *inode, const struct cred *cred,
if (delegation == NULL)
return -ENOMEM;
nfs4_stateid_copy(&delegation->stateid, stateid);
+ refcount_set(&delegation->refcount, 1);
delegation->type = type;
delegation->pagemod_limit = pagemod_limit;
delegation->change_attr = inode_peek_iversion_raw(inode);
@@ -492,6 +516,8 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
err = nfs_do_return_delegation(inode, delegation, issync);
out:
+ /* Refcount matched in nfs_start_delegation_return_locked() */
+ nfs_put_delegation(delegation);
return err;
}
@@ -686,9 +712,12 @@ void nfs4_inode_return_delegation_on_close(struct inode *inode)
list_empty(&NFS_I(inode)->open_files) &&
!test_and_set_bit(NFS_DELEGATION_RETURNING, &delegation->flags)) {
clear_bit(NFS_DELEGATION_RETURN_IF_CLOSED, &delegation->flags);
- ret = delegation;
+ /* Refcount matched in nfs_end_delegation_return() */
+ ret = nfs_get_delegation(delegation);
}
spin_unlock(&delegation->lock);
+ if (ret)
+ nfs_clear_verifier_delegated(inode);
}
out:
rcu_read_unlock();
@@ -1088,10 +1117,11 @@ restart:
delegation = nfs_start_delegation_return_locked(NFS_I(inode));
rcu_read_unlock();
if (delegation != NULL) {
- delegation = nfs_detach_delegation(NFS_I(inode),
- delegation, server);
- if (delegation != NULL)
+ if (nfs_detach_delegation(NFS_I(inode), delegation,
+ server) != NULL)
nfs_free_delegation(delegation);
+ /* Match nfs_start_delegation_return_locked */
+ nfs_put_delegation(delegation);
}
iput(inode);
nfs_sb_deactive(server->super);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 31b84604d383..9b00a0b7f832 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -22,6 +22,7 @@ struct nfs_delegation {
unsigned long pagemod_limit;
__u64 change_attr;
unsigned long flags;
+ refcount_t refcount;
spinlock_t lock;
struct rcu_head rcu;
};
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 1320288ff9ec..193d6fb363b7 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -155,6 +155,7 @@ typedef struct {
loff_t current_index;
decode_dirent_t decode;
+ unsigned long dir_verifier;
unsigned long timestamp;
unsigned long gencount;
unsigned int cache_entry_index;
@@ -353,6 +354,7 @@ int nfs_readdir_xdr_filler(struct page **pages, nfs_readdir_descriptor_t *desc,
again:
timestamp = jiffies;
gencount = nfs_inc_attr_generation_counter();
+ desc->dir_verifier = nfs_save_change_attribute(inode);
error = NFS_PROTO(inode)->readdir(file_dentry(file), cred, entry->cookie, pages,
NFS_SERVER(inode)->dtsize, desc->plus);
if (error < 0) {
@@ -455,13 +457,13 @@ void nfs_force_use_readdirplus(struct inode *dir)
}
static
-void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
+void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry,
+ unsigned long dir_verifier)
{
struct qstr filename = QSTR_INIT(entry->name, entry->len);
DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
struct dentry *dentry;
struct dentry *alias;
- struct inode *dir = d_inode(parent);
struct inode *inode;
int status;
@@ -500,7 +502,7 @@ again:
if (nfs_same_file(dentry, entry)) {
if (!entry->fh->size)
goto out;
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
status = nfs_refresh_inode(d_inode(dentry), entry->fattr);
if (!status)
nfs_setsecurity(d_inode(dentry), entry->fattr, entry->label);
@@ -526,7 +528,7 @@ again:
dput(dentry);
dentry = alias;
}
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
out:
dput(dentry);
}
@@ -564,7 +566,8 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en
count++;
if (desc->plus)
- nfs_prime_dcache(file_dentry(desc->file), entry);
+ nfs_prime_dcache(file_dentry(desc->file), entry,
+ desc->dir_verifier);
status = nfs_readdir_add_to_array(entry, page);
if (status != 0)
@@ -983,14 +986,113 @@ static int nfs_fsync_dir(struct file *filp, loff_t start, loff_t end,
* full lookup on all child dentries of 'dir' whenever a change occurs
* on the server that might have invalidated our dcache.
*
+ * Note that we reserve bit '0' as a tag to let us know when a dentry
+ * was revalidated while holding a delegation on its inode.
+ *
* The caller should be holding dir->i_lock
*/
void nfs_force_lookup_revalidate(struct inode *dir)
{
- NFS_I(dir)->cache_change_attribute++;
+ NFS_I(dir)->cache_change_attribute += 2;
}
EXPORT_SYMBOL_GPL(nfs_force_lookup_revalidate);
+/**
+ * nfs_verify_change_attribute - Detects NFS remote directory changes
+ * @dir: pointer to parent directory inode
+ * @verf: previously saved change attribute
+ *
+ * Return "false" if the verifiers doesn't match the change attribute.
+ * This would usually indicate that the directory contents have changed on
+ * the server, and that any dentries need revalidating.
+ */
+static bool nfs_verify_change_attribute(struct inode *dir, unsigned long verf)
+{
+ return (verf & ~1UL) == nfs_save_change_attribute(dir);
+}
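Worked example of the masking above, with illustrative values: nfs_force_lookup_revalidate() bumps the attribute by 2, so bit 0 never carries attribute state and stays free for the delegation tag set below.

/*
 * saved change attribute:        0x10
 * dentry->d_time after tagging:  0x10 | 1UL = 0x11
 * comparison: (0x11 & ~1UL) == 0x10  ->  verifier still matches
 */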
+
+static void nfs_set_verifier_delegated(unsigned long *verf)
+{
+ *verf |= 1UL;
+}
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+static void nfs_unset_verifier_delegated(unsigned long *verf)
+{
+ *verf &= ~1UL;
+}
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+
+static bool nfs_test_verifier_delegated(unsigned long verf)
+{
+ return verf & 1;
+}
+
+static bool nfs_verifier_is_delegated(struct dentry *dentry)
+{
+ return nfs_test_verifier_delegated(dentry->d_time);
+}
+
+static void nfs_set_verifier_locked(struct dentry *dentry, unsigned long verf)
+{
+ struct inode *inode = d_inode(dentry);
+
+ if (!nfs_verifier_is_delegated(dentry) &&
+ !nfs_verify_change_attribute(d_inode(dentry->d_parent), verf))
+ goto out;
+ if (inode && NFS_PROTO(inode)->have_delegation(inode, FMODE_READ))
+ nfs_set_verifier_delegated(&verf);
+out:
+ dentry->d_time = verf;
+}
+
+/**
+ * nfs_set_verifier - save a parent directory verifier in the dentry
+ * @dentry: pointer to dentry
+ * @verf: verifier to save
+ *
+ * Saves the parent directory verifier in @dentry. If the inode has
+ * a delegation, we also tag the dentry as having been revalidated
+ * while holding a delegation so that we know we don't have to
+ * look it up again after a directory change.
+ */
+void nfs_set_verifier(struct dentry *dentry, unsigned long verf)
+{
+ spin_lock(&dentry->d_lock);
+ nfs_set_verifier_locked(dentry, verf);
+ spin_unlock(&dentry->d_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_set_verifier);
+
+#if IS_ENABLED(CONFIG_NFS_V4)
+/**
+ * nfs_clear_verifier_delegated - clear the dir verifier delegation tag
+ * @inode: pointer to inode
+ *
+ * Iterates through the dentries in the inode alias list and clears
+ * the tag used to indicate that the dentry has been revalidated
+ * while holding a delegation.
+ * This function is intended for use when the delegation is being
+ * returned or revoked.
+ */
+void nfs_clear_verifier_delegated(struct inode *inode)
+{
+ struct dentry *alias;
+
+ if (!inode)
+ return;
+ spin_lock(&inode->i_lock);
+ hlist_for_each_entry(alias, &inode->i_dentry, d_u.d_alias) {
+ spin_lock(&alias->d_lock);
+ nfs_unset_verifier_delegated(&alias->d_time);
+ spin_unlock(&alias->d_lock);
+ }
+ spin_unlock(&inode->i_lock);
+}
+EXPORT_SYMBOL_GPL(nfs_clear_verifier_delegated);
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
+
/*
* A check for whether or not the parent directory has changed.
* In the case it has, we assume that the dentries are untrustworthy
@@ -1159,6 +1261,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
struct nfs_fh *fhandle;
struct nfs_fattr *fattr;
struct nfs4_label *label;
+ unsigned long dir_verifier;
int ret;
ret = -ENOMEM;
@@ -1168,6 +1271,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
if (fhandle == NULL || fattr == NULL || IS_ERR(label))
goto out;
+ dir_verifier = nfs_save_change_attribute(dir);
ret = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
if (ret < 0) {
switch (ret) {
@@ -1188,7 +1292,7 @@ nfs_lookup_revalidate_dentry(struct inode *dir, struct dentry *dentry,
goto out;
nfs_setsecurity(inode, fattr, label);
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
/* set a readdirplus hint that we had a cache miss */
nfs_force_use_readdirplus(dir);
@@ -1230,7 +1334,7 @@ nfs_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
goto out_bad;
}
- if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+ if (nfs_verifier_is_delegated(dentry))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* Force a full look up iff the parent directory has changed */
@@ -1415,6 +1519,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
struct nfs_fh *fhandle = NULL;
struct nfs_fattr *fattr = NULL;
struct nfs4_label *label = NULL;
+ unsigned long dir_verifier;
int error;
dfprintk(VFS, "NFS: lookup(%pd2)\n", dentry);
@@ -1440,6 +1545,7 @@ struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, unsigned in
if (IS_ERR(label))
goto out;
+ dir_verifier = nfs_save_change_attribute(dir);
trace_nfs_lookup_enter(dir, dentry, flags);
error = NFS_PROTO(dir)->lookup(dir, dentry, fhandle, fattr, label);
if (error == -ENOENT)
@@ -1463,7 +1569,7 @@ no_entry:
goto out_label;
dentry = res;
}
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
+ nfs_set_verifier(dentry, dir_verifier);
out_label:
trace_nfs_lookup_exit(dir, dentry, flags, error);
nfs4_label_free(label);
@@ -1668,7 +1774,7 @@ nfs4_do_lookup_revalidate(struct inode *dir, struct dentry *dentry,
if (inode == NULL)
goto full_reval;
- if (NFS_PROTO(dir)->have_delegation(inode, FMODE_READ))
+ if (nfs_verifier_is_delegated(dentry))
return nfs_lookup_revalidate_delegated(dir, dentry, inode);
/* NFS only supports OPEN on regular files */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 1309e6f47f3d..11bf15800ac9 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -2114,6 +2114,7 @@ static void init_once(void *foo)
init_rwsem(&nfsi->rmdir_sem);
mutex_init(&nfsi->commit_mutex);
nfs4_init_once(nfsi);
+ nfsi->cache_change_attribute = 0;
}
static int __init nfs_init_inodecache(void)
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index be4eb720d5b6..1297919e0fce 100644
--- a/fs/nfs/nfs4file.c
+++ b/fs/nfs/nfs4file.c
@@ -87,7 +87,6 @@ nfs4_file_open(struct inode *inode, struct file *filp)
if (inode != d_inode(dentry))
goto out_drop;
- nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
nfs_file_set_open_context(filp, ctx);
nfs_fscache_open_file(inode, filp);
err = 0;
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 95d07a3dc5d1..69b7ab7a5815 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2974,10 +2974,13 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
struct dentry *dentry;
struct nfs4_state *state;
fmode_t acc_mode = _nfs4_ctx_to_accessmode(ctx);
+ struct inode *dir = d_inode(opendata->dir);
+ unsigned long dir_verifier;
unsigned int seq;
int ret;
seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
+ dir_verifier = nfs_save_change_attribute(dir);
ret = _nfs4_proc_open(opendata, ctx);
if (ret != 0)
@@ -3005,8 +3008,19 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
dput(ctx->dentry);
ctx->dentry = dentry = alias;
}
- nfs_set_verifier(dentry,
- nfs_save_change_attribute(d_inode(opendata->dir)));
+ }
+
+ switch(opendata->o_arg.claim) {
+ default:
+ break;
+ case NFS4_OPEN_CLAIM_NULL:
+ case NFS4_OPEN_CLAIM_DELEGATE_CUR:
+ case NFS4_OPEN_CLAIM_DELEGATE_PREV:
+ if (!opendata->rpc_done)
+ break;
+ if (opendata->o_res.delegation_type != 0)
+ dir_verifier = nfs_save_change_attribute(dir);
+ nfs_set_verifier(dentry, dir_verifier);
}
/* Parse layoutget results before we check for access */
@@ -5322,7 +5336,7 @@ static void nfs4_proc_write_setup(struct nfs_pgio_header *hdr,
hdr->timestamp = jiffies;
msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_WRITE];
- nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 1, 0);
+ nfs4_init_sequence(&hdr->args.seq_args, &hdr->res.seq_res, 0, 0);
nfs4_state_protect_write(server->nfs_client, clnt, msg, hdr);
}
diff --git a/fs/pipe.c b/fs/pipe.c
index 5a34d6c22d4c..2144507447c5 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -722,9 +722,10 @@ pipe_release(struct inode *inode, struct file *file)
if (file->f_mode & FMODE_WRITE)
pipe->writers--;
- if (pipe->readers || pipe->writers) {
- wake_up_interruptible_sync_poll(&pipe->rd_wait, EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLHUP);
- wake_up_interruptible_sync_poll(&pipe->wr_wait, EPOLLOUT | EPOLLWRNORM | EPOLLERR | EPOLLHUP);
+ /* Was that the last reader or writer, but not the other side? */
+ if (!pipe->readers != !pipe->writers) {
+ wake_up_interruptible_all(&pipe->rd_wait);
+ wake_up_interruptible_all(&pipe->wr_wait);
kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
}
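The rewritten condition is a logical XOR: ! normalizes each counter to 0 or 1, so the expression is true only when exactly one side has gone away. A small illustration:

static int one_side_just_closed(int readers, int writers)
{
	return !readers != !writers;	/* logical XOR of "side is empty" */
}

/* one_side_just_closed(0, 2) -> 1: last reader left, wake the writers */
/* one_side_just_closed(1, 1) -> 0: both sides still open              */
/* one_side_just_closed(0, 0) -> 0: nobody left to wake                */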
@@ -1026,8 +1027,8 @@ static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt)
static void wake_up_partner(struct pipe_inode_info *pipe)
{
- wake_up_interruptible(&pipe->rd_wait);
- wake_up_interruptible(&pipe->wr_wait);
+ wake_up_interruptible_all(&pipe->rd_wait);
+ wake_up_interruptible_all(&pipe->wr_wait);
}
static int fifo_open(struct inode *inode, struct file *filp)
@@ -1144,7 +1145,7 @@ err_rd:
err_wr:
if (!--pipe->writers)
- wake_up_interruptible(&pipe->rd_wait);
+ wake_up_interruptible_all(&pipe->rd_wait);
ret = -ERESTARTSYS;
goto err;
@@ -1271,8 +1272,9 @@ static long pipe_set_size(struct pipe_inode_info *pipe, unsigned long arg)
pipe->max_usage = nr_slots;
pipe->tail = tail;
pipe->head = head;
- wake_up_interruptible_all(&pipe->rd_wait);
- wake_up_interruptible_all(&pipe->wr_wait);
+
+ /* This might have made more room for writers */
+ wake_up_interruptible(&pipe->wr_wait);
return pipe->max_usage * PAGE_SIZE;
out_revert_acct:
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 3a688eb5c5ae..58e937be24ce 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -587,7 +587,7 @@ xfs_dax_writepages(
xfs_iflags_clear(ip, XFS_ITRUNCATED);
return dax_writeback_mapping_range(mapping,
- xfs_inode_buftarg(ip)->bt_bdev, wbc);
+ xfs_inode_buftarg(ip)->bt_daxdev, wbc);
}
STATIC sector_t
diff --git a/fs/zonefs/Kconfig b/fs/zonefs/Kconfig
index fb87ad372e29..ef2697b78820 100644
--- a/fs/zonefs/Kconfig
+++ b/fs/zonefs/Kconfig
@@ -2,6 +2,7 @@ config ZONEFS_FS
tristate "zonefs filesystem support"
depends on BLOCK
depends on BLK_DEV_ZONED
+ select FS_IOMAP
help
zonefs is a simple file system which exposes zones of a zoned block
device (e.g. host-managed or host-aware SMR disk drives) as files.
diff --git a/fs/zonefs/super.c b/fs/zonefs/super.c
index 8bc6ef82d693..69aee3dfb660 100644
--- a/fs/zonefs/super.c
+++ b/fs/zonefs/super.c
@@ -601,13 +601,13 @@ static ssize_t zonefs_file_dio_write(struct kiocb *iocb, struct iov_iter *from)
ssize_t ret;
/*
- * For async direct IOs to sequential zone files, ignore IOCB_NOWAIT
+ * For async direct IOs to sequential zone files, refuse IOCB_NOWAIT
* as this can cause write reordering (e.g. the first aio gets EAGAIN
 * on the inode lock while the second goes through and is now unaligned).
*/
- if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb)
- && (iocb->ki_flags & IOCB_NOWAIT))
- iocb->ki_flags &= ~IOCB_NOWAIT;
+ if (zi->i_ztype == ZONEFS_ZTYPE_SEQ && !is_sync_kiocb(iocb) &&
+ (iocb->ki_flags & IOCB_NOWAIT))
+ return -EOPNOTSUPP;
if (iocb->ki_flags & IOCB_NOWAIT) {
if (!inode_trylock(inode))
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 00994b1b8681..8e8be989c2a6 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -752,6 +752,8 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(void))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index a2583c2bc054..4defed58ea33 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -532,11 +532,12 @@ typedef u64 acpi_integer;
strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
/*
- * Algorithm to obtain access bit width.
+ * Algorithm to obtain access bit or byte width.
* Can be used with access_width of struct acpi_generic_address and access_size of
* struct acpi_resource_generic_register.
*/
#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
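Worked example, assuming the usual GAS access-size encoding (1 = byte, 2 = word, 3 = dword, 4 = qword):

/*
 * ACPI_ACCESS_BIT_WIDTH(1) = 1 << 3 = 8 bits    ACPI_ACCESS_BYTE_WIDTH(1) = 1 << 0 = 1 byte
 * ACPI_ACCESS_BIT_WIDTH(3) = 1 << 5 = 32 bits   ACPI_ACCESS_BYTE_WIDTH(3) = 1 << 2 = 4 bytes
 */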
/*******************************************************************************
*
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index 9d4d5cc47969..0b34a12c4a1c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -129,6 +129,7 @@ struct dw_hdmi_plat_data {
unsigned long input_bus_format;
unsigned long input_bus_encoding;
bool use_drm_infoframe;
+ bool ycbcr_420_allowed;
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
diff --git a/include/drm/bridge/mhl.h b/include/drm/bridge/mhl.h
index 1cc77bf38324..d96626a0e3fa 100644
--- a/include/drm/bridge/mhl.h
+++ b/include/drm/bridge/mhl.h
@@ -327,13 +327,13 @@ struct mhl_burst_bits_per_pixel_fmt {
struct {
u8 stream_id;
u8 pixel_format;
- } __packed desc[0];
+ } __packed desc[];
} __packed;
struct mhl_burst_emsc_support {
struct mhl3_burst_header hdr;
u8 num_entries;
- __be16 burst_id[0];
+ __be16 burst_id[];
} __packed;
struct mhl_burst_audio_descr {
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 951dfb15c27b..7b6cb4774e7d 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -670,6 +670,9 @@ __drm_atomic_get_current_plane_state(struct drm_atomic_state *state,
}
int __must_check
+drm_atomic_add_encoder_bridges(struct drm_atomic_state *state,
+ struct drm_encoder *encoder);
+int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
int __must_check
@@ -992,4 +995,77 @@ drm_atomic_crtc_effectively_active(const struct drm_crtc_state *state)
return state->active || state->self_refresh_active;
}
+/**
+ * struct drm_bus_cfg - bus configuration
+ *
+ * This structure stores the configuration of a physical bus between two
+ * components in an output pipeline, usually between two bridges, an encoder
+ * and a bridge, or a bridge and a connector.
+ *
+ * The bus configuration is stored in &drm_bridge_state separately for the
+ * input and output buses, as seen from the point of view of each bridge. The
+ * bus configuration of a bridge output is usually identical to the
+ * configuration of the next bridge's input, but may differ if the signals are
+ * modified between the two bridges, for instance by an inverter on the board.
+ * The input and output configurations of a bridge may differ if the bridge
+ * modifies the signals internally, for instance by performing format
+ * conversion, or by modifying signal polarities.
+ */
+struct drm_bus_cfg {
+ /**
+	 * @format: format used on this bus (one of the MEDIA_BUS_FMT_* formats)
+ *
+ * This field should not be directly modified by drivers
+ * (drm_atomic_bridge_chain_select_bus_fmts() takes care of the bus
+ * format negotiation).
+ */
+ u32 format;
+
+ /**
+ * @flags: DRM_BUS_* flags used on this bus
+ */
+ u32 flags;
+};
+
+/**
+ * struct drm_bridge_state - Atomic bridge state object
+ */
+struct drm_bridge_state {
+ /**
+ * @base: inherit from &drm_private_state
+ */
+ struct drm_private_state base;
+
+ /**
+ * @bridge: the bridge this state refers to
+ */
+ struct drm_bridge *bridge;
+
+ /**
+ * @input_bus_cfg: input bus configuration
+ */
+ struct drm_bus_cfg input_bus_cfg;
+
+ /**
+	 * @output_bus_cfg: output bus configuration
+ */
+ struct drm_bus_cfg output_bus_cfg;
+};
+
+static inline struct drm_bridge_state *
+drm_priv_to_bridge_state(struct drm_private_state *priv)
+{
+ return container_of(priv, struct drm_bridge_state, base);
+}
+
+struct drm_bridge_state *
+drm_atomic_get_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_old_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+struct drm_bridge_state *
+drm_atomic_get_new_bridge_state(struct drm_atomic_state *state,
+ struct drm_bridge *bridge);
+
#endif /* DRM_ATOMIC_H_ */
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 9db3cac48f4f..b268180c97eb 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -224,4 +224,12 @@ drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state,
return old_plane_state->crtc && !new_plane_state->crtc;
}
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
#endif /* DRM_ATOMIC_HELPER_H_ */
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index 8171dea4cc22..3f8f1d627f7c 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -26,6 +26,8 @@
#include <linux/types.h>
+struct drm_bridge;
+struct drm_bridge_state;
struct drm_crtc;
struct drm_crtc_state;
struct drm_plane;
@@ -80,3 +82,14 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
void __drm_atomic_helper_private_obj_duplicate_state(struct drm_private_obj *obj,
struct drm_private_state *state);
+
+void __drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_duplicate_state(struct drm_bridge *bridge);
+void drm_atomic_helper_bridge_destroy_state(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+void __drm_atomic_helper_bridge_reset(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+struct drm_bridge_state *
+drm_atomic_helper_bridge_reset(struct drm_bridge *bridge);
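A hedged sketch of how a bridge driver that does not subclass drm_bridge_state might wire these helpers into its ops table, per the hook documentation that follows (my_bridge_funcs is a hypothetical driver table):

static const struct drm_bridge_funcs my_bridge_funcs = {
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	/* ... atomic enable/disable hooks ... */
};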
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 694e153a7531..ea2aa5ebae34 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -23,15 +23,32 @@
#ifndef __DRM_BRIDGE_H__
#define __DRM_BRIDGE_H__
-#include <linux/list.h>
#include <linux/ctype.h>
+#include <linux/list.h>
+#include <linux/mutex.h>
+
+#include <drm/drm_atomic.h>
#include <drm/drm_encoder.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
struct drm_bridge;
struct drm_bridge_timings;
+struct drm_connector;
struct drm_panel;
+struct edid;
+struct i2c_adapter;
+
+/**
+ * enum drm_bridge_attach_flags - Flags for &drm_bridge_funcs.attach
+ */
+enum drm_bridge_attach_flags {
+ /**
+ * @DRM_BRIDGE_ATTACH_NO_CONNECTOR: When this flag is set the bridge
+ * shall not create a drm_connector.
+ */
+ DRM_BRIDGE_ATTACH_NO_CONNECTOR = BIT(0),
+};
/**
* struct drm_bridge_funcs - drm_bridge control functions
@@ -41,7 +58,8 @@ struct drm_bridge_funcs {
* @attach:
*
* This callback is invoked whenever our bridge is being attached to a
- * &drm_encoder.
+ * &drm_encoder. The flags argument tunes the behaviour of the attach
+ * operation (see DRM_BRIDGE_ATTACH_*).
*
* The @attach callback is optional.
*
@@ -49,7 +67,8 @@ struct drm_bridge_funcs {
*
* Zero on success, error code on failure.
*/
- int (*attach)(struct drm_bridge *bridge);
+ int (*attach)(struct drm_bridge *bridge,
+ enum drm_bridge_attach_flags flags);
/**
* @detach:
@@ -109,7 +128,9 @@ struct drm_bridge_funcs {
* this function passes all other callbacks must succeed for this
* configuration.
*
- * The @mode_fixup callback is optional.
+ * The mode_fixup callback is optional. &drm_bridge_funcs.mode_fixup()
+ * is not called when &drm_bridge_funcs.atomic_check() is implemented,
+ * so only one of them should be provided.
*
* NOTE:
*
@@ -263,7 +284,7 @@ struct drm_bridge_funcs {
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_enable:
@@ -288,7 +309,7 @@ struct drm_bridge_funcs {
* The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_disable:
*
@@ -311,7 +332,7 @@ struct drm_bridge_funcs {
* The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
/**
* @atomic_post_disable:
@@ -337,7 +358,275 @@ struct drm_bridge_funcs {
* The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *old_state);
+ struct drm_bridge_state *old_bridge_state);
+
+ /**
+ * @atomic_duplicate_state:
+ *
+ * Duplicate the current bridge state object (which is guaranteed to be
+ * non-NULL).
+ *
+ * The atomic_duplicate_state hook is mandatory if the bridge
+ * implements any of the atomic hooks, and should be left unassigned
+ * otherwise. For bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_duplicate_state() helper function shall be
+ * used to implement this hook.
+ *
+ * RETURNS:
+ * A valid drm_bridge_state object or NULL if the allocation fails.
+ */
+ struct drm_bridge_state *(*atomic_duplicate_state)(struct drm_bridge *bridge);
+
+ /**
+ * @atomic_destroy_state:
+ *
+ * Destroy a bridge state object previously allocated by
+ * &drm_bridge_funcs.atomic_duplicate_state().
+ *
+ * The atomic_destroy_state hook is mandatory if the bridge implements
+ * any of the atomic hooks, and should be left unassigned otherwise.
+ * For bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_destroy_state() helper function shall be
+ * used to implement this hook.
+ */
+ void (*atomic_destroy_state)(struct drm_bridge *bridge,
+ struct drm_bridge_state *state);
+
+ /**
+ * @atomic_get_output_bus_fmts:
+ *
+ * Return the supported bus formats on the output end of a bridge.
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+ * returned. num_output_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works).
+ *
+ * This method is only called on the last element of the bridge chain
+ * as part of the bus format negotiation process that happens in
+ * &drm_atomic_bridge_chain_select_bus_fmts().
+ * This method is optional. When not implemented, the core will
+ * fall back to &drm_connector.display_info.bus_formats[0] if
+ * &drm_connector.display_info.num_bus_formats > 0,
+ * or to MEDIA_BUS_FMT_FIXED otherwise.
+ */
+ u32 *(*atomic_get_output_bus_fmts)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ unsigned int *num_output_fmts);
+
+ /**
+ * @atomic_get_input_bus_fmts:
+ *
+ * Return the supported bus formats on the input end of a bridge for
+ * a specific output bus format.
+ *
+ * The returned array must be allocated with kmalloc() and will be
+ * freed by the caller. If the allocation fails, NULL should be
+	 * returned. num_input_fmts must be set to the returned array size.
+ * Formats listed in the returned array should be listed in decreasing
+ * preference order (the core will try all formats until it finds one
+ * that works). When the format is not supported NULL should be
+	 * returned and num_input_fmts should be set to 0.
+ *
+ * This method is called on all elements of the bridge chain as part of
+ * the bus format negotiation process that happens in
+ * drm_atomic_bridge_chain_select_bus_fmts().
+ * This method is optional. When not implemented, the core will bypass
+	 * bus format negotiation on this element of the bridge chain without
+ * failing, and the previous element in the chain will be passed
+ * MEDIA_BUS_FMT_FIXED as its output bus format.
+ *
+ * Bridge drivers that need to support being linked to bridges that are
+ * not supporting bus format negotiation should handle the
+ * output_fmt == MEDIA_BUS_FMT_FIXED case appropriately, by selecting a
+ * sensible default value or extracting this information from somewhere
+ * else (FW property, &drm_display_mode, &drm_display_info, ...)
+ *
+ * Note: Even if input format selection on the first bridge has no
+ * impact on the negotiation process (bus format negotiation stops once
+ * we reach the first element of the chain), drivers are expected to
+ * return accurate input formats as the input format may be used to
+ * configure the CRTC output appropriately.
+ */
+ u32 *(*atomic_get_input_bus_fmts)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+ /**
+ * @atomic_check:
+ *
+ * This method is responsible for checking bridge state correctness.
+ * It can also check the state of the surrounding components in chain
+ * to make sure the whole pipeline can work properly.
+ *
+ * &drm_bridge_funcs.atomic_check() hooks are called in reverse
+ * order (from the last to the first bridge).
+ *
+ * This method is optional. &drm_bridge_funcs.mode_fixup() is not
+ * called when &drm_bridge_funcs.atomic_check() is implemented, so only
+ * one of them should be provided.
+ *
+ * If drivers need to tweak &drm_bridge_state.input_bus_cfg.flags or
+ * &drm_bridge_state.output_bus_cfg.flags, it should happen in
+ * this function. By default the &drm_bridge_state.output_bus_cfg.flags
+ * field is set to the next bridge
+ * &drm_bridge_state.input_bus_cfg.flags value or
+ * &drm_connector.display_info.bus_flags if the bridge is the last
+ * element in the chain.
+ *
+ * RETURNS:
+ * zero if the check passed, a negative error code otherwise.
+ */
+ int (*atomic_check)(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+
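For illustration, an atomic_check() that overrides the negotiated input bus flags, as the comment above prescribes, could look like this (a sketch assuming hypothetical hardware that latches pixel data on the falling edge):

static int foo_bridge_atomic_check(struct drm_bridge *bridge,
				   struct drm_bridge_state *bridge_state,
				   struct drm_crtc_state *crtc_state,
				   struct drm_connector_state *conn_state)
{
	/* Tweak the bus flags here rather than in the enable path. */
	bridge_state->input_bus_cfg.flags &= ~DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE;
	bridge_state->input_bus_cfg.flags |= DRM_BUS_FLAG_PIXDATA_DRIVE_NEGEDGE;

	return 0;
}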
+ /**
+ * @atomic_reset:
+ *
+ * Reset the bridge to a predefined state (or retrieve its current
+ * state) and return a &drm_bridge_state object matching this state.
+ * This function is called at attach time.
+ *
+ * The atomic_reset hook is mandatory if the bridge implements any of
+ * the atomic hooks, and should be left unassigned otherwise. For
+ * bridges that don't subclass &drm_bridge_state, the
+ * drm_atomic_helper_bridge_reset() helper function shall be used to
+ * implement this hook.
+ *
+ * Note that the atomic_reset() semantics do not exactly match the
+ * reset() semantics found on other components (connector, plane, ...):
+ *
+ * 1. The reset operation happens when the bridge is attached, not when
+ * drm_mode_config_reset() is called
+ * 2. It's meant to be used exclusively on bridges that have been
+ * converted to the ATOMIC API
+ *
+ * RETURNS:
+ * A valid drm_bridge_state object in case of success, an ERR_PTR()
+ * giving the reason of the failure otherwise.
+ */
+ struct drm_bridge_state *(*atomic_reset)(struct drm_bridge *bridge);
+
+ /**
+ * @detect:
+ *
+ * Check if anything is attached to the bridge output.
+ *
+ * This callback is optional; if not implemented, the bridge is
+ * considered to always have a component attached to its output.
+ * Bridges that implement this callback shall set the
+ * DRM_BRIDGE_OP_DETECT flag in their &drm_bridge->ops.
+ *
+ * RETURNS:
+ *
+ * drm_connector_status indicating the bridge output status.
+ */
+ enum drm_connector_status (*detect)(struct drm_bridge *bridge);
+
+ /**
+ * @get_modes:
+ *
+ * Fill all modes currently valid for the sink into the &drm_connector
+ * with drm_mode_probed_add().
+ *
+ * The @get_modes callback is mostly intended to support non-probeable
+ * displays such as many fixed panels. Bridges that support reading
+ * EDID shall leave @get_modes unimplemented and implement the
+ * &drm_bridge_funcs->get_edid callback instead.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_MODES flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of
+ * filling modes, and shall not be stored internally by bridge drivers
+ * for future usage.
+ *
+ * RETURNS:
+ *
+ * The number of modes added by calling drm_mode_probed_add().
+ */
+ int (*get_modes)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @get_edid:
+ *
+ * Read and parse the EDID data of the connected display.
+ *
+ * The @get_edid callback is the preferred way of reporting mode
+ * information for a display connected to the bridge output. Bridges
+ * that support reading EDID shall implement this callback and leave
+ * the @get_modes callback unimplemented.
+ *
+ * The caller of this operation shall first verify the output
+ * connection status and refrain from reading EDID from a disconnected
+ * output.
+ *
+ * This callback is optional. Bridges that implement it shall set the
+ * DRM_BRIDGE_OP_EDID flag in their &drm_bridge->ops.
+ *
+ * The connector parameter shall be used for the sole purpose of EDID
+ * retrieval and parsing, and shall not be stored internally by bridge
+ * drivers for future usage.
+ *
+ * RETURNS:
+ *
+ * An edid structure newly allocated with kmalloc() (or similar) on
+ * success, or NULL otherwise. The caller is responsible for freeing
+ * the returned edid structure with kfree().
+ */
+ struct edid *(*get_edid)(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+
+ /**
+ * @hpd_notify:
+ *
+ * Notify the bridge of hot plug detection.
+ *
+ * This callback is optional, it may be implemented by bridges that
+ * need to be notified of display connection or disconnection for
+ * internal reasons. One use case is to reset the internal state of CEC
+ * controllers for HDMI bridges.
+ */
+ void (*hpd_notify)(struct drm_bridge *bridge,
+ enum drm_connector_status status);
+
+ /**
+ * @hpd_enable:
+ *
+ * Enable hot plug detection. From now on the bridge shall call
+ * drm_bridge_hpd_notify() each time a change is detected in the output
+ * connection status, until hot plug detection gets disabled with
+ * @hpd_disable.
+ *
+ * This callback is optional and shall only be implemented by bridges
+ * that support hot-plug notification without polling. Bridges that
+ * implement it shall also implement the @hpd_disable callback and set
+ * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+ */
+ void (*hpd_enable)(struct drm_bridge *bridge);
+
+ /**
+ * @hpd_disable:
+ *
+ * Disable hot plug detection. Once this function returns the bridge
+ * shall not call drm_bridge_hpd_notify() when a change in the output
+ * connection status occurs.
+ *
+ * This callback is optional and shall only be implemented by bridges
+ * that support hot-plug notification without polling. Bridges that
+ * implement it shall also implement the @hpd_enable callback and set
+ * the DRM_BRIDGE_OP_HPD flag in their &drm_bridge->ops.
+ */
+ void (*hpd_disable)(struct drm_bridge *bridge);
};
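Putting the new hooks together, a bridge converted to the atomic API might expose a table along these lines (a sketch with hypothetical foo_bridge_* handlers; the reset/duplicate/destroy entries use the generic atomic state helpers):

static const struct drm_bridge_funcs foo_bridge_funcs = {
	.attach = foo_bridge_attach,
	.atomic_reset = drm_atomic_helper_bridge_reset,
	.atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state,
	.atomic_destroy_state = drm_atomic_helper_bridge_destroy_state,
	.atomic_get_output_bus_fmts = foo_bridge_get_output_bus_fmts,
	.atomic_get_input_bus_fmts = foo_bridge_get_input_bus_fmts,
	.atomic_check = foo_bridge_atomic_check,
	.detect = foo_bridge_detect,
	.get_edid = foo_bridge_get_edid,
	.hpd_enable = foo_bridge_hpd_enable,
	.hpd_disable = foo_bridge_hpd_disable,
};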
/**
@@ -377,9 +666,44 @@ struct drm_bridge_timings {
};
/**
+ * enum drm_bridge_ops - Bitmask of operations supported by the bridge
+ */
+enum drm_bridge_ops {
+ /**
+ * @DRM_BRIDGE_OP_DETECT: The bridge can detect displays connected to
+ * its output. Bridges that set this flag shall implement the
+ * &drm_bridge_funcs->detect callback.
+ */
+ DRM_BRIDGE_OP_DETECT = BIT(0),
+ /**
+ * @DRM_BRIDGE_OP_EDID: The bridge can retrieve the EDID of the display
+ * connected to its output. Bridges that set this flag shall implement
+ * the &drm_bridge_funcs->get_edid callback.
+ */
+ DRM_BRIDGE_OP_EDID = BIT(1),
+ /**
+ * @DRM_BRIDGE_OP_HPD: The bridge can detect hot-plug and hot-unplug
+ * without requiring polling. Bridges that set this flag shall
+ * implement the &drm_bridge_funcs->hpd_enable and
+ * &drm_bridge_funcs->hpd_disable callbacks if they support enabling
+ * and disabling hot-plug detection dynamically.
+ */
+ DRM_BRIDGE_OP_HPD = BIT(2),
+ /**
+ * @DRM_BRIDGE_OP_MODES: The bridge can retrieve the modes supported
+ * by the display at its output. This does not include reading EDID
+ * which is separately covered by @DRM_BRIDGE_OP_EDID. Bridges that set
+ * this flag shall implement the &drm_bridge_funcs->get_modes callback.
+ */
+ DRM_BRIDGE_OP_MODES = BIT(3),
+};
+
+/**
* struct drm_bridge - central DRM bridge control structure
*/
struct drm_bridge {
+ /** @base: inherit from &drm_private_object */
+ struct drm_private_obj base;
/** @dev: DRM device this bridge belongs to */
struct drm_device *dev;
/** @encoder: encoder to which this bridge is connected */
@@ -402,13 +726,52 @@ struct drm_bridge {
const struct drm_bridge_funcs *funcs;
/** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
+ /** @ops: bitmask of operations supported by the bridge */
+ enum drm_bridge_ops ops;
+ /**
+ * @type: Type of the connection at the bridge output
+ * (DRM_MODE_CONNECTOR_*). For bridges at the end of the chain, this
+ * identifies the type of connected display.
+ */
+ int type;
+ /**
+ * @interlace_allowed: Indicate that the bridge can handle interlaced
+ * modes.
+ */
+ bool interlace_allowed;
+ /**
+ * @ddc: Associated I2C adapter for DDC access, if any.
+ */
+ struct i2c_adapter *ddc;
+ /** private: */
+ /**
+ * @hpd_mutex: Protects the @hpd_cb and @hpd_data fields.
+ */
+ struct mutex hpd_mutex;
+ /**
+ * @hpd_cb: Hot plug detection callback, registered with
+ * drm_bridge_hpd_enable().
+ */
+ void (*hpd_cb)(void *data, enum drm_connector_status status);
+ /**
+ * @hpd_data: Private data passed to the hot plug detection callback
+ * @hpd_cb.
+ */
+ void *hpd_data;
};
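The new fields are expected to be filled in before registration; a minimal probe-time sketch (foo is a hypothetical driver structure embedding the bridge):

foo->bridge.funcs = &foo_bridge_funcs;
foo->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID |
		  DRM_BRIDGE_OP_HPD;
foo->bridge.type = DRM_MODE_CONNECTOR_HDMIA;
foo->bridge.ddc = foo->ddc;	/* I2C adapter used for EDID, if any */
drm_bridge_add(&foo->bridge);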
+static inline struct drm_bridge *
+drm_priv_to_bridge(struct drm_private_obj *priv)
+{
+ return container_of(priv, struct drm_bridge, base);
+}
+
void drm_bridge_add(struct drm_bridge *bridge);
void drm_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
- struct drm_bridge *previous);
+ struct drm_bridge *previous,
+ enum drm_bridge_attach_flags flags);
/**
* drm_bridge_get_next_bridge() - Get the next bridge in the chain
@@ -482,6 +845,9 @@ void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
void drm_bridge_chain_enable(struct drm_bridge *bridge);
+int drm_atomic_bridge_chain_check(struct drm_bridge *bridge,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
@@ -491,6 +857,27 @@ void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
+u32 *
+drm_atomic_helper_bridge_propagate_bus_fmt(struct drm_bridge *bridge,
+ struct drm_bridge_state *bridge_state,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ u32 output_fmt,
+ unsigned int *num_input_fmts);
+
+enum drm_connector_status drm_bridge_detect(struct drm_bridge *bridge);
+int drm_bridge_get_modes(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+struct edid *drm_bridge_get_edid(struct drm_bridge *bridge,
+ struct drm_connector *connector);
+void drm_bridge_hpd_enable(struct drm_bridge *bridge,
+ void (*cb)(void *data,
+ enum drm_connector_status status),
+ void *data);
+void drm_bridge_hpd_disable(struct drm_bridge *bridge);
+void drm_bridge_hpd_notify(struct drm_bridge *bridge,
+ enum drm_connector_status status);
+
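To show the notification flow, a bridge with HPD interrupt support would typically call drm_bridge_hpd_notify() from its interrupt handler, roughly as below (struct foo_bridge and the presence check are hypothetical):

static irqreturn_t foo_bridge_hpd_irq(int irq, void *arg)
{
	struct foo_bridge *foo = arg;
	enum drm_connector_status status;

	/* Hypothetical register read reporting sink presence. */
	status = foo_bridge_sink_present(foo) ?
		 connector_status_connected : connector_status_disconnected;

	drm_bridge_hpd_notify(&foo->bridge, status);
	return IRQ_HANDLED;
}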
#ifdef CONFIG_DRM_PANEL_BRIDGE
struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
diff --git a/include/drm/drm_bridge_connector.h b/include/drm/drm_bridge_connector.h
new file mode 100644
index 000000000000..33f6c3bbdb4a
--- /dev/null
+++ b/include/drm/drm_bridge_connector.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) 2019 Laurent Pinchart <[email protected]>
+ */
+
+#ifndef __DRM_BRIDGE_CONNECTOR_H__
+#define __DRM_BRIDGE_CONNECTOR_H__
+
+struct drm_connector;
+struct drm_device;
+struct drm_encoder;
+
+void drm_bridge_connector_enable_hpd(struct drm_connector *connector);
+void drm_bridge_connector_disable_hpd(struct drm_connector *connector);
+struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
+ struct drm_encoder *encoder);
+
+#endif /* __DRM_BRIDGE_CONNECTOR_H__ */
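These helpers pair with the extended drm_bridge_attach() signature above: an encoder driver can opt out of bridge-created connectors and let the helper build one from the chain. A sketch, assuming the DRM_BRIDGE_ATTACH_NO_CONNECTOR flag from the same series:

ret = drm_bridge_attach(encoder, bridge, NULL,
			DRM_BRIDGE_ATTACH_NO_CONNECTOR);
if (ret)
	return ret;

connector = drm_bridge_connector_init(drm, encoder);
if (IS_ERR(connector))
	return PTR_ERR(connector);

drm_connector_attach_encoder(connector, encoder);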
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
index 5cf2c5dd8b1e..3ed5dee899fd 100644
--- a/include/drm/drm_client.h
+++ b/include/drm/drm_client.h
@@ -44,6 +44,11 @@ struct drm_client_funcs {
* returns zero gets the privilege to restore and no more clients are
* called. This callback is not called after @unregister has been called.
*
+ * Note that the core does not guarantee exclusion against concurrent
+ * drm_open(). Clients need to ensure this themselves, for example by
+ * using drm_master_internal_acquire() and
+ * drm_master_internal_release().
+ *
* This callback is optional.
*/
int (*restore)(struct drm_client_dev *client);
@@ -156,7 +161,7 @@ int drm_client_modeset_create(struct drm_client_dev *client);
void drm_client_modeset_free(struct drm_client_dev *client);
int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width, unsigned int height);
bool drm_client_rotation(struct drm_mode_set *modeset, unsigned int *rotation);
-int drm_client_modeset_commit_force(struct drm_client_dev *client);
+int drm_client_modeset_commit_locked(struct drm_client_dev *client);
int drm_client_modeset_commit(struct drm_client_dev *client);
int drm_client_modeset_dpms(struct drm_client_dev *client, int mode);
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 221910948b37..19ae6bb5c85b 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -254,6 +254,23 @@ enum drm_panel_orientation {
DRM_MODE_PANEL_ORIENTATION_RIGHT_UP,
};
+/**
+ * struct drm_monitor_range_info - Panel's Monitor range in EDID for
+ * &drm_display_info
+ *
+ * This struct is used to store the frequency range supported by a panel
+ * as parsed from EDID's detailed monitor range descriptor block.
+ *
+ * @min_vfreq: This is the min supported refresh rate in Hz from
+ * EDID's detailed monitor range.
+ * @max_vfreq: This is the max supported refresh rate in Hz from
+ * EDID's detailed monitor range.
+ */
+struct drm_monitor_range_info {
+ u8 min_vfreq;
+ u8 max_vfreq;
+};
+
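For instance, a driver deciding whether a mode's refresh rate falls inside the panel's advertised range could check (sketch; the foo_ helper is hypothetical):

static bool foo_vrefresh_in_range(const struct drm_display_info *info,
				  int vrefresh)
{
	return info->monitor_range.min_vfreq <= vrefresh &&
	       vrefresh <= info->monitor_range.max_vfreq;
}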
/*
* This is a consolidated colorimetry list supported by HDMI and
* DP protocol standard. The respective connectors will register
@@ -435,6 +452,14 @@ struct drm_display_info {
bool dvi_dual;
/**
+ * @is_hdmi: True if the sink is an HDMI device.
+ *
+ * This field shall be used instead of calling
+ * drm_detect_hdmi_monitor() when possible.
+ */
+ bool is_hdmi;
+
+ /**
* @has_hdmi_infoframe: Does the sink support the HDMI infoframe?
*/
bool has_hdmi_infoframe;
@@ -465,6 +490,11 @@ struct drm_display_info {
* @non_desktop: Non desktop display (HMD).
*/
bool non_desktop;
+
+ /**
+ * @monitor_range: Frequency range supported by monitor range descriptor
+ */
+ struct drm_monitor_range_info monitor_range;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -1357,6 +1387,12 @@ struct drm_connector {
* rev1.1 4.2.2.6
*/
bool edid_corrupt;
+ /**
+ * @real_edid_checksum: real EDID checksum for a corrupted EDID block.
+ * Required in DisplayPort 1.4 compliance testing
+ * rev1.1 4.2.2.6
+ */
+ u8 real_edid_checksum;
/** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
@@ -1512,6 +1548,7 @@ drm_connector_is_unregistered(struct drm_connector *connector)
DRM_CONNECTOR_UNREGISTERED;
}
+const char *drm_get_connector_type_name(unsigned int connector_type);
const char *drm_get_connector_status_name(enum drm_connector_status status);
const char *drm_get_subpixel_order_name(enum subpixel_order order);
const char *drm_get_dpms_name(int val);
@@ -1552,8 +1589,13 @@ void drm_connector_set_link_status_property(struct drm_connector *connector,
uint64_t link_status);
void drm_connector_set_vrr_capable_property(
struct drm_connector *connector, bool capable);
-int drm_connector_init_panel_orientation_property(
- struct drm_connector *connector, int width, int height);
+int drm_connector_set_panel_orientation(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation);
+int drm_connector_set_panel_orientation_with_quirk(
+ struct drm_connector *connector,
+ enum drm_panel_orientation panel_orientation,
+ int width, int height);
int drm_connector_attach_max_bpc_property(struct drm_connector *connector,
int min, int max);
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 5e9b15a0e8c5..59b51a09cae6 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -174,12 +174,25 @@ struct drm_crtc_state {
* @no_vblank:
*
* Reflects the ability of a CRTC to send VBLANK events. This state
- * usually depends on the pipeline configuration, and the main usuage
- * is CRTCs feeding a writeback connector operating in oneshot mode.
- * In this case the VBLANK event is only generated when a job is queued
- * to the writeback connector, and we want the core to fake VBLANK
- * events when this part of the pipeline hasn't changed but others had
- * or when the CRTC and connectors are being disabled.
+ * usually depends on the pipeline configuration. If set to true, DRM
+ * atomic helpers will send out a fake VBLANK event during display
+ * updates after all hardware changes have been committed. This is
+ * implemented in drm_atomic_helper_fake_vblank().
+ *
+ * One usage is for drivers and/or hardware without support for VBLANK
+ * interrupts. Such drivers typically do not initialize vblanking
+ * (i.e., call drm_vblank_init() with the number of CRTCs). For CRTCs
+ * without initialized vblanking, this field is set to true in
+ * drm_atomic_helper_check_modeset(), and a fake VBLANK event will be
+ * sent out on each update of the display pipeline by
+ * drm_atomic_helper_fake_vblank().
+ *
+ * Another usage is CRTCs feeding a writeback connector operating in
+ * oneshot mode. In this case the fake VBLANK event is only generated
+ * when a job is queued to the writeback connector, and we want the
+ * core to fake VBLANK events when this part of the pipeline hasn't
+ * changed but others had or when the CRTC and connectors are being
+ * disabled.
*
* __drm_atomic_helper_crtc_duplicate_state() will not reset the value
* from the current state, the CRTC driver is then responsible for
@@ -335,7 +348,14 @@ struct drm_crtc_state {
* - Events for disabled CRTCs are not allowed, and drivers can ignore
* that case.
*
- * This can be handled by the drm_crtc_send_vblank_event() function,
+ * For very simple hardware without VBLANK interrupt, enabling
+ * &struct drm_crtc_state.no_vblank makes DRM's atomic commit helpers
+ * send a fake VBLANK event at the end of the display update after all
+ * hardware changes have been applied. See
+ * drm_atomic_helper_fake_vblank().
+ *
+ * For more complex hardware this
+ * can be handled by the drm_crtc_send_vblank_event() function,
* which the driver should call on the provided event upon completion of
* the atomic commit. Note that if the driver supports vblank signalling
* and timestamping the vblank counters and timestamps must agree with
@@ -867,6 +887,47 @@ struct drm_crtc_funcs {
* new drivers as the replacement of &drm_driver.disable_vblank hook.
*/
void (*disable_vblank)(struct drm_crtc *crtc);
+
+ /**
+ * @get_vblank_timestamp:
+ *
+ * Called by drm_get_last_vbltimestamp(). Should return a precise
+ * timestamp when the most recent vblank interval ended or will end.
+ *
+ * Specifically, the timestamp in @vblank_time should correspond as
+ * closely as possible to the time when the first video scanline of
+ * the video frame after the end of vblank will start scanning out,
+ * the time immediately after end of the vblank interval. If the
+ * @crtc is currently inside vblank, this will be a time in the future.
+ * If the @crtc is currently scanning out a frame, this will be the
+ * past start time of the current scanout. This is meant to adhere
+ * to the OpenML OML_sync_control extension specification.
+ *
+ * Parameters:
+ *
+ * crtc:
+ * CRTC for which timestamp should be returned.
+ * max_error:
+ * Maximum allowable timestamp error in nanoseconds.
+ * Implementation should strive to provide timestamp
+ * with an error of at most max_error nanoseconds.
+ * Returns the true upper bound on the timestamp error.
+ * vblank_time:
+ * Target location for returned vblank timestamp.
+ * in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq quirks
+ * if the flag is set.
+ *
+ * Returns:
+ *
+ * True on success, false on failure, which means the core should
+ * fallback to a simple timestamp taken in drm_crtc_handle_vblank().
+ */
+ bool (*get_vblank_timestamp)(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
};
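Most drivers will not hand-roll this hook: together with the @get_scanout_position helper callback added further down, it can simply be wired to the generic drm_crtc_vblank_helper_get_vblank_timestamp() declared in drm_vblank.h (sketch; foo_* names are hypothetical):

static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
	.get_scanout_position = foo_crtc_get_scanout_position,
};

static const struct drm_crtc_funcs foo_crtc_funcs = {
	/* ... the usual set_config/page_flip/vblank hooks ... */
	.get_vblank_timestamp = drm_crtc_vblank_helper_get_vblank_timestamp,
};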
/**
@@ -974,11 +1035,12 @@ struct drm_crtc {
* Programmed mode in hw, after adjustments for encoders, crtc, panel
* scaling etc. Should only be used by legacy drivers, for high
* precision vblank timestamps in
- * drm_calc_vbltimestamp_from_scanoutpos().
+ * drm_crtc_vblank_helper_get_vblank_timestamp().
*
* Note that atomic drivers should not use this, but instead use
* &drm_crtc_state.adjusted_mode. And for high-precision timestamps
- * drm_calc_vbltimestamp_from_scanoutpos() used &drm_vblank_crtc.hwmode,
+ * drm_crtc_vblank_helper_get_vblank_timestamp() used
+ * &drm_vblank_crtc.hwmode,
* which is filled out by calling drm_calc_timestamping_constants().
*/
struct drm_display_mode hwmode;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 1acfc3bbd3fb..bb60a949f416 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -144,7 +144,7 @@ struct drm_device {
* Usage counter for outstanding files open,
* protected by drm_global_mutex
*/
- int open_count;
+ atomic_t open_count;
/** @filelist_mutex: Protects @filelist. */
struct mutex filelist_mutex;
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index bc04467f7c3a..c6119e4c169a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -392,6 +392,8 @@
# define DP_DS_12BPC 2
# define DP_DS_16BPC 3
+#define DP_MAX_DOWNSTREAM_PORTS 0x10
+
/* DP Forward error Correction Registers */
#define DP_FEC_CAPABILITY 0x090 /* 1.4 */
# define DP_FEC_CAPABLE (1 << 0)
@@ -1457,6 +1459,9 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE]);
+bool drm_dp_send_real_edid_checksum(struct drm_dp_aux *aux,
+ u8 real_edid_checksum);
+
int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -1493,13 +1498,16 @@ struct drm_dp_desc {
int drm_dp_read_desc(struct drm_dp_aux *aux, struct drm_dp_desc *desc,
bool is_branch);
+u32 drm_dp_get_edid_quirks(const struct edid *edid);
/**
* enum drm_dp_quirk - Display Port sink/branch device specific quirks
*
* Display Port sink and branch devices in the wild have a variety of bugs, try
* to collect them here. The quirks are shared, but it's up to the drivers to
- * implement workarounds for them.
+ * implement workarounds for them. Note that because some devices have
+ * unreliable OUIs, the EDID of sinks should also be checked for quirks using
+ * drm_dp_get_edid_quirks().
*/
enum drm_dp_quirk {
/**
@@ -1530,19 +1538,31 @@ enum drm_dp_quirk {
* The DSC caps can be read from the physical aux instead.
*/
DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
+ /**
+ * @DP_QUIRK_FORCE_DPCD_BACKLIGHT:
+ *
+ * The device is telling the truth when it says that it uses DPCD
+ * backlight controls, even if the system's firmware disagrees. This
+ * quirk should be checked against both the ident and panel EDID.
+ * When present, the driver should honor the DPCD backlight
+ * capabilities advertised.
+ */
+ DP_QUIRK_FORCE_DPCD_BACKLIGHT,
};
/**
* drm_dp_has_quirk() - does the DP device have a specific quirk
* @desc: Device decriptor filled by drm_dp_read_desc()
+ * @edid_quirks: Optional quirk bitmask filled by drm_dp_get_edid_quirks()
* @quirk: Quirk to query for
*
* Return true if DP device identified by @desc has @quirk.
*/
static inline bool
-drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
+drm_dp_has_quirk(const struct drm_dp_desc *desc, u32 edid_quirks,
+ enum drm_dp_quirk quirk)
{
- return desc->quirks & BIT(quirk);
+ return (desc->quirks | edid_quirks) & BIT(quirk);
}
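Callers are thus expected to feed both sources into the check, e.g. (sketch; desc and edid are assumed to have been filled in earlier):

if (drm_dp_has_quirk(&desc, drm_dp_get_edid_quirks(edid),
		     DP_QUIRK_FORCE_DPCD_BACKLIGHT)) {
	/* Honor the DPCD backlight capabilities despite the firmware. */
}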
#ifdef CONFIG_DRM_DP_CEC
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index bcb39da9adb4..9a1e8ba4f839 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -479,7 +479,6 @@ struct drm_dp_mst_topology_mgr;
struct drm_dp_mst_topology_cbs {
/* create a connector for a port */
struct drm_connector *(*add_connector)(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, const char *path);
- void (*register_connector)(struct drm_connector *connector);
void (*destroy_connector)(struct drm_dp_mst_topology_mgr *mgr,
struct drm_connector *connector);
};
@@ -591,6 +590,11 @@ struct drm_dp_mst_topology_mgr {
bool payload_id_table_cleared : 1;
/**
+ * @is_waiting_for_dwn_reply: whether we're waiting for a down reply.
+ */
+ bool is_waiting_for_dwn_reply : 1;
+
+ /**
* @mst_primary: Pointer to the primary/first branch device.
*/
struct drm_dp_mst_branch *mst_primary;
@@ -620,11 +624,6 @@ struct drm_dp_mst_topology_mgr {
struct mutex qlock;
/**
- * @is_waiting_for_dwn_reply: indicate whether is waiting for down reply
- */
- bool is_waiting_for_dwn_reply;
-
- /**
* @tx_msg_downq: List of pending down replies.
*/
struct list_head tx_msg_downq;
@@ -635,11 +634,13 @@ struct drm_dp_mst_topology_mgr {
struct mutex payload_lock;
/**
* @proposed_vcpis: Array of pointers for the new VCPI allocation. The
- * VCPI structure itself is &drm_dp_mst_port.vcpi.
+ * VCPI structure itself is &drm_dp_mst_port.vcpi, and the size of
+ * this array is determined by @max_payloads.
*/
struct drm_dp_vcpi **proposed_vcpis;
/**
- * @payloads: Array of payloads.
+ * @payloads: Array of payloads. The size of this array is determined
+ * by @max_payloads.
*/
struct drm_dp_payload *payloads;
/**
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index cf13470810a5..97109df5beac 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -269,159 +269,6 @@ struct drm_driver {
void (*release) (struct drm_device *);
/**
- * @get_vblank_counter:
- *
- * Driver callback for fetching a raw hardware vblank counter for the
- * CRTC specified with the pipe argument. If a device doesn't have a
- * hardware counter, the driver can simply leave the hook as NULL.
- * The DRM core will account for missed vblank events while interrupts
- * where disabled based on system timestamps.
- *
- * Wraparound handling and loss of events due to modesetting is dealt
- * with in the DRM core code, as long as drivers call
- * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
- * enabling a CRTC.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.get_vblank_counter instead.
- *
- * Returns:
- *
- * Raw vblank counter value.
- */
- u32 (*get_vblank_counter) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @enable_vblank:
- *
- * Enable vblank interrupts for the CRTC specified with the pipe
- * argument.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.enable_vblank instead.
- *
- * Returns:
- *
- * Zero on success, appropriate errno if the given @crtc's vblank
- * interrupt cannot be enabled.
- */
- int (*enable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @disable_vblank:
- *
- * Disable vblank interrupts for the CRTC specified with the pipe
- * argument.
- *
- * This is deprecated and should not be used by new drivers.
- * Use &drm_crtc_funcs.disable_vblank instead.
- */
- void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
-
- /**
- * @get_scanout_position:
- *
- * Called by vblank timestamping code.
- *
- * Returns the current display scanout position from a crtc, and an
- * optional accurate ktime_get() timestamp of when position was
- * measured. Note that this is a helper callback which is only used if a
- * driver uses drm_calc_vbltimestamp_from_scanoutpos() for the
- * @get_vblank_timestamp callback.
- *
- * Parameters:
- *
- * dev:
- * DRM device.
- * pipe:
- * Id of the crtc to query.
- * in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- * vpos:
- * Target location for current vertical scanout position.
- * hpos:
- * Target location for current horizontal scanout position.
- * stime:
- * Target location for timestamp taken immediately before
- * scanout position query. Can be NULL to skip timestamp.
- * etime:
- * Target location for timestamp taken immediately after
- * scanout position query. Can be NULL to skip timestamp.
- * mode:
- * Current display timings.
- *
- * Returns vpos as a positive number while in active scanout area.
- * Returns vpos as a negative number inside vblank, counting the number
- * of scanlines to go until end of vblank, e.g., -1 means "one scanline
- * until start of active scanout / end of vblank."
- *
- * Returns:
- *
- * True on success, false if a reliable scanout position counter could
- * not be read out.
- *
- * FIXME:
- *
- * Since this is a helper to implement @get_vblank_timestamp, we should
- * move it to &struct drm_crtc_helper_funcs, like all the other
- * helper-internal hooks.
- */
- bool (*get_scanout_position) (struct drm_device *dev, unsigned int pipe,
- bool in_vblank_irq, int *vpos, int *hpos,
- ktime_t *stime, ktime_t *etime,
- const struct drm_display_mode *mode);
-
- /**
- * @get_vblank_timestamp:
- *
- * Called by drm_get_last_vbltimestamp(). Should return a precise
- * timestamp when the most recent VBLANK interval ended or will end.
- *
- * Specifically, the timestamp in @vblank_time should correspond as
- * closely as possible to the time when the first video scanline of
- * the video frame after the end of VBLANK will start scanning out,
- * the time immediately after end of the VBLANK interval. If the
- * @crtc is currently inside VBLANK, this will be a time in the future.
- * If the @crtc is currently scanning out a frame, this will be the
- * past start time of the current scanout. This is meant to adhere
- * to the OpenML OML_sync_control extension specification.
- *
- * Paramters:
- *
- * dev:
- * dev DRM device handle.
- * pipe:
- * crtc for which timestamp should be returned.
- * max_error:
- * Maximum allowable timestamp error in nanoseconds.
- * Implementation should strive to provide timestamp
- * with an error of at most max_error nanoseconds.
- * Returns true upper bound on error for timestamp.
- * vblank_time:
- * Target location for returned vblank timestamp.
- * in_vblank_irq:
- * True when called from drm_crtc_handle_vblank(). Some drivers
- * need to apply some workarounds for gpu-specific vblank irq quirks
- * if flag is set.
- *
- * Returns:
- *
- * True on success, false on failure, which means the core should
- * fallback to a simple timestamp taken in drm_crtc_handle_vblank().
- *
- * FIXME:
- *
- * We should move this hook to &struct drm_crtc_funcs like all the other
- * vblank hooks.
- */
- bool (*get_vblank_timestamp) (struct drm_device *dev, unsigned int pipe,
- int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq);
-
- /**
* @irq_handler:
*
* Interrupt handler called when using drm_irq_install(). Not used by
@@ -458,20 +305,6 @@ struct drm_driver {
void (*irq_uninstall) (struct drm_device *dev);
/**
- * @master_create:
- *
- * Called whenever a new master is created. Only used by vmwgfx.
- */
- int (*master_create)(struct drm_device *dev, struct drm_master *master);
-
- /**
- * @master_destroy:
- *
- * Called whenever a master is destroyed. Only used by vmwgfx.
- */
- void (*master_destroy)(struct drm_device *dev, struct drm_master *master);
-
- /**
* @master_set:
*
* Called whenever the minor master is set. Only used by vmwgfx.
@@ -775,6 +608,9 @@ struct drm_driver {
int (*dma_ioctl) (struct drm_device *dev, void *data, struct drm_file *file_priv);
int (*dma_quiescent) (struct drm_device *);
int (*context_dtor) (struct drm_device *dev, int context);
+ u32 (*get_vblank_counter)(struct drm_device *dev, unsigned int pipe);
+ int (*enable_vblank)(struct drm_device *dev, unsigned int pipe);
+ void (*disable_vblank)(struct drm_device *dev, unsigned int pipe);
int dev_priv_size;
};
@@ -824,6 +660,25 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
}
/**
+ * drm_core_check_all_features - check driver feature flags mask
+ * @dev: DRM device to check
+ * @features: feature flag(s) mask
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features,
+ * &drm_device.driver_features, and the various &enum drm_driver_feature flags.
+ *
+ * Returns true if all features in the @features mask are supported, false
+ * otherwise.
+ */
+static inline bool drm_core_check_all_features(const struct drm_device *dev,
+ u32 features)
+{
+ u32 supported = dev->driver->driver_features & dev->driver_features;
+
+ return features && (supported & features) == features;
+}
+
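For example, a code path requiring both modesetting and atomic support could be gated in one call (sketch):

if (!drm_core_check_all_features(dev, DRIVER_MODESET | DRIVER_ATOMIC))
	return -EOPNOTSUPP;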
+/**
* drm_core_check_feature - check driver feature flags
* @dev: DRM device to check
* @feature: feature flag
@@ -833,9 +688,10 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
*
* Returns true if the @feature is supported, false otherwise.
*/
-static inline bool drm_core_check_feature(const struct drm_device *dev, u32 feature)
+static inline bool drm_core_check_feature(const struct drm_device *dev,
+ enum drm_driver_feature feature)
{
- return dev->driver->driver_features & dev->driver_features & feature;
+ return drm_core_check_all_features(dev, feature);
}
/**
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index f0b03d401c27..34b15e3d070c 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -91,6 +91,11 @@ struct detailed_data_string {
u8 str[13];
} __attribute__((packed));
+#define DRM_EDID_DEFAULT_GTF_SUPPORT_FLAG 0x00
+#define DRM_EDID_RANGE_LIMITS_ONLY_FLAG 0x01
+#define DRM_EDID_SECONDARY_GTF_SUPPORT_FLAG 0x02
+#define DRM_EDID_CVT_SUPPORT_FLAG 0x04
+
struct detailed_data_monitor_range {
u8 min_vfreq;
u8 max_vfreq;
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 5623994b6e9e..4370e039c015 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -174,7 +174,8 @@ struct drm_encoder {
struct drm_crtc *crtc;
/**
- * @bridge_chain: Bridges attached to this encoder.
+ * @bridge_chain: Bridges attached to this encoder. Drivers shall not
+ * access this field directly.
*/
struct list_head bridge_chain;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 1c6633da0f91..208dbf87afa3 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -213,8 +213,7 @@ drm_fb_helper_from_client(struct drm_client_dev *client)
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
const struct drm_fb_helper_funcs *funcs);
-int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper, int max_conn);
+int drm_fb_helper_init(struct drm_device *dev, struct drm_fb_helper *helper);
void drm_fb_helper_fini(struct drm_fb_helper *helper);
int drm_fb_helper_blank(int blank, struct fb_info *info);
int drm_fb_helper_pan_display(struct fb_var_screeninfo *var,
@@ -279,8 +278,7 @@ static inline void drm_fb_helper_prepare(struct drm_device *dev,
}
static inline int drm_fb_helper_init(struct drm_device *dev,
- struct drm_fb_helper *helper,
- int max_conn)
+ struct drm_fb_helper *helper)
{
/* So drivers can use it to free the struct */
helper->dev = dev;
@@ -453,27 +451,6 @@ drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
#endif
-/* TODO: There's a todo entry to remove these three */
-static inline int
-drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_add_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
-static inline int
-drm_fb_helper_remove_one_connector(struct drm_fb_helper *fb_helper,
- struct drm_connector *connector)
-{
- return 0;
-}
-
/**
* drm_fb_helper_remove_conflicting_framebuffers - remove firmware-configured framebuffers
* @a: memory range, users of which are to be removed
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 8b099b347817..19df8028a6c4 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -374,6 +374,7 @@ int drm_open(struct inode *inode, struct file *filp);
ssize_t drm_read(struct file *filp, char __user *buffer,
size_t count, loff_t *offset);
int drm_release(struct inode *inode, struct file *filp);
+int drm_release_noglobal(struct inode *inode, struct file *filp);
__poll_t drm_poll(struct file *filp, struct poll_table_struct *wait);
int drm_event_reserve_init_locked(struct drm_device *dev,
struct drm_file *file_priv,
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index e34a7b7f848a..294b2931c4cc 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -96,6 +96,11 @@ struct drm_gem_shmem_object {
* The address are un-mapped when the count reaches zero.
*/
unsigned int vmap_use_count;
+
+ /**
+ * @map_cached: map object cached (instead of using writecombine).
+ */
+ bool map_cached;
};
#define to_drm_gem_shmem_obj(obj) \
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index 573e9fd109bf..0f6e47213d8d 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -6,6 +6,7 @@
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_ioctl.h>
+#include <drm/drm_modes.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
@@ -205,4 +206,12 @@ struct drm_vram_mm *drm_vram_helper_alloc_mm(
struct drm_device *dev, uint64_t vram_base, size_t vram_size);
void drm_vram_helper_release_mm(struct drm_device *dev);
+/*
+ * Mode-config helpers
+ */
+
+enum drm_mode_status
+drm_vram_helper_mode_valid(struct drm_device *dev,
+ const struct drm_display_mode *mode);
+
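The helper has the same signature as &drm_mode_config_funcs.mode_valid, so a VRAM-backed driver can plug it in directly (sketch; the foo_ table and the other helper choices are illustrative):

static const struct drm_mode_config_funcs foo_mode_config_funcs = {
	.fb_create = drm_gem_fb_create,
	.mode_valid = drm_vram_helper_mode_valid,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};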
#endif
diff --git a/include/drm/drm_hdcp.h b/include/drm/drm_hdcp.h
index 06a11202a097..c6bab4986a65 100644
--- a/include/drm/drm_hdcp.h
+++ b/include/drm/drm_hdcp.h
@@ -276,7 +276,7 @@ void drm_hdcp_cpu_to_be24(u8 seq_num[HDCP_2_2_SEQ_NUM_LEN], u32 val)
#define DRM_HDCP_2_VRL_LENGTH_SIZE 3
#define DRM_HDCP_2_DCP_SIG_SIZE 384
#define DRM_HDCP_2_NO_OF_DEV_PLUS_RESERVED_SZ 4
-#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC) >> 6)
+#define DRM_HDCP_2_KSV_COUNT_2_LSBITS(byte) (((byte) & 0xC0) >> 6)
struct hdcp_srm_header {
u8 srm_id;
@@ -288,8 +288,8 @@ struct hdcp_srm_header {
struct drm_device;
struct drm_connector;
-bool drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
- u8 *ksvs, u32 ksv_count);
+int drm_hdcp_check_ksvs_revoked(struct drm_device *dev,
+ u8 *ksvs, u32 ksv_count);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector, bool hdcp_content_type);
void drm_hdcp_update_content_protection(struct drm_connector *connector,
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 5745710453c8..dcef3598f49e 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -194,17 +194,11 @@ void drm_legacy_idlelock_release(struct drm_lock_data *lock);
#ifdef CONFIG_PCI
-void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
#else
-static inline void __drm_legacy_pci_free(struct drm_device *dev,
- drm_dma_handle_t *dmah)
-{
-}
-
static inline int drm_legacy_pci_init(struct drm_driver *driver,
struct pci_driver *pdriver)
{
diff --git a/include/drm/drm_mipi_dbi.h b/include/drm/drm_mipi_dbi.h
index 67c66f5ee591..33f325f5af2b 100644
--- a/include/drm/drm_mipi_dbi.h
+++ b/include/drm/drm_mipi_dbi.h
@@ -110,6 +110,18 @@ struct mipi_dbi_dev {
unsigned int rotation;
/**
+ * @left_offset: Horizontal offset of the display relative to the
+ * controller's driver array
+ */
+ unsigned int left_offset;
+
+ /**
+ * @top_offset: Vertical offset of the display relative to the
+ * controller's driver array
+ */
+ unsigned int top_offset;
+
+ /**
* @backlight: backlight device (optional)
*/
struct backlight_device *backlight;
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index d7939c054259..ee8b0e80ca90 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -272,7 +272,7 @@ static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
*/
static inline bool drm_mm_initialized(const struct drm_mm *mm)
{
- return mm->hole_stack.next;
+ return READ_ONCE(mm->hole_stack.next);
}
/**
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index e946e20c61d8..99134d4f35eb 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -371,20 +371,13 @@ struct drm_display_mode {
int crtc_vtotal;
/**
- * @private:
+ * @private_flags:
*
- * Pointer for driver private data. This can only be used for mode
+ * Driver private flags. private_flags can only be used for mode
* objects passed to drivers in modeset operations. It shouldn't be used
* by atomic drivers since they can store any additional data by
* subclassing state structures.
*/
- int *private;
-
- /**
- * @private_flags:
- *
- * Similar to @private, but just an integer.
- */
int private_flags;
/**
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 5a87f1bd7a3f..7c20b1c8b6a7 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -450,6 +450,53 @@ struct drm_crtc_helper_funcs {
*/
void (*atomic_disable)(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state);
+
+ /**
+ * @get_scanout_position:
+ *
+ * Called by vblank timestamping code.
+ *
+ * Returns the current display scanout position from a CRTC and an
+ * optional accurate ktime_get() timestamp of when the position was
+ * measured. Note that this is a helper callback which is only used
+ * if a driver uses drm_crtc_vblank_helper_get_vblank_timestamp()
+ * for the @drm_crtc_funcs.get_vblank_timestamp callback.
+ *
+ * Parameters:
+ *
+ * crtc:
+ * The CRTC.
+ * in_vblank_irq:
+ * True when called from drm_crtc_handle_vblank(). Some drivers
+ * need to apply some workarounds for gpu-specific vblank irq
+ * quirks if the flag is set.
+ * vpos:
+ * Target location for current vertical scanout position.
+ * hpos:
+ * Target location for current horizontal scanout position.
+ * stime:
+ * Target location for timestamp taken immediately before
+ * scanout position query. Can be NULL to skip timestamp.
+ * etime:
+ * Target location for timestamp taken immediately after
+ * scanout position query. Can be NULL to skip timestamp.
+ * mode:
+ * Current display timings.
+ *
+ * Returns vpos as a positive number while in active scanout area.
+ * Returns vpos as a negative number inside vblank, counting the number
+ * of scanlines to go until end of vblank, e.g., -1 means "one scanline
+ * until start of active scanout / end of vblank."
+ *
+ * Returns:
+ *
+ * True on success, false if a reliable scanout position counter could
+ * not be read out.
+ */
+ bool (*get_scanout_position)(struct drm_crtc *crtc,
+ bool in_vblank_irq, int *vpos, int *hpos,
+ ktime_t *stime, ktime_t *etime,
+ const struct drm_display_mode *mode);
};
/**
@@ -646,22 +693,6 @@ struct drm_encoder_helper_funcs {
struct drm_connector_state *conn_state);
/**
- * @get_crtc:
- *
- * This callback is used by the legacy CRTC helpers to work around
- * deficiencies in its own book-keeping.
- *
- * Do not use, use atomic helpers instead, which get the book keeping
- * right.
- *
- * FIXME:
- *
- * Currently only nouveau is using this, and as soon as nouveau is
- * atomic we can ditch this hook.
- */
- struct drm_crtc *(*get_crtc)(struct drm_encoder *encoder);
-
- /**
* @detect:
*
* This callback can be used by drivers who want to do detection on the
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 121f7aabccd1..6193cb555acc 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -198,7 +198,8 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
#endif
-#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+#if IS_ENABLED(CONFIG_DRM_PANEL) && (IS_BUILTIN(CONFIG_BACKLIGHT_CLASS_DEVICE) || \
+ (IS_MODULE(CONFIG_DRM) && IS_MODULE(CONFIG_BACKLIGHT_CLASS_DEVICE)))
int drm_panel_of_backlight(struct drm_panel *panel);
#else
static inline int drm_panel_of_backlight(struct drm_panel *panel)
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 9031e217b506..3941b0255ecf 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -45,10 +45,6 @@ struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-
#else
static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
@@ -62,13 +58,6 @@ static inline void drm_pci_free(struct drm_device *dev,
{
}
-static inline int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- return -ENOSYS;
-}
-
#endif
#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 8f99d389792d..ca7cee8e728a 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -382,42 +382,6 @@ void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
- drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-
-#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
-({ \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- drm_dev_dbg(dev, category, fmt, ##__VA_ARGS__); \
-})
-
-/**
- * Rate limited debug output. Like DRM_DEBUG() but won't flood the log.
- *
- * @dev: device pointer
- * @fmt: printf() like format string.
- */
-#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
- _DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
- fmt, ##__VA_ARGS__)
-#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
- _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
- fmt, ##__VA_ARGS__)
/*
* struct drm_device based logging
@@ -541,16 +505,42 @@ void __drm_err(const char *format, ...);
__drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+({ \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ drm_dev_dbg(NULL, DRM_UT_KMS, fmt, ##__VA_ARGS__); \
+})
+
+/*
+ * struct drm_device based WARNs
+ *
+ * drm_WARN*() acts like WARN*(), but with the key difference of
+ * using device-specific information so that we know which device the
+ * warning originates from.
+ *
+ * Prefer drm_device based drm_WARN* over regular WARN*
+ */
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+/* Helper for struct drm_device based WARNs */
+#define drm_WARN(drm, condition, format, arg...) \
+ WARN(condition, "%s %s: " format, \
+ dev_driver_string((drm)->dev), \
+ dev_name((drm)->dev), ## arg)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ONCE(drm, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string((drm)->dev), \
+ dev_name((drm)->dev), ## arg)
+
+#define drm_WARN_ON(drm, x) \
+ drm_WARN((drm), (x), "%s", \
+ "drm_WARN_ON(" __stringify(x) ")")
-#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+#define drm_WARN_ON_ONCE(drm, x) \
+ drm_WARN_ONCE((drm), (x), "%s", \
+ "drm_WARN_ON_ONCE(" __stringify(x) ")")
#endif /* DRM_PRINT_H_ */
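Usage mirrors the plain macros, with driver and device names prepended to the message (sketch; foo and MAX_PIPES are placeholders):

/* Logs "<driver> <device>: drm_WARN_ON(pipe >= MAX_PIPES)" plus a backtrace. */
drm_WARN_ON(&foo->drm, pipe >= MAX_PIPES);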
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 15afee9cf049..a026375464ff 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -100,8 +100,11 @@ struct drm_simple_display_pipe_funcs {
* This is the function drivers should submit the
* &drm_pending_vblank_event from. Using either
* drm_crtc_arm_vblank_event(), when the driver supports vblank
- * interrupt handling, or drm_crtc_send_vblank_event() directly in case
- * the hardware lacks vblank support entirely.
+ * interrupt handling, or drm_crtc_send_vblank_event() for more
+ * complex cases. In case the hardware lacks vblank support entirely,
+ * drivers can set &struct drm_crtc_state.no_vblank in
+ * &struct drm_simple_display_pipe_funcs.check and let DRM's
+ * atomic helper fake a vblank event.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
struct drm_plane_state *old_plane_state);
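A sketch of that pattern for hardware without a VBLANK interrupt (hypothetical foo_ pipe):

static int foo_pipe_check(struct drm_simple_display_pipe *pipe,
			  struct drm_plane_state *plane_state,
			  struct drm_crtc_state *crtc_state)
{
	/* Let the atomic helpers fake one VBLANK event per update. */
	crtc_state->no_vblank = true;

	return 0;
}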
@@ -178,4 +181,8 @@ int drm_simple_display_pipe_init(struct drm_device *dev,
const uint64_t *format_modifiers,
struct drm_connector *connector);
+int drm_simple_encoder_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ int encoder_type);
+
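The new helper initializes an encoder that needs no further callbacks of its own; usage reduces to a single call (sketch; foo and the encoder type are placeholders):

ret = drm_simple_encoder_init(dev, &foo->encoder, DRM_MODE_ENCODER_TMDS);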
#endif /* __LINUX_DRM_SIMPLE_KMS_HELPER_H */
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index c16c44052b3d..dd9f5b9e56e4 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -174,13 +174,13 @@ struct drm_vblank_crtc {
unsigned int pipe;
/**
* @framedur_ns: Frame/Field duration in ns, used by
- * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
* drm_calc_timestamping_constants().
*/
int framedur_ns;
/**
* @linedur_ns: Line duration in ns, used by
- * drm_calc_vbltimestamp_from_scanoutpos() and computed by
+ * drm_crtc_vblank_helper_get_vblank_timestamp() and computed by
* drm_calc_timestamping_constants().
*/
int linedur_ns;
@@ -190,8 +190,8 @@ struct drm_vblank_crtc {
*
* Cache of the current hardware display mode. Only valid when @enabled
* is set. This is used by helpers like
- * drm_calc_vbltimestamp_from_scanoutpos(). We can't just access the
- * hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
+ * drm_crtc_vblank_helper_get_vblank_timestamp(). We can't just access
+ * the hardware mode by e.g. looking at &drm_crtc_state.adjusted_mode,
* because that one is really hard to get from interrupt context.
*/
struct drm_display_mode hwmode;
@@ -206,6 +206,7 @@ struct drm_vblank_crtc {
};
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
+bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
u64 drm_crtc_vblank_count_and_time(struct drm_crtc *crtc,
ktime_t *vblanktime);
@@ -229,13 +230,32 @@ u64 drm_crtc_accurate_vblank_count(struct drm_crtc *crtc);
void drm_vblank_restore(struct drm_device *dev, unsigned int pipe);
void drm_crtc_vblank_restore(struct drm_crtc *crtc);
-bool drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
- unsigned int pipe, int *max_error,
- ktime_t *vblank_time,
- bool in_vblank_irq);
void drm_calc_timestamping_constants(struct drm_crtc *crtc,
const struct drm_display_mode *mode);
wait_queue_head_t *drm_crtc_vblank_waitqueue(struct drm_crtc *crtc);
void drm_crtc_set_max_vblank_count(struct drm_crtc *crtc,
u32 max_vblank_count);
+
+/*
+ * Helpers for struct drm_crtc_funcs
+ */
+
+typedef bool (*drm_vblank_get_scanout_position_func)(struct drm_crtc *crtc,
+ bool in_vblank_irq,
+ int *vpos, int *hpos,
+ ktime_t *stime,
+ ktime_t *etime,
+ const struct drm_display_mode *mode);
+
+bool
+drm_crtc_vblank_helper_get_vblank_timestamp_internal(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq,
+ drm_vblank_get_scanout_position_func get_scanout_position);
+bool drm_crtc_vblank_helper_get_vblank_timestamp(struct drm_crtc *crtc,
+ int *max_error,
+ ktime_t *vblank_time,
+ bool in_vblank_irq);
+
#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 589be851f8a1..26b04ff62676 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -262,7 +262,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
- * @score: score to help loadbalancer pick a idle sched
+ * @num_jobs: the number of jobs queued in the scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hit to time out handler to free the guilty job.
*
@@ -283,8 +283,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t score;
- bool ready;
+ atomic_t num_jobs;
+ bool ready;
bool free_guilty;
};
@@ -297,6 +297,10 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
struct drm_sched_entity *entity,
void *owner);
+void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
+
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
@@ -337,5 +341,8 @@ void drm_sched_fence_finished(struct drm_sched_fence *fence);
unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
unsigned long remaining);
+struct drm_gpu_scheduler *
+drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list);
#endif
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 4d48de8890ca..702f613243bb 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -12,7 +12,6 @@
#include <linux/mutex.h>
#include <linux/device.h>
#include <drm/drm_hdcp.h>
-#include <drm/i915_drm.h>
/**
* enum hdcp_port_type - HDCP port implementation type defined by ME FW
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 66ca49db9633..b9bc1b00142e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -135,18 +135,14 @@ struct ttm_tt;
* @num_pages: Actual number of pages.
* @acc_size: Accounted size for this object.
* @kref: Reference count of this buffer object. When this refcount reaches
- * zero, the object is put on the delayed delete list.
- * @list_kref: List reference count of this buffer object. This member is
- * used to avoid destruction while the buffer object is still on a list.
- * Lru lists may keep one refcount, the delayed delete list, and kref != 0
- * keeps one refcount. When this refcount reaches zero,
- * the object is destroyed.
+ * zero, the object is destroyed or put on the delayed delete list.
* @mem: structure describing current placement.
* @persistent_swap_storage: Usually the swap storage is deleted for buffers
* pinned in physical memory. If this behaviour is not desired, this member
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
+ * @deleted: True if the object is only a zombie and already deleted.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
@@ -183,9 +179,7 @@ struct ttm_buffer_object {
/**
* Members not needing protection.
*/
-
struct kref kref;
- struct kref list_kref;
/**
* Members protected by the bo::resv::reserved lock.
@@ -195,6 +189,7 @@ struct ttm_buffer_object {
struct file *persistent_swap_storage;
struct ttm_tt *ttm;
bool evicted;
+ bool deleted;
/**
* Members protected by the bdev::lru_lock.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index cac7a8a0825a..c9e0fd09f4b2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -210,8 +210,6 @@ struct ttm_mem_type_manager {
* struct ttm_bo_driver
*
* @create_ttm_backend_entry: Callback to create a struct ttm_backend.
- * @invalidate_caches: Callback to invalidate read caches when a buffer object
- * has been evicted.
* @init_mem_type: Callback to initialize a struct ttm_mem_type_manager
* structure.
* @evict_flags: Callback to obtain placement flags when a buffer is evicted.
@@ -256,19 +254,6 @@ struct ttm_bo_driver {
*/
void (*ttm_tt_unpopulate)(struct ttm_tt *ttm);
- /**
- * struct ttm_bo_driver member invalidate_caches
- *
- * @bdev: the buffer object device.
- * @flags: new placement of the rebound buffer object.
- *
- * A previosly evicted buffer has been rebound in a
- * potentially new location. Tell the driver that it might
- * consider invalidating read (texture) caches on the next command
- * submission as a consequence.
- */
-
- int (*invalidate_caches)(struct ttm_bo_device *bdev, uint32_t flags);
int (*init_mem_type)(struct ttm_bo_device *bdev, uint32_t type,
struct ttm_mem_type_manager *man);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 053ea4b51988..f629d40c645c 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -524,7 +524,7 @@ struct request_queue {
unsigned int sg_reserved_size;
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
struct mutex blk_trace_mutex;
#endif
/*
@@ -1494,7 +1494,6 @@ static inline void put_dev_sector(Sector p)
}
int kblockd_schedule_work(struct work_struct *work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7bb2d8de9f30..3b6ff5902edc 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
**/
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
do { \
- struct blk_trace *bt = (q)->blk_trace; \
+ struct blk_trace *bt; \
+ \
+ rcu_read_lock(); \
+ bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
- if (likely(!bt))
- return false;
- return bt->act_mask & BLK_TC_NOTIFY;
+ struct blk_trace *bt;
+ bool ret;
+
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+ rcu_read_unlock();
+ return ret;
}
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
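
The hunks above convert the readers to rcu_read_lock()/rcu_dereference(); the writer side that the __rcu annotation implies would look roughly like this (a sketch under assumed locking, not part of this hunk — rcu_replace_pointer() and the existing blk_trace_mutex are used for illustration):

static void my_blk_trace_remove(struct request_queue *q)
{
        struct blk_trace *bt;

        mutex_lock(&q->blk_trace_mutex);
        bt = rcu_replace_pointer(q->blk_trace, NULL,
                                 lockdep_is_held(&q->blk_trace_mutex));
        mutex_unlock(&q->blk_trace_mutex);

        synchronize_rcu();      /* wait out readers in the trace macros */
        kfree(bt);
}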
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
index 7e18c939663e..d11e183fcb54 100644
--- a/include/linux/bootconfig.h
+++ b/include/linux/bootconfig.h
@@ -10,6 +10,9 @@
#include <linux/kernel.h>
#include <linux/types.h>
+#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
+#define BOOTCONFIG_MAGIC_LEN 12
+
/* XBC tree node */
struct xbc_node {
u16 next;
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 11083d84eb23..df2475be134a 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -248,15 +248,6 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
-/*
- * These functions operate on 32- or 64-bit specs depending on
- * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
- */
-extern int compat_get_timespec(struct timespec *, const void __user *);
-extern int compat_put_timespec(const struct timespec *, void __user *);
-extern int compat_get_timeval(struct timeval *, const void __user *);
-extern int compat_put_timeval(const struct timeval *, void __user *);
-
struct compat_iovec {
compat_uptr_t iov_base;
compat_size_t iov_len;
@@ -416,26 +407,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginf
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-static inline int old_timeval32_compare(struct old_timeval32 *lhs,
- struct old_timeval32 *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_usec - rhs->tv_usec;
-}
-
-static inline int old_timespec32_compare(struct old_timespec32 *lhs,
- struct old_timespec32 *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_nsec - rhs->tv_nsec;
-}
-
extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
/*
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 018dce868de6..0fb561d1b524 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -201,9 +201,6 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
return cpumask_weight(policy->cpus) > 1;
}
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9bd8528bd305..328c2dbb4409 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -129,11 +129,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
sectors);
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
-{
- return dax_get_by_host(host);
-}
-
static inline void fs_put_dax(struct dax_device *dax_dev)
{
put_dax(dax_dev);
@@ -141,7 +136,7 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc);
+ struct dax_device *dax_dev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
@@ -160,11 +155,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
-{
- return NULL;
-}
-
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
@@ -180,7 +170,7 @@ static inline struct page *dax_layout_busy_page(struct address_space *mapping)
}
static inline int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
return -EOPNOTSUPP;
}
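
Callers now pass the dax_device instead of the block_device; a minimal filesystem-side sketch (my_sb_info and its dax_dev member are assumptions for illustration):

static int myfs_dax_writepages(struct address_space *mapping,
                               struct writeback_control *wbc)
{
        struct my_sb_info *sbi = mapping->host->i_sb->s_fs_info;

        return dax_writeback_mapping_range(mapping, sbi->dax_dev, wbc);
}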
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 3d013de64f70..43efcc49f061 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -127,9 +127,9 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
@@ -304,11 +304,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_regset32(const char *name,
- umode_t mode, struct dentry *parent,
- struct debugfs_regset32 *regset)
+static inline void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return ERR_PTR(-ENODEV);
}
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
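
With the void conversion there is no longer a dentry to check or propagate; a caller sketch (my_* names are illustrative only):

static const struct debugfs_reg32 my_regs[] = {
        { .name = "ctrl",   .offset = 0x00 },
        { .name = "status", .offset = 0x04 },
};
static struct debugfs_regset32 my_regset;

static void my_debugfs_init(struct dentry *parent, void __iomem *base)
{
        my_regset.regs  = my_regs;
        my_regset.nregs = ARRAY_SIZE(my_regs);
        my_regset.base  = base;
        debugfs_create_regset32("regs", 0444, parent, &my_regset);
}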
diff --git a/include/linux/device.h b/include/linux/device.h
index 0cd7c647c16c..fa04dfd22bbc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -798,6 +798,17 @@ static inline struct device_node *dev_of_node(struct device *dev)
return dev->of_node;
}
+static inline bool dev_has_sync_state(struct device *dev)
+{
+ if (!dev)
+ return false;
+ if (dev->driver && dev->driver->sync_state)
+ return true;
+ if (dev->bus && dev->bus->sync_state)
+ return true;
+ return false;
+}
+
/*
* High level routines for use by the bus drivers
*/
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index abf5459a5b9d..1ade486fc2bb 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -43,18 +43,6 @@ struct dma_buf_ops {
bool cache_sgt_mapping;
/**
- * @dynamic_mapping:
- *
- * If true the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called with the dma_resv object locked.
- *
- * If false the framework makes sure that the map/unmap_dma_buf
- * callbacks are always called without the dma_resv object locked.
- * Mutual exclusive with @cache_sgt_mapping.
- */
- bool dynamic_mapping;
-
- /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -94,13 +82,42 @@ struct dma_buf_ops {
void (*detach)(struct dma_buf *, struct dma_buf_attachment *);
/**
+ * @pin:
+ *
+ * This is called by dma_buf_pin and lets the exporter know that the
+ * DMA-buf can't be moved any more.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional and should only be used in limited use
+ * cases like scanout and not for temporary pin operations.
+ *
+ * Returns:
+ *
+ * 0 on success, negative error code on failure.
+ */
+ int (*pin)(struct dma_buf_attachment *attach);
+
+ /**
+ * @unpin:
+ *
+ * This is called by dma_buf_unpin and lets the exporter know that the
+ * DMA-buf can be moved again.
+ *
+ * This is called with the dmabuf->resv object locked and is mutually
+ * exclusive with @cache_sgt_mapping.
+ *
+ * This callback is optional.
+ */
+ void (*unpin)(struct dma_buf_attachment *attach);
+
+ /**
* @map_dma_buf:
*
* This is called by dma_buf_map_attachment() and is used to map a
* shared &dma_buf into device address space, and it is mandatory. It
- * can only be called if @attach has been called successfully. This
- * essentially pins the DMA buffer into place, and it cannot be moved
- * any more
+ * can only be called if @attach has been called successfully.
*
* This call may sleep, e.g. when the backing storage first needs to be
* allocated, or moved to a location suitable for all currently attached
@@ -141,9 +158,8 @@ struct dma_buf_ops {
*
* This is called by dma_buf_unmap_attachment() and should unmap and
* release the &sg_table allocated in @map_dma_buf, and it is mandatory.
- * It should also unpin the backing storage if this is the last mapping
- * of the DMA buffer, it the exporter supports backing storage
- * migration.
+ * For static dma_buf handling this might also unpin the backing
+ * storage if this is the last mapping of the DMA buffer.
*/
void (*unmap_dma_buf)(struct dma_buf_attachment *,
struct sg_table *,
@@ -312,6 +328,34 @@ struct dma_buf {
};
/**
+ * struct dma_buf_attach_ops - importer operations for an attachment
+ * @move_notify: [optional] notification that the DMA-buf is moving
+ *
+ * Attachment operations implemented by the importer.
+ */
+struct dma_buf_attach_ops {
+ /**
+ * @move_notify:
+ *
+ * If this callback is provided, the framework can avoid pinning the
+ * backing store while mappings exist.
+ *
+ * This callback is called with the lock of the reservation object
+ * associated with the dma_buf held and the mapping function must be
+ * called with this lock held as well. This makes sure that no mapping
+ * is created concurrently with an ongoing move operation.
+ *
+ * Mappings stay valid and are not directly affected by this callback.
+ * But the DMA-buf can now be in a different physical location, so all
+ * mappings should be destroyed and re-created as soon as possible.
+ *
+ * New mappings can be created after this callback returns, and will
+ * point to the new location of the DMA-buf.
+ */
+ void (*move_notify)(struct dma_buf_attachment *attach);
+};
+
+/**
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
@@ -319,8 +363,9 @@ struct dma_buf {
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
- * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
- * dma_resv lock held.
+ * @importer_ops: importer operations for this attachment; if provided,
+ * dma_buf_map/unmap_attachment() must be called with the dma_resv lock held.
+ * @importer_priv: importer specific attachment data.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -337,7 +382,8 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
- bool dynamic_mapping;
+ const struct dma_buf_attach_ops *importer_ops;
+ void *importer_priv;
void *priv;
};
@@ -399,7 +445,7 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
*/
static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
{
- return dmabuf->ops->dynamic_mapping;
+ return !!dmabuf->ops->pin;
}
/**
@@ -413,16 +459,19 @@ static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
static inline bool
dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
{
- return attach->dynamic_mapping;
+ return !!attach->importer_ops;
}
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
struct device *dev);
struct dma_buf_attachment *
dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
- bool dynamic_mapping);
+ const struct dma_buf_attach_ops *importer_ops,
+ void *importer_priv);
void dma_buf_detach(struct dma_buf *dmabuf,
struct dma_buf_attachment *attach);
+int dma_buf_pin(struct dma_buf_attachment *attach);
+void dma_buf_unpin(struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
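
Putting the new pieces together: an importer that can react to buffer moves passes a dma_buf_attach_ops with @move_notify to dma_buf_dynamic_attach() instead of the removed dynamic_mapping flag, while importers that cannot react keep using dma_buf_pin()/dma_buf_unpin() around their mappings. A hedged importer-side sketch (all my_* names are placeholders):

static void my_move_notify(struct dma_buf_attachment *attach)
{
        struct my_importer *imp = attach->importer_priv;

        /* old mappings stay valid but stale; remap lazily */
        my_invalidate_mappings(imp);
}

static const struct dma_buf_attach_ops my_attach_ops = {
        .move_notify = my_move_notify,
};

static int my_import(struct dma_buf *dmabuf, struct device *dev,
                     struct my_importer *imp)
{
        struct dma_buf_attachment *attach;

        attach = dma_buf_dynamic_attach(dmabuf, dev, &my_attach_ops, imp);
        if (IS_ERR(attach))
                return PTR_ERR(attach);
        imp->attach = attach;
        return 0;
}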
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index 9918a6c910c5..9613d796cfb1 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -207,7 +207,7 @@ struct hdmi_drm_infoframe {
u16 max_fall;
};
-int hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
+void hdmi_avi_infoframe_init(struct hdmi_avi_infoframe *frame);
ssize_t hdmi_avi_infoframe_pack(struct hdmi_avi_infoframe *frame, void *buffer,
size_t size);
ssize_t hdmi_avi_infoframe_pack_only(const struct hdmi_avi_infoframe *frame,
diff --git a/include/linux/hid.h b/include/linux/hid.h
index cd41f209043f..875f71132b14 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -492,7 +492,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
+#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index ef1cbb5f454f..33d379602314 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -22,12 +22,22 @@ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+#else
+#define icmpv6_ndo_send icmpv6_send
+#endif
+
#else
static inline void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
+}
+static inline void icmpv6_ndo_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+{
}
#endif
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 94f047a8a845..d7c403d0dd27 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
BUG();
}
-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{
return -EINVAL;
}
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index b2d47571ab67..8d062e86d954 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -192,7 +192,7 @@ enum {
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
/* Irq domain name was allocated in __irq_domain_add() */
- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
+ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index b2bb44f87f5a..d1fb05135665 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -66,33 +66,15 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
*/
#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
-/* convert a timespec to ktime_t format: */
-static inline ktime_t timespec_to_ktime(struct timespec ts)
-{
- return ktime_set(ts.tv_sec, ts.tv_nsec);
-}
-
/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
-/* convert a timeval to ktime_t format: */
-static inline ktime_t timeval_to_ktime(struct timeval tv)
-{
- return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
-}
-
-/* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt) ns_to_timespec((kt))
-
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
-/* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt) ns_to_timeval((kt))
-
/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
@@ -216,25 +198,6 @@ static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
- * ktime_to_timespec_cond - convert a ktime_t variable to timespec
- * format only if the variable contains data
- * @kt: the ktime_t variable to convert
- * @ts: the timespec variable to store the result in
- *
- * Return: %true if there was a successful conversion, %false if kt was 0.
- */
-static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
- struct timespec *ts)
-{
- if (kt) {
- *ts = ktime_to_timespec(kt);
- return true;
- } else {
- return false;
- }
-}
-
-/**
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
* format only if the variable contains data
* @kt: the ktime_t variable to convert
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index e89eb67356cb..bcb9b2ac0791 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -889,6 +889,8 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_post_init_vm(struct kvm *kvm);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
@@ -1342,7 +1344,7 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
struct kvm_vcpu *kvm_get_running_vcpu(void);
-struct kvm_vcpu __percpu **kvm_get_running_vcpus(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index ff8c9d527bb4..bfdf41537cf1 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -688,7 +688,10 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
- u8 reserved_at_3[0x1d];
+ u8 reserved_at_3[0x4];
+ u8 sw_owner_reformat_supported[0x1];
+ u8 reserved_at_8[0x18];
+
u8 encap_general_header[0x1];
u8 reserved_at_21[0xa];
u8 log_max_packet_reformat_context[0x5];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 52269e56c514..c54fb96cb1e6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2715,6 +2715,10 @@ static inline bool debug_pagealloc_enabled_static(void)
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+/*
+ * When called in DEBUG_PAGEALLOC context, the call should most likely be
+ * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
+ */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
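
A sketch of the guard the new comment asks for (illustrative caller context):

static void my_unmap_for_debug(struct page *page, int numpages)
{
        if (debug_pagealloc_enabled_static())
                kernel_map_pages(page, numpages, 0);
}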
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index a9c6b5c61d27..6c3f7032e8d9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -72,6 +72,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+#define MAX_NEST_DEV 8
+
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
@@ -1616,6 +1618,7 @@ enum netdev_priv_flags {
* and drivers will need to set them appropriately.
*
* @mpls_features: Mask of features inheritable by MPLS
+ * @gso_partial_features: value(s) from NETIF_F_GSO\*
*
* @ifindex: interface index
* @group: The group the device belongs to
@@ -1640,8 +1643,11 @@ enum netdev_priv_flags {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
+ * @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
* discovery handling. Necessary for e.g. 6LoWPAN.
+ * @xfrmdev_ops: Transformation offload operations
+ * @tlsdev_ops: Transport Layer Security offload operations
* @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
@@ -1680,6 +1686,7 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
+ * @name_assign_type: network interface name assignment type
* @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
@@ -1702,6 +1709,9 @@ enum netdev_priv_flags {
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
+ * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
+ * device struct
+ * @mpls_ptr: mpls_dev struct pointer
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1710,6 +1720,8 @@ enum netdev_priv_flags {
* @num_rx_queues: Number of RX queues
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
+ * @xdp_prog: XDP sockets filter program pointer
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
@@ -1731,10 +1743,14 @@ enum netdev_priv_flags {
* @qdisc: Root qdisc from userspace point of view
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
+ * @xdp_bulkq: XDP device bulk queue
+ * @xps_cpus_map: all CPUs map for XPS device
+ * @xps_rxqs_map: all RXQs map for XPS device
*
* @xps_maps: XXX: need comments on this one
* @miniq_egress: clsact qdisc specific data for
* egress processing
+ * @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -3548,7 +3564,7 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
}
/**
- * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
* @n: CPU/Rx queue index
* @src1p: the first CPUs/Rx queues mask pointer
* @src2p: the second CPUs/Rx queues mask pointer
@@ -4375,11 +4391,8 @@ void *netdev_lower_get_next(struct net_device *dev,
ldev; \
ldev = netdev_lower_get_next(dev, &(iter)))
-struct net_device *netdev_all_lower_get_next(struct net_device *dev,
+struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
- struct list_head **iter);
-
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
void *data),
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 908d38dbcb91..5448c8b443db 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -121,6 +121,7 @@ struct ip_set_ext {
u32 timeout;
u8 packets_op;
u8 bytes_op;
+ bool target;
};
struct ip_set;
@@ -187,6 +188,14 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+ /* Region-locking is used */
+ bool region_lock;
+};
+
+struct ip_set_region {
+ spinlock_t lock; /* Region lock */
+ size_t ext_size; /* Size of the dynamic extensions */
+ u32 elements; /* Number of elements vs timeout */
};
/* The core set type structure */
@@ -501,7 +510,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
}
#define IP_SET_INIT_KEXT(skb, opt, set) \
- { .bytes = (skb)->len, .packets = 1, \
+ { .bytes = (skb)->len, .packets = 1, .target = true,\
.timeout = ip_set_adt_opt_timeout(opt, set) }
#define IP_SET_INIT_UEXT(set) \
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index a5f8f03ecd59..5d5b91e54f73 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -337,35 +337,17 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
return NFS_SERVER(inode)->caps & cap;
}
-static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
-{
- dentry->d_time = verf;
-}
-
/**
* nfs_save_change_attribute - Returns the inode attribute change cookie
* @dir - pointer to parent directory inode
- * The "change attribute" is updated every time we finish an operation
- * that will result in a metadata change on the server.
+ * The "cache change attribute" is updated when we need to revalidate
+ * our dentry cache after a directory was seen to change on the server.
*/
static inline unsigned long nfs_save_change_attribute(struct inode *dir)
{
return NFS_I(dir)->cache_change_attribute;
}
-/**
- * nfs_verify_change_attribute - Detects NFS remote directory changes
- * @dir - pointer to parent directory inode
- * @chattr - previously saved change attribute
- * Return "false" if the verifiers doesn't match the change attribute.
- * This would usually indicate that the directory contents have changed on
- * the server, and that any dentries need revalidating.
- */
-static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
-{
- return chattr == NFS_I(dir)->cache_change_attribute;
-}
-
/*
* linux/fs/nfs/inode.c
*/
@@ -495,6 +477,10 @@ extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
+extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
+#if IS_ENABLED(CONFIG_NFS_V4)
+extern void nfs_clear_verifier_delegated(struct inode *inode);
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
struct nfs_fh *fh, struct nfs_fattr *fattr,
struct nfs4_label *label);
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index d5765039652a..ae58fad7f1e0 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -29,7 +29,8 @@ struct pipe_buffer {
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
- * @wait: reader/writer wait point in case of empty/full pipe
+ * @rd_wait: reader wait point in case of empty pipe
+ * @wr_wait: writer wait point in case of full pipe
* @head: The point of buffer production
* @tail: The point of buffer consumption
* @max_usage: The maximum number of slots that may be used in the ring
diff --git a/include/linux/platform_data/simplefb.h b/include/linux/platform_data/simplefb.h
index 4f733a411d18..ca8337695c2a 100644
--- a/include/linux/platform_data/simplefb.h
+++ b/include/linux/platform_data/simplefb.h
@@ -10,7 +10,7 @@
#include <drm/drm_fourcc.h>
#include <linux/fb.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
/* format array, use it to initialize a "struct simplefb_format" array */
#define SIMPLEFB_FORMATS \
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 0bf9fddb8306..3b400b1919a9 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -11,6 +11,7 @@ struct omap2_mcspi_platform_config {
unsigned short num_cs;
unsigned int regs_offset;
unsigned int pin_dir:1;
+ size_t max_xfer_len;
};
struct omap2_mcspi_device_config {
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index e5b752027a03..9670b54b484a 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -145,6 +145,13 @@ static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
}
}
+/* after this, hlist_nulls_del() will work */
+static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
+{
+ n->pprev = &n->next;
+ n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
+}
+
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
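
A short sketch of what the fake self-link buys (my_entry is illustrative): a node that was never hashed can still go through the common hlist_nulls_del() teardown path.

struct my_entry {
        struct hlist_nulls_node node;
};

static void my_entry_init(struct my_entry *e)
{
        hlist_nulls_add_fake(&e->node); /* pprev now points at the node itself */
}

static void my_entry_free(struct my_entry *e)
{
        hlist_nulls_del(&e->node);      /* safe whether hashed or fake */
        kfree(e);
}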
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 1abe91ff6e4a..6d67e9a5af6b 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_nohz_start(void);
+void calc_load_nohz_remote(struct rq *rq);
void calc_load_nohz_stop(void);
#else
static inline void calc_load_nohz_start(void) { }
+static inline void calc_load_nohz_remote(struct rq *rq) { }
static inline void calc_load_nohz_stop(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index ca8806b69388..5b50278c4bc8 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -611,9 +611,15 @@ typedef unsigned char *sk_buff_data_t;
* @next: Next buffer in list
* @prev: Previous buffer in list
* @tstamp: Time we arrived/left
+ * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
+ * for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
+ * @list: queue head
* @sk: Socket we are owned by
+ * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+ * fragmentation management
* @dev: Device we arrived on/are leaving by
+ * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
* @sp: the security path, used for xfrm
@@ -632,6 +638,9 @@ typedef unsigned char *sk_buff_data_t;
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @inner_protocol_type: whether the inner protocol is
+ * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
+ * @remcsum_offload: remote checksum offload is enabled
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
@@ -650,6 +659,8 @@ typedef unsigned char *sk_buff_data_t;
* @tc_index: Traffic control index
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
+ * @head_frag: skb was allocated from page fragments,
+ * not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
@@ -660,15 +671,28 @@ typedef unsigned char *sk_buff_data_t;
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @encapsulation: indicates the inner headers in the skbuff are valid
+ * @encap_hdr_csum: software checksum is needed
+ * @csum_valid: checksum is already valid
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
+ * @csum_complete_sw: checksum was completed by software
+ * @csum_level: indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as
+ * CHECKSUM_UNNECESSARY (max 3)
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @napi_id: id of the NAPI struct this skb came from
+ * @sender_cpu: (aka @napi_id) source CPU in XPS
* @secmark: security marking
* @mark: Generic packet mark
+ * @reserved_tailroom: (aka @mark) number of bytes of free space available
+ * at the tail of an sk_buff
+ * @vlan_present: VLAN tag is present
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
+ * @inner_ipproto: (aka @inner_protocol) stores ipproto when
+ * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
* @inner_transport_header: Inner transport layer header (encapsulation)
* @inner_network_header: Network layer header (encapsulation)
* @inner_mac_header: Link layer header (encapsulation)
@@ -750,7 +774,9 @@ struct sk_buff {
#endif
#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+ /* private: */
__u8 __cloned_offset[0];
+ /* public: */
__u8 cloned:1,
nohdr:1,
fclone:2,
@@ -775,7 +801,9 @@ struct sk_buff {
#endif
#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+ /* private: */
__u8 __pkt_type_offset[0];
+ /* public: */
__u8 pkt_type:3;
__u8 ignore_df:1;
__u8 nf_trace:1;
@@ -798,7 +826,9 @@ struct sk_buff {
#define PKT_VLAN_PRESENT_BIT 0
#endif
#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
+ /* private: */
__u8 __pkt_vlan_present_offset[0];
+ /* public: */
__u8 vlan_present:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 4a230c2f1c31..2b2055b035ee 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
int (*prepare_late)(void);
- void (*wake)(void);
+ bool (*wake)(void);
void (*restore_early)(void);
void (*restore)(void);
void (*end)(void);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cde3dc18e21a..046bb94bd4d6 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+
#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;
extern phys_addr_t io_tlb_start, io_tlb_end;
@@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
@@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
return false;
}
-static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
- dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return false;
-}
static inline void swiotlb_exit(void)
{
}
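
A caller-side sketch of the new prototype (assumed dma-direct-style context; the DMA_MAPPING_ERROR convention on failure is an assumption here): the bounce address comes back as the return value instead of through pointer out-parameters.

static dma_addr_t my_map(struct device *dev, phys_addr_t phys, size_t size)
{
        dma_addr_t dma;

        dma = swiotlb_map(dev, phys, size, DMA_TO_DEVICE, 0);
        if (dma == DMA_MAPPING_ERROR)
                dev_warn(dev, "swiotlb bounce buffer exhausted\n");
        return dma;
}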
diff --git a/include/linux/time32.h b/include/linux/time32.h
index cad4c3186002..cf9320cd2d0b 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -12,8 +12,6 @@
#include <linux/time64.h>
#include <linux/timex.h>
-#define TIME_T_MAX (__kernel_old_time_t)((1UL << ((sizeof(__kernel_old_time_t) << 3) - 1)) - 1)
-
typedef s32 old_time32_t;
struct old_timespec32 {
@@ -73,162 +71,12 @@ struct __kernel_timex;
int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *);
int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *);
-#if __BITS_PER_LONG == 64
-
-/* timespec64 is defined as timespec here */
-static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
-{
- return *(const struct timespec *)&ts64;
-}
-
-static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
-{
- return *(const struct timespec64 *)&ts;
-}
-
-#else
-static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
-{
- struct timespec ret;
-
- ret.tv_sec = (time_t)ts64.tv_sec;
- ret.tv_nsec = ts64.tv_nsec;
- return ret;
-}
-
-static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
-{
- struct timespec64 ret;
-
- ret.tv_sec = ts.tv_sec;
- ret.tv_nsec = ts.tv_nsec;
- return ret;
-}
-#endif
-
-static inline int timespec_equal(const struct timespec *a,
- const struct timespec *b)
-{
- return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
-}
-
-/*
- * lhs < rhs: return <0
- * lhs == rhs: return 0
- * lhs > rhs: return >0
- */
-static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_nsec - rhs->tv_nsec;
-}
-
-/*
- * Returns true if the timespec is norm, false if denorm:
- */
-static inline bool timespec_valid(const struct timespec *ts)
-{
- /* Dates before 1970 are bogus */
- if (ts->tv_sec < 0)
- return false;
- /* Can't have more nanoseconds then a second */
- if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
- return false;
- return true;
-}
-
-/**
- * timespec_to_ns - Convert timespec to nanoseconds
- * @ts: pointer to the timespec variable to be converted
- *
- * Returns the scalar nanosecond representation of the timespec
- * parameter.
- */
-static inline s64 timespec_to_ns(const struct timespec *ts)
-{
- return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
-}
-
/**
- * ns_to_timespec - Convert nanoseconds to timespec
- * @nsec: the nanoseconds value to be converted
- *
- * Returns the timespec representation of the nsec parameter.
- */
-extern struct timespec ns_to_timespec(const s64 nsec);
-
-/**
- * timespec_add_ns - Adds nanoseconds to a timespec
- * @a: pointer to timespec to be incremented
- * @ns: unsigned nanoseconds value to be added
- *
- * This must always be inlined because its used from the x86-64 vdso,
- * which cannot call other kernel functions.
- */
-static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
-{
- a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
- a->tv_nsec = ns;
-}
-
-static inline unsigned long mktime(const unsigned int year,
- const unsigned int mon, const unsigned int day,
- const unsigned int hour, const unsigned int min,
- const unsigned int sec)
-{
- return mktime64(year, mon, day, hour, min, sec);
-}
-
-static inline bool timeval_valid(const struct timeval *tv)
-{
- /* Dates before 1970 are bogus */
- if (tv->tv_sec < 0)
- return false;
-
- /* Can't have more microseconds then a second */
- if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
- return false;
-
- return true;
-}
-
-/**
- * timeval_to_ns - Convert timeval to nanoseconds
- * @ts: pointer to the timeval variable to be converted
- *
- * Returns the scalar nanosecond representation of the timeval
- * parameter.
- */
-static inline s64 timeval_to_ns(const struct timeval *tv)
-{
- return ((s64) tv->tv_sec * NSEC_PER_SEC) +
- tv->tv_usec * NSEC_PER_USEC;
-}
-
-/**
- * ns_to_timeval - Convert nanoseconds to timeval
+ * ns_to_kernel_old_timeval - Convert nanoseconds to timeval
* @nsec: the nanoseconds value to be converted
*
* Returns the timeval representation of the nsec parameter.
*/
-extern struct timeval ns_to_timeval(const s64 nsec);
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
-/*
- * Old names for the 32-bit time_t interfaces, these will be removed
- * when everything uses the new names.
- */
-#define compat_time_t old_time32_t
-#define compat_timeval old_timeval32
-#define compat_timespec old_timespec32
-#define compat_itimerspec old_itimerspec32
-#define ns_to_compat_timeval ns_to_old_timeval32
-#define get_compat_itimerspec64 get_old_itimerspec32
-#define put_compat_itimerspec64 put_old_itimerspec32
-#define compat_get_timespec64 get_old_timespec32
-#define compat_put_timespec64 put_old_timespec32
-
#endif
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index cc59cc9e0e84..266017fc9ee9 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -11,36 +11,4 @@ static inline unsigned long get_seconds(void)
return ktime_get_real_seconds();
}
-static inline void getnstimeofday(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
-static inline void ktime_get_ts(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
-static inline void getrawmonotonic(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_raw_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
-static inline void getboottime(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- getboottime64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
#endif
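
Every wrapper removed here was a thin conversion around a timespec64-based call, so callers move to those directly; the mapping, taken from the deleted bodies:

static void my_sample_clocks(void)
{
        struct timespec64 ts;

        ktime_get_real_ts64(&ts);       /* replaces getnstimeofday() */
        ktime_get_ts64(&ts);            /* replaces ktime_get_ts() */
        ktime_get_raw_ts64(&ts);        /* replaces getrawmonotonic() */
        getboottime64(&ts);             /* replaces getboottime() */
}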
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index af2c85d3a1dd..6c7a10a6d71e 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -440,7 +440,7 @@ struct synth_event_trace_state {
struct synth_event *event;
unsigned int cur_field;
unsigned int n_u64;
- bool enabled;
+ bool disabled;
bool add_next;
bool add_name;
};
diff --git a/include/linux/tty.h b/include/linux/tty.h
index bfa4e2ee94a9..bd5fe0e907e8 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -225,6 +225,8 @@ struct tty_port_client_operations {
void (*write_wakeup)(struct tty_port *port);
};
+extern const struct tty_port_client_operations tty_port_default_client_ops;
+
struct tty_port {
struct tty_bufhead buf; /* Locked internally */
struct tty_struct *tty; /* Back pointer */
diff --git a/include/linux/types.h b/include/linux/types.h
index eb870ad42919..d3021c879179 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -65,11 +65,6 @@ typedef __kernel_ssize_t ssize_t;
typedef __kernel_ptrdiff_t ptrdiff_t;
#endif
-#ifndef _TIME_T
-#define _TIME_T
-typedef __kernel_old_time_t time_t;
-#endif
-
#ifndef _CLOCK_T
#define _CLOCK_T
typedef __kernel_clock_t clock_t;
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index a1be64c9940f..22c1f579afe3 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -69,4 +69,7 @@
/* Hub needs extra delay after resetting its port. */
#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+/* device has blacklisted endpoints */
+#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index d93017a7ce5c..628383915827 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/in6.h>
#include <linux/siphash.h>
+#include <linux/string.h>
#include <uapi/linux/if_ether.h>
struct sk_buff;
@@ -33,7 +34,6 @@ enum flow_dissect_ret {
/**
* struct flow_dissector_key_basic:
- * @thoff: Transport header offset
* @n_proto: Network header protocol (eg. IPv4/IPv6)
* @ip_proto: Transport header protocol (eg. TCP/UDP)
*/
@@ -349,4 +349,12 @@ struct bpf_flow_dissector {
void *data_end;
};
+static inline void
+flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
+ struct flow_dissector_key_basic *key_basic)
+{
+ memset(key_control, 0, sizeof(*key_control));
+ memset(key_basic, 0, sizeof(*key_basic));
+}
+
#endif
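
The helper simply zeroes both key structures; a sketch of the intended call pattern (surrounding dissection code assumed):

        struct flow_dissector_key_control ctl;
        struct flow_dissector_key_basic basic;

        flow_dissector_init_keys(&ctl, &basic);
        /* fields a partial dissection leaves untouched now read as zero */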
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 5d4bfdba9adf..9ac2d2672a93 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -43,6 +43,12 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
}
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+#else
+#define icmp_ndo_send icmp_send
+#endif
+
int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
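
With CONFIG_NF_NAT the ndo variants (here and in the icmpv6.h hunk above) un-NAT the packet before generating the error; without it they fall back to the plain senders. The intended call site is a tunnel driver's transmit error path — a sketch with illustrative my_* helpers:

static netdev_tx_t my_xmit(struct sk_buff *skb, struct net_device *dev)
{
        unsigned int mtu = my_path_mtu(dev);            /* assumed helper */

        if (skb->len > mtu) {
                icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
                              htonl(mtu));
                kfree_skb(skb);
                return NETDEV_TX_OK;
        }
        return my_do_xmit(skb, dev);                    /* assumed helper */
}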
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index aa145808e57a..77e6b5a83b06 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -1004,12 +1004,11 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
struct ieee80211_tx_info {
/* common information */
u32 flags;
- u8 band;
-
- u8 hw_queue;
-
- u16 ack_frame_id:6;
- u16 tx_time_est:10;
+ u32 band:3,
+ ack_frame_id:13,
+ hw_queue:4,
+ tx_time_est:10;
+ /* 2 free bits */
union {
struct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 02162b0378f7..328564525526 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -117,19 +117,26 @@ typedef __u64 __bitwise __addrpair;
* struct sock_common - minimal network layer representation of sockets
* @skc_daddr: Foreign IPv4 addr
* @skc_rcv_saddr: Bound local IPv4 addr
+ * @skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
* @skc_hash: hash value used with various protocol lookup tables
* @skc_u16hashes: two u16 hash values used by UDP lookup tables
* @skc_dport: placeholder for inet_dport/tw_dport
* @skc_num: placeholder for inet_num/tw_num
+ * @skc_portpair: __u32 union of @skc_dport & @skc_num
* @skc_family: network address family
* @skc_state: Connection state
* @skc_reuse: %SO_REUSEADDR setting
* @skc_reuseport: %SO_REUSEPORT setting
+ * @skc_ipv6only: socket is IPV6 only
+ * @skc_net_refcnt: socket is using net ref counting
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
* @skc_prot: protocol handlers inside a network family
* @skc_net: reference to the network namespace of this socket
+ * @skc_v6_daddr: IPV6 destination address
+ * @skc_v6_rcv_saddr: IPV6 source address
+ * @skc_cookie: socket's cookie value
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_tx_queue_mapping: tx queue number for this connection
@@ -137,7 +144,15 @@ typedef __u64 __bitwise __addrpair;
* @skc_flags: place holder for sk_flags
* %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
+ * @skc_listener: connection request listener socket (aka rsk_listener)
+ * [union with @skc_flags]
+ * @skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
+ * [union with @skc_flags]
* @skc_incoming_cpu: record/match cpu processing incoming packets
+ * @skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
+ * [union with @skc_incoming_cpu]
+ * @skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
+ * [union with @skc_incoming_cpu]
* @skc_refcnt: reference count
*
* This is the minimal network layer representation of sockets, the header
@@ -245,6 +260,7 @@ struct bpf_sk_storage;
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
+ * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -265,6 +281,8 @@ struct bpf_sk_storage;
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
+ * @sk_route_forced_caps: static, forced route capabilities
+ * (set in tcp_init_sock())
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
@@ -303,6 +321,8 @@ struct bpf_sk_storage;
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
+ * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
+ * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@ -313,11 +333,14 @@ struct bpf_sk_storage;
* @sk_write_space: callback to indicate there is bf sending space available
* @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
* @sk_backlog_rcv: callback to process the backlog
+ * @sk_validate_xmit_skb: ptr to an optional validate function
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
+ * @sk_bpf_storage: ptr to cache and control for bpf_sk_storage
* @sk_rcu: used during RCU grace period
* @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+ * @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
*/
struct sock {
@@ -393,7 +416,9 @@ struct sock {
struct sk_filter __rcu *sk_filter;
union {
struct socket_wq __rcu *sk_wq;
+ /* private: */
struct socket_wq *sk_wq_raw;
+ /* public: */
};
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
@@ -2017,7 +2042,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
* sk_wmem_alloc_get - returns write allocations
* @sk: socket
*
- * Returns sk_wmem_alloc minus initial offset of one
+ * Return: sk_wmem_alloc minus initial offset of one
*/
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
@@ -2028,7 +2053,7 @@ static inline int sk_wmem_alloc_get(const struct sock *sk)
* sk_rmem_alloc_get - returns read allocations
* @sk: socket
*
- * Returns sk_rmem_alloc
+ * Return: sk_rmem_alloc
*/
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
@@ -2039,7 +2064,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk)
* sk_has_allocations - check if allocations are outstanding
* @sk: socket
*
- * Returns true if socket has write or read allocations
+ * Return: true if socket has write or read allocations
*/
static inline bool sk_has_allocations(const struct sock *sk)
{
@@ -2050,7 +2075,7 @@ static inline bool sk_has_allocations(const struct sock *sk)
* skwq_has_sleeper - check if there are any waiting processes
* @wq: struct socket_wq
*
- * Returns true if socket_wq has waiting processes
+ * Return: true if socket_wq has waiting processes
*
* The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
* barrier call. They were added due to the race found within the tcp code.
@@ -2238,6 +2263,9 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
* gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
* inside other socket operations and end up recursing into sk_page_frag()
* while it's already in use.
+ *
+ * Return: a per task page_frag if context allows that,
+ * otherwise a per socket one.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
@@ -2432,6 +2460,7 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
+DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2440,7 +2469,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
* This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok.
*/
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
diff --git a/include/scsi/iscsi_proto.h b/include/scsi/iscsi_proto.h
index 533f56733ba8..b71b5c4f418c 100644
--- a/include/scsi/iscsi_proto.h
+++ b/include/scsi/iscsi_proto.h
@@ -627,7 +627,6 @@ struct iscsi_reject {
#define ISCSI_REASON_BOOKMARK_INVALID 9
#define ISCSI_REASON_BOOKMARK_NO_RESOURCES 10
#define ISCSI_REASON_NEGOTIATION_RESET 11
-#define ISCSI_REASON_WAITING_FOR_LOGOUT 12
/* Max. number of Key=Value pairs in a text message */
#define MAX_KEY_VALUE_PAIRS 8192
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 40ab20439fee..a36b7227a15a 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
struct list_head list; /* list of all substream for given stream */
int stream; /* direction */
int number; /* substream number */
- unsigned int opened: 1, /* open flag */
- append: 1, /* append flag (merge more streams) */
- active_sensing: 1; /* send active sensing when close */
+ bool opened; /* open flag */
+ bool append; /* append flag (merge more streams) */
+ bool active_sensing; /* send active sensing when close */
int use_count; /* use counter (for output) */
size_t bytes;
struct snd_rawmidi *rmidi;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 2a306c6f3fbc..1b6afbc1a4ed 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -392,8 +392,6 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol);
-int snd_soc_dapm_put_enum_double_locked(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol);
int snd_soc_dapm_info_pin_switch(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_info *uinfo);
int snd_soc_dapm_get_pin_switch(struct snd_kcontrol *kcontrol,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f0e4f36f83bf..8a2266676b2d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1157,7 +1157,7 @@ struct snd_soc_pcm_runtime {
((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
(i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
diff --git a/include/uapi/asm-generic/posix_types.h b/include/uapi/asm-generic/posix_types.h
index 2f9c80595ba7..b5f7594eee7a 100644
--- a/include/uapi/asm-generic/posix_types.h
+++ b/include/uapi/asm-generic/posix_types.h
@@ -87,7 +87,9 @@ typedef struct {
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
typedef __kernel_long_t __kernel_old_time_t;
+#ifndef __KERNEL__
typedef __kernel_long_t __kernel_time_t;
+#endif
typedef long long __kernel_time64_t;
typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index ac3879829bb5..65f69723cbdc 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -125,9 +125,10 @@ extern "C" {
/* Flag that BO sharing will be explicitly synchronized */
#define AMDGPU_GEM_CREATE_EXPLICIT_SYNC (1 << 7)
/* Flag that indicates allocating MQD gart on GFX9, where the mtype
- * for the second page onward should be set to NC.
+ * for the second page onward should be set to NC. It should never
+ * be used by user space applications.
*/
-#define AMDGPU_GEM_CREATE_MQD_GFX9 (1 << 8)
+#define AMDGPU_GEM_CREATE_CP_MQD_GFX9 (1 << 8)
/* Flag that BO may contain sensitive data that must be wiped before
* releasing the memory
*/
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 868bf7996c0f..808b48a93330 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -948,6 +948,8 @@ extern "C" {
#define DRM_IOCTL_SYNCOBJ_TRANSFER DRM_IOWR(0xCC, struct drm_syncobj_transfer)
#define DRM_IOCTL_SYNCOBJ_TIMELINE_SIGNAL DRM_IOWR(0xCD, struct drm_syncobj_timeline_array)
+#define DRM_IOCTL_MODE_GETFB2 DRM_IOWR(0xCE, struct drm_mode_fb_cmd2)
+
/**
* Device specific ioctls should only be in their respective headers
* The device specific ioctl range is from 0x40 to 0x9f.
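(For context: struct drm_mode_fb_cmd2 — already used by ADDFB2 — carries per-plane handles, pitches and offsets plus a format modifier, so the new GETFB2 ioctl can report multi-planar and modifier-using framebuffers that the legacy GETFB/drm_mode_fb_cmd cannot.)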
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 829c0a48577f..2813e579b480 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1619,6 +1619,27 @@ struct drm_i915_gem_context_param {
* By default, new contexts allow persistence.
*/
#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
+
+/*
+ * I915_CONTEXT_PARAM_RINGSIZE:
+ *
+ * Sets the size of the CS ringbuffer to use for logical ring contexts. This
+ * applies a limit on how many batches can be queued to HW before the caller
+ * is blocked due to lack of space for more commands.
+ *
+ * Can only reliably be set prior to first use, i.e. during
+ * construction. At any later point, the current execution must be flushed as
+ * the ring can only be changed while the context is idle. Note, the ringsize
+ * can be specified as a constructor property, see
+ * I915_CONTEXT_CREATE_EXT_SETPARAM, but can also be set later if required.
+ *
+ * Only applies to the current set of engines and is lost when those engines
+ * are replaced by a new mapping (see I915_CONTEXT_PARAM_ENGINES).
+ *
+ * Must be between 4 and 512 KiB, in page-size (4 KiB) increments.
+ * Default is 16 KiB.
+ */
+#define I915_CONTEXT_PARAM_RINGSIZE 0xc
/* Must be kept compact -- no holes and well documented */
__u64 value;
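For illustration only, a minimal userspace sketch (untested, error handling elided) of setting this parameter through the existing DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM ioctl on an open render node:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <drm/i915_drm.h>

    /* Sketch: request a 64 KiB CS ring on an idle context. */
    static int set_ring_size(int fd, uint32_t ctx_id)
    {
            struct drm_i915_gem_context_param p;

            memset(&p, 0, sizeof(p));
            p.ctx_id = ctx_id;
            p.param  = I915_CONTEXT_PARAM_RINGSIZE;
            p.value  = 64 * 1024;   /* 4 KiB..512 KiB, page aligned */

            return ioctl(fd, DRM_IOCTL_I915_GEM_CONTEXT_SETPARAM, &p);
    }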
diff --git a/include/uapi/drm/lima_drm.h b/include/uapi/drm/lima_drm.h
index 95a00fb867e6..1ec58d652a5a 100644
--- a/include/uapi/drm/lima_drm.h
+++ b/include/uapi/drm/lima_drm.h
@@ -32,12 +32,19 @@ struct drm_lima_get_param {
__u64 value; /* out, parameter value */
};
+/*
+ * A heap buffer dynamically grows its backing memory when a GP task fails
+ * due to lack of heap memory. The size field of a heap buffer is an upper
+ * bound on the backing memory and can be set to a fairly large value.
+ */
+#define LIMA_BO_FLAG_HEAP (1 << 0)
+
/**
* create a buffer for use by the GPU
*/
struct drm_lima_gem_create {
__u32 size; /* in, buffer size */
- __u32 flags; /* in, currently no flags, must be zero */
+ __u32 flags; /* in, buffer flags */
__u32 handle; /* out, GEM buffer handle */
__u32 pad; /* pad, must be zero */
};
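A usage sketch (assumptions: the DRM_IOCTL_LIMA_GEM_CREATE wrapper defined elsewhere in this header; use_heap_bo() is hypothetical). With the heap flag set, the kernel may grow the BO's backing memory on GP faults up to the given size:

    struct drm_lima_gem_create create = {
            .size  = 16 * 1024 * 1024,      /* upper bound for heap growth */
            .flags = LIMA_BO_FLAG_HEAP,
    };

    if (ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &create) == 0)
            use_heap_bo(create.handle);     /* handle names the new BO */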
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index f1d74a2bd234..22f235260a3a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -1045,9 +1045,9 @@ union bpf_attr {
* supports redirection to the egress interface, and accepts no
* flag at all.
*
- * The same effect can be attained with the more generic
- * **bpf_redirect_map**\ (), which requires specific maps to be
- * used but offers better performance.
+ * The same effect can also be attained with the more generic
+ * **bpf_redirect_map**\ (), which uses a BPF map to store the
+ * redirect target instead of providing it directly to the helper.
* Return
* For XDP, the helper returns **XDP_REDIRECT** on success or
* **XDP_ABORTED** on error. For other program types, the values
@@ -1611,13 +1611,11 @@ union bpf_attr {
* the caller. Any higher bits in the *flags* argument must be
* unset.
*
- * When used to redirect packets to net devices, this helper
- * provides a high performance increase over **bpf_redirect**\ ().
- * This is due to various implementation details of the underlying
- * mechanisms, one of which is the fact that **bpf_redirect_map**\
- * () tries to send packet as a "bulk" to the device.
+ * See also bpf_redirect(), which only supports redirecting to an
+ * ifindex, but doesn't require a map to do so.
* Return
- * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
+ * **XDP_REDIRECT** on success, or the value of the two lower bits
+ * of the *flags* argument on error.
*
* int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
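To make the new fallback semantics concrete, a minimal XDP sketch (libbpf BTF-style map definition assumed; untested): with XDP_PASS in the low bits of *flags*, a failed devmap lookup now hands the packet to the stack instead of aborting:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 1);
            __type(key, __u32);
            __type(value, __u32);
    } tx_port SEC(".maps");

    SEC("xdp")
    int redirect_prog(struct xdp_md *ctx)
    {
            /* XDP_REDIRECT on success, XDP_PASS if key 0 is unpopulated */
            return bpf_redirect_map(&tx_port, 0, XDP_PASS);
    }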
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 2df8ceca1f9b..6622912c2342 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 41
+#define DM_VERSION_MINOR 42
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2019-09-16)"
+#define DM_VERSION_EXTRA "-ioctl (2020-02-27)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 336014bf8868..b6f0bb1dc799 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -97,6 +97,15 @@ enum ip_conntrack_status {
IPS_UNTRACKED_BIT = 12,
IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+#ifdef __KERNEL__
+ /* Re-purposed for in-kernel use:
+ * Tags a conntrack entry that clashed with an existing entry
+ * on insert.
+ */
+ IPS_NAT_CLASH_BIT = IPS_UNTRACKED_BIT,
+ IPS_NAT_CLASH = IPS_UNTRACKED,
+#endif
+
/* Conntrack got a helper explicitly attached via CT target. */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
@@ -110,7 +119,8 @@ enum ip_conntrack_status {
*/
IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
- IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
+ IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_UNTRACKED |
+ IPS_OFFLOAD),
__IPS_MAX_BIT = 15,
};
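(Re-using the IPS_UNTRACKED bit for IPS_NAT_CLASH is presumably safe because untracked entries are never inserted into the conntrack table, so the two meanings can never apply to the same entry; adding IPS_UNTRACKED to IPS_UNCHANGEABLE_MASK above also keeps ctnetlink from toggling the bit from userspace.)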
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index fa7f97da5b76..7272f85d6d6a 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -135,9 +135,9 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
static __always_inline unsigned long __swab(const unsigned long y)
{
-#if BITS_PER_LONG == 64
+#if __BITS_PER_LONG == 64
return __swab64(y);
-#else /* BITS_PER_LONG == 32 */
+#else /* __BITS_PER_LONG == 32 */
return __swab32(y);
#endif
}
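(The underscore is the point of this fix: swab.h is a uapi header, and userspace only sees __BITS_PER_LONG from <asm/bitsperlong.h>. BITS_PER_LONG is kernel-internal and undefined there, so the old "#if BITS_PER_LONG == 64" evaluated as 0 == 64 and 64-bit userspace builds silently took the __swab32() branch.)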
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index a655aa28dc6e..4f4b6e48e01c 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -5,6 +5,7 @@
#include <linux/types.h>
#include <linux/time_types.h>
+#ifndef __KERNEL__
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
struct timespec {
@@ -18,6 +19,17 @@ struct timeval {
__kernel_suseconds_t tv_usec; /* microseconds */
};
+struct itimerspec {
+ struct timespec it_interval;/* timer period */
+ struct timespec it_value; /* timer expiration */
+};
+
+struct itimerval {
+ struct timeval it_interval;/* timer interval */
+ struct timeval it_value; /* current value */
+};
+#endif
+
struct timezone {
int tz_minuteswest; /* minutes west of Greenwich */
int tz_dsttime; /* type of dst correction */
@@ -31,16 +43,6 @@ struct timezone {
#define ITIMER_VIRTUAL 1
#define ITIMER_PROF 2
-struct itimerspec {
- struct timespec it_interval; /* timer period */
- struct timespec it_value; /* timer expiration */
-};
-
-struct itimerval {
- struct timeval it_interval; /* timer interval */
- struct timeval it_value; /* current value */
-};
-
/*
* The IDs of the various system clocks (for POSIX.1b interval timers):
*/
diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
index 5f72af35b3ed..ad22079125bf 100644
--- a/include/uapi/linux/usb/charger.h
+++ b/include/uapi/linux/usb/charger.h
@@ -14,18 +14,18 @@
* ACA (Accessory Charger Adapters)
*/
enum usb_charger_type {
- UNKNOWN_TYPE,
- SDP_TYPE,
- DCP_TYPE,
- CDP_TYPE,
- ACA_TYPE,
+ UNKNOWN_TYPE = 0,
+ SDP_TYPE = 1,
+ DCP_TYPE = 2,
+ CDP_TYPE = 3,
+ ACA_TYPE = 4,
};
/* USB charger state */
enum usb_charger_state {
- USB_CHARGER_DEFAULT,
- USB_CHARGER_PRESENT,
- USB_CHARGER_ABSENT,
+ USB_CHARGER_DEFAULT = 0,
+ USB_CHARGER_PRESENT = 1,
+ USB_CHARGER_ABSENT = 2,
};
#endif /* _UAPI__LINUX_USB_CHARGER_H */
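(Spelling out the enumerator values changes nothing at the ABI level — C enums number from zero by default — it simply makes the uapi values explicit and harder to break by reordering.)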
diff --git a/include/video/mmp_disp.h b/include/video/mmp_disp.h
index 1f9bc133e230..77252cb46361 100644
--- a/include/video/mmp_disp.h
+++ b/include/video/mmp_disp.h
@@ -231,7 +231,7 @@ struct mmp_path {
/* layers */
int overlay_num;
- struct mmp_overlay overlays[0];
+ struct mmp_overlay overlays[];
};
extern struct mmp_path *mmp_get_path(const char *name);
diff --git a/include/video/samsung_fimd.h b/include/video/samsung_fimd.h
index b6571c3cfa31..c4a93ce1de48 100644
--- a/include/video/samsung_fimd.h
+++ b/include/video/samsung_fimd.h
@@ -10,7 +10,7 @@
*
* This is the register set for the fimd and new style framebuffer interface
* found from the S3C2443 onwards into the S3C2416, S3C2450, the
- * S3C64XX series such as the S3C6400 and S3C6410, and EXYNOS series.
+ * S3C64XX series such as the S3C6400 and S3C6410, and Exynos series.
*/
/* VIDCON0 */
diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
index 28e7dcd75e82..f8aa8bac5196 100644
--- a/include/xen/interface/io/tpmif.h
+++ b/include/xen/interface/io/tpmif.h
@@ -46,7 +46,7 @@ struct vtpm_shared_page {
uint8_t pad;
uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
- uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
+ uint32_t extra_pages[]; /* grant IDs; length in nr_extra_pages */
};
#endif
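This and the mmp_disp.h hunk above are the same mechanical conversion from the old GNU zero-length-array idiom to a C99 flexible array member; sizing code is unchanged, as in this generic sketch:

    #include <stdlib.h>

    /* Sketch: a struct ending in a flexible array member. */
    struct vec {
            int n;
            int data[];             /* was: int data[0]; */
    };

    struct vec *vec_alloc(int n)
    {
            struct vec *v = malloc(sizeof(*v) + n * sizeof(v->data[0]));

            if (v)
                    v->n = n;
            return v;
    }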
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 89a889585ba0..850a43bd69d3 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/semaphore.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/xenbus.h>
@@ -76,7 +77,7 @@ struct xenbus_device {
enum xenbus_state state;
struct completion down;
struct work_struct work;
- spinlock_t reclaim_lock;
+ struct semaphore reclaim_sem;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)
diff --git a/init/Kconfig b/init/Kconfig
index cfee56c151f1..20a6ac33761c 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1226,14 +1226,12 @@ endif
config BOOT_CONFIG
bool "Boot config support"
- depends on BLK_DEV_INITRD
- select LIBXBC
- default y
+ select BLK_DEV_INITRD
help
Extra boot config allows a system admin to pass a config file as
a complementary extension of the kernel cmdline when booting.
The boot config file must be attached at the end of initramfs
- with checksum and size.
+ with checksum, size and magic word.
See <file:Documentation/admin-guide/bootconfig.rst> for details.
If unsure, say Y.
diff --git a/init/main.c b/init/main.c
index cc0ee4873419..ee4947af823f 100644
--- a/init/main.c
+++ b/init/main.c
@@ -142,6 +142,15 @@ static char *extra_command_line;
/* Extra init arguments */
static char *extra_init_args;
+#ifdef CONFIG_BOOT_CONFIG
+/* Is bootconfig on command line? */
+static bool bootconfig_found;
+static bool initargs_found;
+#else
+# define bootconfig_found false
+# define initargs_found false
+#endif
+
static char *execute_command;
static char *ramdisk_execute_command;
@@ -259,7 +268,6 @@ static int __init xbc_snprint_cmdline(char *buf, size_t size,
{
struct xbc_node *knode, *vnode;
char *end = buf + size;
- char c = '\"';
const char *val;
int ret;
@@ -270,25 +278,20 @@ static int __init xbc_snprint_cmdline(char *buf, size_t size,
return ret;
vnode = xbc_node_get_child(knode);
- ret = snprintf(buf, rest(buf, end), "%s%c", xbc_namebuf,
- vnode ? '=' : ' ');
- if (ret < 0)
- return ret;
- buf += ret;
- if (!vnode)
+ if (!vnode) {
+ ret = snprintf(buf, rest(buf, end), "%s ", xbc_namebuf);
+ if (ret < 0)
+ return ret;
+ buf += ret;
continue;
-
- c = '\"';
+ }
xbc_array_for_each_value(vnode, val) {
- ret = snprintf(buf, rest(buf, end), "%c%s", c, val);
+ ret = snprintf(buf, rest(buf, end), "%s=\"%s\" ",
+ xbc_namebuf, val);
if (ret < 0)
return ret;
buf += ret;
- c = ',';
}
- if (rest(buf, end) > 2)
- strcpy(buf, "\" ");
- buf += 2;
}
return buf - (end - size);
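The visible effect of the rewritten loop: an array value in the boot config is now expanded into one quoted key=value argument per element instead of a single comma-joined string. For a hypothetical key:

    # boot config fragment
    foo.bar = "v1", "v2"

    # old generated cmdline:  foo.bar="v1,v2"
    # new generated cmdline:  foo.bar="v1" foo.bar="v2"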
@@ -326,7 +329,7 @@ static char * __init xbc_make_cmdline(const char *key)
return new_cmdline;
}
-u32 boot_config_checksum(unsigned char *p, u32 size)
+static u32 boot_config_checksum(unsigned char *p, u32 size)
{
u32 ret = 0;
@@ -336,23 +339,40 @@ u32 boot_config_checksum(unsigned char *p, u32 size)
return ret;
}
+static int __init bootconfig_params(char *param, char *val,
+ const char *unused, void *arg)
+{
+ if (strcmp(param, "bootconfig") == 0) {
+ bootconfig_found = true;
+ } else if (strcmp(param, "--") == 0) {
+ initargs_found = true;
+ }
+ return 0;
+}
+
static void __init setup_boot_config(const char *cmdline)
{
+ static char tmp_cmdline[COMMAND_LINE_SIZE] __initdata;
u32 size, csum;
char *data, *copy;
- const char *p;
u32 *hdr;
int ret;
- p = strstr(cmdline, "bootconfig");
- if (!p || (p != cmdline && !isspace(*(p-1))) ||
- (p[10] && !isspace(p[10])))
+ strlcpy(tmp_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ parse_args("bootconfig", tmp_cmdline, NULL, 0, 0, 0, NULL,
+ bootconfig_params);
+
+ if (!bootconfig_found)
return;
if (!initrd_end)
goto not_found;
- hdr = (u32 *)(initrd_end - 8);
+ data = (char *)initrd_end - BOOTCONFIG_MAGIC_LEN;
+ if (memcmp(data, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN))
+ goto not_found;
+
+ hdr = (u32 *)(data - 8);
size = hdr[0];
csum = hdr[1];
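The initrd tail layout implied by these checks, with the magic word last and an 8-byte size/checksum footer in front of it:

    [ initrd image ][ bootconfig data ][ size (u32) ][ csum (u32) ][ BOOTCONFIG_MAGIC ]
                                                                    ^ initrd_end - BOOTCONFIG_MAGIC_LEN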
@@ -396,6 +416,14 @@ not_found:
}
#else
#define setup_boot_config(cmdline) do { } while (0)
+
+static int __init warn_bootconfig(char *str)
+{
+ pr_warn("WARNING: 'bootconfig' found on the kernel command line but CONFIG_BOOTCONFIG is not set.\n");
+ return 0;
+}
+early_param("bootconfig", warn_bootconfig);
+
#endif
/* Change NUL term back to "=", to make "param" the whole string. */
@@ -562,11 +590,12 @@ static void __init setup_command_line(char *command_line)
* to init.
*/
len = strlen(saved_command_line);
- if (!strstr(boot_command_line, " -- ")) {
+ if (initargs_found) {
+ saved_command_line[len++] = ' ';
+ } else {
strcpy(saved_command_line + len, " -- ");
len += 4;
- } else
- saved_command_line[len++] = ' ';
+ }
strcpy(saved_command_line + len, extra_init_args);
}
diff --git a/ipc/sem.c b/ipc/sem.c
index 4f4303f32077..3687b71151b3 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -2384,11 +2384,9 @@ void exit_sem(struct task_struct *tsk)
ipc_assert_locked_object(&sma->sem_perm);
list_del(&un->list_id);
- /* we are the last process using this ulp, acquiring ulp->lock
- * isn't required. Besides that, we are also protected against
- * IPC_RMID as we hold sma->sem_perm lock now
- */
+ spin_lock(&ulp->lock);
list_del_rcu(&un->list_proc);
+ spin_unlock(&ulp->lock);
/* perform adjustments registered in un */
for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/kernel/audit.c b/kernel/audit.c
index 17b0d523afb3..9ddfe2aa6671 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -1101,13 +1101,11 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
audit_log_end(ab);
}
-static int audit_set_feature(struct sk_buff *skb)
+static int audit_set_feature(struct audit_features *uaf)
{
- struct audit_features *uaf;
int i;
BUILD_BUG_ON(AUDIT_LAST_FEATURE + 1 > ARRAY_SIZE(audit_feature_names));
- uaf = nlmsg_data(nlmsg_hdr(skb));
/* if there is ever a version 2 we should handle that here */
@@ -1175,6 +1173,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
u32 seq;
void *data;
+ int data_len;
int err;
struct audit_buffer *ab;
u16 msg_type = nlh->nlmsg_type;
@@ -1188,6 +1187,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
seq = nlh->nlmsg_seq;
data = nlmsg_data(nlh);
+ data_len = nlmsg_len(nlh);
switch (msg_type) {
case AUDIT_GET: {
@@ -1211,7 +1211,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
struct audit_status s;
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
+ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
if (s.mask & AUDIT_STATUS_ENABLED) {
err = audit_set_enabled(s.enabled);
if (err < 0)
@@ -1315,7 +1315,9 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
return err;
break;
case AUDIT_SET_FEATURE:
- err = audit_set_feature(skb);
+ if (data_len < sizeof(struct audit_features))
+ return -EINVAL;
+ err = audit_set_feature(data);
if (err)
return err;
break;
@@ -1327,6 +1329,8 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
err = audit_filter(msg_type, AUDIT_FILTER_USER);
if (err == 1) { /* match or error */
+ char *str = data;
+
err = 0;
if (msg_type == AUDIT_USER_TTY) {
err = tty_audit_push();
@@ -1334,26 +1338,24 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
break;
}
audit_log_user_recv_msg(&ab, msg_type);
- if (msg_type != AUDIT_USER_TTY)
+ if (msg_type != AUDIT_USER_TTY) {
+ /* ensure NULL termination */
+ str[data_len - 1] = '\0';
audit_log_format(ab, " msg='%.*s'",
AUDIT_MESSAGE_TEXT_MAX,
- (char *)data);
- else {
- int size;
-
+ str);
+ } else {
audit_log_format(ab, " data=");
- size = nlmsg_len(nlh);
- if (size > 0 &&
- ((unsigned char *)data)[size - 1] == '\0')
- size--;
- audit_log_n_untrustedstring(ab, data, size);
+ if (data_len > 0 && str[data_len - 1] == '\0')
+ data_len--;
+ audit_log_n_untrustedstring(ab, str, data_len);
}
audit_log_end(ab);
}
break;
case AUDIT_ADD_RULE:
case AUDIT_DEL_RULE:
- if (nlmsg_len(nlh) < sizeof(struct audit_rule_data))
+ if (data_len < sizeof(struct audit_rule_data))
return -EINVAL;
if (audit_enabled == AUDIT_LOCKED) {
audit_log_common_recv_msg(audit_context(), &ab,
@@ -1365,7 +1367,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
audit_log_end(ab);
return -EPERM;
}
- err = audit_rule_change(msg_type, seq, data, nlmsg_len(nlh));
+ err = audit_rule_change(msg_type, seq, data, data_len);
break;
case AUDIT_LIST_RULES:
err = audit_list_rules_send(skb, seq);
@@ -1380,7 +1382,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
case AUDIT_MAKE_EQUIV: {
void *bufp = data;
u32 sizes[2];
- size_t msglen = nlmsg_len(nlh);
+ size_t msglen = data_len;
char *old, *new;
err = -EINVAL;
@@ -1456,7 +1458,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
memset(&s, 0, sizeof(s));
/* guard against past and future API changes */
- memcpy(&s, data, min_t(size_t, sizeof(s), nlmsg_len(nlh)));
+ memcpy(&s, data, min_t(size_t, sizeof(s), data_len));
/* check if new data is valid */
if ((s.enabled != 0 && s.enabled != 1) ||
(s.log_passwd != 0 && s.log_passwd != 1))
diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
index b0126e9c0743..026e34da4ace 100644
--- a/kernel/auditfilter.c
+++ b/kernel/auditfilter.c
@@ -456,6 +456,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
bufp = data->buf;
for (i = 0; i < data->field_count; i++) {
struct audit_field *f = &entry->rule.fields[i];
+ u32 f_val;
err = -EINVAL;
@@ -464,12 +465,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
goto exit_free;
f->type = data->fields[i];
- f->val = data->values[i];
+ f_val = data->values[i];
/* Support legacy tests for a valid loginuid */
- if ((f->type == AUDIT_LOGINUID) && (f->val == AUDIT_UID_UNSET)) {
+ if ((f->type == AUDIT_LOGINUID) && (f_val == AUDIT_UID_UNSET)) {
f->type = AUDIT_LOGINUID_SET;
- f->val = 0;
+ f_val = 0;
entry->rule.pflags |= AUDIT_LOGINUID_LEGACY;
}
@@ -485,7 +486,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
case AUDIT_SUID:
case AUDIT_FSUID:
case AUDIT_OBJ_UID:
- f->uid = make_kuid(current_user_ns(), f->val);
+ f->uid = make_kuid(current_user_ns(), f_val);
if (!uid_valid(f->uid))
goto exit_free;
break;
@@ -494,11 +495,12 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
case AUDIT_SGID:
case AUDIT_FSGID:
case AUDIT_OBJ_GID:
- f->gid = make_kgid(current_user_ns(), f->val);
+ f->gid = make_kgid(current_user_ns(), f_val);
if (!gid_valid(f->gid))
goto exit_free;
break;
case AUDIT_ARCH:
+ f->val = f_val;
entry->rule.arch_f = f;
break;
case AUDIT_SUBJ_USER:
@@ -511,11 +513,13 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
case AUDIT_OBJ_TYPE:
case AUDIT_OBJ_LEV_LOW:
case AUDIT_OBJ_LEV_HIGH:
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
-
+ }
+ entry->rule.buflen += f_val;
+ f->lsm_str = str;
err = security_audit_rule_init(f->type, f->op, str,
(void **)&f->lsm_rule);
/* Keep currently invalid fields around in case they
@@ -524,68 +528,71 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
pr_warn("audit rule for LSM \'%s\' is invalid\n",
str);
err = 0;
- }
- if (err) {
- kfree(str);
+ } else if (err)
goto exit_free;
- } else
- f->lsm_str = str;
break;
case AUDIT_WATCH:
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
-
- err = audit_to_watch(&entry->rule, str, f->val, f->op);
+ }
+ err = audit_to_watch(&entry->rule, str, f_val, f->op);
if (err) {
kfree(str);
goto exit_free;
}
+ entry->rule.buflen += f_val;
break;
case AUDIT_DIR:
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
-
+ }
err = audit_make_tree(&entry->rule, str, f->op);
kfree(str);
if (err)
goto exit_free;
+ entry->rule.buflen += f_val;
break;
case AUDIT_INODE:
+ f->val = f_val;
err = audit_to_inode(&entry->rule, f);
if (err)
goto exit_free;
break;
case AUDIT_FILTERKEY:
- if (entry->rule.filterkey || f->val > AUDIT_MAX_KEY_LEN)
+ if (entry->rule.filterkey || f_val > AUDIT_MAX_KEY_LEN)
goto exit_free;
- str = audit_unpack_string(&bufp, &remain, f->val);
- if (IS_ERR(str))
+ str = audit_unpack_string(&bufp, &remain, f_val);
+ if (IS_ERR(str)) {
+ err = PTR_ERR(str);
goto exit_free;
- entry->rule.buflen += f->val;
+ }
+ entry->rule.buflen += f_val;
entry->rule.filterkey = str;
break;
case AUDIT_EXE:
- if (entry->rule.exe || f->val > PATH_MAX)
+ if (entry->rule.exe || f_val > PATH_MAX)
goto exit_free;
- str = audit_unpack_string(&bufp, &remain, f->val);
+ str = audit_unpack_string(&bufp, &remain, f_val);
if (IS_ERR(str)) {
err = PTR_ERR(str);
goto exit_free;
}
- entry->rule.buflen += f->val;
-
- audit_mark = audit_alloc_mark(&entry->rule, str, f->val);
+ audit_mark = audit_alloc_mark(&entry->rule, str, f_val);
if (IS_ERR(audit_mark)) {
kfree(str);
err = PTR_ERR(audit_mark);
goto exit_free;
}
+ entry->rule.buflen += f_val;
entry->rule.exe = audit_mark;
break;
+ default:
+ f->val = f_val;
+ break;
}
}
diff --git a/kernel/bpf/btf.c b/kernel/bpf/btf.c
index 805c43b083e9..787140095e58 100644
--- a/kernel/bpf/btf.c
+++ b/kernel/bpf/btf.c
@@ -4142,9 +4142,9 @@ int btf_distill_func_proto(struct bpf_verifier_log *log,
* EFAULT - verifier bug
* 0 - 99% match. The last 1% is validated by the verifier.
*/
-int btf_check_func_type_match(struct bpf_verifier_log *log,
- struct btf *btf1, const struct btf_type *t1,
- struct btf *btf2, const struct btf_type *t2)
+static int btf_check_func_type_match(struct bpf_verifier_log *log,
+ struct btf *btf1, const struct btf_type *t1,
+ struct btf *btf2, const struct btf_type *t2)
{
const struct btf_param *args1, *args2;
const char *fn1, *fn2, *s1, *s2;
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 2d182c4ee9d9..a1468e3f5af2 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -56,6 +56,7 @@ struct htab_elem {
union {
struct bpf_htab *htab;
struct pcpu_freelist_node fnode;
+ struct htab_elem *batch_flink;
};
};
};
@@ -126,6 +127,17 @@ free_elems:
bpf_map_area_free(htab->elems);
}
+/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
+ * (bucket_lock). If both locks need to be acquired together, the lock
+ * order is always lru_lock -> bucket_lock and this only happens in
+ * bpf_lru_list.c logic. For example, certain code paths of
+ * bpf_lru_pop_free(), which is called from prealloc_lru_pop(),
+ * acquire lru_lock first and then bucket_lock.
+ *
+ * In hashtab.c, to avoid deadlock, acquiring bucket_lock
+ * followed by lru_lock is not allowed. In such cases,
+ * bucket_lock must be released before acquiring lru_lock.
+ */
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
u32 hash)
{
@@ -1256,10 +1268,12 @@ __htab_map_lookup_and_delete_batch(struct bpf_map *map,
void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
void *ubatch = u64_to_user_ptr(attr->batch.in_batch);
u32 batch, max_count, size, bucket_size;
+ struct htab_elem *node_to_free = NULL;
u64 elem_map_flags, map_flags;
struct hlist_nulls_head *head;
struct hlist_nulls_node *n;
- unsigned long flags;
+ unsigned long flags = 0;
+ bool locked = false;
struct htab_elem *l;
struct bucket *b;
int ret = 0;
@@ -1319,15 +1333,25 @@ again_nocopy:
dst_val = values;
b = &htab->buckets[batch];
head = &b->head;
- raw_spin_lock_irqsave(&b->lock, flags);
+ /* do not grab the lock unless we need it (bucket_cnt > 0). */
+ if (locked)
+ raw_spin_lock_irqsave(&b->lock, flags);
bucket_cnt = 0;
hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
bucket_cnt++;
+ if (bucket_cnt && !locked) {
+ locked = true;
+ goto again_nocopy;
+ }
+
if (bucket_cnt > (max_count - total)) {
if (total == 0)
ret = -ENOSPC;
+ /* Note that since bucket_cnt > 0 here, it is implicit
+ * that the lock was grabbed, so release it.
+ */
raw_spin_unlock_irqrestore(&b->lock, flags);
rcu_read_unlock();
this_cpu_dec(bpf_prog_active);
@@ -1337,6 +1361,9 @@ again_nocopy:
if (bucket_cnt > bucket_size) {
bucket_size = bucket_cnt;
+ /* Note that since bucket_cnt > 0 here, it is implicit
+ * that the lock was grabbed, so release it.
+ */
raw_spin_unlock_irqrestore(&b->lock, flags);
rcu_read_unlock();
this_cpu_dec(bpf_prog_active);
@@ -1346,6 +1373,10 @@ again_nocopy:
goto alloc;
}
+ /* The next block is only safe to run with the lock held */
+ if (!locked)
+ goto next_batch;
+
hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
memcpy(dst_key, l->key, key_size);
@@ -1370,16 +1401,33 @@ again_nocopy:
}
if (do_delete) {
hlist_nulls_del_rcu(&l->hash_node);
- if (is_lru_map)
- bpf_lru_push_free(&htab->lru, &l->lru_node);
- else
+
+ /* bpf_lru_push_free() will acquire lru_lock, which
+ * may cause deadlock. See comments in function
+ * prealloc_lru_pop(). Let us do bpf_lru_push_free()
+ * after releasing the bucket lock.
+ */
+ if (is_lru_map) {
+ l->batch_flink = node_to_free;
+ node_to_free = l;
+ } else {
free_htab_elem(htab, l);
+ }
}
dst_key += key_size;
dst_val += value_size;
}
raw_spin_unlock_irqrestore(&b->lock, flags);
+ locked = false;
+
+ while (node_to_free) {
+ l = node_to_free;
+ node_to_free = node_to_free->batch_flink;
+ bpf_lru_push_free(&htab->lru, &l->lru_node);
+ }
+
+next_batch:
/* If we are not copying data, we can go to next bucket and avoid
* unlocking the rcu.
*/
diff --git a/kernel/bpf/offload.c b/kernel/bpf/offload.c
index 2c5dc6541ece..bd09290e3648 100644
--- a/kernel/bpf/offload.c
+++ b/kernel/bpf/offload.c
@@ -321,7 +321,7 @@ int bpf_prog_offload_info_fill(struct bpf_prog_info *info,
ulen = info->jited_prog_len;
info->jited_prog_len = aux->offload->jited_len;
- if (info->jited_prog_len & ulen) {
+ if (info->jited_prog_len && ulen) {
uinsns = u64_to_user_ptr(info->jited_prog_insns);
ulen = min_t(u32, info->jited_prog_len, ulen);
if (copy_to_user(uinsns, aux->offload->jited_image, ulen)) {
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index db552b9f9377..75f687301bbf 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -5927,11 +5927,14 @@ void cgroup_post_fork(struct task_struct *child)
spin_lock_irq(&css_set_lock);
- WARN_ON_ONCE(!list_empty(&child->cg_list));
- cset = task_css_set(current); /* current is @child's parent */
- get_css_set(cset);
- cset->nr_tasks++;
- css_set_move_task(child, NULL, cset, false);
+ /* init tasks are special, only link regular threads */
+ if (likely(child->pid)) {
+ WARN_ON_ONCE(!list_empty(&child->cg_list));
+ cset = task_css_set(current); /* current is @child's parent */
+ get_css_set(cset);
+ cset->nr_tasks++;
+ css_set_move_task(child, NULL, cset, false);
+ }
/*
* If the cgroup has to be frozen, the new task has too. Let's set
diff --git a/kernel/compat.c b/kernel/compat.c
index 95005f849c68..843dd17e6078 100644
--- a/kernel/compat.c
+++ b/kernel/compat.c
@@ -26,70 +26,6 @@
#include <linux/uaccess.h>
-static int __compat_get_timeval(struct timeval *tv, const struct old_timeval32 __user *ctv)
-{
- return (!access_ok(ctv, sizeof(*ctv)) ||
- __get_user(tv->tv_sec, &ctv->tv_sec) ||
- __get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
-}
-
-static int __compat_put_timeval(const struct timeval *tv, struct old_timeval32 __user *ctv)
-{
- return (!access_ok(ctv, sizeof(*ctv)) ||
- __put_user(tv->tv_sec, &ctv->tv_sec) ||
- __put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
-}
-
-static int __compat_get_timespec(struct timespec *ts, const struct old_timespec32 __user *cts)
-{
- return (!access_ok(cts, sizeof(*cts)) ||
- __get_user(ts->tv_sec, &cts->tv_sec) ||
- __get_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
-}
-
-static int __compat_put_timespec(const struct timespec *ts, struct old_timespec32 __user *cts)
-{
- return (!access_ok(cts, sizeof(*cts)) ||
- __put_user(ts->tv_sec, &cts->tv_sec) ||
- __put_user(ts->tv_nsec, &cts->tv_nsec)) ? -EFAULT : 0;
-}
-
-int compat_get_timeval(struct timeval *tv, const void __user *utv)
-{
- if (COMPAT_USE_64BIT_TIME)
- return copy_from_user(tv, utv, sizeof(*tv)) ? -EFAULT : 0;
- else
- return __compat_get_timeval(tv, utv);
-}
-EXPORT_SYMBOL_GPL(compat_get_timeval);
-
-int compat_put_timeval(const struct timeval *tv, void __user *utv)
-{
- if (COMPAT_USE_64BIT_TIME)
- return copy_to_user(utv, tv, sizeof(*tv)) ? -EFAULT : 0;
- else
- return __compat_put_timeval(tv, utv);
-}
-EXPORT_SYMBOL_GPL(compat_put_timeval);
-
-int compat_get_timespec(struct timespec *ts, const void __user *uts)
-{
- if (COMPAT_USE_64BIT_TIME)
- return copy_from_user(ts, uts, sizeof(*ts)) ? -EFAULT : 0;
- else
- return __compat_get_timespec(ts, uts);
-}
-EXPORT_SYMBOL_GPL(compat_get_timespec);
-
-int compat_put_timespec(const struct timespec *ts, void __user *uts)
-{
- if (COMPAT_USE_64BIT_TIME)
- return copy_to_user(uts, ts, sizeof(*ts)) ? -EFAULT : 0;
- else
- return __compat_put_timespec(ts, uts);
-}
-EXPORT_SYMBOL_GPL(compat_put_timespec);
-
#ifdef __ARCH_WANT_SYS_SIGPROCMASK
/*
diff --git a/kernel/dma/contiguous.c b/kernel/dma/contiguous.c
index daa4e6eefdde..8bc6f2d670f9 100644
--- a/kernel/dma/contiguous.c
+++ b/kernel/dma/contiguous.c
@@ -302,9 +302,16 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
phys_addr_t align = PAGE_SIZE << max(MAX_ORDER - 1, pageblock_order);
phys_addr_t mask = align - 1;
unsigned long node = rmem->fdt_node;
+ bool default_cma = of_get_flat_dt_prop(node, "linux,cma-default", NULL);
struct cma *cma;
int err;
+ if (size_cmdline != -1 && default_cma) {
+ pr_info("Reserved memory: bypass %s node, using cmdline CMA params instead\n",
+ rmem->name);
+ return -EBUSY;
+ }
+
if (!of_get_flat_dt_prop(node, "reusable", NULL) ||
of_get_flat_dt_prop(node, "no-map", NULL))
return -EINVAL;
@@ -322,7 +329,7 @@ static int __init rmem_cma_setup(struct reserved_mem *rmem)
/* Architecture specific contiguous memory fixup. */
dma_contiguous_early_fixup(rmem->base, rmem->size);
- if (of_get_flat_dt_prop(node, "linux,cma-default", NULL))
+ if (default_cma)
dma_contiguous_set_default(cma);
rmem->ops = &rmem_cma_ops;
diff --git a/kernel/dma/direct.c b/kernel/dma/direct.c
index 6af7ae83c4ad..ac7956c38f69 100644
--- a/kernel/dma/direct.c
+++ b/kernel/dma/direct.c
@@ -23,18 +23,6 @@
*/
unsigned int zone_dma_bits __ro_after_init = 24;
-static void report_addr(struct device *dev, dma_addr_t dma_addr, size_t size)
-{
- if (!dev->dma_mask) {
- dev_err_once(dev, "DMA map on device without dma_mask\n");
- } else if (*dev->dma_mask >= DMA_BIT_MASK(32) || dev->bus_dma_limit) {
- dev_err_once(dev,
- "overflow %pad+%zu of DMA mask %llx bus limit %llx\n",
- &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
- }
- WARN_ON_ONCE(1);
-}
-
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
phys_addr_t phys)
{
@@ -357,13 +345,6 @@ void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
EXPORT_SYMBOL(dma_direct_unmap_sg);
#endif
-static inline bool dma_direct_possible(struct device *dev, dma_addr_t dma_addr,
- size_t size)
-{
- return swiotlb_force != SWIOTLB_FORCE &&
- dma_capable(dev, dma_addr, size, true);
-}
-
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir,
unsigned long attrs)
@@ -371,9 +352,16 @@ dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
phys_addr_t phys = page_to_phys(page) + offset;
dma_addr_t dma_addr = phys_to_dma(dev, phys);
- if (unlikely(!dma_direct_possible(dev, dma_addr, size)) &&
- !swiotlb_map(dev, &phys, &dma_addr, size, dir, attrs)) {
- report_addr(dev, dma_addr, size);
+ if (unlikely(swiotlb_force == SWIOTLB_FORCE))
+ return swiotlb_map(dev, phys, size, dir, attrs);
+
+ if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+ if (swiotlb_force != SWIOTLB_NO_FORCE)
+ return swiotlb_map(dev, phys, size, dir, attrs);
+
+ dev_WARN_ONCE(dev, 1,
+ "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
return DMA_MAPPING_ERROR;
}
@@ -411,7 +399,10 @@ dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
dma_addr_t dma_addr = paddr;
if (unlikely(!dma_capable(dev, dma_addr, size, false))) {
- report_addr(dev, dma_addr, size);
+ dev_err_once(dev,
+ "DMA addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+ WARN_ON_ONCE(1);
return DMA_MAPPING_ERROR;
}
@@ -472,28 +463,26 @@ int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
}
#endif /* CONFIG_MMU */
-/*
- * Because 32-bit DMA masks are so common we expect every architecture to be
- * able to satisfy them - either by not supporting more physical memory, or by
- * providing a ZONE_DMA32. If neither is the case, the architecture needs to
- * use an IOMMU instead of the direct mapping.
- */
int dma_direct_supported(struct device *dev, u64 mask)
{
- u64 min_mask;
+ u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
- if (IS_ENABLED(CONFIG_ZONE_DMA))
- min_mask = DMA_BIT_MASK(zone_dma_bits);
- else
- min_mask = DMA_BIT_MASK(32);
-
- min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
+ /*
+ * Because 32-bit DMA masks are so common we expect every architecture
+ * to be able to satisfy them - either by not supporting more physical
+ * memory, or by providing a ZONE_DMA32. If neither is the case, the
+ * architecture needs to use an IOMMU instead of the direct mapping.
+ */
+ if (mask >= DMA_BIT_MASK(32))
+ return 1;
/*
* This check needs to be against the actual bit mask value, so
* use __phys_to_dma() here so that the SME encryption mask isn't
* part of the check.
*/
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
return mask >= __phys_to_dma(dev, min_mask);
}
diff --git a/kernel/dma/swiotlb.c b/kernel/dma/swiotlb.c
index 9280d6f8271e..c19379fabd20 100644
--- a/kernel/dma/swiotlb.c
+++ b/kernel/dma/swiotlb.c
@@ -22,6 +22,7 @@
#include <linux/cache.h>
#include <linux/dma-direct.h>
+#include <linux/dma-noncoherent.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/spinlock.h>
@@ -656,35 +657,38 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
}
/*
- * Create a swiotlb mapping for the buffer at @phys, and in case of DMAing
+ * Create a swiotlb mapping for the buffer at @paddr, and in case of DMAing
* to the device copy the data into it as well.
*/
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs)
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir, unsigned long attrs)
{
- trace_swiotlb_bounced(dev, *dma_addr, size, swiotlb_force);
+ phys_addr_t swiotlb_addr;
+ dma_addr_t dma_addr;
- if (unlikely(swiotlb_force == SWIOTLB_NO_FORCE)) {
- dev_warn_ratelimited(dev,
- "Cannot do DMA to address %pa\n", phys);
- return false;
- }
+ trace_swiotlb_bounced(dev, phys_to_dma(dev, paddr), size,
+ swiotlb_force);
- /* Oh well, have to allocate and map a bounce buffer. */
- *phys = swiotlb_tbl_map_single(dev, __phys_to_dma(dev, io_tlb_start),
- *phys, size, size, dir, attrs);
- if (*phys == (phys_addr_t)DMA_MAPPING_ERROR)
- return false;
+ swiotlb_addr = swiotlb_tbl_map_single(dev,
+ __phys_to_dma(dev, io_tlb_start),
+ paddr, size, size, dir, attrs);
+ if (swiotlb_addr == (phys_addr_t)DMA_MAPPING_ERROR)
+ return DMA_MAPPING_ERROR;
/* Ensure that the address returned is DMA'ble */
- *dma_addr = __phys_to_dma(dev, *phys);
- if (unlikely(!dma_capable(dev, *dma_addr, size, true))) {
- swiotlb_tbl_unmap_single(dev, *phys, size, size, dir,
+ dma_addr = __phys_to_dma(dev, swiotlb_addr);
+ if (unlikely(!dma_capable(dev, dma_addr, size, true))) {
+ swiotlb_tbl_unmap_single(dev, swiotlb_addr, size, size, dir,
attrs | DMA_ATTR_SKIP_CPU_SYNC);
- return false;
+ dev_WARN_ONCE(dev, 1,
+ "swiotlb addr %pad+%zu overflow (mask %llx, bus limit %llx).\n",
+ &dma_addr, size, *dev->dma_mask, dev->bus_dma_limit);
+ return DMA_MAPPING_ERROR;
}
- return true;
+ if (!dev_is_dma_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
+ arch_sync_dma_for_device(swiotlb_addr, size, dir);
+ return dma_addr;
}
size_t swiotlb_max_mapping_size(struct device *dev)
diff --git a/kernel/exit.c b/kernel/exit.c
index 2833ffb0c211..0b81b26a872a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -619,8 +619,8 @@ static void forget_original_parent(struct task_struct *father,
reaper = find_new_reaper(father, reaper);
list_for_each_entry(p, &father->children, sibling) {
for_each_thread(p, t) {
- t->real_parent = reaper;
- BUG_ON((!t->ptrace) != (t->parent == father));
+ RCU_INIT_POINTER(t->real_parent, reaper);
+ BUG_ON((!t->ptrace) != (rcu_access_pointer(t->parent) == father));
if (likely(!t->ptrace))
t->parent = t->real_parent;
if (t->pdeath_signal)
diff --git a/kernel/fork.c b/kernel/fork.c
index 60a1295f4384..86425305cd4a 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1508,7 +1508,7 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk)
return 0;
}
sig = kmem_cache_alloc(sighand_cachep, GFP_KERNEL);
- rcu_assign_pointer(tsk->sighand, sig);
+ RCU_INIT_POINTER(tsk->sighand, sig);
if (!sig)
return -ENOMEM;
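(RCU_INIT_POINTER() suffices here: the freshly allocated task is not yet reachable by any RCU reader, so the store-release ordering that rcu_assign_pointer() provides for publishing a pointer is not needed.)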
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 3924fbe829d4..c9d8eb7f5c02 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -128,8 +128,6 @@ static inline void unregister_handler_proc(unsigned int irq,
extern bool irq_can_set_affinity_usr(unsigned int irq);
-extern int irq_select_affinity_usr(unsigned int irq);
-
extern void irq_set_thread_affinity(struct irq_desc *desc);
extern int irq_do_set_affinity(struct irq_data *data,
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 3089a60ea8f9..7eee98c38f25 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -481,23 +481,9 @@ int irq_setup_affinity(struct irq_desc *desc)
{
return irq_select_affinity(irq_desc_get_irq(desc));
}
-#endif
+#endif /* CONFIG_AUTO_IRQ_AFFINITY */
+#endif /* CONFIG_SMP */
-/*
- * Called when a bogus affinity is set via /proc/irq
- */
-int irq_select_affinity_usr(unsigned int irq)
-{
- struct irq_desc *desc = irq_to_desc(irq);
- unsigned long flags;
- int ret;
-
- raw_spin_lock_irqsave(&desc->lock, flags);
- ret = irq_setup_affinity(desc);
- raw_spin_unlock_irqrestore(&desc->lock, flags);
- return ret;
-}
-#endif
/**
* irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c
index 9e5783d98033..32c071d7bc03 100644
--- a/kernel/irq/proc.c
+++ b/kernel/irq/proc.c
@@ -111,6 +111,28 @@ static int irq_affinity_list_proc_show(struct seq_file *m, void *v)
return show_irq_affinity(AFFINITY_LIST, m);
}
+#ifndef CONFIG_AUTO_IRQ_AFFINITY
+static inline int irq_select_affinity_usr(unsigned int irq)
+{
+ /*
+ * If the interrupt is started up already then this fails. The
+ * interrupt is assigned to an online CPU already. There is no
+ * point to move it around randomly. Tell user space that the
+ * selected mask is bogus.
+ *
+ * If not then any change to the affinity is pointless because the
+ * startup code invokes irq_setup_affinity() which will select
+ * a online CPU anyway.
+ */
+ return -EINVAL;
+}
+#else
+/* ALPHA magic affinity auto selector. Keep it for historical reasons. */
+static inline int irq_select_affinity_usr(unsigned int irq)
+{
+ return irq_select_affinity(irq);
+}
+#endif
static ssize_t write_irq_affinity(int type, struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
diff --git a/kernel/power/snapshot.c b/kernel/power/snapshot.c
index ddade80ad276..d82b7b88d616 100644
--- a/kernel/power/snapshot.c
+++ b/kernel/power/snapshot.c
@@ -1681,7 +1681,7 @@ static unsigned long minimum_image_size(unsigned long saveable)
* hibernation for allocations made while saving the image and for device
* drivers, in case they need to allocate memory from their hibernation
* callbacks (these two numbers are given by PAGES_FOR_IO (which is a rough
- * estimate) and reserverd_size divided by PAGE_SIZE (which is tunable through
+ * estimate) and reserved_size divided by PAGE_SIZE (which is tunable through
* /sys/power/reserved_size, respectively). To make this happen, we compute the
* total number of available page frames and allocate at least
*
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 2c47280fbfc7..8b1bb5ee7e5d 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -131,11 +131,12 @@ static void s2idle_loop(void)
* to avoid them upfront.
*/
for (;;) {
- if (s2idle_ops && s2idle_ops->wake)
- s2idle_ops->wake();
-
- if (pm_wakeup_pending())
+ if (s2idle_ops && s2idle_ops->wake) {
+ if (s2idle_ops->wake())
+ break;
+ } else if (pm_wakeup_pending()) {
break;
+ }
pm_wakeup_clear(false);
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index fc1dfc007604..1a9983da4408 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -552,27 +552,32 @@ void resched_cpu(int cpu)
*/
int get_nohz_timer_target(void)
{
- int i, cpu = smp_processor_id();
+ int i, cpu = smp_processor_id(), default_cpu = -1;
struct sched_domain *sd;
- if (!idle_cpu(cpu) && housekeeping_cpu(cpu, HK_FLAG_TIMER))
- return cpu;
+ if (housekeeping_cpu(cpu, HK_FLAG_TIMER)) {
+ if (!idle_cpu(cpu))
+ return cpu;
+ default_cpu = cpu;
+ }
rcu_read_lock();
for_each_domain(cpu, sd) {
- for_each_cpu(i, sched_domain_span(sd)) {
+ for_each_cpu_and(i, sched_domain_span(sd),
+ housekeeping_cpumask(HK_FLAG_TIMER)) {
if (cpu == i)
continue;
- if (!idle_cpu(i) && housekeeping_cpu(i, HK_FLAG_TIMER)) {
+ if (!idle_cpu(i)) {
cpu = i;
goto unlock;
}
}
}
- if (!housekeeping_cpu(cpu, HK_FLAG_TIMER))
- cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
+ if (default_cpu == -1)
+ default_cpu = housekeeping_any_cpu(HK_FLAG_TIMER);
+ cpu = default_cpu;
unlock:
rcu_read_unlock();
return cpu;
@@ -1442,17 +1447,6 @@ void check_preempt_curr(struct rq *rq, struct task_struct *p, int flags)
#ifdef CONFIG_SMP
-static inline bool is_per_cpu_kthread(struct task_struct *p)
-{
- if (!(p->flags & PF_KTHREAD))
- return false;
-
- if (p->nr_cpus_allowed != 1)
- return false;
-
- return true;
-}
-
/*
* Per-CPU kthreads are allowed to run on !active && online CPUs, see
* __set_cpus_allowed_ptr() and select_fallback_rq().
@@ -3669,28 +3663,32 @@ static void sched_tick_remote(struct work_struct *work)
* statistics and checks timeslices in a time-independent way, regardless
* of when exactly it is running.
*/
- if (idle_cpu(cpu) || !tick_nohz_tick_stopped_cpu(cpu))
+ if (!tick_nohz_tick_stopped_cpu(cpu))
goto out_requeue;
rq_lock_irq(rq, &rf);
curr = rq->curr;
- if (is_idle_task(curr) || cpu_is_offline(cpu))
+ if (cpu_is_offline(cpu))
goto out_unlock;
+ curr = rq->curr;
update_rq_clock(rq);
- delta = rq_clock_task(rq) - curr->se.exec_start;
- /*
- * Make sure the next tick runs within a reasonable
- * amount of time.
- */
- WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ if (!is_idle_task(curr)) {
+ /*
+ * Make sure the next tick runs within a reasonable
+ * amount of time.
+ */
+ delta = rq_clock_task(rq) - curr->se.exec_start;
+ WARN_ON_ONCE(delta > (u64)NSEC_PER_SEC * 3);
+ }
curr->sched_class->task_tick(rq, curr, 0);
+ calc_load_nohz_remote(rq);
out_unlock:
rq_unlock_irq(rq, &rf);
-
out_requeue:
+
/*
* Run the remote tick once per second (1Hz). This arbitrary
* frequency is large enough to avoid overload but short enough
@@ -7063,8 +7061,15 @@ void sched_move_task(struct task_struct *tsk)
if (queued)
enqueue_task(rq, tsk, queue_flags);
- if (running)
+ if (running) {
set_next_task(rq, tsk);
+ /*
+ * After changing group, the running task may have joined a
+ * throttled one but it's still the running task. Trigger a
+ * resched to make sure that task can still run.
+ */
+ resched_curr(rq);
+ }
task_rq_unlock(rq, tsk, &rf);
}
@@ -7260,7 +7265,7 @@ capacity_from_percent(char *buf)
&req.percent);
if (req.ret)
return req;
- if (req.percent > UCLAMP_PERCENT_SCALE) {
+ if ((u64)req.percent > UCLAMP_PERCENT_SCALE) {
req.ret = -ERANGE;
return req;
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index fe4e0d775375..c1217bfe5e81 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3516,7 +3516,6 @@ update_cfs_rq_load_avg(u64 now, struct cfs_rq *cfs_rq)
* attach_entity_load_avg - attach this entity to its cfs_rq load avg
* @cfs_rq: cfs_rq to attach to
* @se: sched_entity to attach
- * @flags: migration hints
*
* Must call update_cfs_rq_load_avg() before this, since we rely on
* cfs_rq->avg.last_update_time being current.
@@ -5912,6 +5911,20 @@ static int select_idle_sibling(struct task_struct *p, int prev, int target)
(available_idle_cpu(prev) || sched_idle_cpu(prev)))
return prev;
+ /*
+ * Allow a per-cpu kthread to stack with the wakee if the
+ * kworker thread's CPU and the task's previous CPU are the same.
+ * The assumption is that the wakee queued work for the
+ * per-cpu kthread that is now complete and the wakeup is
+ * essentially a sync wakeup. An obvious example of this
+ * pattern is IO completions.
+ */
+ if (is_per_cpu_kthread(current) &&
+ prev == smp_processor_id() &&
+ this_rq()->nr_running <= 1) {
+ return prev;
+ }
+
/* Check a recently used CPU as a potential idle candidate: */
recent_used_cpu = p->recent_used_cpu;
if (recent_used_cpu != prev &&
@@ -8324,6 +8337,8 @@ static inline void update_sg_wakeup_stats(struct sched_domain *sd,
sgs->group_capacity = group->sgc->capacity;
+ sgs->group_weight = group->group_weight;
+
sgs->group_type = group_classify(sd->imbalance_pct, group, sgs);
/*
@@ -8658,10 +8673,6 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
/*
* Try to use spare capacity of local group without overloading it or
* emptying busiest.
- * XXX Spreading tasks across NUMA nodes is not always the best policy
- * and special care should be taken for SD_NUMA domain level before
- * spreading the tasks. For now, load_balance() fully relies on
- * NUMA_BALANCING and fbq_classify_group/rq to override the decision.
*/
if (local->group_type == group_has_spare) {
if (busiest->group_type > group_fully_busy) {
@@ -8701,16 +8712,37 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
env->migration_type = migrate_task;
lsub_positive(&nr_diff, local->sum_nr_running);
env->imbalance = nr_diff >> 1;
- return;
- }
+ } else {
- /*
- * If there is no overload, we just want to even the number of
- * idle cpus.
- */
- env->migration_type = migrate_task;
- env->imbalance = max_t(long, 0, (local->idle_cpus -
+ /*
+ * If there is no overload, we just want to even the number of
+ * idle cpus.
+ */
+ env->migration_type = migrate_task;
+ env->imbalance = max_t(long, 0, (local->idle_cpus -
busiest->idle_cpus) >> 1);
+ }
+
+ /* Consider allowing a small imbalance between NUMA groups */
+ if (env->sd->flags & SD_NUMA) {
+ unsigned int imbalance_min;
+
+ /*
+ * Compute an allowed imbalance that lets a simple pair
+ * of communicating tasks remain local, effectively
+ * ignoring them when balancing.
+ *
+ * NOTE: Generally this would have been scaled by the
+ * domain size, and that was evaluated. However, the
+ * benefit is similar across a range of workloads and
+ * machines, while scaling by the domain size adds the
+ * risk that lower domains have to be rebalanced.
+ */
+ imbalance_min = 2;
+ if (busiest->sum_nr_running <= imbalance_min)
+ env->imbalance = 0;
+ }
+
return;
}
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
index 28a516575c18..de22da666ac7 100644
--- a/kernel/sched/loadavg.c
+++ b/kernel/sched/loadavg.c
@@ -231,16 +231,11 @@ static inline int calc_load_read_idx(void)
return calc_load_idx & 1;
}
-void calc_load_nohz_start(void)
+static void calc_load_nohz_fold(struct rq *rq)
{
- struct rq *this_rq = this_rq();
long delta;
- /*
- * We're going into NO_HZ mode, if there's any pending delta, fold it
- * into the pending NO_HZ delta.
- */
- delta = calc_load_fold_active(this_rq, 0);
+ delta = calc_load_fold_active(rq, 0);
if (delta) {
int idx = calc_load_write_idx();
@@ -248,6 +243,24 @@ void calc_load_nohz_start(void)
}
}
+void calc_load_nohz_start(void)
+{
+ /*
+ * We're going into NO_HZ mode, if there's any pending delta, fold it
+ * into the pending NO_HZ delta.
+ */
+ calc_load_nohz_fold(this_rq());
+}
+
+/*
+ * Keep track of the load for NOHZ_FULL, must be called between
+ * calc_load_nohz_{start,stop}().
+ */
+void calc_load_nohz_remote(struct rq *rq)
+{
+ calc_load_nohz_fold(rq);
+}
+
void calc_load_nohz_stop(void)
{
struct rq *this_rq = this_rq();
@@ -268,7 +281,7 @@ void calc_load_nohz_stop(void)
this_rq->calc_load_update += LOAD_FREQ;
}
-static long calc_load_nohz_fold(void)
+static long calc_load_nohz_read(void)
{
int idx = calc_load_read_idx();
long delta = 0;
@@ -323,7 +336,7 @@ static void calc_global_nohz(void)
}
#else /* !CONFIG_NO_HZ_COMMON */
-static inline long calc_load_nohz_fold(void) { return 0; }
+static inline long calc_load_nohz_read(void) { return 0; }
static inline void calc_global_nohz(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
@@ -346,7 +359,7 @@ void calc_global_load(unsigned long ticks)
/*
* Fold the 'old' NO_HZ-delta to include all NO_HZ CPUs.
*/
- delta = calc_load_nohz_fold();
+ delta = calc_load_nohz_read();
if (delta)
atomic_long_add(delta, &calc_load_tasks);
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index ac4bd0ca11cc..028520702717 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -1199,6 +1199,9 @@ static ssize_t psi_write(struct file *file, const char __user *user_buf,
if (static_branch_likely(&psi_disabled))
return -EOPNOTSUPP;
+ if (!nbytes)
+ return -EINVAL;
+
buf_size = min(nbytes, sizeof(buf));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1a88dc8ad11b..9ea647835fd6 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -896,7 +896,7 @@ struct rq {
*/
unsigned long nr_uninterruptible;
- struct task_struct *curr;
+ struct task_struct __rcu *curr;
struct task_struct *idle;
struct task_struct *stop;
unsigned long next_balance;
@@ -2479,3 +2479,16 @@ static inline void membarrier_switch_mm(struct rq *rq,
{
}
#endif
+
+#ifdef CONFIG_SMP
+static inline bool is_per_cpu_kthread(struct task_struct *p)
+{
+ if (!(p->flags & PF_KTHREAD))
+ return false;
+
+ if (p->nr_cpus_allowed != 1)
+ return false;
+
+ return true;
+}
+#endif
diff --git a/kernel/signal.c b/kernel/signal.c
index 9ad8dea93dbb..5b2396350dd1 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -413,27 +413,32 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
{
struct sigqueue *q = NULL;
struct user_struct *user;
+ int sigpending;
/*
* Protect access to @t credentials. This can go away when all
* callers hold rcu read lock.
+ *
+ * NOTE! A pending signal will hold on to the user refcount,
+ * and we get/put the refcount only when the sigpending count
+ * changes from/to zero.
*/
rcu_read_lock();
- user = get_uid(__task_cred(t)->user);
- atomic_inc(&user->sigpending);
+ user = __task_cred(t)->user;
+ sigpending = atomic_inc_return(&user->sigpending);
+ if (sigpending == 1)
+ get_uid(user);
rcu_read_unlock();
- if (override_rlimit ||
- atomic_read(&user->sigpending) <=
- task_rlimit(t, RLIMIT_SIGPENDING)) {
+ if (override_rlimit || likely(sigpending <= task_rlimit(t, RLIMIT_SIGPENDING))) {
q = kmem_cache_alloc(sigqueue_cachep, flags);
} else {
print_dropped_signal(sig);
}
if (unlikely(q == NULL)) {
- atomic_dec(&user->sigpending);
- free_uid(user);
+ if (atomic_dec_and_test(&user->sigpending))
+ free_uid(user);
} else {
INIT_LIST_HEAD(&q->list);
q->flags = 0;
@@ -447,8 +452,8 @@ static void __sigqueue_free(struct sigqueue *q)
{
if (q->flags & SIGQUEUE_PREALLOC)
return;
- atomic_dec(&q->user->sigpending);
- free_uid(q->user);
+ if (atomic_dec_and_test(&q->user->sigpending))
+ free_uid(q->user);
kmem_cache_free(sigqueue_cachep, q);
}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index d396aaaf19a3..ad5b88a53c5a 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -805,15 +805,6 @@ static struct ctl_table kern_table[] = {
.extra2 = &maxolduid,
},
#ifdef CONFIG_S390
-#ifdef CONFIG_MATHEMU
- {
- .procname = "ieee_emulation_warnings",
- .data = &sysctl_ieee_emulation_warnings,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = proc_dointvec,
- },
-#endif
{
.procname = "userprocess_debug",
.data = &show_unhandled_signals,
diff --git a/kernel/time/time.c b/kernel/time/time.c
index cdd7386115ff..3985b2b32d08 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -449,49 +449,6 @@ time64_t mktime64(const unsigned int year0, const unsigned int mon0,
}
EXPORT_SYMBOL(mktime64);
-/**
- * ns_to_timespec - Convert nanoseconds to timespec
- * @nsec: the nanoseconds value to be converted
- *
- * Returns the timespec representation of the nsec parameter.
- */
-struct timespec ns_to_timespec(const s64 nsec)
-{
- struct timespec ts;
- s32 rem;
-
- if (!nsec)
- return (struct timespec) {0, 0};
-
- ts.tv_sec = div_s64_rem(nsec, NSEC_PER_SEC, &rem);
- if (unlikely(rem < 0)) {
- ts.tv_sec--;
- rem += NSEC_PER_SEC;
- }
- ts.tv_nsec = rem;
-
- return ts;
-}
-EXPORT_SYMBOL(ns_to_timespec);
-
-/**
- * ns_to_timeval - Convert nanoseconds to timeval
- * @nsec: the nanoseconds value to be converted
- *
- * Returns the timeval representation of the nsec parameter.
- */
-struct timeval ns_to_timeval(const s64 nsec)
-{
- struct timespec ts = ns_to_timespec(nsec);
- struct timeval tv;
-
- tv.tv_sec = ts.tv_sec;
- tv.tv_usec = (suseconds_t) ts.tv_nsec / 1000;
-
- return tv;
-}
-EXPORT_SYMBOL(ns_to_timeval);
-
struct __kernel_old_timeval ns_to_kernel_old_timeval(const s64 nsec)
{
struct timespec64 ts = ns_to_timespec64(nsec);
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 91e885194dbc..402eef84c859 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -143,8 +143,8 @@ if FTRACE
config BOOTTIME_TRACING
bool "Boot-time Tracing support"
- depends on BOOT_CONFIG && TRACING
- default y
+ depends on TRACING
+ select BOOT_CONFIG
help
Enable developer to setup ftrace subsystem via supplemental
kernel cmdline at boot time for debugging (tracing) driver
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 0735ae8545d8..ca39dc3230cb 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -335,6 +335,7 @@ static void put_probe_ref(void)
static void blk_trace_cleanup(struct blk_trace *bt)
{
+ synchronize_rcu();
blk_trace_free(bt);
put_probe_ref();
}
@@ -629,8 +630,10 @@ static int compat_blk_trace_setup(struct request_queue *q, char *name,
static int __blk_trace_startstop(struct request_queue *q, int start)
{
int ret;
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (bt == NULL)
return -EINVAL;
@@ -740,8 +743,8 @@ int blk_trace_ioctl(struct block_device *bdev, unsigned cmd, char __user *arg)
void blk_trace_shutdown(struct request_queue *q)
{
mutex_lock(&q->blk_trace_mutex);
-
- if (q->blk_trace) {
+ if (rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex))) {
__blk_trace_startstop(q, 0);
__blk_trace_remove(q);
}
@@ -752,8 +755,10 @@ void blk_trace_shutdown(struct request_queue *q)
#ifdef CONFIG_BLK_CGROUP
static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ /* We don't use the 'bt' value here except as an optimization... */
+ bt = rcu_dereference_protected(q->blk_trace, 1);
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
return 0;
@@ -796,10 +801,14 @@ blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
static void blk_add_trace_rq(struct request *rq, int error,
unsigned int nr_bytes, u32 what, u64 cgid)
{
- struct blk_trace *bt = rq->q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(rq->q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
if (blk_rq_is_passthrough(rq))
what |= BLK_TC_ACT(BLK_TC_PC);
@@ -808,6 +817,7 @@ static void blk_add_trace_rq(struct request *rq, int error,
__blk_add_trace(bt, blk_rq_trace_sector(rq), nr_bytes, req_op(rq),
rq->cmd_flags, what, error, 0, NULL, cgid);
+ rcu_read_unlock();
}
static void blk_add_trace_rq_insert(void *ignore,
@@ -853,14 +863,19 @@ static void blk_add_trace_rq_complete(void *ignore, struct request *rq,
static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
u32 what, int error)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio_op(bio), bio->bi_opf, what, error, 0, NULL,
blk_trace_bio_get_cgid(q, bio));
+ rcu_read_unlock();
}
static void blk_add_trace_bio_bounce(void *ignore,
@@ -905,11 +920,14 @@ static void blk_add_trace_getrq(void *ignore,
if (bio)
blk_add_trace_bio(q, bio, BLK_TA_GETRQ, 0);
else {
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
NULL, 0);
+ rcu_read_unlock();
}
}
@@ -921,27 +939,35 @@ static void blk_add_trace_sleeprq(void *ignore,
if (bio)
blk_add_trace_bio(q, bio, BLK_TA_SLEEPRQ, 0);
else {
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
0, 0, NULL, 0);
+ rcu_read_unlock();
}
}
static void blk_add_trace_plug(void *ignore, struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt)
__blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
+ rcu_read_unlock();
}
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
unsigned int depth, bool explicit)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(depth);
u32 what;
@@ -953,14 +979,17 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
__blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
}
+ rcu_read_unlock();
}
static void blk_add_trace_split(void *ignore,
struct request_queue *q, struct bio *bio,
unsigned int pdu)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
if (bt) {
__be64 rpdu = cpu_to_be64(pdu);
@@ -969,6 +998,7 @@ static void blk_add_trace_split(void *ignore,
BLK_TA_SPLIT, bio->bi_status, sizeof(rpdu),
&rpdu, blk_trace_bio_get_cgid(q, bio));
}
+ rcu_read_unlock();
}
/**
@@ -988,11 +1018,15 @@ static void blk_add_trace_bio_remap(void *ignore,
struct request_queue *q, struct bio *bio,
dev_t dev, sector_t from)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
struct blk_io_trace_remap r;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(bio_dev(bio));
@@ -1001,6 +1035,7 @@ static void blk_add_trace_bio_remap(void *ignore,
__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
bio_op(bio), bio->bi_opf, BLK_TA_REMAP, bio->bi_status,
sizeof(r), &r, blk_trace_bio_get_cgid(q, bio));
+ rcu_read_unlock();
}
/**
@@ -1021,11 +1056,15 @@ static void blk_add_trace_rq_remap(void *ignore,
struct request *rq, dev_t dev,
sector_t from)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
struct blk_io_trace_remap r;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
r.device_from = cpu_to_be32(dev);
r.device_to = cpu_to_be32(disk_devt(rq->rq_disk));
@@ -1034,6 +1073,7 @@ static void blk_add_trace_rq_remap(void *ignore,
__blk_add_trace(bt, blk_rq_pos(rq), blk_rq_bytes(rq),
rq_data_dir(rq), 0, BLK_TA_REMAP, 0,
sizeof(r), &r, blk_trace_request_get_cgid(q, rq));
+ rcu_read_unlock();
}
/**
@@ -1051,14 +1091,19 @@ void blk_add_driver_data(struct request_queue *q,
struct request *rq,
void *data, size_t len)
{
- struct blk_trace *bt = q->blk_trace;
+ struct blk_trace *bt;
- if (likely(!bt))
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ if (likely(!bt)) {
+ rcu_read_unlock();
return;
+ }
__blk_add_trace(bt, blk_rq_trace_sector(rq), blk_rq_bytes(rq), 0, 0,
BLK_TA_DRV_DATA, 0, len, data,
blk_trace_request_get_cgid(q, rq));
+ rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(blk_add_driver_data);
@@ -1597,6 +1642,7 @@ static int blk_trace_remove_queue(struct request_queue *q)
return -EINVAL;
put_probe_ref();
+ synchronize_rcu();
blk_trace_free(bt);
return 0;
}
@@ -1758,6 +1804,7 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
struct hd_struct *p = dev_to_part(dev);
struct request_queue *q;
struct block_device *bdev;
+ struct blk_trace *bt;
ssize_t ret = -ENXIO;
bdev = bdget(part_devt(p));
@@ -1770,21 +1817,23 @@ static ssize_t sysfs_blk_trace_attr_show(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) {
- ret = sprintf(buf, "%u\n", !!q->blk_trace);
+ ret = sprintf(buf, "%u\n", !!bt);
goto out_unlock_bdev;
}
- if (q->blk_trace == NULL)
+ if (bt == NULL)
ret = sprintf(buf, "disabled\n");
else if (attr == &dev_attr_act_mask)
- ret = blk_trace_mask2str(buf, q->blk_trace->act_mask);
+ ret = blk_trace_mask2str(buf, bt->act_mask);
else if (attr == &dev_attr_pid)
- ret = sprintf(buf, "%u\n", q->blk_trace->pid);
+ ret = sprintf(buf, "%u\n", bt->pid);
else if (attr == &dev_attr_start_lba)
- ret = sprintf(buf, "%llu\n", q->blk_trace->start_lba);
+ ret = sprintf(buf, "%llu\n", bt->start_lba);
else if (attr == &dev_attr_end_lba)
- ret = sprintf(buf, "%llu\n", q->blk_trace->end_lba);
+ ret = sprintf(buf, "%llu\n", bt->end_lba);
out_unlock_bdev:
mutex_unlock(&q->blk_trace_mutex);
@@ -1801,6 +1850,7 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
struct block_device *bdev;
struct request_queue *q;
struct hd_struct *p;
+ struct blk_trace *bt;
u64 value;
ssize_t ret = -EINVAL;
@@ -1831,8 +1881,10 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
mutex_lock(&q->blk_trace_mutex);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
if (attr == &dev_attr_enable) {
- if (!!value == !!q->blk_trace) {
+ if (!!value == !!bt) {
ret = 0;
goto out_unlock_bdev;
}
@@ -1844,18 +1896,21 @@ static ssize_t sysfs_blk_trace_attr_store(struct device *dev,
}
ret = 0;
- if (q->blk_trace == NULL)
+ if (bt == NULL) {
ret = blk_trace_setup_queue(q, bdev);
+ bt = rcu_dereference_protected(q->blk_trace,
+ lockdep_is_held(&q->blk_trace_mutex));
+ }
if (ret == 0) {
if (attr == &dev_attr_act_mask)
- q->blk_trace->act_mask = value;
+ bt->act_mask = value;
else if (attr == &dev_attr_pid)
- q->blk_trace->pid = value;
+ bt->pid = value;
else if (attr == &dev_attr_start_lba)
- q->blk_trace->start_lba = value;
+ bt->start_lba = value;
else if (attr == &dev_attr_end_lba)
- q->blk_trace->end_lba = value;
+ bt->end_lba = value;
}
out_unlock_bdev:
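The blktrace conversion above follows the usual RCU split: lockless readers bracket the q->blk_trace access with rcu_read_lock()/rcu_dereference(), paths holding blk_trace_mutex use rcu_dereference_protected() with a lockdep expression, and the teardown paths add synchronize_rcu() before blk_trace_free() so no reader can still hold a pointer to the freed structure. A condensed sketch of the same pattern on a hypothetical example_trace pointer:

	#include <linux/mutex.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct example { int hits; };
	static struct example __rcu *example_trace;
	static DEFINE_MUTEX(example_mutex);

	/* Hot path: may run concurrently with teardown(). */
	static void reader(void)
	{
		struct example *e;

		rcu_read_lock();
		e = rcu_dereference(example_trace);
		if (e)
			e->hits++;	/* valid until rcu_read_unlock() */
		rcu_read_unlock();
	}

	/* Teardown: unpublish, wait for readers, then free. */
	static void teardown(void)
	{
		struct example *e;

		mutex_lock(&example_mutex);
		e = rcu_dereference_protected(example_trace,
					      lockdep_is_held(&example_mutex));
		RCU_INIT_POINTER(example_trace, NULL);
		mutex_unlock(&example_mutex);

		synchronize_rcu();	/* all readers of 'e' are done */
		kfree(e);
	}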
diff --git a/kernel/trace/synth_event_gen_test.c b/kernel/trace/synth_event_gen_test.c
index 4aefe003cb7c..7d56d621ffea 100644
--- a/kernel/trace/synth_event_gen_test.c
+++ b/kernel/trace/synth_event_gen_test.c
@@ -111,11 +111,11 @@ static int __init test_gen_synth_cmd(void)
/* Create some bogus values just for testing */
vals[0] = 777; /* next_pid_field */
- vals[1] = (u64)"hula hoops"; /* next_comm_field */
+ vals[1] = (u64)(long)"hula hoops"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
vals[3] = 1000; /* ts_ms */
- vals[4] = smp_processor_id(); /* cpu */
- vals[5] = (u64)"thneed"; /* my_string_field */
+ vals[4] = raw_smp_processor_id(); /* cpu */
+ vals[5] = (u64)(long)"thneed"; /* my_string_field */
vals[6] = 598; /* my_int_field */
/* Now generate a gen_synth_test event */
@@ -218,11 +218,11 @@ static int __init test_empty_synth_event(void)
/* Create some bogus values just for testing */
vals[0] = 777; /* next_pid_field */
- vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
+ vals[1] = (u64)(long)"tiddlywinks"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
vals[3] = 1000; /* ts_ms */
- vals[4] = smp_processor_id(); /* cpu */
- vals[5] = (u64)"thneed_2.0"; /* my_string_field */
+ vals[4] = raw_smp_processor_id(); /* cpu */
+ vals[5] = (u64)(long)"thneed_2.0"; /* my_string_field */
vals[6] = 399; /* my_int_field */
/* Now trace an empty_synth_test event */
@@ -290,11 +290,11 @@ static int __init test_create_synth_event(void)
/* Create some bogus values just for testing */
vals[0] = 777; /* next_pid_field */
- vals[1] = (u64)"tiddlywinks"; /* next_comm_field */
+ vals[1] = (u64)(long)"tiddlywinks"; /* next_comm_field */
vals[2] = 1000000; /* ts_ns */
vals[3] = 1000; /* ts_ms */
- vals[4] = smp_processor_id(); /* cpu */
- vals[5] = (u64)"thneed"; /* my_string_field */
+ vals[4] = raw_smp_processor_id(); /* cpu */
+ vals[5] = (u64)(long)"thneed"; /* my_string_field */
vals[6] = 398; /* my_int_field */
/* Now generate a create_synth_test event */
@@ -330,7 +330,7 @@ static int __init test_add_next_synth_val(void)
goto out;
/* next_comm_field */
- ret = synth_event_add_next_val((u64)"slinky", &trace_state);
+ ret = synth_event_add_next_val((u64)(long)"slinky", &trace_state);
if (ret)
goto out;
@@ -345,12 +345,12 @@ static int __init test_add_next_synth_val(void)
goto out;
/* cpu */
- ret = synth_event_add_next_val(smp_processor_id(), &trace_state);
+ ret = synth_event_add_next_val(raw_smp_processor_id(), &trace_state);
if (ret)
goto out;
/* my_string_field */
- ret = synth_event_add_next_val((u64)"thneed_2.01", &trace_state);
+ ret = synth_event_add_next_val((u64)(long)"thneed_2.01", &trace_state);
if (ret)
goto out;
@@ -388,7 +388,7 @@ static int __init test_add_synth_val(void)
if (ret)
goto out;
- ret = synth_event_add_val("cpu", smp_processor_id(), &trace_state);
+ ret = synth_event_add_val("cpu", raw_smp_processor_id(), &trace_state);
if (ret)
goto out;
@@ -396,12 +396,12 @@ static int __init test_add_synth_val(void)
if (ret)
goto out;
- ret = synth_event_add_val("next_comm_field", (u64)"silly putty",
+ ret = synth_event_add_val("next_comm_field", (u64)(long)"silly putty",
&trace_state);
if (ret)
goto out;
- ret = synth_event_add_val("my_string_field", (u64)"thneed_9",
+ ret = synth_event_add_val("my_string_field", (u64)(long)"thneed_9",
&trace_state);
if (ret)
goto out;
@@ -423,13 +423,13 @@ static int __init test_trace_synth_event(void)
/* Trace some bogus values just for testing */
ret = synth_event_trace(create_synth_test, 7, /* number of values */
- 444, /* next_pid_field */
- (u64)"clackers", /* next_comm_field */
- 1000000, /* ts_ns */
- 1000, /* ts_ms */
- smp_processor_id(), /* cpu */
- (u64)"Thneed", /* my_string_field */
- 999); /* my_int_field */
+ (u64)444, /* next_pid_field */
+ (u64)(long)"clackers", /* next_comm_field */
+ (u64)1000000, /* ts_ns */
+ (u64)1000, /* ts_ms */
+ (u64)raw_smp_processor_id(), /* cpu */
+ (u64)(long)"Thneed", /* my_string_field */
+ (u64)999); /* my_int_field */
return ret;
}
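Two recurring fixes in the test above: string pointers are now cast through (long) before widening to u64, which avoids a pointer-to-integer-of-different-size warning on 32-bit builds, and smp_processor_id() becomes raw_smp_processor_id(), which may legally be called from preemptible context; the test only needs a plausible CPU number, not a stable one. The cast idiom in isolation (helper name invented):

	#include <linux/types.h>

	/* Widen a string pointer for a u64 argument slot: casting
	 * through (long) keeps 32-bit builds free of
	 * pointer-to-int-cast warnings.
	 */
	static u64 pack_string_arg(const char *s)
	{
		return (u64)(long)s;
	}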
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c797a15a1fc7..6b11e4e2150c 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1837,6 +1837,7 @@ static __init int init_trace_selftests(void)
pr_info("Running postponed tracer tests:\n");
+ tracing_selftest_running = true;
list_for_each_entry_safe(p, n, &postponed_selftests, list) {
/* This loop can take minutes when sanitizers are enabled, so
* lets make sure we allow RCU processing.
@@ -1859,6 +1860,7 @@ static __init int init_trace_selftests(void)
list_del(&p->list);
kfree(p);
}
+ tracing_selftest_running = false;
out:
mutex_unlock(&trace_types_lock);
diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c
index e7ce7cdac62f..5f6834a2bf41 100644
--- a/kernel/trace/trace_events_hist.c
+++ b/kernel/trace/trace_events_hist.c
@@ -821,6 +821,29 @@ static const char *synth_field_fmt(char *type)
return fmt;
}
+static void print_synth_event_num_val(struct trace_seq *s,
+ char *print_fmt, char *name,
+ int size, u64 val, char *space)
+{
+ switch (size) {
+ case 1:
+ trace_seq_printf(s, print_fmt, name, (u8)val, space);
+ break;
+
+ case 2:
+ trace_seq_printf(s, print_fmt, name, (u16)val, space);
+ break;
+
+ case 4:
+ trace_seq_printf(s, print_fmt, name, (u32)val, space);
+ break;
+
+ default:
+ trace_seq_printf(s, print_fmt, name, val, space);
+ break;
+ }
+}
+
static enum print_line_t print_synth_event(struct trace_iterator *iter,
int flags,
struct trace_event *event)
@@ -859,10 +882,13 @@ static enum print_line_t print_synth_event(struct trace_iterator *iter,
} else {
struct trace_print_flags __flags[] = {
__def_gfpflag_names, {-1, NULL} };
+ char *space = (i == se->n_fields - 1 ? "" : " ");
- trace_seq_printf(s, print_fmt, se->fields[i]->name,
- entry->fields[n_u64],
- i == se->n_fields - 1 ? "" : " ");
+ print_synth_event_num_val(s, print_fmt,
+ se->fields[i]->name,
+ se->fields[i]->size,
+ entry->fields[n_u64],
+ space);
if (strcmp(se->fields[i]->type, "gfp_t") == 0) {
trace_seq_puts(s, " (");
@@ -1798,6 +1824,62 @@ void synth_event_cmd_init(struct dynevent_cmd *cmd, char *buf, int maxlen)
}
EXPORT_SYMBOL_GPL(synth_event_cmd_init);
+static inline int
+__synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state)
+{
+ int entry_size, fields_size = 0;
+ int ret = 0;
+
+ memset(trace_state, '\0', sizeof(*trace_state));
+
+ /*
+ * Normal event tracing doesn't get called at all unless the
+ * ENABLED bit is set (which attaches the probe thus allowing
+ * this code to be called, etc). Because this is called
+ * directly by the user, we don't have that but we still need
+	 * to honor not logging when disabled. For the iterated
+	 * trace case, we save the enabled state upon start and just
+ * ignore the following data calls.
+ */
+ if (!(file->flags & EVENT_FILE_FL_ENABLED) ||
+ trace_trigger_soft_disabled(file)) {
+ trace_state->disabled = true;
+ ret = -ENOENT;
+ goto out;
+ }
+
+ trace_state->event = file->event_call->data;
+
+ fields_size = trace_state->event->n_u64 * sizeof(u64);
+
+ /*
+ * Avoid ring buffer recursion detection, as this event
+ * is being performed within another event.
+ */
+ trace_state->buffer = file->tr->array_buffer.buffer;
+ ring_buffer_nest_start(trace_state->buffer);
+
+ entry_size = sizeof(*trace_state->entry) + fields_size;
+ trace_state->entry = trace_event_buffer_reserve(&trace_state->fbuffer,
+ file,
+ entry_size);
+ if (!trace_state->entry) {
+ ring_buffer_nest_end(trace_state->buffer);
+ ret = -EINVAL;
+ }
+out:
+ return ret;
+}
+
+static inline void
+__synth_event_trace_end(struct synth_event_trace_state *trace_state)
+{
+ trace_event_buffer_commit(&trace_state->fbuffer);
+
+ ring_buffer_nest_end(trace_state->buffer);
+}
+
/**
* synth_event_trace - Trace a synthetic event
* @file: The trace_event_file representing the synthetic event
@@ -1819,71 +1901,61 @@ EXPORT_SYMBOL_GPL(synth_event_cmd_init);
*/
int synth_event_trace(struct trace_event_file *file, unsigned int n_vals, ...)
{
- struct trace_event_buffer fbuffer;
- struct synth_trace_event *entry;
- struct trace_buffer *buffer;
- struct synth_event *event;
+ struct synth_event_trace_state state;
unsigned int i, n_u64;
- int fields_size = 0;
va_list args;
- int ret = 0;
-
- /*
- * Normal event generation doesn't get called at all unless
- * the ENABLED bit is set (which attaches the probe thus
- * allowing this code to be called, etc). Because this is
- * called directly by the user, we don't have that but we
- * still need to honor not logging when disabled.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED))
- return 0;
-
- event = file->event_call->data;
-
- if (n_vals != event->n_fields)
- return -EINVAL;
-
- if (trace_trigger_soft_disabled(file))
- return -EINVAL;
-
- fields_size = event->n_u64 * sizeof(u64);
+ int ret;
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
+ ret = __synth_event_trace_start(file, &state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
+ }
- entry = trace_event_buffer_reserve(&fbuffer, file,
- sizeof(*entry) + fields_size);
- if (!entry) {
+ if (n_vals != state.event->n_fields) {
ret = -EINVAL;
goto out;
}
va_start(args, n_vals);
- for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
+ for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
u64 val;
val = va_arg(args, u64);
- if (event->fields[i]->is_string) {
+ if (state.event->fields[i]->is_string) {
char *str_val = (char *)(long)val;
- char *str_field = (char *)&entry->fields[n_u64];
+ char *str_field = (char *)&state.entry->fields[n_u64];
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
- entry->fields[n_u64] = val;
+ struct synth_field *field = state.event->fields[i];
+
+ switch (field->size) {
+ case 1:
+ *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ break;
+
+ default:
+ state.entry->fields[n_u64] = val;
+ break;
+ }
n_u64++;
}
}
va_end(args);
-
- trace_event_buffer_commit(&fbuffer);
out:
- ring_buffer_nest_end(buffer);
+ __synth_event_trace_end(&state);
return ret;
}
@@ -1910,64 +1982,55 @@ EXPORT_SYMBOL_GPL(synth_event_trace);
int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
unsigned int n_vals)
{
- struct trace_event_buffer fbuffer;
- struct synth_trace_event *entry;
- struct trace_buffer *buffer;
- struct synth_event *event;
+ struct synth_event_trace_state state;
unsigned int i, n_u64;
- int fields_size = 0;
- int ret = 0;
-
- /*
- * Normal event generation doesn't get called at all unless
- * the ENABLED bit is set (which attaches the probe thus
- * allowing this code to be called, etc). Because this is
- * called directly by the user, we don't have that but we
- * still need to honor not logging when disabled.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED))
- return 0;
-
- event = file->event_call->data;
-
- if (n_vals != event->n_fields)
- return -EINVAL;
-
- if (trace_trigger_soft_disabled(file))
- return -EINVAL;
-
- fields_size = event->n_u64 * sizeof(u64);
+ int ret;
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(buffer);
+ ret = __synth_event_trace_start(file, &state);
+ if (ret) {
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
+ return ret;
+ }
- entry = trace_event_buffer_reserve(&fbuffer, file,
- sizeof(*entry) + fields_size);
- if (!entry) {
+ if (n_vals != state.event->n_fields) {
ret = -EINVAL;
goto out;
}
- for (i = 0, n_u64 = 0; i < event->n_fields; i++) {
- if (event->fields[i]->is_string) {
+ for (i = 0, n_u64 = 0; i < state.event->n_fields; i++) {
+ if (state.event->fields[i]->is_string) {
char *str_val = (char *)(long)vals[i];
- char *str_field = (char *)&entry->fields[n_u64];
+ char *str_field = (char *)&state.entry->fields[n_u64];
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
n_u64 += STR_VAR_LEN_MAX / sizeof(u64);
} else {
- entry->fields[n_u64] = vals[i];
+ struct synth_field *field = state.event->fields[i];
+ u64 val = vals[i];
+
+ switch (field->size) {
+ case 1:
+ *(u8 *)&state.entry->fields[n_u64] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&state.entry->fields[n_u64] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&state.entry->fields[n_u64] = (u32)val;
+ break;
+
+ default:
+ state.entry->fields[n_u64] = val;
+ break;
+ }
n_u64++;
}
}
-
- trace_event_buffer_commit(&fbuffer);
out:
- ring_buffer_nest_end(buffer);
+ __synth_event_trace_end(&state);
return ret;
}
@@ -2004,58 +2067,15 @@ EXPORT_SYMBOL_GPL(synth_event_trace_array);
int synth_event_trace_start(struct trace_event_file *file,
struct synth_event_trace_state *trace_state)
{
- struct synth_trace_event *entry;
- int fields_size = 0;
- int ret = 0;
-
- if (!trace_state) {
- ret = -EINVAL;
- goto out;
- }
-
- memset(trace_state, '\0', sizeof(*trace_state));
-
- /*
- * Normal event tracing doesn't get called at all unless the
- * ENABLED bit is set (which attaches the probe thus allowing
- * this code to be called, etc). Because this is called
- * directly by the user, we don't have that but we still need
- * to honor not logging when disabled. For the the iterated
- * trace case, we save the enabed state upon start and just
- * ignore the following data calls.
- */
- if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
- trace_state->enabled = false;
- goto out;
- }
-
- trace_state->enabled = true;
-
- trace_state->event = file->event_call->data;
-
- if (trace_trigger_soft_disabled(file)) {
- ret = -EINVAL;
- goto out;
- }
-
- fields_size = trace_state->event->n_u64 * sizeof(u64);
+ int ret;
- /*
- * Avoid ring buffer recursion detection, as this event
- * is being performed within another event.
- */
- trace_state->buffer = file->tr->array_buffer.buffer;
- ring_buffer_nest_start(trace_state->buffer);
+ if (!trace_state)
+ return -EINVAL;
- entry = trace_event_buffer_reserve(&trace_state->fbuffer, file,
- sizeof(*entry) + fields_size);
- if (!entry) {
- ret = -EINVAL;
- goto out;
- }
+ ret = __synth_event_trace_start(file, trace_state);
+ if (ret == -ENOENT)
+ ret = 0; /* just disabled, not really an error */
- trace_state->entry = entry;
-out:
return ret;
}
EXPORT_SYMBOL_GPL(synth_event_trace_start);
@@ -2088,7 +2108,7 @@ static int __synth_event_add_val(const char *field_name, u64 val,
trace_state->add_next = true;
}
- if (!trace_state->enabled)
+ if (trace_state->disabled)
goto out;
event = trace_state->event;
@@ -2122,8 +2142,25 @@ static int __synth_event_add_val(const char *field_name, u64 val,
str_field = (char *)&entry->fields[field->offset];
strscpy(str_field, str_val, STR_VAR_LEN_MAX);
- } else
- entry->fields[field->offset] = val;
+ } else {
+ switch (field->size) {
+ case 1:
+ *(u8 *)&trace_state->entry->fields[field->offset] = (u8)val;
+ break;
+
+ case 2:
+ *(u16 *)&trace_state->entry->fields[field->offset] = (u16)val;
+ break;
+
+ case 4:
+ *(u32 *)&trace_state->entry->fields[field->offset] = (u32)val;
+ break;
+
+ default:
+ trace_state->entry->fields[field->offset] = val;
+ break;
+ }
+ }
out:
return ret;
}
@@ -2223,9 +2260,7 @@ int synth_event_trace_end(struct synth_event_trace_state *trace_state)
if (!trace_state)
return -EINVAL;
- trace_event_buffer_commit(&trace_state->fbuffer);
-
- ring_buffer_nest_end(trace_state->buffer);
+ __synth_event_trace_end(trace_state);
return 0;
}
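With the prologue and epilogue factored into __synth_event_trace_start()/__synth_event_trace_end(), the three public entry points now differ only in how values arrive: varargs, an array, or piecewise between start and end. The piecewise flow exercised by the test module would look roughly like this (a sketch; assumes a trace_event_file for an already-created synthetic event with these field names):

	static int example_piecewise_trace(struct trace_event_file *file)
	{
		struct synth_event_trace_state state;
		int ret;

		ret = synth_event_trace_start(file, &state);
		if (ret)	/* returns 0 if the event is merely disabled */
			return ret;

		ret = synth_event_add_val("next_pid_field", 777, &state);
		if (!ret)
			ret = synth_event_add_val("next_comm_field",
						  (u64)(long)"thneed", &state);

		synth_event_trace_end(&state);	/* commits even after an error */
		return ret;
	}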
diff --git a/kernel/trace/trace_kprobe.c b/kernel/trace/trace_kprobe.c
index d8264ebb9581..362cca52f5de 100644
--- a/kernel/trace/trace_kprobe.c
+++ b/kernel/trace/trace_kprobe.c
@@ -1012,7 +1012,7 @@ int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...)
{
struct dynevent_arg arg;
va_list args;
- int ret;
+ int ret = 0;
if (cmd->type != DYNEVENT_TYPE_KPROBE)
return -EINVAL;
diff --git a/lib/Kconfig b/lib/Kconfig
index 0cf875fd627c..bc7e56370129 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -573,9 +573,6 @@ config DIMLIB
config LIBFDT
bool
-config LIBXBC
- bool
-
config OID_REGISTRY
tristate
help
diff --git a/lib/Makefile b/lib/Makefile
index 5d64890d6b6a..611872c06926 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -230,7 +230,7 @@ $(foreach file, $(libfdt_files), \
$(eval CFLAGS_$(file) = -I $(srctree)/scripts/dtc/libfdt))
lib-$(CONFIG_LIBFDT) += $(libfdt_files)
-lib-$(CONFIG_LIBXBC) += bootconfig.o
+lib-$(CONFIG_BOOT_CONFIG) += bootconfig.o
obj-$(CONFIG_RBTREE_TEST) += rbtree_test.o
obj-$(CONFIG_INTERVAL_TREE_TEST) += interval_tree_test.o
diff --git a/lib/bootconfig.c b/lib/bootconfig.c
index afb2e767e6fe..ec3ce7fd299f 100644
--- a/lib/bootconfig.c
+++ b/lib/bootconfig.c
@@ -6,12 +6,13 @@
#define pr_fmt(fmt) "bootconfig: " fmt
+#include <linux/bootconfig.h>
#include <linux/bug.h>
#include <linux/ctype.h>
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/memblock.h>
#include <linux/printk.h>
-#include <linux/bootconfig.h>
#include <linux/string.h>
/*
@@ -23,7 +24,7 @@
* node (for array).
*/
-static struct xbc_node xbc_nodes[XBC_NODE_MAX] __initdata;
+static struct xbc_node *xbc_nodes __initdata;
static int xbc_node_num __initdata;
static char *xbc_data __initdata;
static size_t xbc_data_size __initdata;
@@ -532,7 +533,7 @@ struct xbc_node *find_match_node(struct xbc_node *node, char *k)
static int __init __xbc_add_key(char *k)
{
- struct xbc_node *node;
+ struct xbc_node *node, *child;
if (!xbc_valid_keyword(k))
return xbc_parse_error("Invalid keyword", k);
@@ -542,8 +543,12 @@ static int __init __xbc_add_key(char *k)
if (!last_parent) /* the first level */
node = find_match_node(xbc_nodes, k);
- else
- node = find_match_node(xbc_node_get_child(last_parent), k);
+ else {
+ child = xbc_node_get_child(last_parent);
+ if (child && xbc_node_is_value(child))
+ return xbc_parse_error("Subkey is mixed with value", k);
+ node = find_match_node(child, k);
+ }
if (node)
last_parent = node;
@@ -573,10 +578,10 @@ static int __init __xbc_parse_keys(char *k)
return __xbc_add_key(k);
}
-static int __init xbc_parse_kv(char **k, char *v)
+static int __init xbc_parse_kv(char **k, char *v, int op)
{
struct xbc_node *prev_parent = last_parent;
- struct xbc_node *node;
+ struct xbc_node *child;
char *next;
int c, ret;
@@ -584,12 +589,19 @@ static int __init xbc_parse_kv(char **k, char *v)
if (ret)
return ret;
+ child = xbc_node_get_child(last_parent);
+ if (child) {
+ if (xbc_node_is_key(child))
+ return xbc_parse_error("Value is mixed with subkey", v);
+ else if (op == '=')
+ return xbc_parse_error("Value is redefined", v);
+ }
+
c = __xbc_parse_value(&v, &next);
if (c < 0)
return c;
- node = xbc_add_sibling(v, XBC_VALUE);
- if (!node)
+ if (!xbc_add_sibling(v, XBC_VALUE))
return -ENOMEM;
if (c == ',') { /* Array */
@@ -719,7 +731,8 @@ void __init xbc_destroy_all(void)
xbc_data = NULL;
xbc_data_size = 0;
xbc_node_num = 0;
- memset(xbc_nodes, 0, sizeof(xbc_nodes));
+ memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX);
+ xbc_nodes = NULL;
}
/**
@@ -748,13 +761,20 @@ int __init xbc_init(char *buf)
return -ERANGE;
}
+ xbc_nodes = memblock_alloc(sizeof(struct xbc_node) * XBC_NODE_MAX,
+ SMP_CACHE_BYTES);
+ if (!xbc_nodes) {
+ pr_err("Failed to allocate memory for bootconfig nodes.\n");
+ return -ENOMEM;
+ }
+ memset(xbc_nodes, 0, sizeof(struct xbc_node) * XBC_NODE_MAX);
xbc_data = buf;
xbc_data_size = ret + 1;
last_parent = NULL;
p = buf;
do {
- q = strpbrk(p, "{}=;\n#");
+ q = strpbrk(p, "{}=+;\n#");
if (!q) {
p = skip_spaces(p);
if (*p != '\0')
@@ -765,8 +785,15 @@ int __init xbc_init(char *buf)
c = *q;
*q++ = '\0';
switch (c) {
+ case '+':
+ if (*q++ != '=') {
+ ret = xbc_parse_error("Wrong '+' operator",
+ q - 2);
+ break;
+ }
+ /* Fall through */
case '=':
- ret = xbc_parse_kv(&p, q);
+ ret = xbc_parse_kv(&p, q, c);
break;
case '{':
ret = xbc_open_brace(&p, q);
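Two behavioral changes land in the parser above: xbc_nodes moves from a static __initdata array to a memblock allocation that xbc_destroy_all() returns to the allocator, and the tokenizer gains a '+' operator so that "key += value" appends to an array while a plain '=' now refuses to redefine an existing value. Under those rules, an illustrative bootconfig fragment (keys invented) parses as follows:

	feature.param = 1	# first assignment is accepted
	feature.param = 2	# rejected: "Value is redefined"
	feature.param += 2	# accepted: value becomes the array 1, 2
	feature.param.sub = x	# rejected: "Subkey is mixed with value"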
diff --git a/lib/crypto/chacha20poly1305.c b/lib/crypto/chacha20poly1305.c
index 6d83cafebc69..ad0699ce702f 100644
--- a/lib/crypto/chacha20poly1305.c
+++ b/lib/crypto/chacha20poly1305.c
@@ -235,6 +235,9 @@ bool chacha20poly1305_crypt_sg_inplace(struct scatterlist *src,
__le64 lens[2];
} b __aligned(16);
+ if (WARN_ON(src_len > INT_MAX))
+ return false;
+
chacha_load_key(b.k, key);
b.iv[0] = 0;
diff --git a/lib/stackdepot.c b/lib/stackdepot.c
index ed717dd08ff3..81c69c08d1d1 100644
--- a/lib/stackdepot.c
+++ b/lib/stackdepot.c
@@ -83,15 +83,19 @@ static bool init_stack_slab(void **prealloc)
return true;
if (stack_slabs[depot_index] == NULL) {
stack_slabs[depot_index] = *prealloc;
+ *prealloc = NULL;
} else {
- stack_slabs[depot_index + 1] = *prealloc;
+ /* If this is the last depot slab, do not touch the next one. */
+ if (depot_index + 1 < STACK_ALLOC_MAX_SLABS) {
+ stack_slabs[depot_index + 1] = *prealloc;
+ *prealloc = NULL;
+ }
/*
* This smp_store_release pairs with smp_load_acquire() from
* |next_slab_inited| above and in stack_depot_save().
*/
smp_store_release(&next_slab_inited, 1);
}
- *prealloc = NULL;
return true;
}
diff --git a/lib/string.c b/lib/string.c
index f607b967d978..6012c385fb31 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -699,6 +699,14 @@ EXPORT_SYMBOL(sysfs_streq);
* @n: number of strings in the array or -1 for NULL terminated arrays
* @string: string to match with
*
+ * This routine will look for a string in an array of strings up to the
+ * n-th element in the array or until the first NULL element.
+ *
+ * Historically, the value of -1 for @n was used to search in arrays that
+ * are NULL-terminated. However, the function does not make a distinction
+ * when finishing the search: either @n elements have been compared OR
+ * the first NULL element was found.
+ *
* Return:
* index of a @string in the @array if matches, or %-EINVAL otherwise.
*/
@@ -727,6 +735,14 @@ EXPORT_SYMBOL(match_string);
*
* Returns index of @str in the @array or -EINVAL, just like match_string().
* Uses sysfs_streq instead of strcmp for matching.
+ *
+ * This routine will look for a string in an array of strings up to the
+ * n-th element in the array or until the first NULL element.
+ *
+ * Historically, the value of -1 for @n was used to search in arrays that
+ * are NULL-terminated. However, the function does not make a distinction
+ * when finishing the search: either @n elements have been compared OR
+ * the first NULL element was found.
*/
int __sysfs_match_string(const char * const *array, size_t n, const char *str)
{
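The added kernel-doc pins down a subtlety: the search stops at whichever bound comes first, @n elements or the first NULL entry. A minimal usage sketch (table and helper invented):

	#include <linux/string.h>

	static const char * const modes[] = { "off", "on", "auto", NULL };

	static int parse_mode(const char *arg)
	{
		/* n = -1: rely on the NULL terminator as the bound */
		return match_string(modes, -1, arg);	/* index or -EINVAL */
	}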
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index b08b199f9a11..24ad53b4dfc0 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -3043,8 +3043,7 @@ void set_pmd_migration_entry(struct page_vma_mapped_walk *pvmw,
return;
flush_cache_range(vma, address, address + HPAGE_PMD_SIZE);
- pmdval = *pvmw->pmd;
- pmdp_invalidate(vma, address, pvmw->pmd);
+ pmdval = pmdp_invalidate(vma, address, pvmw->pmd);
if (pmd_dirty(pmdval))
set_page_dirty(page);
entry = make_migration_entry(page, pmd_write(pmdval));
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 6f6dc8712e39..d09776cd6e10 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -409,8 +409,10 @@ int memcg_expand_shrinker_maps(int new_id)
if (mem_cgroup_is_root(memcg))
continue;
ret = memcg_expand_one_shrinker_map(memcg, size, old_size);
- if (ret)
+ if (ret) {
+ mem_cgroup_iter_break(NULL, memcg);
goto unlock;
+ }
}
unlock:
if (!ret)
diff --git a/mm/memory.c b/mm/memory.c
index 0bccc622e482..e8bfdf0d9d1d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2257,7 +2257,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
bool ret;
void *kaddr;
void __user *uaddr;
- bool force_mkyoung;
+ bool locked = false;
struct vm_area_struct *vma = vmf->vma;
struct mm_struct *mm = vma->vm_mm;
unsigned long addr = vmf->address;
@@ -2282,11 +2282,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
* On architectures with software "accessed" bits, we would
* take a double page fault, so mark it accessed here.
*/
- force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
- if (force_mkyoung) {
+ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
pte_t entry;
vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ locked = true;
if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
/*
* Other thread has already handled the fault
@@ -2310,18 +2310,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
* zeroes.
*/
if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ if (locked)
+ goto warn;
+
+ /* Re-validate under PTL if the page is still mapped */
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
+ locked = true;
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /* The PTE changed under us. Retry page fault. */
+ ret = false;
+ goto pte_unlock;
+ }
+
/*
- * Give a warn in case there can be some obscure
- * use-case
+			 * The same page can be mapped back since the last copy attempt.
+ * Try to copy again under PTL.
*/
- WARN_ON_ONCE(1);
- clear_page(kaddr);
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ /*
+ * Give a warn in case there can be some obscure
+ * use-case
+ */
+warn:
+ WARN_ON_ONCE(1);
+ clear_page(kaddr);
+ }
}
ret = true;
pte_unlock:
- if (force_mkyoung)
+ if (locked)
pte_unmap_unlock(vmf->pte, vmf->ptl);
kunmap_atomic(kaddr);
flush_dcache_page(dst);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 0a54ffac8c68..19389cdc16a5 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -574,7 +574,13 @@ EXPORT_SYMBOL_GPL(restore_online_page_callback);
void generic_online_page(struct page *page, unsigned int order)
{
- kernel_map_pages(page, 1 << order, 1);
+ /*
+ * Freeing the page with debug_pagealloc enabled will try to unmap it,
+ * so we should map it first. This is better than introducing a special
+ * case in page freeing fast path.
+ */
+ if (debug_pagealloc_enabled_static())
+ kernel_map_pages(page, 1 << order, 1);
__free_pages_core(page, order);
totalram_pages_add(1UL << order);
#ifdef CONFIG_HIGHMEM
diff --git a/mm/mmap.c b/mm/mmap.c
index 6756b8bb0033..d681a20eb4ea 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -195,8 +195,6 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
bool downgraded = false;
LIST_HEAD(uf);
- brk = untagged_addr(brk);
-
if (down_write_killable(&mm->mmap_sem))
return -EINTR;
@@ -1557,8 +1555,6 @@ unsigned long ksys_mmap_pgoff(unsigned long addr, unsigned long len,
struct file *file = NULL;
unsigned long retval;
- addr = untagged_addr(addr);
-
if (!(flags & MAP_ANONYMOUS)) {
audit_mmap_fd(fd, flags);
file = fget(fd);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 7a8e84f86831..311c0dadf71c 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -161,6 +161,31 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
return pages;
}
+/*
+ * Used when setting automatic NUMA hinting protection where it is
+ * critical that a numa hinting PMD is not confused with a bad PMD.
+ */
+static inline int pmd_none_or_clear_bad_unless_trans_huge(pmd_t *pmd)
+{
+ pmd_t pmdval = pmd_read_atomic(pmd);
+
+ /* See pmd_none_or_trans_huge_or_clear_bad for info on barrier */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+ barrier();
+#endif
+
+ if (pmd_none(pmdval))
+ return 1;
+ if (pmd_trans_huge(pmdval))
+ return 0;
+ if (unlikely(pmd_bad(pmdval))) {
+ pmd_clear_bad(pmd);
+ return 1;
+ }
+
+ return 0;
+}
+
static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
pud_t *pud, unsigned long addr, unsigned long end,
pgprot_t newprot, int dirty_accountable, int prot_numa)
@@ -178,8 +203,17 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
unsigned long this_pages;
next = pmd_addr_end(addr, end);
- if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
- && pmd_none_or_clear_bad(pmd))
+
+ /*
+ * Automatic NUMA balancing walks the tables with mmap_sem
+		 * held for read. It's possible for a parallel update to occur
+ * between pmd_trans_huge() and a pmd_none_or_clear_bad()
+ * check leading to a false positive and clearing.
+ * Hence, it's necessary to atomically read the PMD value
+ * for all the checks.
+ */
+ if (!is_swap_pmd(*pmd) && !pmd_devmap(*pmd) &&
+ pmd_none_or_clear_bad_unless_trans_huge(pmd))
goto next;
/* invoke the mmu notifier if the pmd is populated */
diff --git a/mm/mremap.c b/mm/mremap.c
index 122938dcec15..af363063ea23 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -607,7 +607,6 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
LIST_HEAD(uf_unmap);
addr = untagged_addr(addr);
- new_addr = untagged_addr(new_addr);
if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
return ret;
diff --git a/mm/shmem.c b/mm/shmem.c
index c8f7540ef048..aad3ba74b0e9 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3386,8 +3386,6 @@ static const struct constant_table shmem_param_enums_huge[] = {
{"always", SHMEM_HUGE_ALWAYS },
{"within_size", SHMEM_HUGE_WITHIN_SIZE },
{"advise", SHMEM_HUGE_ADVISE },
- {"deny", SHMEM_HUGE_DENY },
- {"force", SHMEM_HUGE_FORCE },
{}
};
diff --git a/mm/sparse.c b/mm/sparse.c
index c184b69460b7..596b2a45b100 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -876,7 +876,7 @@ int __meminit sparse_add_section(int nid, unsigned long start_pfn,
* Poison uninitialized struct pages in order to catch invalid flags
* combinations.
*/
- page_init_poison(pfn_to_page(start_pfn), sizeof(struct page) * nr_pages);
+ page_init_poison(memmap, sizeof(struct page) * nr_pages);
ms = __nr_to_section(section_nr);
set_section_nid(section_nr, nid);
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 2c33ff456ed5..b2a2e45c9a36 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3157,7 +3157,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
mapping = swap_file->f_mapping;
inode = mapping->host;
- /* If S_ISREG(inode->i_mode) will do inode_lock(inode); */
+ /* will take i_rwsem; */
error = claim_swapfile(p, inode);
if (unlikely(error))
goto bad_swap;
diff --git a/mm/vmscan.c b/mm/vmscan.c
index c05eb9efec07..876370565455 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -2415,10 +2415,13 @@ out:
/*
* Scan types proportional to swappiness and
* their relative recent reclaim efficiency.
- * Make sure we don't miss the last page
- * because of a round-off error.
+ * Make sure we don't miss the last page on
+ * the offlined memory cgroups because of a
+ * round-off error.
*/
- scan = DIV64_U64_ROUND_UP(scan * fraction[file],
+ scan = mem_cgroup_online(memcg) ?
+ div64_u64(scan * fraction[file], denominator) :
+ DIV64_U64_ROUND_UP(scan * fraction[file],
denominator);
break;
case SCAN_FILE:
diff --git a/mm/z3fold.c b/mm/z3fold.c
index 43754d8ebce8..42f31c4b53ad 100644
--- a/mm/z3fold.c
+++ b/mm/z3fold.c
@@ -41,7 +41,6 @@
#include <linux/workqueue.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/rwlock.h>
#include <linux/zpool.h>
#include <linux/magic.h>
diff --git a/net/Kconfig b/net/Kconfig
index b0937a700f01..2eeb0e55f7c9 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -189,7 +189,6 @@ config BRIDGE_NETFILTER
depends on NETFILTER_ADVANCED
select NETFILTER_FAMILY_BRIDGE
select SKB_EXTENSIONS
- default m
---help---
Enabling this option will let arptables resp. iptables see bridged
ARP resp. IP traffic. If you want a bridging firewall, you probably
diff --git a/net/bridge/br_device.c b/net/bridge/br_device.c
index dc3d2c1dd9d5..0e3dbc5f3c34 100644
--- a/net/bridge/br_device.c
+++ b/net/bridge/br_device.c
@@ -34,7 +34,6 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
const struct nf_br_ops *nf_ops;
u8 state = BR_STATE_FORWARDING;
const unsigned char *dest;
- struct ethhdr *eth;
u16 vid = 0;
rcu_read_lock();
@@ -54,15 +53,14 @@ netdev_tx_t br_dev_xmit(struct sk_buff *skb, struct net_device *dev)
BR_INPUT_SKB_CB(skb)->frag_max_size = 0;
skb_reset_mac_header(skb);
- eth = eth_hdr(skb);
skb_pull(skb, ETH_HLEN);
if (!br_allowed_ingress(br, br_vlan_group_rcu(br), skb, &vid, &state))
goto out;
if (IS_ENABLED(CONFIG_INET) &&
- (eth->h_proto == htons(ETH_P_ARP) ||
- eth->h_proto == htons(ETH_P_RARP)) &&
+ (eth_hdr(skb)->h_proto == htons(ETH_P_ARP) ||
+ eth_hdr(skb)->h_proto == htons(ETH_P_RARP)) &&
br_opt_get(br, BROPT_NEIGH_SUPPRESS_ENABLED)) {
br_do_proxy_suppress_arp(skb, br, vid, NULL);
} else if (IS_ENABLED(CONFIG_IPV6) &&
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index 6856a6d9282b..1f14b8455345 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -63,7 +63,8 @@ struct net_bridge_port *br_get_port(struct net_bridge *br, u16 port_no)
{
struct net_bridge_port *p;
- list_for_each_entry_rcu(p, &br->port_list, list) {
+ list_for_each_entry_rcu(p, &br->port_list, list,
+ lockdep_is_held(&br->lock)) {
if (p->port_no == port_no)
return p;
}
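This is one of several hunks in the series (see also hsr_add_node() and devlink_dpipe_table_find() below) adopting the four-argument form of list_for_each_entry_rcu(), whose optional lockdep expression tells RCU-lockdep that holding the relevant lock is as good as a read-side critical section. The idiom on an invented list:

	#include <linux/mutex.h>
	#include <linux/rculist.h>

	struct item { struct list_head list; int id; };

	static LIST_HEAD(example_list);
	static DEFINE_MUTEX(example_lock);

	static struct item *find_item(int id)
	{
		struct item *it;

		/* Safe under rcu_read_lock() OR example_lock, without a
		 * false RCU-lockdep splat in the locked case.
		 */
		list_for_each_entry_rcu(it, &example_list, list,
					lockdep_is_held(&example_lock)) {
			if (it->id == id)
				return it;
		}
		return NULL;
	}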
diff --git a/net/core/dev.c b/net/core/dev.c
index a69e8bd7ed74..c6c985fe7b1b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -146,7 +146,6 @@
#include "net-sysfs.h"
#define MAX_GRO_SKBS 8
-#define MAX_NEST_DEV 8
/* This should be increased if a protocol with a bigger head is added. */
#define GRO_MAX_HEAD (MAX_HEADER + 128)
@@ -331,6 +330,12 @@ int netdev_name_node_alt_destroy(struct net_device *dev, const char *name)
name_node = netdev_name_node_lookup(net, name);
if (!name_node)
return -ENOENT;
+ /* lookup might have found our primary name or a name belonging
+ * to another device.
+ */
+ if (name_node == dev->name_node || name_node->dev != dev)
+ return -EINVAL;
+
__netdev_name_node_alt_destroy(name_node);
return 0;
@@ -3071,6 +3076,8 @@ static u16 skb_tx_hash(const struct net_device *dev,
if (skb_rx_queue_recorded(skb)) {
hash = skb_get_rx_queue(skb);
+ if (hash >= qoffset)
+ hash -= qoffset;
while (unlikely(hash >= qcount))
hash -= qcount;
return hash + qoffset;
@@ -3657,26 +3664,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
qdisc_calculate_pkt_len(skb, q);
if (q->flags & TCQ_F_NOLOCK) {
- if ((q->flags & TCQ_F_CAN_BYPASS) && READ_ONCE(q->empty) &&
- qdisc_run_begin(q)) {
- if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED,
- &q->state))) {
- __qdisc_drop(skb, &to_free);
- rc = NET_XMIT_DROP;
- goto end_run;
- }
- qdisc_bstats_cpu_update(q, skb);
-
- rc = NET_XMIT_SUCCESS;
- if (sch_direct_xmit(skb, q, dev, txq, NULL, true))
- __qdisc_run(q);
-
-end_run:
- qdisc_run_end(q);
- } else {
- rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
- qdisc_run(q);
- }
+ rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+ qdisc_run(q);
if (unlikely(to_free))
kfree_skb_list(to_free);
@@ -4527,14 +4516,14 @@ static u32 netif_receive_generic_xdp(struct sk_buff *skb,
/* Reinjected packets coming from act_mirred or similar should
* not get XDP generic processing.
*/
- if (skb_cloned(skb) || skb_is_tc_redirected(skb))
+ if (skb_is_tc_redirected(skb))
return XDP_PASS;
/* XDP packets must be linear and must have sufficient headroom
* of XDP_PACKET_HEADROOM bytes. This is the guarantee that also
* native XDP provides, thus we need to do it here as well.
*/
- if (skb_is_nonlinear(skb) ||
+ if (skb_cloned(skb) || skb_is_nonlinear(skb) ||
skb_headroom(skb) < XDP_PACKET_HEADROOM) {
int hroom = XDP_PACKET_HEADROOM - skb_headroom(skb);
int troom = skb->tail + skb->data_len - skb->end;
@@ -7201,8 +7190,8 @@ static int __netdev_walk_all_lower_dev(struct net_device *dev,
return 0;
}
-static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
- struct list_head **iter)
+struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
+ struct list_head **iter)
{
struct netdev_adjacent *lower;
@@ -7214,6 +7203,7 @@ static struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
return lower->dev;
}
+EXPORT_SYMBOL(netdev_next_lower_dev_rcu);
static u8 __netdev_upper_depth(struct net_device *dev)
{
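Among the net/core/dev.c changes above, the skb_tx_hash() hunk fixes the recorded-rx-queue path for devices whose traffic class owns a queue window that does not start at zero: the recorded index is rebased by qoffset before being folded into qcount, so the result always lands in [qoffset, qoffset + qcount). The arithmetic in isolation (toy function, values invented):

	/* Map a recorded queue index into the window owned by a
	 * traffic class, e.g. queues 8..11 (qoffset = 8, qcount = 4).
	 */
	static u16 map_queue(u16 hash, u16 qoffset, u16 qcount)
	{
		if (hash >= qoffset)
			hash -= qoffset;	/* rebase first */
		while (hash >= qcount)
			hash -= qcount;
		return hash + qoffset;
	}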
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 549ee56b7a21..5e220809844c 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -2103,11 +2103,11 @@ err_action_values_put:
static struct devlink_dpipe_table *
devlink_dpipe_table_find(struct list_head *dpipe_tables,
- const char *table_name)
+ const char *table_name, struct devlink *devlink)
{
struct devlink_dpipe_table *table;
-
- list_for_each_entry_rcu(table, dpipe_tables, list) {
+ list_for_each_entry_rcu(table, dpipe_tables, list,
+ lockdep_is_held(&devlink->lock)) {
if (!strcmp(table->name, table_name))
return table;
}
@@ -2226,7 +2226,7 @@ static int devlink_nl_cmd_dpipe_entries_get(struct sk_buff *skb,
table_name = nla_data(info->attrs[DEVLINK_ATTR_DPIPE_TABLE_NAME]);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name);
+ table_name, devlink);
if (!table)
return -EINVAL;
@@ -2382,7 +2382,7 @@ static int devlink_dpipe_table_counters_set(struct devlink *devlink,
struct devlink_dpipe_table *table;
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name);
+ table_name, devlink);
if (!table)
return -EINVAL;
@@ -6854,7 +6854,7 @@ bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
rcu_read_lock();
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name);
+ table_name, devlink);
enabled = false;
if (table)
enabled = table->counters_enabled;
@@ -6878,26 +6878,34 @@ int devlink_dpipe_table_register(struct devlink *devlink,
void *priv, bool counter_control_extern)
{
struct devlink_dpipe_table *table;
-
- if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name))
- return -EEXIST;
+ int err = 0;
if (WARN_ON(!table_ops->size_get))
return -EINVAL;
+ mutex_lock(&devlink->lock);
+
+ if (devlink_dpipe_table_find(&devlink->dpipe_table_list, table_name,
+ devlink)) {
+ err = -EEXIST;
+ goto unlock;
+ }
+
table = kzalloc(sizeof(*table), GFP_KERNEL);
- if (!table)
- return -ENOMEM;
+ if (!table) {
+ err = -ENOMEM;
+ goto unlock;
+ }
table->name = table_name;
table->table_ops = table_ops;
table->priv = priv;
table->counter_control_extern = counter_control_extern;
- mutex_lock(&devlink->lock);
list_add_tail_rcu(&table->list, &devlink->dpipe_table_list);
+unlock:
mutex_unlock(&devlink->lock);
- return 0;
+ return err;
}
EXPORT_SYMBOL_GPL(devlink_dpipe_table_register);
@@ -6914,7 +6922,7 @@ void devlink_dpipe_table_unregister(struct devlink *devlink,
mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name);
+ table_name, devlink);
if (!table)
goto unlock;
list_del_rcu(&table->list);
@@ -7071,7 +7079,7 @@ int devlink_dpipe_table_resource_set(struct devlink *devlink,
mutex_lock(&devlink->lock);
table = devlink_dpipe_table_find(&devlink->dpipe_table_list,
- table_name);
+ table_name, devlink);
if (!table) {
err = -EINVAL;
goto out;
diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c
index 3e7e15278c46..bd7eba9066f8 100644
--- a/net/core/fib_rules.c
+++ b/net/core/fib_rules.c
@@ -974,7 +974,7 @@ static int fib_nl_fill_rule(struct sk_buff *skb, struct fib_rule *rule,
frh = nlmsg_data(nlh);
frh->family = ops->family;
- frh->table = rule->table;
+ frh->table = rule->table < 256 ? rule->table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, FRA_TABLE, rule->table))
goto nla_put_failure;
if (nla_put_u32(skb, FRA_SUPPRESS_PREFIXLEN, rule->suppress_prefixlen))
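frh->table is an 8-bit field, so table IDs of 256 and above cannot be represented there; the fix reports RT_TABLE_COMPAT in the fixed-width header while the full ID continues to travel in the 32-bit FRA_TABLE attribute. A consumer should therefore prefer the attribute (hypothetical helper):

	#include <linux/fib_rules.h>
	#include <net/netlink.h>

	static u32 rule_table_id(const struct fib_rule_hdr *frh,
				 struct nlattr **tb)
	{
		if (tb[FRA_TABLE])
			return nla_get_u32(tb[FRA_TABLE]);
		return frh->table;	/* exact only below 256 */
	}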
diff --git a/net/core/page_pool.c b/net/core/page_pool.c
index 9b7cbe35df37..10d2b255df5e 100644
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
@@ -99,8 +99,7 @@ EXPORT_SYMBOL(page_pool_create);
static void __page_pool_return_page(struct page_pool *pool, struct page *page);
noinline
-static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
- bool refill)
+static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
struct ptr_ring *r = &pool->ring;
struct page *page;
@@ -141,8 +140,7 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
page = NULL;
break;
}
- } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL &&
- refill);
+ } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);
/* Return last page */
if (likely(pool->alloc.count > 0))
@@ -155,20 +153,16 @@ static struct page *page_pool_refill_alloc_cache(struct page_pool *pool,
/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
- bool refill = false;
struct page *page;
- /* Test for safe-context, caller should provide this guarantee */
- if (likely(in_serving_softirq())) {
- if (likely(pool->alloc.count)) {
- /* Fast-path */
- page = pool->alloc.cache[--pool->alloc.count];
- return page;
- }
- refill = true;
+ /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
+ if (likely(pool->alloc.count)) {
+ /* Fast-path */
+ page = pool->alloc.cache[--pool->alloc.count];
+ } else {
+ page = page_pool_refill_alloc_cache(pool);
}
- page = page_pool_refill_alloc_cache(pool, refill);
return page;
}
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 09c44bf2e1d2..e1152f4ffe33 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -3504,27 +3504,25 @@ static int rtnl_alt_ifname(int cmd, struct net_device *dev, struct nlattr *attr,
if (err)
return err;
- alt_ifname = nla_data(attr);
+ alt_ifname = nla_strdup(attr, GFP_KERNEL);
+ if (!alt_ifname)
+ return -ENOMEM;
+
if (cmd == RTM_NEWLINKPROP) {
- alt_ifname = kstrdup(alt_ifname, GFP_KERNEL);
- if (!alt_ifname)
- return -ENOMEM;
err = netdev_name_node_alt_create(dev, alt_ifname);
- if (err) {
- kfree(alt_ifname);
- return err;
- }
+ if (!err)
+ alt_ifname = NULL;
} else if (cmd == RTM_DELLINKPROP) {
err = netdev_name_node_alt_destroy(dev, alt_ifname);
- if (err)
- return err;
} else {
- WARN_ON(1);
- return 0;
+ WARN_ON_ONCE(1);
+ err = -EINVAL;
}
- *changed = true;
- return 0;
+ kfree(alt_ifname);
+ if (!err)
+ *changed = true;
+ return err;
}
static int rtnl_linkprop(int cmd, struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 864cb9e9622f..e1101a4f90a6 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -467,7 +467,6 @@ struct sk_buff *__netdev_alloc_skb(struct net_device *dev, unsigned int len,
return NULL;
}
- /* use OR instead of assignment to avoid clearing of bits in mask */
if (pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
@@ -527,7 +526,6 @@ struct sk_buff *__napi_alloc_skb(struct napi_struct *napi, unsigned int len,
return NULL;
}
- /* use OR instead of assignment to avoid clearing of bits in mask */
if (nc->page.pfmemalloc)
skb->pfmemalloc = 1;
skb->head_frag = 1;
@@ -4805,9 +4803,9 @@ static __sum16 *skb_checksum_setup_ip(struct sk_buff *skb,
typeof(IPPROTO_IP) proto,
unsigned int off)
{
- switch (proto) {
- int err;
+ int err;
+ switch (proto) {
case IPPROTO_TCP:
err = skb_maybe_pull_tail(skb, off + sizeof(struct tcphdr),
off + MAX_TCP_HDR_LEN);
diff --git a/net/dsa/tag_ar9331.c b/net/dsa/tag_ar9331.c
index 466ffa92a474..55b00694cdba 100644
--- a/net/dsa/tag_ar9331.c
+++ b/net/dsa/tag_ar9331.c
@@ -31,7 +31,7 @@ static struct sk_buff *ar9331_tag_xmit(struct sk_buff *skb,
__le16 *phdr;
u16 hdr;
- if (skb_cow_head(skb, 0) < 0)
+ if (skb_cow_head(skb, AR9331_HDR_LEN) < 0)
return NULL;
phdr = skb_push(skb, AR9331_HDR_LEN);
diff --git a/net/dsa/tag_qca.c b/net/dsa/tag_qca.c
index c8a128c9e5e0..70db7c909f74 100644
--- a/net/dsa/tag_qca.c
+++ b/net/dsa/tag_qca.c
@@ -33,7 +33,7 @@ static struct sk_buff *qca_tag_xmit(struct sk_buff *skb, struct net_device *dev)
struct dsa_port *dp = dsa_slave_to_port(dev);
u16 *phdr, hdr;
- if (skb_cow_head(skb, 0) < 0)
+ if (skb_cow_head(skb, QCA_HDR_LEN) < 0)
return NULL;
skb_push(skb, QCA_HDR_LEN);
diff --git a/net/ethtool/bitset.c b/net/ethtool/bitset.c
index fce45dac4205..ef9197541cb3 100644
--- a/net/ethtool/bitset.c
+++ b/net/ethtool/bitset.c
@@ -305,7 +305,8 @@ nla_put_failure:
static const struct nla_policy bitset_policy[ETHTOOL_A_BITSET_MAX + 1] = {
[ETHTOOL_A_BITSET_UNSPEC] = { .type = NLA_REJECT },
[ETHTOOL_A_BITSET_NOMASK] = { .type = NLA_FLAG },
- [ETHTOOL_A_BITSET_SIZE] = { .type = NLA_U32 },
+ [ETHTOOL_A_BITSET_SIZE] = NLA_POLICY_MAX(NLA_U32,
+ ETHNL_MAX_BITSET_SIZE),
[ETHTOOL_A_BITSET_BITS] = { .type = NLA_NESTED },
[ETHTOOL_A_BITSET_VALUE] = { .type = NLA_BINARY },
[ETHTOOL_A_BITSET_MASK] = { .type = NLA_BINARY },
@@ -447,7 +448,10 @@ ethnl_update_bitset32_verbose(u32 *bitmap, unsigned int nbits,
"mask only allowed in compact bitset");
return -EINVAL;
}
+
no_mask = tb[ETHTOOL_A_BITSET_NOMASK];
+ if (no_mask)
+ ethnl_bitmap32_clear(bitmap, 0, nbits, mod);
nla_for_each_nested(bit_attr, tb[ETHTOOL_A_BITSET_BITS], rem) {
bool old_val, new_val;
diff --git a/net/ethtool/bitset.h b/net/ethtool/bitset.h
index b8247e34109d..b849f9d19676 100644
--- a/net/ethtool/bitset.h
+++ b/net/ethtool/bitset.h
@@ -3,6 +3,8 @@
#ifndef _NET_ETHTOOL_BITSET_H
#define _NET_ETHTOOL_BITSET_H
+#define ETHNL_MAX_BITSET_SIZE S16_MAX
+
typedef const char (*const ethnl_string_array_t)[ETH_GSTRING_LEN];
int ethnl_bitset_is_compact(const struct nlattr *bitset, bool *compact);
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 364ea2cc028e..3ba7f61be107 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -155,7 +155,8 @@ static struct hsr_node *hsr_add_node(struct hsr_priv *hsr,
new_node->seq_out[i] = seq_out;
spin_lock_bh(&hsr->list_lock);
- list_for_each_entry_rcu(node, node_db, mac_list) {
+ list_for_each_entry_rcu(node, node_db, mac_list,
+ lockdep_is_held(&hsr->list_lock)) {
if (ether_addr_equal(node->macaddress_A, addr))
goto out;
if (ether_addr_equal(node->macaddress_B, addr))
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 376882215919..0bd10a1f477f 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1724,6 +1724,7 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
unsigned char optbuf[sizeof(struct ip_options) + 40];
struct ip_options *opt = (struct ip_options *)optbuf;
+ int res;
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
@@ -1735,7 +1736,11 @@ void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
memset(opt, 0, sizeof(struct ip_options));
opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
- if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+ rcu_read_lock();
+ res = __ip_options_compile(dev_net(skb->dev), opt, skb, NULL);
+ rcu_read_unlock();
+
+ if (res)
return;
if (gateway)
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 18068ed42f25..f369e7ce685b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -748,6 +748,39 @@ out:;
}
EXPORT_SYMBOL(__icmp_send);
+#if IS_ENABLED(CONFIG_NF_NAT)
+#include <net/netfilter/nf_conntrack.h>
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ struct sk_buff *cloned_skb = NULL;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct;
+ __be32 orig_ip;
+
+ ct = nf_ct_get(skb_in, &ctinfo);
+ if (!ct || !(ct->status & IPS_SRC_NAT)) {
+ icmp_send(skb_in, type, code, info);
+ return;
+ }
+
+ if (skb_shared(skb_in))
+ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
+
+ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
+ (skb_network_header(skb_in) + sizeof(struct iphdr)) >
+ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
+ skb_network_offset(skb_in) + sizeof(struct iphdr))))
+ goto out;
+
+ orig_ip = ip_hdr(skb_in)->saddr;
+ ip_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.ip;
+ icmp_send(skb_in, type, code, info);
+ ip_hdr(skb_in)->saddr = orig_ip;
+out:
+ consume_skb(cloned_skb);
+}
+EXPORT_SYMBOL(icmp_ndo_send);
+#endif
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
{
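The new icmp_ndo_send() (and its IPv6 twin below) is meant for transmit paths that run after source NAT: it temporarily restores the pre-NAT source address from the conntrack tuple so the generated error is routed to the real sender rather than to the NAT address. A hypothetical call site in a tunnel driver's MTU check:

	#include <linux/icmp.h>
	#include <net/icmp.h>

	/* Invented helper: reject an oversized skb with a PMTU error
	 * that survives SNAT on the way back to the sender.
	 */
	static bool example_check_mtu(struct sk_buff *skb, unsigned int mtu)
	{
		if (skb->len <= mtu)
			return true;

		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
		return false;
	}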
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 316ebdf8151d..6b6b57000dad 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6124,7 +6124,11 @@ static void tcp_rcv_synrecv_state_fastopen(struct sock *sk)
{
struct request_sock *req;
- tcp_try_undo_loss(sk, false);
+ /* If we are still handling the SYNACK RTO, see if timestamp ECR allows
+ * undo. If peer SACKs triggered fast recovery, we can't undo here.
+ */
+ if (inet_csk(sk)->icsk_ca_state == TCP_CA_Loss)
+ tcp_try_undo_loss(sk, false);
/* Reset rtx states to prevent spurious retransmits_timed_out() */
tcp_sk(sk)->retrans_stamp = 0;
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index db76b9609299..08a41f1e1cd2 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1857,8 +1857,12 @@ int __udp_disconnect(struct sock *sk, int flags)
inet->inet_dport = 0;
sock_rps_reset_rxhash(sk);
sk->sk_bound_dev_if = 0;
- if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
+ if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK)) {
inet_reset_saddr(sk);
+ if (sk->sk_prot->rehash &&
+ (sk->sk_userlocks & SOCK_BINDPORT_LOCK))
+ sk->sk_prot->rehash(sk);
+ }
if (!(sk->sk_userlocks & SOCK_BINDPORT_LOCK)) {
sk->sk_prot->unhash(sk);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 58fbde244381..72abf892302f 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1102,8 +1102,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct fib6_info *rt,
found++;
break;
}
- if (rt_can_ecmp)
- fallback_ins = fallback_ins ?: ins;
+ fallback_ins = fallback_ins ?: ins;
goto next_iter;
}
@@ -1146,7 +1145,9 @@ next_iter:
}
if (fallback_ins && !found) {
- /* No ECMP-able route found, replace first non-ECMP one */
+ /* No route with matching ECMP capability found, replace the
+ * first matching route
+ */
ins = fallback_ins;
iter = rcu_dereference_protected(*ins,
lockdep_is_held(&rt->fib6_table->tb6_lock));
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 55bfc5149d0c..781ca8c07a0d 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -437,8 +437,6 @@ static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
return -ENOENT;
switch (type) {
- struct ipv6_tlv_tnl_enc_lim *tel;
- __u32 teli;
case ICMPV6_DEST_UNREACH:
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
@@ -452,7 +450,10 @@ static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
break;
}
return 0;
- case ICMPV6_PARAMPROB:
+ case ICMPV6_PARAMPROB: {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+ __u32 teli;
+
teli = 0;
if (code == ICMPV6_HDR_FIELD)
teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
@@ -468,6 +469,7 @@ static int ip6gre_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
t->parms.name);
}
return 0;
+ }
case ICMPV6_PKT_TOOBIG:
ip6_update_pmtu(skb, net, info, 0, 0, sock_net_uid(net, NULL));
return 0;
diff --git a/net/ipv6/ip6_icmp.c b/net/ipv6/ip6_icmp.c
index 02045494c24c..e0086758b6ee 100644
--- a/net/ipv6/ip6_icmp.c
+++ b/net/ipv6/ip6_icmp.c
@@ -45,4 +45,38 @@ out:
rcu_read_unlock();
}
EXPORT_SYMBOL(icmpv6_send);
+
+#if IS_ENABLED(CONFIG_NF_NAT)
+#include <net/netfilter/nf_conntrack.h>
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info)
+{
+ struct sk_buff *cloned_skb = NULL;
+ enum ip_conntrack_info ctinfo;
+ struct in6_addr orig_ip;
+ struct nf_conn *ct;
+
+ ct = nf_ct_get(skb_in, &ctinfo);
+ if (!ct || !(ct->status & IPS_SRC_NAT)) {
+ icmpv6_send(skb_in, type, code, info);
+ return;
+ }
+
+ if (skb_shared(skb_in))
+ skb_in = cloned_skb = skb_clone(skb_in, GFP_ATOMIC);
+
+ if (unlikely(!skb_in || skb_network_header(skb_in) < skb_in->head ||
+ (skb_network_header(skb_in) + sizeof(struct ipv6hdr)) >
+ skb_tail_pointer(skb_in) || skb_ensure_writable(skb_in,
+ skb_network_offset(skb_in) + sizeof(struct ipv6hdr))))
+ goto out;
+
+ orig_ip = ipv6_hdr(skb_in)->saddr;
+ ipv6_hdr(skb_in)->saddr = ct->tuplehash[0].tuple.src.u3.in6;
+ icmpv6_send(skb_in, type, code, info);
+ ipv6_hdr(skb_in)->saddr = orig_ip;
+out:
+ consume_skb(cloned_skb);
+}
+EXPORT_SYMBOL(icmpv6_ndo_send);
+#endif
#endif
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index b5dd20c4599b..4703b09808d0 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -121,6 +121,7 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
/**
* ip6_tnl_lookup - fetch tunnel matching the end-point addresses
+ * @link: ifindex of underlying interface
* @remote: the address of the tunnel exit-point
* @local: the address of the tunnel entry-point
*
@@ -134,37 +135,56 @@ static struct net_device_stats *ip6_get_stats(struct net_device *dev)
for (t = rcu_dereference(start); t; t = rcu_dereference(t->next))
static struct ip6_tnl *
-ip6_tnl_lookup(struct net *net, const struct in6_addr *remote, const struct in6_addr *local)
+ip6_tnl_lookup(struct net *net, int link,
+ const struct in6_addr *remote, const struct in6_addr *local)
{
unsigned int hash = HASH(remote, local);
- struct ip6_tnl *t;
+ struct ip6_tnl *t, *cand = NULL;
struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
struct in6_addr any;
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
- if (ipv6_addr_equal(local, &t->parms.laddr) &&
- ipv6_addr_equal(remote, &t->parms.raddr) &&
- (t->dev->flags & IFF_UP))
+ if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+ !ipv6_addr_equal(remote, &t->parms.raddr) ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (link == t->parms.link)
return t;
+ else
+ cand = t;
}
memset(&any, 0, sizeof(any));
hash = HASH(&any, local);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
- if (ipv6_addr_equal(local, &t->parms.laddr) &&
- ipv6_addr_any(&t->parms.raddr) &&
- (t->dev->flags & IFF_UP))
+ if (!ipv6_addr_equal(local, &t->parms.laddr) ||
+ !ipv6_addr_any(&t->parms.raddr) ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (link == t->parms.link)
return t;
+ else if (!cand)
+ cand = t;
}
hash = HASH(remote, &any);
for_each_ip6_tunnel_rcu(ip6n->tnls_r_l[hash]) {
- if (ipv6_addr_equal(remote, &t->parms.raddr) &&
- ipv6_addr_any(&t->parms.laddr) &&
- (t->dev->flags & IFF_UP))
+ if (!ipv6_addr_equal(remote, &t->parms.raddr) ||
+ !ipv6_addr_any(&t->parms.laddr) ||
+ !(t->dev->flags & IFF_UP))
+ continue;
+
+ if (link == t->parms.link)
return t;
+ else if (!cand)
+ cand = t;
}
+ if (cand)
+ return cand;
+
t = rcu_dereference(ip6n->collect_md_tun);
if (t && t->dev->flags & IFF_UP)
return t;
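In plain terms, the reworked lookup now prefers a tunnel whose configured underlay link matches the ingress ifindex and only falls back to the first address-wise match otherwise. A standalone sketch of that selection rule (the struct and names are illustrative, not kernel API):

#include <stdio.h>

struct tnl { int link; const char *name; };

static const struct tnl *lookup(const struct tnl *t, int n, int link)
{
	const struct tnl *cand = NULL;

	for (int i = 0; i < n; i++) {
		if (t[i].link == link)
			return &t[i];	/* exact underlay match wins */
		if (!cand)
			cand = &t[i];	/* first match kept as fallback */
	}
	return cand;
}

int main(void)
{
	struct tnl tnls[] = { { 2, "ip6tnl-eth0" }, { 3, "ip6tnl-eth1" } };

	printf("%s\n", lookup(tnls, 2, 3)->name); /* ip6tnl-eth1 */
	printf("%s\n", lookup(tnls, 2, 7)->name); /* fallback: ip6tnl-eth0 */
	return 0;
}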
@@ -351,7 +371,8 @@ static struct ip6_tnl *ip6_tnl_locate(struct net *net,
(t = rtnl_dereference(*tp)) != NULL;
tp = &t->next) {
if (ipv6_addr_equal(local, &t->parms.laddr) &&
- ipv6_addr_equal(remote, &t->parms.raddr)) {
+ ipv6_addr_equal(remote, &t->parms.raddr) &&
+ p->link == t->parms.link) {
if (create)
return ERR_PTR(-EEXIST);
@@ -485,7 +506,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
processing of the error. */
rcu_read_lock();
- t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->daddr, &ipv6h->saddr);
+ t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->daddr, &ipv6h->saddr);
if (!t)
goto out;
@@ -496,8 +517,6 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
err = 0;
switch (*type) {
- struct ipv6_tlv_tnl_enc_lim *tel;
- __u32 mtu, teli;
case ICMPV6_DEST_UNREACH:
net_dbg_ratelimited("%s: Path to destination invalid or inactive!\n",
t->parms.name);
@@ -510,7 +529,10 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
rel_msg = 1;
}
break;
- case ICMPV6_PARAMPROB:
+ case ICMPV6_PARAMPROB: {
+ struct ipv6_tlv_tnl_enc_lim *tel;
+ __u32 teli;
+
teli = 0;
if ((*code) == ICMPV6_HDR_FIELD)
teli = ip6_tnl_parse_tlv_enc_lim(skb, skb->data);
@@ -527,7 +549,10 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
t->parms.name);
}
break;
- case ICMPV6_PKT_TOOBIG:
+ }
+ case ICMPV6_PKT_TOOBIG: {
+ __u32 mtu;
+
ip6_update_pmtu(skb, net, htonl(*info), 0, 0,
sock_net_uid(net, NULL));
mtu = *info - offset;
@@ -541,6 +566,7 @@ ip6_tnl_err(struct sk_buff *skb, __u8 ipproto, struct inet6_skb_parm *opt,
rel_msg = 1;
}
break;
+ }
case NDISC_REDIRECT:
ip6_redirect(skb, net, skb->dev->ifindex, 0,
sock_net_uid(net, NULL));
@@ -887,7 +913,7 @@ static int ipxip6_rcv(struct sk_buff *skb, u8 ipproto,
int ret = -1;
rcu_read_lock();
- t = ip6_tnl_lookup(dev_net(skb->dev), &ipv6h->saddr, &ipv6h->daddr);
+ t = ip6_tnl_lookup(dev_net(skb->dev), skb->dev->ifindex, &ipv6h->saddr, &ipv6h->daddr);
if (t) {
u8 tproto = READ_ONCE(t->parms.proto);
@@ -1420,8 +1446,10 @@ tx_err:
static void ip6_tnl_link_config(struct ip6_tnl *t)
{
struct net_device *dev = t->dev;
+ struct net_device *tdev = NULL;
struct __ip6_tnl_parm *p = &t->parms;
struct flowi6 *fl6 = &t->fl.u.ip6;
+ unsigned int mtu;
int t_hlen;
memcpy(dev->dev_addr, &p->laddr, sizeof(struct in6_addr));
@@ -1457,22 +1485,25 @@ static void ip6_tnl_link_config(struct ip6_tnl *t)
struct rt6_info *rt = rt6_lookup(t->net,
&p->raddr, &p->laddr,
p->link, NULL, strict);
+ if (rt) {
+ tdev = rt->dst.dev;
+ ip6_rt_put(rt);
+ }
- if (!rt)
- return;
+ if (!tdev && p->link)
+ tdev = __dev_get_by_index(t->net, p->link);
- if (rt->dst.dev) {
- dev->hard_header_len = rt->dst.dev->hard_header_len +
- t_hlen;
+ if (tdev) {
+ dev->hard_header_len = tdev->hard_header_len + t_hlen;
+ mtu = min_t(unsigned int, tdev->mtu, IP6_MAX_MTU);
- dev->mtu = rt->dst.dev->mtu - t_hlen;
+ dev->mtu = mtu - t_hlen;
if (!(t->parms.flags & IP6_TNL_F_IGN_ENCAP_LIMIT))
dev->mtu -= 8;
if (dev->mtu < IPV6_MIN_MTU)
dev->mtu = IPV6_MIN_MTU;
}
- ip6_rt_put(rt);
}
}
diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
index 79fc012dd2ca..debdaeba5d8c 100644
--- a/net/ipv6/ipv6_sockglue.c
+++ b/net/ipv6/ipv6_sockglue.c
@@ -183,9 +183,15 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
retv = -EBUSY;
break;
}
- } else if (sk->sk_protocol != IPPROTO_TCP)
+ } else if (sk->sk_protocol == IPPROTO_TCP) {
+ if (sk->sk_prot != &tcpv6_prot) {
+ retv = -EBUSY;
+ break;
+ }
break;
-
+ } else {
+ break;
+ }
if (sk->sk_state != TCP_ESTABLISHED) {
retv = -ENOTCONN;
break;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 4fbdc60b4e07..2931224b674e 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -5198,6 +5198,7 @@ static int ip6_route_multipath_add(struct fib6_config *cfg,
*/
cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
NLM_F_REPLACE);
+ cfg->fc_nlinfo.nlh->nlmsg_flags |= NLM_F_CREATE;
nhn++;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 000c742d0527..6aee699deb28 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3450,7 +3450,7 @@ int ieee80211_attach_ack_skb(struct ieee80211_local *local, struct sk_buff *skb,
spin_lock_irqsave(&local->ack_status_lock, spin_flags);
id = idr_alloc(&local->ack_status_frames, ack_skb,
- 1, 0x40, GFP_ATOMIC);
+ 1, 0x2000, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, spin_flags);
if (id < 0) {
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 5fa13176036f..88d7a692a965 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -8,7 +8,7 @@
* Copyright 2007, Michael Wu <[email protected]>
* Copyright 2013-2014 Intel Mobile Communications GmbH
* Copyright (C) 2015 - 2017 Intel Deutschland GmbH
- * Copyright (C) 2018 - 2019 Intel Corporation
+ * Copyright (C) 2018 - 2020 Intel Corporation
*/
#include <linux/delay.h>
@@ -1311,7 +1311,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
if (!res) {
ch_switch.timestamp = timestamp;
ch_switch.device_timestamp = device_timestamp;
- ch_switch.block_tx = beacon ? csa_ie.mode : 0;
+ ch_switch.block_tx = csa_ie.mode;
ch_switch.chandef = csa_ie.chandef;
ch_switch.count = csa_ie.count;
ch_switch.delay = csa_ie.max_switch_time;
@@ -1404,7 +1404,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
sdata->vif.csa_active = true;
sdata->csa_chandef = csa_ie.chandef;
- sdata->csa_block_tx = ch_switch.block_tx;
+ sdata->csa_block_tx = csa_ie.mode;
ifmgd->csa_ignored_same_chan = false;
if (sdata->csa_block_tx)
@@ -1438,7 +1438,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
* reset when the disconnection worker runs.
*/
sdata->vif.csa_active = true;
- sdata->csa_block_tx = ch_switch.block_tx;
+ sdata->csa_block_tx = csa_ie.mode;
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
@@ -2959,7 +2959,7 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
(auth_transaction == 2 &&
ifmgd->auth_data->expected_transaction == 2)) {
if (!ieee80211_mark_sta_auth(sdata, bssid))
- goto out_err;
+ return; /* ignore frame -- wait for timeout */
} else if (ifmgd->auth_data->algorithm == WLAN_AUTH_SAE &&
auth_transaction == 2) {
sdata_info(sdata, "SAE peer confirmed\n");
@@ -2967,10 +2967,6 @@ static void ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
}
cfg80211_rx_mlme_mgmt(sdata->dev, (u8 *)mgmt, len);
- return;
- out_err:
- mutex_unlock(&sdata->local->sta_mtx);
- /* ignore frame -- wait for timeout */
}
#define case_WLAN(type) \
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index 0e05ff037672..0ba98ad9bc85 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -4114,7 +4114,7 @@ void __ieee80211_check_fast_rx_iface(struct ieee80211_sub_if_data *sdata)
lockdep_assert_held(&local->sta_mtx);
- list_for_each_entry_rcu(sta, &local->sta_list, list) {
+ list_for_each_entry(sta, &local->sta_list, list) {
if (sdata != sta->sdata &&
(!sta->sdata->bss || sta->sdata->bss != sdata->bss))
continue;
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 4bd1faf4f779..87def9cb91ff 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -2442,7 +2442,7 @@ static int ieee80211_store_ack_skb(struct ieee80211_local *local,
spin_lock_irqsave(&local->ack_status_lock, flags);
id = idr_alloc(&local->ack_status_frames, ack_skb,
- 1, 0x40, GFP_ATOMIC);
+ 1, 0x2000, GFP_ATOMIC);
spin_unlock_irqrestore(&local->ack_status_lock, flags);
if (id >= 0) {
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 32a7a53833c0..decd46b38393 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1063,16 +1063,22 @@ _ieee802_11_parse_elems_crc(const u8 *start, size_t len, bool action,
elem_parse_failed = true;
break;
case WLAN_EID_VHT_OPERATION:
- if (elen >= sizeof(struct ieee80211_vht_operation))
+ if (elen >= sizeof(struct ieee80211_vht_operation)) {
elems->vht_operation = (void *)pos;
- else
- elem_parse_failed = true;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed = true;
break;
case WLAN_EID_OPMODE_NOTIF:
- if (elen > 0)
+ if (elen > 0) {
elems->opmode_notif = pos;
- else
- elem_parse_failed = true;
+ if (calc_crc)
+ crc = crc32_be(crc, pos - 2, elen + 2);
+ break;
+ }
+ elem_parse_failed = true;
break;
case WLAN_EID_MESH_ID:
elems->mesh_id = pos;
@@ -2987,10 +2993,22 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
int cf0, cf1;
int ccfs0, ccfs1, ccfs2;
int ccf0, ccf1;
+ u32 vht_cap;
+ bool support_80_80 = false;
+ bool support_160 = false;
if (!oper || !htop)
return false;
+ vht_cap = hw->wiphy->bands[chandef->chan->band]->vht_cap.cap;
+ support_160 = (vht_cap & (IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK |
+ IEEE80211_VHT_CAP_EXT_NSS_BW_MASK));
+ support_80_80 = ((vht_cap &
+ IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
+ (vht_cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ &&
+ vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) ||
+ ((vht_cap & IEEE80211_VHT_CAP_EXT_NSS_BW_MASK) >>
+ IEEE80211_VHT_CAP_EXT_NSS_BW_SHIFT > 1));
ccfs0 = oper->center_freq_seg0_idx;
ccfs1 = oper->center_freq_seg1_idx;
ccfs2 = (le16_to_cpu(htop->operation_mode) &
@@ -3018,10 +3036,10 @@ bool ieee80211_chandef_vht_oper(struct ieee80211_hw *hw,
unsigned int diff;
diff = abs(ccf1 - ccf0);
- if (diff == 8) {
+ if ((diff == 8) && support_160) {
new.width = NL80211_CHAN_WIDTH_160;
new.center_freq1 = cf1;
- } else if (diff > 8) {
+ } else if ((diff > 8) && support_80_80) {
new.width = NL80211_CHAN_WIDTH_80P80;
new.center_freq2 = cf1;
}
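The new guards only upgrade the channel definition to 160 MHz or 80+80 MHz when the hardware actually advertises support. A standalone check of the same bit tests, with the relevant capability values copied in locally (prefixes shortened; values as in include/linux/ieee80211.h):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define VHT_CAP_SUPP_CHAN_WIDTH_160MHZ          0x00000004U
#define VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ 0x00000008U
#define VHT_CAP_SUPP_CHAN_WIDTH_MASK            0x0000000CU
#define VHT_CAP_EXT_NSS_BW_SHIFT                30
#define VHT_CAP_EXT_NSS_BW_MASK                 0xC0000000U

int main(void)
{
	uint32_t cap = VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; /* 160 MHz only */
	bool support_160 = cap & (VHT_CAP_SUPP_CHAN_WIDTH_MASK |
				  VHT_CAP_EXT_NSS_BW_MASK);
	bool support_80_80 =
		(cap & VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ) ||
		((cap & VHT_CAP_SUPP_CHAN_WIDTH_160MHZ) &&
		 (cap & VHT_CAP_EXT_NSS_BW_MASK)) ||
		(((cap & VHT_CAP_EXT_NSS_BW_MASK) >>
		  VHT_CAP_EXT_NSS_BW_SHIFT) > 1);

	printf("160 MHz: %d, 80+80 MHz: %d\n", support_160, support_80_80);
	return 0;
}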
diff --git a/net/mptcp/Kconfig b/net/mptcp/Kconfig
index 49f6054e7f4e..a9ed3bf1d93f 100644
--- a/net/mptcp/Kconfig
+++ b/net/mptcp/Kconfig
@@ -4,6 +4,7 @@ config MPTCP
depends on INET
select SKB_EXTENSIONS
select CRYPTO_LIB_SHA256
+ select CRYPTO
help
Multipath TCP (MPTCP) connections send and receive data over multiple
subflows in order to utilize multiple network paths. Each subflow
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 73780b4cb108..3c19a8efdcea 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -543,6 +543,11 @@ static void __mptcp_close_ssk(struct sock *sk, struct sock *ssk,
}
}
+static unsigned int mptcp_sync_mss(struct sock *sk, u32 pmtu)
+{
+ return 0;
+}
+
static int __mptcp_init_sock(struct sock *sk)
{
struct mptcp_sock *msk = mptcp_sk(sk);
@@ -551,6 +556,7 @@ static int __mptcp_init_sock(struct sock *sk)
__set_bit(MPTCP_SEND_SPACE, &msk->flags);
msk->first = NULL;
+ inet_csk(sk)->icsk_sync_mss = mptcp_sync_mss;
return 0;
}
@@ -643,7 +649,7 @@ static struct ipv6_pinfo *mptcp_inet6_sk(const struct sock *sk)
}
#endif
-struct sock *mptcp_sk_clone_lock(const struct sock *sk)
+static struct sock *mptcp_sk_clone_lock(const struct sock *sk)
{
struct sock *nsk = sk_clone_lock(sk, GFP_ATOMIC);
@@ -755,60 +761,50 @@ static int mptcp_setsockopt(struct sock *sk, int level, int optname,
char __user *optval, unsigned int optlen)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- int ret = -EOPNOTSUPP;
struct socket *ssock;
- struct sock *ssk;
pr_debug("msk=%p", msk);
/* @@ the meaning of setsockopt() when the socket is connected and
- * there are multiple subflows is not defined.
+ * there are multiple subflows is not yet defined. It is up to the
+ * MPTCP-level socket to configure the subflows until the connection
+ * is in TCP fallback, at which point TCP socket options are passed
+ * through to the single remaining subflow.
*/
lock_sock(sk);
- ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
- if (IS_ERR(ssock)) {
- release_sock(sk);
- return ret;
- }
+ ssock = __mptcp_tcp_fallback(msk);
+ if (ssock)
+ return tcp_setsockopt(ssock->sk, level, optname, optval,
+ optlen);
- ssk = ssock->sk;
- sock_hold(ssk);
release_sock(sk);
- ret = tcp_setsockopt(ssk, level, optname, optval, optlen);
- sock_put(ssk);
-
- return ret;
+ return -EOPNOTSUPP;
}
static int mptcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *option)
{
struct mptcp_sock *msk = mptcp_sk(sk);
- int ret = -EOPNOTSUPP;
struct socket *ssock;
- struct sock *ssk;
pr_debug("msk=%p", msk);
- /* @@ the meaning of getsockopt() when the socket is connected and
- * there are multiple subflows is not defined.
+ /* @@ the meaning of getsockopt() when the socket is connected and
+ * there are multiple subflows is not yet defined. It is up to the
+ * MPTCP-level socket to configure the subflows until the connection
+ * is in TCP fallback, at which point TCP socket options are passed
+ * through to the single remaining subflow.
*/
lock_sock(sk);
- ssock = __mptcp_socket_create(msk, MPTCP_SAME_STATE);
- if (IS_ERR(ssock)) {
- release_sock(sk);
- return ret;
- }
+ ssock = __mptcp_tcp_fallback(msk);
+ if (ssock)
+ return tcp_getsockopt(ssock->sk, level, optname, optval,
+ option);
- ssk = ssock->sk;
- sock_hold(ssk);
release_sock(sk);
- ret = tcp_getsockopt(ssk, level, optname, optval, option);
- sock_put(ssk);
-
- return ret;
+ return -EOPNOTSUPP;
}
static int mptcp_get_port(struct sock *sk, unsigned short snum)
diff --git a/net/mptcp/protocol.h b/net/mptcp/protocol.h
index 8a99a2930284..9f8663b30456 100644
--- a/net/mptcp/protocol.h
+++ b/net/mptcp/protocol.h
@@ -56,8 +56,8 @@
#define MPTCP_DSS_FLAG_MASK (0x1F)
/* MPTCP socket flags */
-#define MPTCP_DATA_READY BIT(0)
-#define MPTCP_SEND_SPACE BIT(1)
+#define MPTCP_DATA_READY 0
+#define MPTCP_SEND_SPACE 1
/* MPTCP connection sock */
struct mptcp_sock {
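The flag values change from masks to plain bit numbers because they are consumed by set_bit()/test_bit(), which take a bit index: the old BIT(1) == 2 silently addressed bit 2 instead of bit 1. A standalone illustration, using simplified non-atomic stand-ins for the kernel helpers:

#include <assert.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))

static void set_bit(unsigned long nr, unsigned long *flags)
{
	*flags |= 1UL << nr;	/* nr is a bit *index*, not a mask */
}

static int test_bit(unsigned long nr, const unsigned long *flags)
{
	return !!(*flags & (1UL << nr));
}

int main(void)
{
	unsigned long flags = 0;

	set_bit(BIT(1), &flags);	/* old value: sets bit 2, not bit 1 */
	assert(!test_bit(1, &flags));	/* so the flag check misses */

	flags = 0;
	set_bit(1, &flags);		/* new value: plain index works */
	assert(test_bit(1, &flags));
	printf("flags=%#lx\n", flags);
	return 0;
}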
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 69c107f9ba8d..8dd17589217d 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -723,6 +723,20 @@ ip_set_rcu_get(struct net *net, ip_set_id_t index)
return set;
}
+static inline void
+ip_set_lock(struct ip_set *set)
+{
+ if (!set->variant->region_lock)
+ spin_lock_bh(&set->lock);
+}
+
+static inline void
+ip_set_unlock(struct ip_set *set)
+{
+ if (!set->variant->region_lock)
+ spin_unlock_bh(&set->lock);
+}
+
int
ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
const struct xt_action_param *par, struct ip_set_adt_opt *opt)
@@ -744,9 +758,9 @@ ip_set_test(ip_set_id_t index, const struct sk_buff *skb,
if (ret == -EAGAIN) {
/* Type requests element to be completed */
pr_debug("element must be completed, ADD is triggered\n");
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
set->variant->kadt(set, skb, par, IPSET_ADD, opt);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
ret = 1;
} else {
/* --return-nomatch: invert matched element */
@@ -775,9 +789,9 @@ ip_set_add(ip_set_id_t index, const struct sk_buff *skb,
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_ADD, opt);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
return ret;
}
@@ -797,9 +811,9 @@ ip_set_del(ip_set_id_t index, const struct sk_buff *skb,
!(opt->family == set->family || set->family == NFPROTO_UNSPEC))
return -IPSET_ERR_TYPE_MISMATCH;
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
ret = set->variant->kadt(set, skb, par, IPSET_DEL, opt);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
return ret;
}
@@ -1264,9 +1278,9 @@ ip_set_flush_set(struct ip_set *set)
{
pr_debug("set: %s\n", set->name);
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
set->variant->flush(set);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
}
static int ip_set_flush(struct net *net, struct sock *ctnl, struct sk_buff *skb,
@@ -1713,9 +1727,9 @@ call_ad(struct sock *ctnl, struct sk_buff *skb, struct ip_set *set,
bool eexist = flags & IPSET_FLAG_EXIST, retried = false;
do {
- spin_lock_bh(&set->lock);
+ ip_set_lock(set);
ret = set->variant->uadt(set, tb, adt, &lineno, flags, retried);
- spin_unlock_bh(&set->lock);
+ ip_set_unlock(set);
retried = true;
} while (ret == -EAGAIN &&
set->variant->resize &&
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 7480ce55b5c8..e52d7b7597a0 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -7,13 +7,21 @@
#include <linux/rcupdate.h>
#include <linux/jhash.h>
#include <linux/types.h>
+#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/ipset/ip_set.h>
-#define __ipset_dereference_protected(p, c) rcu_dereference_protected(p, c)
-#define ipset_dereference_protected(p, set) \
- __ipset_dereference_protected(p, lockdep_is_held(&(set)->lock))
-
-#define rcu_dereference_bh_nfnl(p) rcu_dereference_bh_check(p, 1)
+#define __ipset_dereference(p) \
+ rcu_dereference_protected(p, 1)
+#define ipset_dereference_nfnl(p) \
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
+#define ipset_dereference_set(p, set) \
+ rcu_dereference_protected(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET) || \
+ lockdep_is_held(&(set)->lock))
+#define ipset_dereference_bh_nfnl(p) \
+ rcu_dereference_bh_check(p, \
+ lockdep_nfnl_is_held(NFNL_SUBSYS_IPSET))
/* Hashing which uses arrays to resolve clashing. The hash table is resized
* (doubled) when searching becomes too long.
@@ -72,11 +80,35 @@ struct hbucket {
__aligned(__alignof__(u64));
};
+/* Region size for locking == 2^HTABLE_REGION_BITS */
+#define HTABLE_REGION_BITS 10
+#define ahash_numof_locks(htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? 1 \
+ : jhash_size((htable_bits) - HTABLE_REGION_BITS))
+#define ahash_sizeof_regions(htable_bits) \
+ (ahash_numof_locks(htable_bits) * sizeof(struct ip_set_region))
+#define ahash_region(n, htable_bits) \
+ ((n) % ahash_numof_locks(htable_bits))
+#define ahash_bucket_start(h, htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? 0 \
+ : (h) * jhash_size(HTABLE_REGION_BITS))
+#define ahash_bucket_end(h, htable_bits) \
+ ((htable_bits) < HTABLE_REGION_BITS ? jhash_size(htable_bits) \
+ : ((h) + 1) * jhash_size(HTABLE_REGION_BITS))
+
+struct htable_gc {
+ struct delayed_work dwork;
+ struct ip_set *set; /* Set the gc belongs to */
+ u32 region; /* Last gc run position */
+};
+
/* The hash table: the table size stored here in order to make resizing easy */
struct htable {
atomic_t ref; /* References for resizing */
- atomic_t uref; /* References for dumping */
+ atomic_t uref; /* References for dumping and gc */
u8 htable_bits; /* size of hash table == 2^htable_bits */
+ u32 maxelem; /* Maxelem per region */
+ struct ip_set_region *hregion; /* Region locks and ext sizes */
struct hbucket __rcu *bucket[0]; /* hashtable buckets */
};
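Since the region arithmetic above is easy to misread, here is a standalone restatement (plain C, with the macros re-derived locally): a table of 2^htable_bits buckets gets 2^(htable_bits - 10) region locks once it grows past 2^10 buckets, and a bucket index selects its lock by modulo. The numbers are illustrative:

#include <stdio.h>

#define HTABLE_REGION_BITS 10
#define jhash_size(n) ((unsigned int)1 << (n))

static unsigned int numof_locks(unsigned int htable_bits)
{
	return htable_bits < HTABLE_REGION_BITS ?
		1 : jhash_size(htable_bits - HTABLE_REGION_BITS);
}

int main(void)
{
	unsigned int bits = 15;		/* 2^15 = 32768 buckets */
	unsigned int locks = numof_locks(bits);
	unsigned int key = 12345;	/* a bucket index from HKEY() */

	printf("%u region locks; bucket %u uses lock %u\n",
	       locks, key, key % locks);
	printf("small table (2^8 buckets): %u lock(s)\n", numof_locks(8));
	return 0;
}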
@@ -162,6 +194,10 @@ htable_bits(u32 hashsize)
#define NLEN 0
#endif /* IP_SET_HASH_WITH_NETS */
+#define SET_ELEM_EXPIRED(set, d) \
+ (SET_WITH_TIMEOUT(set) && \
+ ip_set_timeout_expired(ext_timeout(d, set)))
+
#endif /* _IP_SET_HASH_GEN_H */
#ifndef MTYPE
@@ -205,10 +241,12 @@ htable_bits(u32 hashsize)
#undef mtype_test_cidrs
#undef mtype_test
#undef mtype_uref
-#undef mtype_expire
#undef mtype_resize
+#undef mtype_ext_size
+#undef mtype_resize_ad
#undef mtype_head
#undef mtype_list
+#undef mtype_gc_do
#undef mtype_gc
#undef mtype_gc_init
#undef mtype_variant
@@ -247,10 +285,12 @@ htable_bits(u32 hashsize)
#define mtype_test_cidrs IPSET_TOKEN(MTYPE, _test_cidrs)
#define mtype_test IPSET_TOKEN(MTYPE, _test)
#define mtype_uref IPSET_TOKEN(MTYPE, _uref)
-#define mtype_expire IPSET_TOKEN(MTYPE, _expire)
#define mtype_resize IPSET_TOKEN(MTYPE, _resize)
+#define mtype_ext_size IPSET_TOKEN(MTYPE, _ext_size)
+#define mtype_resize_ad IPSET_TOKEN(MTYPE, _resize_ad)
#define mtype_head IPSET_TOKEN(MTYPE, _head)
#define mtype_list IPSET_TOKEN(MTYPE, _list)
+#define mtype_gc_do IPSET_TOKEN(MTYPE, _gc_do)
#define mtype_gc IPSET_TOKEN(MTYPE, _gc)
#define mtype_gc_init IPSET_TOKEN(MTYPE, _gc_init)
#define mtype_variant IPSET_TOKEN(MTYPE, _variant)
@@ -275,8 +315,7 @@ htable_bits(u32 hashsize)
/* The generic hash structure */
struct htype {
struct htable __rcu *table; /* the hash table */
- struct timer_list gc; /* garbage collection when timeout enabled */
- struct ip_set *set; /* attached to this ip_set */
+ struct htable_gc gc; /* gc workqueue */
u32 maxelem; /* max elements in the hash */
u32 initval; /* random jhash init value */
#ifdef IP_SET_HASH_WITH_MARKMASK
@@ -288,21 +327,33 @@ struct htype {
#ifdef IP_SET_HASH_WITH_NETMASK
u8 netmask; /* netmask value for subnets to store */
#endif
+ struct list_head ad; /* Resize add|del backlist */
struct mtype_elem next; /* temporary storage for uadd */
#ifdef IP_SET_HASH_WITH_NETS
struct net_prefixes nets[NLEN]; /* book-keeping of prefixes */
#endif
};
+/* ADD|DEL entries saved during resize */
+struct mtype_resize_ad {
+ struct list_head list;
+ enum ipset_adt ad; /* ADD|DEL element */
+ struct mtype_elem d; /* Element value */
+ struct ip_set_ext ext; /* Extensions for ADD */
+ struct ip_set_ext mext; /* Target extensions for ADD */
+ u32 flags; /* Flags for ADD */
+};
+
#ifdef IP_SET_HASH_WITH_NETS
/* Network cidr size book keeping when the hash stores different
* sized networks. cidr == real cidr + 1 to support /0.
*/
static void
-mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
+mtype_add_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
int i, j;
+ spin_lock_bh(&set->lock);
/* Add in increasing prefix order, so larger cidr first */
for (i = 0, j = -1; i < NLEN && h->nets[i].cidr[n]; i++) {
if (j != -1) {
@@ -311,7 +362,7 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
j = i;
} else if (h->nets[i].cidr[n] == cidr) {
h->nets[CIDR_POS(cidr)].nets[n]++;
- return;
+ goto unlock;
}
}
if (j != -1) {
@@ -320,24 +371,29 @@ mtype_add_cidr(struct htype *h, u8 cidr, u8 n)
}
h->nets[i].cidr[n] = cidr;
h->nets[CIDR_POS(cidr)].nets[n] = 1;
+unlock:
+ spin_unlock_bh(&set->lock);
}
static void
-mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
+mtype_del_cidr(struct ip_set *set, struct htype *h, u8 cidr, u8 n)
{
u8 i, j, net_end = NLEN - 1;
+ spin_lock_bh(&set->lock);
for (i = 0; i < NLEN; i++) {
if (h->nets[i].cidr[n] != cidr)
continue;
h->nets[CIDR_POS(cidr)].nets[n]--;
if (h->nets[CIDR_POS(cidr)].nets[n] > 0)
- return;
+ goto unlock;
for (j = i; j < net_end && h->nets[j].cidr[n]; j++)
h->nets[j].cidr[n] = h->nets[j + 1].cidr[n];
h->nets[j].cidr[n] = 0;
- return;
+ goto unlock;
}
+unlock:
+ spin_unlock_bh(&set->lock);
}
#endif
@@ -345,7 +401,7 @@ mtype_del_cidr(struct htype *h, u8 cidr, u8 n)
static size_t
mtype_ahash_memsize(const struct htype *h, const struct htable *t)
{
- return sizeof(*h) + sizeof(*t);
+ return sizeof(*h) + sizeof(*t) + ahash_sizeof_regions(t->htable_bits);
}
/* Get the ith element from the array block n */
@@ -369,24 +425,29 @@ mtype_flush(struct ip_set *set)
struct htype *h = set->data;
struct htable *t;
struct hbucket *n;
- u32 i;
-
- t = ipset_dereference_protected(h->table, set);
- for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(t, i), 1);
- if (!n)
- continue;
- if (set->extensions & IPSET_EXT_DESTROY)
- mtype_ext_cleanup(set, n);
- /* FIXME: use slab cache */
- rcu_assign_pointer(hbucket(t, i), NULL);
- kfree_rcu(n, rcu);
+ u32 r, i;
+
+ t = ipset_dereference_nfnl(h->table);
+ for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
+ spin_lock_bh(&t->hregion[r].lock);
+ for (i = ahash_bucket_start(r, t->htable_bits);
+ i < ahash_bucket_end(r, t->htable_bits); i++) {
+ n = __ipset_dereference(hbucket(t, i));
+ if (!n)
+ continue;
+ if (set->extensions & IPSET_EXT_DESTROY)
+ mtype_ext_cleanup(set, n);
+ /* FIXME: use slab cache */
+ rcu_assign_pointer(hbucket(t, i), NULL);
+ kfree_rcu(n, rcu);
+ }
+ t->hregion[r].ext_size = 0;
+ t->hregion[r].elements = 0;
+ spin_unlock_bh(&t->hregion[r].lock);
}
#ifdef IP_SET_HASH_WITH_NETS
memset(h->nets, 0, sizeof(h->nets));
#endif
- set->elements = 0;
- set->ext_size = 0;
}
/* Destroy the hashtable part of the set */
@@ -397,7 +458,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
u32 i;
for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(t, i), 1);
+ n = __ipset_dereference(hbucket(t, i));
if (!n)
continue;
if (set->extensions & IPSET_EXT_DESTROY && ext_destroy)
@@ -406,6 +467,7 @@ mtype_ahash_destroy(struct ip_set *set, struct htable *t, bool ext_destroy)
kfree(n);
}
+ ip_set_free(t->hregion);
ip_set_free(t);
}
@@ -414,28 +476,21 @@ static void
mtype_destroy(struct ip_set *set)
{
struct htype *h = set->data;
+ struct list_head *l, *lt;
if (SET_WITH_TIMEOUT(set))
- del_timer_sync(&h->gc);
+ cancel_delayed_work_sync(&h->gc.dwork);
- mtype_ahash_destroy(set,
- __ipset_dereference_protected(h->table, 1), true);
+ mtype_ahash_destroy(set, ipset_dereference_nfnl(h->table), true);
+ list_for_each_safe(l, lt, &h->ad) {
+ list_del(l);
+ kfree(l);
+ }
kfree(h);
set->data = NULL;
}
-static void
-mtype_gc_init(struct ip_set *set, void (*gc)(struct timer_list *t))
-{
- struct htype *h = set->data;
-
- timer_setup(&h->gc, gc, 0);
- mod_timer(&h->gc, jiffies + IPSET_GC_PERIOD(set->timeout) * HZ);
- pr_debug("gc initialized, run in every %u\n",
- IPSET_GC_PERIOD(set->timeout));
-}
-
static bool
mtype_same_set(const struct ip_set *a, const struct ip_set *b)
{
@@ -454,11 +509,9 @@ mtype_same_set(const struct ip_set *a, const struct ip_set *b)
a->extensions == b->extensions;
}
-/* Delete expired elements from the hashtable */
static void
-mtype_expire(struct ip_set *set, struct htype *h)
+mtype_gc_do(struct ip_set *set, struct htype *h, struct htable *t, u32 r)
{
- struct htable *t;
struct hbucket *n, *tmp;
struct mtype_elem *data;
u32 i, j, d;
@@ -466,10 +519,12 @@ mtype_expire(struct ip_set *set, struct htype *h)
#ifdef IP_SET_HASH_WITH_NETS
u8 k;
#endif
+ u8 htable_bits = t->htable_bits;
- t = ipset_dereference_protected(h->table, set);
- for (i = 0; i < jhash_size(t->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(t, i), 1);
+ spin_lock_bh(&t->hregion[r].lock);
+ for (i = ahash_bucket_start(r, htable_bits);
+ i < ahash_bucket_end(r, htable_bits); i++) {
+ n = __ipset_dereference(hbucket(t, i));
if (!n)
continue;
for (j = 0, d = 0; j < n->pos; j++) {
@@ -485,58 +540,100 @@ mtype_expire(struct ip_set *set, struct htype *h)
smp_mb__after_atomic();
#ifdef IP_SET_HASH_WITH_NETS
for (k = 0; k < IPSET_NET_COUNT; k++)
- mtype_del_cidr(h,
+ mtype_del_cidr(set, h,
NCIDR_PUT(DCIDR_GET(data->cidr, k)),
k);
#endif
+ t->hregion[r].elements--;
ip_set_ext_destroy(set, data);
- set->elements--;
d++;
}
if (d >= AHASH_INIT_SIZE) {
if (d >= n->size) {
+ t->hregion[r].ext_size -=
+ ext_size(n->size, dsize);
rcu_assign_pointer(hbucket(t, i), NULL);
kfree_rcu(n, rcu);
continue;
}
tmp = kzalloc(sizeof(*tmp) +
- (n->size - AHASH_INIT_SIZE) * dsize,
- GFP_ATOMIC);
+ (n->size - AHASH_INIT_SIZE) * dsize,
+ GFP_ATOMIC);
if (!tmp)
- /* Still try to delete expired elements */
+ /* Still try to delete expired elements. */
continue;
tmp->size = n->size - AHASH_INIT_SIZE;
for (j = 0, d = 0; j < n->pos; j++) {
if (!test_bit(j, n->used))
continue;
data = ahash_data(n, j, dsize);
- memcpy(tmp->value + d * dsize, data, dsize);
+ memcpy(tmp->value + d * dsize,
+ data, dsize);
set_bit(d, tmp->used);
d++;
}
tmp->pos = d;
- set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
+ t->hregion[r].ext_size -=
+ ext_size(AHASH_INIT_SIZE, dsize);
rcu_assign_pointer(hbucket(t, i), tmp);
kfree_rcu(n, rcu);
}
}
+ spin_unlock_bh(&t->hregion[r].lock);
}
static void
-mtype_gc(struct timer_list *t)
+mtype_gc(struct work_struct *work)
{
- struct htype *h = from_timer(h, t, gc);
- struct ip_set *set = h->set;
+ struct htable_gc *gc;
+ struct ip_set *set;
+ struct htype *h;
+ struct htable *t;
+ u32 r, numof_locks;
+ unsigned int next_run;
+
+ gc = container_of(work, struct htable_gc, dwork.work);
+ set = gc->set;
+ h = set->data;
- pr_debug("called\n");
spin_lock_bh(&set->lock);
- mtype_expire(set, h);
+ t = ipset_dereference_set(h->table, set);
+ atomic_inc(&t->uref);
+ numof_locks = ahash_numof_locks(t->htable_bits);
+ r = gc->region++;
+ if (r >= numof_locks) {
+ r = gc->region = 0;
+ }
+ next_run = (IPSET_GC_PERIOD(set->timeout) * HZ) / numof_locks;
+ if (next_run < HZ/10)
+ next_run = HZ/10;
spin_unlock_bh(&set->lock);
- h->gc.expires = jiffies + IPSET_GC_PERIOD(set->timeout) * HZ;
- add_timer(&h->gc);
+ mtype_gc_do(set, h, t, r);
+
+ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+ pr_debug("Table destroy after resize by expire: %p\n", t);
+ mtype_ahash_destroy(set, t, false);
+ }
+
+ queue_delayed_work(system_power_efficient_wq, &gc->dwork, next_run);
+
+}
+
+static void
+mtype_gc_init(struct htable_gc *gc)
+{
+ INIT_DEFERRABLE_WORK(&gc->dwork, mtype_gc);
+ queue_delayed_work(system_power_efficient_wq, &gc->dwork, HZ);
}
+static int
+mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags);
+static int
+mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
+ struct ip_set_ext *mext, u32 flags);
+
/* Resize a hash: create a new hash table with doubling the hashsize
* and inserting the elements to it. Repeat until we succeed or
* fail due to memory pressures.
@@ -547,7 +644,7 @@ mtype_resize(struct ip_set *set, bool retried)
struct htype *h = set->data;
struct htable *t, *orig;
u8 htable_bits;
- size_t extsize, dsize = set->dsize;
+ size_t dsize = set->dsize;
#ifdef IP_SET_HASH_WITH_NETS
u8 flags;
struct mtype_elem *tmp;
@@ -555,7 +652,9 @@ mtype_resize(struct ip_set *set, bool retried)
struct mtype_elem *data;
struct mtype_elem *d;
struct hbucket *n, *m;
- u32 i, j, key;
+ struct list_head *l, *lt;
+ struct mtype_resize_ad *x;
+ u32 i, j, r, nr, key;
int ret;
#ifdef IP_SET_HASH_WITH_NETS
@@ -563,10 +662,8 @@ mtype_resize(struct ip_set *set, bool retried)
if (!tmp)
return -ENOMEM;
#endif
- rcu_read_lock_bh();
- orig = rcu_dereference_bh_nfnl(h->table);
+ orig = ipset_dereference_bh_nfnl(h->table);
htable_bits = orig->htable_bits;
- rcu_read_unlock_bh();
retry:
ret = 0;
@@ -583,88 +680,124 @@ retry:
ret = -ENOMEM;
goto out;
}
+ t->hregion = ip_set_alloc(ahash_sizeof_regions(htable_bits));
+ if (!t->hregion) {
+ kfree(t);
+ ret = -ENOMEM;
+ goto out;
+ }
t->htable_bits = htable_bits;
+ t->maxelem = h->maxelem / ahash_numof_locks(htable_bits);
+ for (i = 0; i < ahash_numof_locks(htable_bits); i++)
+ spin_lock_init(&t->hregion[i].lock);
- spin_lock_bh(&set->lock);
- orig = __ipset_dereference_protected(h->table, 1);
- /* There can't be another parallel resizing, but dumping is possible */
+ /* There can't be another parallel resizing,
+ * but dumping, gc, kernel side add/del are possible
+ */
+ orig = ipset_dereference_bh_nfnl(h->table);
atomic_set(&orig->ref, 1);
atomic_inc(&orig->uref);
- extsize = 0;
pr_debug("attempt to resize set %s from %u to %u, t %p\n",
set->name, orig->htable_bits, htable_bits, orig);
- for (i = 0; i < jhash_size(orig->htable_bits); i++) {
- n = __ipset_dereference_protected(hbucket(orig, i), 1);
- if (!n)
- continue;
- for (j = 0; j < n->pos; j++) {
- if (!test_bit(j, n->used))
+ for (r = 0; r < ahash_numof_locks(orig->htable_bits); r++) {
+ /* Expire may replace a hbucket with another one */
+ rcu_read_lock_bh();
+ for (i = ahash_bucket_start(r, orig->htable_bits);
+ i < ahash_bucket_end(r, orig->htable_bits); i++) {
+ n = __ipset_dereference(hbucket(orig, i));
+ if (!n)
continue;
- data = ahash_data(n, j, dsize);
+ for (j = 0; j < n->pos; j++) {
+ if (!test_bit(j, n->used))
+ continue;
+ data = ahash_data(n, j, dsize);
+ if (SET_ELEM_EXPIRED(set, data))
+ continue;
#ifdef IP_SET_HASH_WITH_NETS
- /* We have readers running parallel with us,
- * so the live data cannot be modified.
- */
- flags = 0;
- memcpy(tmp, data, dsize);
- data = tmp;
- mtype_data_reset_flags(data, &flags);
+ /* We have readers running parallel with us,
+ * so the live data cannot be modified.
+ */
+ flags = 0;
+ memcpy(tmp, data, dsize);
+ data = tmp;
+ mtype_data_reset_flags(data, &flags);
#endif
- key = HKEY(data, h->initval, htable_bits);
- m = __ipset_dereference_protected(hbucket(t, key), 1);
- if (!m) {
- m = kzalloc(sizeof(*m) +
+ key = HKEY(data, h->initval, htable_bits);
+ m = __ipset_dereference(hbucket(t, key));
+ nr = ahash_region(key, htable_bits);
+ if (!m) {
+ m = kzalloc(sizeof(*m) +
AHASH_INIT_SIZE * dsize,
GFP_ATOMIC);
- if (!m) {
- ret = -ENOMEM;
- goto cleanup;
- }
- m->size = AHASH_INIT_SIZE;
- extsize += ext_size(AHASH_INIT_SIZE, dsize);
- RCU_INIT_POINTER(hbucket(t, key), m);
- } else if (m->pos >= m->size) {
- struct hbucket *ht;
-
- if (m->size >= AHASH_MAX(h)) {
- ret = -EAGAIN;
- } else {
- ht = kzalloc(sizeof(*ht) +
+ if (!m) {
+ ret = -ENOMEM;
+ goto cleanup;
+ }
+ m->size = AHASH_INIT_SIZE;
+ t->hregion[nr].ext_size +=
+ ext_size(AHASH_INIT_SIZE,
+ dsize);
+ RCU_INIT_POINTER(hbucket(t, key), m);
+ } else if (m->pos >= m->size) {
+ struct hbucket *ht;
+
+ if (m->size >= AHASH_MAX(h)) {
+ ret = -EAGAIN;
+ } else {
+ ht = kzalloc(sizeof(*ht) +
(m->size + AHASH_INIT_SIZE)
* dsize,
GFP_ATOMIC);
- if (!ht)
- ret = -ENOMEM;
+ if (!ht)
+ ret = -ENOMEM;
+ }
+ if (ret < 0)
+ goto cleanup;
+ memcpy(ht, m, sizeof(struct hbucket) +
+ m->size * dsize);
+ ht->size = m->size + AHASH_INIT_SIZE;
+ t->hregion[nr].ext_size +=
+ ext_size(AHASH_INIT_SIZE,
+ dsize);
+ kfree(m);
+ m = ht;
+ RCU_INIT_POINTER(hbucket(t, key), ht);
}
- if (ret < 0)
- goto cleanup;
- memcpy(ht, m, sizeof(struct hbucket) +
- m->size * dsize);
- ht->size = m->size + AHASH_INIT_SIZE;
- extsize += ext_size(AHASH_INIT_SIZE, dsize);
- kfree(m);
- m = ht;
- RCU_INIT_POINTER(hbucket(t, key), ht);
- }
- d = ahash_data(m, m->pos, dsize);
- memcpy(d, data, dsize);
- set_bit(m->pos++, m->used);
+ d = ahash_data(m, m->pos, dsize);
+ memcpy(d, data, dsize);
+ set_bit(m->pos++, m->used);
+ t->hregion[nr].elements++;
#ifdef IP_SET_HASH_WITH_NETS
- mtype_data_reset_flags(d, &flags);
+ mtype_data_reset_flags(d, &flags);
#endif
+ }
}
+ rcu_read_unlock_bh();
}
- rcu_assign_pointer(h->table, t);
- set->ext_size = extsize;
- spin_unlock_bh(&set->lock);
+ /* There can't be any other writer. */
+ rcu_assign_pointer(h->table, t);
/* Give time to other readers of the set */
synchronize_rcu();
pr_debug("set %s resized from %u (%p) to %u (%p)\n", set->name,
orig->htable_bits, orig, t->htable_bits, t);
- /* If there's nobody else dumping the table, destroy it */
+ /* Add/delete elements processed by the SET target during resize.
+ * Kernel-side add cannot trigger a resize and userspace actions
+ * are serialized by the mutex.
+ */
+ list_for_each_safe(l, lt, &h->ad) {
+ x = list_entry(l, struct mtype_resize_ad, list);
+ if (x->ad == IPSET_ADD) {
+ mtype_add(set, &x->d, &x->ext, &x->mext, x->flags);
+ } else {
+ mtype_del(set, &x->d, NULL, NULL, 0);
+ }
+ list_del(l);
+ kfree(l);
+ }
+ /* If there's nobody else using the table, destroy it */
if (atomic_dec_and_test(&orig->uref)) {
pr_debug("Table destroy by resize %p\n", orig);
mtype_ahash_destroy(set, orig, false);
@@ -677,15 +810,44 @@ out:
return ret;
cleanup:
+ rcu_read_unlock_bh();
atomic_set(&orig->ref, 0);
atomic_dec(&orig->uref);
- spin_unlock_bh(&set->lock);
mtype_ahash_destroy(set, t, false);
if (ret == -EAGAIN)
goto retry;
goto out;
}
+/* Get the current number of elements and ext_size in the set */
+static void
+mtype_ext_size(struct ip_set *set, u32 *elements, size_t *ext_size)
+{
+ struct htype *h = set->data;
+ const struct htable *t;
+ u32 i, j, r;
+ struct hbucket *n;
+ struct mtype_elem *data;
+
+ t = rcu_dereference_bh(h->table);
+ for (r = 0; r < ahash_numof_locks(t->htable_bits); r++) {
+ for (i = ahash_bucket_start(r, t->htable_bits);
+ i < ahash_bucket_end(r, t->htable_bits); i++) {
+ n = rcu_dereference_bh(hbucket(t, i));
+ if (!n)
+ continue;
+ for (j = 0; j < n->pos; j++) {
+ if (!test_bit(j, n->used))
+ continue;
+ data = ahash_data(n, j, set->dsize);
+ if (!SET_ELEM_EXPIRED(set, data))
+ (*elements)++;
+ }
+ }
+ *ext_size += t->hregion[r].ext_size;
+ }
+}
+
/* Add an element to a hash and update the internal counters when succeeded,
* otherwise report the proper error code.
*/
@@ -698,32 +860,49 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n, *old = ERR_PTR(-ENOENT);
- int i, j = -1;
+ int i, j = -1, ret;
bool flag_exist = flags & IPSET_FLAG_EXIST;
bool deleted = false, forceadd = false, reuse = false;
- u32 key, multi = 0;
+ u32 r, key, multi = 0, elements, maxelem;
- if (set->elements >= h->maxelem) {
- if (SET_WITH_TIMEOUT(set))
- /* FIXME: when set is full, we slow down here */
- mtype_expire(set, h);
- if (set->elements >= h->maxelem && SET_WITH_FORCEADD(set))
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
+ key = HKEY(value, h->initval, t->htable_bits);
+ r = ahash_region(key, t->htable_bits);
+ atomic_inc(&t->uref);
+ elements = t->hregion[r].elements;
+ maxelem = t->maxelem;
+ if (elements >= maxelem) {
+ u32 e;
+ if (SET_WITH_TIMEOUT(set)) {
+ rcu_read_unlock_bh();
+ mtype_gc_do(set, h, t, r);
+ rcu_read_lock_bh();
+ }
+ maxelem = h->maxelem;
+ elements = 0;
+ for (e = 0; e < ahash_numof_locks(t->htable_bits); e++)
+ elements += t->hregion[e].elements;
+ if (elements >= maxelem && SET_WITH_FORCEADD(set))
forceadd = true;
}
+ rcu_read_unlock_bh();
- t = ipset_dereference_protected(h->table, set);
- key = HKEY(value, h->initval, t->htable_bits);
- n = __ipset_dereference_protected(hbucket(t, key), 1);
+ spin_lock_bh(&t->hregion[r].lock);
+ n = rcu_dereference_bh(hbucket(t, key));
if (!n) {
- if (forceadd || set->elements >= h->maxelem)
+ if (forceadd || elements >= maxelem)
goto set_full;
old = NULL;
n = kzalloc(sizeof(*n) + AHASH_INIT_SIZE * set->dsize,
GFP_ATOMIC);
- if (!n)
- return -ENOMEM;
+ if (!n) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
n->size = AHASH_INIT_SIZE;
- set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
+ t->hregion[r].ext_size +=
+ ext_size(AHASH_INIT_SIZE, set->dsize);
goto copy_elem;
}
for (i = 0; i < n->pos; i++) {
@@ -737,38 +916,37 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
}
data = ahash_data(n, i, set->dsize);
if (mtype_data_equal(data, d, &multi)) {
- if (flag_exist ||
- (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)))) {
+ if (flag_exist || SET_ELEM_EXPIRED(set, data)) {
/* Just the extensions could be overwritten */
j = i;
goto overwrite_extensions;
}
- return -IPSET_ERR_EXIST;
+ ret = -IPSET_ERR_EXIST;
+ goto unlock;
}
/* Reuse first timed out entry */
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)) &&
- j == -1) {
+ if (SET_ELEM_EXPIRED(set, data) && j == -1) {
j = i;
reuse = true;
}
}
if (reuse || forceadd) {
+ if (j == -1)
+ j = 0;
data = ahash_data(n, j, set->dsize);
if (!deleted) {
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
- mtype_del_cidr(h,
+ mtype_del_cidr(set, h,
NCIDR_PUT(DCIDR_GET(data->cidr, i)),
i);
#endif
ip_set_ext_destroy(set, data);
- set->elements--;
+ t->hregion[r].elements--;
}
goto copy_data;
}
- if (set->elements >= h->maxelem)
+ if (elements >= maxelem)
goto set_full;
/* Create a new slot */
if (n->pos >= n->size) {
@@ -776,28 +954,32 @@ mtype_add(struct ip_set *set, void *value, const struct ip_set_ext *ext,
if (n->size >= AHASH_MAX(h)) {
/* Trigger rehashing */
mtype_data_next(&h->next, d);
- return -EAGAIN;
+ ret = -EAGAIN;
+ goto resize;
}
old = n;
n = kzalloc(sizeof(*n) +
(old->size + AHASH_INIT_SIZE) * set->dsize,
GFP_ATOMIC);
- if (!n)
- return -ENOMEM;
+ if (!n) {
+ ret = -ENOMEM;
+ goto unlock;
+ }
memcpy(n, old, sizeof(struct hbucket) +
old->size * set->dsize);
n->size = old->size + AHASH_INIT_SIZE;
- set->ext_size += ext_size(AHASH_INIT_SIZE, set->dsize);
+ t->hregion[r].ext_size +=
+ ext_size(AHASH_INIT_SIZE, set->dsize);
}
copy_elem:
j = n->pos++;
data = ahash_data(n, j, set->dsize);
copy_data:
- set->elements++;
+ t->hregion[r].elements++;
#ifdef IP_SET_HASH_WITH_NETS
for (i = 0; i < IPSET_NET_COUNT; i++)
- mtype_add_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
+ mtype_add_cidr(set, h, NCIDR_PUT(DCIDR_GET(d->cidr, i)), i);
#endif
memcpy(data, d, sizeof(struct mtype_elem));
overwrite_extensions:
@@ -820,13 +1002,41 @@ overwrite_extensions:
if (old)
kfree_rcu(old, rcu);
}
+ ret = 0;
+resize:
+ spin_unlock_bh(&t->hregion[r].lock);
+ if (atomic_read(&t->ref) && ext->target) {
+ /* Resize is in process and kernel side add, save values */
+ struct mtype_resize_ad *x;
+
+ x = kzalloc(sizeof(struct mtype_resize_ad), GFP_ATOMIC);
+ if (!x)
+ /* Don't bother */
+ goto out;
+ x->ad = IPSET_ADD;
+ memcpy(&x->d, value, sizeof(struct mtype_elem));
+ memcpy(&x->ext, ext, sizeof(struct ip_set_ext));
+ memcpy(&x->mext, mext, sizeof(struct ip_set_ext));
+ x->flags = flags;
+ spin_lock_bh(&set->lock);
+ list_add_tail(&x->list, &h->ad);
+ spin_unlock_bh(&set->lock);
+ }
+ goto out;
- return 0;
set_full:
if (net_ratelimit())
pr_warn("Set %s is full, maxelem %u reached\n",
- set->name, h->maxelem);
- return -IPSET_ERR_HASH_FULL;
+ set->name, maxelem);
+ ret = -IPSET_ERR_HASH_FULL;
+unlock:
+ spin_unlock_bh(&t->hregion[r].lock);
+out:
+ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+ pr_debug("Table destroy after resize by add: %p\n", t);
+ mtype_ahash_destroy(set, t, false);
+ }
+ return ret;
}
/* Delete an element from the hash and free up space if possible.
@@ -840,13 +1050,23 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
const struct mtype_elem *d = value;
struct mtype_elem *data;
struct hbucket *n;
- int i, j, k, ret = -IPSET_ERR_EXIST;
+ struct mtype_resize_ad *x = NULL;
+ int i, j, k, r, ret = -IPSET_ERR_EXIST;
u32 key, multi = 0;
size_t dsize = set->dsize;
- t = ipset_dereference_protected(h->table, set);
+ /* Userspace add and resize are excluded by the mutex.
+ * Kernelspace add does not trigger a resize.
+ */
+ rcu_read_lock_bh();
+ t = rcu_dereference_bh(h->table);
key = HKEY(value, h->initval, t->htable_bits);
- n = __ipset_dereference_protected(hbucket(t, key), 1);
+ r = ahash_region(key, t->htable_bits);
+ atomic_inc(&t->uref);
+ rcu_read_unlock_bh();
+
+ spin_lock_bh(&t->hregion[r].lock);
+ n = rcu_dereference_bh(hbucket(t, key));
if (!n)
goto out;
for (i = 0, k = 0; i < n->pos; i++) {
@@ -857,8 +1077,7 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
data = ahash_data(n, i, dsize);
if (!mtype_data_equal(data, d, &multi))
continue;
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(data, set)))
+ if (SET_ELEM_EXPIRED(set, data))
goto out;
ret = 0;
@@ -866,20 +1085,33 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
smp_mb__after_atomic();
if (i + 1 == n->pos)
n->pos--;
- set->elements--;
+ t->hregion[r].elements--;
#ifdef IP_SET_HASH_WITH_NETS
for (j = 0; j < IPSET_NET_COUNT; j++)
- mtype_del_cidr(h, NCIDR_PUT(DCIDR_GET(d->cidr, j)),
- j);
+ mtype_del_cidr(set, h,
+ NCIDR_PUT(DCIDR_GET(d->cidr, j)), j);
#endif
ip_set_ext_destroy(set, data);
+ if (atomic_read(&t->ref) && ext->target) {
+ /* Resize is in process and kernel side del,
+ * save values
+ */
+ x = kzalloc(sizeof(struct mtype_resize_ad),
+ GFP_ATOMIC);
+ if (x) {
+ x->ad = IPSET_DEL;
+ memcpy(&x->d, value,
+ sizeof(struct mtype_elem));
+ x->flags = flags;
+ }
+ }
for (; i < n->pos; i++) {
if (!test_bit(i, n->used))
k++;
}
if (n->pos == 0 && k == 0) {
- set->ext_size -= ext_size(n->size, dsize);
+ t->hregion[r].ext_size -= ext_size(n->size, dsize);
rcu_assign_pointer(hbucket(t, key), NULL);
kfree_rcu(n, rcu);
} else if (k >= AHASH_INIT_SIZE) {
@@ -898,7 +1130,8 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
k++;
}
tmp->pos = k;
- set->ext_size -= ext_size(AHASH_INIT_SIZE, dsize);
+ t->hregion[r].ext_size -=
+ ext_size(AHASH_INIT_SIZE, dsize);
rcu_assign_pointer(hbucket(t, key), tmp);
kfree_rcu(n, rcu);
}
@@ -906,6 +1139,16 @@ mtype_del(struct ip_set *set, void *value, const struct ip_set_ext *ext,
}
out:
+ spin_unlock_bh(&t->hregion[r].lock);
+ if (x) {
+ spin_lock_bh(&set->lock);
+ list_add(&x->list, &h->ad);
+ spin_unlock_bh(&set->lock);
+ }
+ if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
+ pr_debug("Table destroy after resize by del: %p\n", t);
+ mtype_ahash_destroy(set, t, false);
+ }
return ret;
}
@@ -991,6 +1234,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
int i, ret = 0;
u32 key, multi = 0;
+ rcu_read_lock_bh();
t = rcu_dereference_bh(h->table);
#ifdef IP_SET_HASH_WITH_NETS
/* If we test an IP address and not a network address,
@@ -1022,6 +1266,7 @@ mtype_test(struct ip_set *set, void *value, const struct ip_set_ext *ext,
goto out;
}
out:
+ rcu_read_unlock_bh();
return ret;
}
@@ -1033,23 +1278,14 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
const struct htable *t;
struct nlattr *nested;
size_t memsize;
+ u32 elements = 0;
+ size_t ext_size = 0;
u8 htable_bits;
- /* If any members have expired, set->elements will be wrong
- * mytype_expire function will update it with the right count.
- * we do not hold set->lock here, so grab it first.
- * set->elements can still be incorrect in the case of a huge set,
- * because elements might time out during the listing.
- */
- if (SET_WITH_TIMEOUT(set)) {
- spin_lock_bh(&set->lock);
- mtype_expire(set, h);
- spin_unlock_bh(&set->lock);
- }
-
rcu_read_lock_bh();
- t = rcu_dereference_bh_nfnl(h->table);
- memsize = mtype_ahash_memsize(h, t) + set->ext_size;
+ t = rcu_dereference_bh(h->table);
+ mtype_ext_size(set, &elements, &ext_size);
+ memsize = mtype_ahash_memsize(h, t) + ext_size + set->ext_size;
htable_bits = t->htable_bits;
rcu_read_unlock_bh();
@@ -1071,7 +1307,7 @@ mtype_head(struct ip_set *set, struct sk_buff *skb)
#endif
if (nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref)) ||
nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)) ||
- nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(set->elements)))
+ nla_put_net32(skb, IPSET_ATTR_ELEMENTS, htonl(elements)))
goto nla_put_failure;
if (unlikely(ip_set_put_flags(skb, set)))
goto nla_put_failure;
@@ -1091,15 +1327,15 @@ mtype_uref(struct ip_set *set, struct netlink_callback *cb, bool start)
if (start) {
rcu_read_lock_bh();
- t = rcu_dereference_bh_nfnl(h->table);
+ t = ipset_dereference_bh_nfnl(h->table);
atomic_inc(&t->uref);
cb->args[IPSET_CB_PRIVATE] = (unsigned long)t;
rcu_read_unlock_bh();
} else if (cb->args[IPSET_CB_PRIVATE]) {
t = (struct htable *)cb->args[IPSET_CB_PRIVATE];
if (atomic_dec_and_test(&t->uref) && atomic_read(&t->ref)) {
- /* Resizing didn't destroy the hash table */
- pr_debug("Table destroy by dump: %p\n", t);
+ pr_debug("Table destroy after resize "
+ " by dump: %p\n", t);
mtype_ahash_destroy(set, t, false);
}
cb->args[IPSET_CB_PRIVATE] = 0;
@@ -1141,8 +1377,7 @@ mtype_list(const struct ip_set *set,
if (!test_bit(i, n->used))
continue;
e = ahash_data(n, i, set->dsize);
- if (SET_WITH_TIMEOUT(set) &&
- ip_set_timeout_expired(ext_timeout(e, set)))
+ if (SET_ELEM_EXPIRED(set, e))
continue;
pr_debug("list hash %lu hbucket %p i %u, data %p\n",
cb->args[IPSET_CB_ARG0], n, i, e);
@@ -1208,6 +1443,7 @@ static const struct ip_set_type_variant mtype_variant = {
.uref = mtype_uref,
.resize = mtype_resize,
.same_set = mtype_same_set,
+ .region_lock = true,
};
#ifdef IP_SET_EMIT_CREATE
@@ -1226,6 +1462,7 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
size_t hsize;
struct htype *h;
struct htable *t;
+ u32 i;
pr_debug("Create set %s with family %s\n",
set->name, set->family == NFPROTO_IPV4 ? "inet" : "inet6");
@@ -1294,6 +1531,15 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
kfree(h);
return -ENOMEM;
}
+ t->hregion = ip_set_alloc(ahash_sizeof_regions(hbits));
+ if (!t->hregion) {
+ kfree(t);
+ kfree(h);
+ return -ENOMEM;
+ }
+ h->gc.set = set;
+ for (i = 0; i < ahash_numof_locks(hbits); i++)
+ spin_lock_init(&t->hregion[i].lock);
h->maxelem = maxelem;
#ifdef IP_SET_HASH_WITH_NETMASK
h->netmask = netmask;
@@ -1304,9 +1550,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
get_random_bytes(&h->initval, sizeof(h->initval));
t->htable_bits = hbits;
+ t->maxelem = h->maxelem / ahash_numof_locks(hbits);
RCU_INIT_POINTER(h->table, t);
- h->set = set;
+ INIT_LIST_HEAD(&h->ad);
set->data = h;
#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4) {
@@ -1329,12 +1576,10 @@ IPSET_TOKEN(HTYPE, _create)(struct net *net, struct ip_set *set,
#ifndef IP_SET_PROTO_UNDEF
if (set->family == NFPROTO_IPV4)
#endif
- IPSET_TOKEN(HTYPE, 4_gc_init)(set,
- IPSET_TOKEN(HTYPE, 4_gc));
+ IPSET_TOKEN(HTYPE, 4_gc_init)(&h->gc);
#ifndef IP_SET_PROTO_UNDEF
else
- IPSET_TOKEN(HTYPE, 6_gc_init)(set,
- IPSET_TOKEN(HTYPE, 6_gc));
+ IPSET_TOKEN(HTYPE, 6_gc_init)(&h->gc);
#endif
}
pr_debug("create %s hashsize %u (%u) maxelem %u: %p(%p)\n",
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index d1305423640f..1927fc296f95 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -894,32 +894,175 @@ static void nf_ct_acct_merge(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
}
}
-/* Resolve race on insertion if this protocol allows this. */
+static void __nf_conntrack_insert_prepare(struct nf_conn *ct)
+{
+ struct nf_conn_tstamp *tstamp;
+
+ atomic_inc(&ct->ct_general.use);
+ ct->status |= IPS_CONFIRMED;
+
+ /* set conntrack timestamp, if enabled. */
+ tstamp = nf_conn_tstamp_find(ct);
+ if (tstamp)
+ tstamp->start = ktime_get_real_ns();
+}
+
+static int __nf_ct_resolve_clash(struct sk_buff *skb,
+ struct nf_conntrack_tuple_hash *h)
+{
+ /* This is the conntrack entry already in hashes that won race. */
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *loser_ct;
+
+ loser_ct = nf_ct_get(skb, &ctinfo);
+
+ if (nf_ct_is_dying(ct))
+ return NF_DROP;
+
+ if (!atomic_inc_not_zero(&ct->ct_general.use))
+ return NF_DROP;
+
+ if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
+ nf_ct_match(ct, loser_ct)) {
+ struct net *net = nf_ct_net(ct);
+
+ nf_ct_acct_merge(ct, ctinfo, loser_ct);
+ nf_ct_add_to_dying_list(loser_ct);
+ nf_conntrack_put(&loser_ct->ct_general);
+ nf_ct_set(skb, ct, ctinfo);
+
+ NF_CT_STAT_INC(net, insert_failed);
+ return NF_ACCEPT;
+ }
+
+ nf_ct_put(ct);
+ return NF_DROP;
+}
+
+/**
+ * nf_ct_resolve_clash_harder - attempt to insert clashing conntrack entry
+ *
+ * @skb: skb that causes the collision
+ * @repl_idx: hash slot for reply direction
+ *
+ * Called when origin or reply direction had a clash.
+ * The skb can be handled without packet drop provided the reply direction
+ * is unique or there the existing entry has the identical tuple in both
+ * directions.
+ *
+ * Caller must hold conntrack table locks to prevent concurrent updates.
+ *
+ * Returns NF_DROP if the clash could not be handled.
+ */
+static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
+{
+ struct nf_conn *loser_ct = (struct nf_conn *)skb_nfct(skb);
+ const struct nf_conntrack_zone *zone;
+ struct nf_conntrack_tuple_hash *h;
+ struct hlist_nulls_node *n;
+ struct net *net;
+
+ zone = nf_ct_zone(loser_ct);
+ net = nf_ct_net(loser_ct);
+
+ /* Reply direction must never result in a clash, unless both origin
+ * and reply tuples are identical.
+ */
+ hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[repl_idx], hnnode) {
+ if (nf_ct_key_equal(h,
+ &loser_ct->tuplehash[IP_CT_DIR_REPLY].tuple,
+ zone, net))
+ return __nf_ct_resolve_clash(skb, h);
+ }
+
+ /* We want the clashing entry to go away real soon: 1 second timeout. */
+ loser_ct->timeout = nfct_time_stamp + HZ;
+
+ /* IPS_NAT_CLASH removes the entry automatically on the first
+ * reply. Also prevents UDP tracker from moving the entry to
+ * ASSURED state, i.e. the entry can always be evicted under
+ * pressure.
+ */
+ loser_ct->status |= IPS_FIXED_TIMEOUT | IPS_NAT_CLASH;
+
+ __nf_conntrack_insert_prepare(loser_ct);
+
+ /* fake add for ORIGINAL dir: we want lookups to only find the entry
+ * already in the table. This also hides the clashing entry from
+ * ctnetlink iteration, i.e. conntrack -L won't show them.
+ */
+ hlist_nulls_add_fake(&loser_ct->tuplehash[IP_CT_DIR_ORIGINAL].hnnode);
+
+ hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
+ &nf_conntrack_hash[repl_idx]);
+ return NF_ACCEPT;
+}
+
+/**
+ * nf_ct_resolve_clash - attempt to handle clash without packet drop
+ *
+ * @skb: skb that causes the clash
+ * @h: tuplehash of the clashing entry already in table
+ * @reply_hash: hash slot for reply direction
+ *
+ * A conntrack entry can be inserted to the connection tracking table
+ * if there is no existing entry with an identical tuple.
+ *
+ * If there is one, @skb (and the associated, unconfirmed conntrack) has
+ * to be dropped. In case @skb is retransmitted, next conntrack lookup
+ * will find the already-existing entry.
+ *
+ * The major problem with such packet drop is the extra delay added by
+ * the packet loss -- it will take some time for a retransmit to occur
+ * (or the sender to time out when waiting for a reply).
+ *
+ * This function attempts to handle the situation without packet drop.
+ *
+ * If @skb has no NAT transformation or if the colliding entries are
+ * exactly the same, only the to-be-confirmed conntrack entry is discarded
+ * and @skb is associated with the conntrack entry already in the table.
+ *
+ * Failing that, the new, unconfirmed conntrack is still added to the table
+ * provided that the collision only occurs in the ORIGINAL direction.
+ * The new entry will be added after the existing one in the hash list,
+ * so packets in the ORIGINAL direction will continue to match the existing
+ * entry. The new entry will also have a fixed timeout so it expires --
+ * due to the collision, it will not see bidirectional traffic.
+ *
+ * Returns NF_DROP if the clash could not be resolved.
+ */
static __cold noinline int
-nf_ct_resolve_clash(struct net *net, struct sk_buff *skb,
- enum ip_conntrack_info ctinfo,
- struct nf_conntrack_tuple_hash *h)
+nf_ct_resolve_clash(struct sk_buff *skb, struct nf_conntrack_tuple_hash *h,
+ u32 reply_hash)
{
/* This is the conntrack entry already in hashes that won race. */
struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
const struct nf_conntrack_l4proto *l4proto;
- enum ip_conntrack_info oldinfo;
- struct nf_conn *loser_ct = nf_ct_get(skb, &oldinfo);
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *loser_ct;
+ struct net *net;
+ int ret;
+
+ loser_ct = nf_ct_get(skb, &ctinfo);
+ net = nf_ct_net(loser_ct);
l4proto = nf_ct_l4proto_find(nf_ct_protonum(ct));
- if (l4proto->allow_clash &&
- !nf_ct_is_dying(ct) &&
- atomic_inc_not_zero(&ct->ct_general.use)) {
- if (((ct->status & IPS_NAT_DONE_MASK) == 0) ||
- nf_ct_match(ct, loser_ct)) {
- nf_ct_acct_merge(ct, ctinfo, loser_ct);
- nf_conntrack_put(&loser_ct->ct_general);
- nf_ct_set(skb, ct, oldinfo);
- return NF_ACCEPT;
- }
- nf_ct_put(ct);
- }
+ if (!l4proto->allow_clash)
+ goto drop;
+
+ ret = __nf_ct_resolve_clash(skb, h);
+ if (ret == NF_ACCEPT)
+ return ret;
+
+ ret = nf_ct_resolve_clash_harder(skb, reply_hash);
+ if (ret == NF_ACCEPT)
+ return ret;
+
+drop:
+ nf_ct_add_to_dying_list(loser_ct);
NF_CT_STAT_INC(net, drop);
+ NF_CT_STAT_INC(net, insert_failed);
return NF_DROP;
}
@@ -932,7 +1075,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conntrack_tuple_hash *h;
struct nf_conn *ct;
struct nf_conn_help *help;
- struct nf_conn_tstamp *tstamp;
struct hlist_nulls_node *n;
enum ip_conntrack_info ctinfo;
struct net *net;
@@ -989,6 +1131,7 @@ __nf_conntrack_confirm(struct sk_buff *skb)
if (unlikely(nf_ct_is_dying(ct))) {
nf_ct_add_to_dying_list(ct);
+ NF_CT_STAT_INC(net, insert_failed);
goto dying;
}
@@ -1009,13 +1152,8 @@ __nf_conntrack_confirm(struct sk_buff *skb)
setting time, otherwise we'd get timer wrap in
weird delay cases. */
ct->timeout += nfct_time_stamp;
- atomic_inc(&ct->ct_general.use);
- ct->status |= IPS_CONFIRMED;
- /* set conntrack timestamp, if enabled. */
- tstamp = nf_conn_tstamp_find(ct);
- if (tstamp)
- tstamp->start = ktime_get_real_ns();
+ __nf_conntrack_insert_prepare(ct);
/* Since the lookup is lockless, hash insertion must be done after
* starting the timer and setting the CONFIRMED bit. The RCU barriers
@@ -1035,11 +1173,9 @@ __nf_conntrack_confirm(struct sk_buff *skb)
return NF_ACCEPT;
out:
- nf_ct_add_to_dying_list(ct);
- ret = nf_ct_resolve_clash(net, skb, ctinfo, h);
+ ret = nf_ct_resolve_clash(skb, h, reply_hash);
dying:
nf_conntrack_double_unlock(hash, reply_hash);
- NF_CT_STAT_INC(net, insert_failed);
local_bh_enable();
return ret;
}
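
Taken together, the conntrack hunks above reduce to a small decision ladder. A condensed sketch of that ladder (illustrative only; the struct and helpers below are simplified stand-ins, not kernel APIs):

/* Condensed sketch of the clash-resolution flow added above.
 * Everything here is a simplified stand-in for the kernel code.
 */
enum verdict { VERDICT_DROP, VERDICT_ACCEPT };

struct clash_state {
	int winner_dying;     /* existing entry already being torn down */
	int nat_done;         /* NAT bindings were set up for the skb   */
	int tuples_match;     /* both directions identical to winner    */
	int reply_dir_unique; /* loser's reply tuple not in the table   */
};

static enum verdict resolve_clash(const struct clash_state *s)
{
	/* Step 1 (__nf_ct_resolve_clash): drop the new entry and let
	 * the skb use the winner when no NAT happened or the entries
	 * are equivalent anyway.
	 */
	if (!s->winner_dying && (!s->nat_done || s->tuples_match))
		return VERDICT_ACCEPT;

	/* Step 2 (nf_ct_resolve_clash_harder): keep the new entry if
	 * only the ORIGINAL direction clashed; it gets a 1s fixed
	 * timeout, IPS_NAT_CLASH, and is inserted in the reply
	 * direction only.
	 */
	if (s->reply_dir_unique)
		return VERDICT_ACCEPT;

	return VERDICT_DROP;
}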
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
index 7365b43f8f98..760ca2422816 100644
--- a/net/netfilter/nf_conntrack_proto_udp.c
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -81,6 +81,18 @@ static bool udp_error(struct sk_buff *skb,
return false;
}
+static void nf_conntrack_udp_refresh_unreplied(struct nf_conn *ct,
+ struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo,
+ u32 extra_jiffies)
+{
+ if (unlikely(ctinfo == IP_CT_ESTABLISHED_REPLY &&
+ ct->status & IPS_NAT_CLASH))
+ nf_ct_kill(ct);
+ else
+ nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies);
+}
+
/* Returns verdict for packet, and may modify conntracktype */
int nf_conntrack_udp_packet(struct nf_conn *ct,
struct sk_buff *skb,
@@ -116,8 +128,8 @@ int nf_conntrack_udp_packet(struct nf_conn *ct,
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_ct_refresh_acct(ct, ctinfo, skb,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
+ timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
@@ -198,8 +210,8 @@ int nf_conntrack_udplite_packet(struct nf_conn *ct,
if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status))
nf_conntrack_event_cache(IPCT_ASSURED, ct);
} else {
- nf_ct_refresh_acct(ct, ctinfo, skb,
- timeouts[UDP_CT_UNREPLIED]);
+ nf_conntrack_udp_refresh_unreplied(ct, skb, ctinfo,
+ timeouts[UDP_CT_UNREPLIED]);
}
return NF_ACCEPT;
}
diff --git a/net/netfilter/nf_flow_table_offload.c b/net/netfilter/nf_flow_table_offload.c
index 83e1db37c3b0..06f00cdc3891 100644
--- a/net/netfilter/nf_flow_table_offload.c
+++ b/net/netfilter/nf_flow_table_offload.c
@@ -847,9 +847,6 @@ static int nf_flow_table_offload_cmd(struct flow_block_offload *bo,
{
int err;
- if (!nf_flowtable_hw_offload(flowtable))
- return 0;
-
if (!dev->netdev_ops->ndo_setup_tc)
return -EOPNOTSUPP;
@@ -876,6 +873,9 @@ int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
struct flow_block_offload bo;
int err;
+ if (!nf_flowtable_hw_offload(flowtable))
+ return 0;
+
err = nf_flow_table_offload_cmd(&bo, flowtable, dev, cmd, &extack);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_set_pipapo.c b/net/netfilter/nft_set_pipapo.c
index f0cb1e13af50..4fc0c924ed5d 100644
--- a/net/netfilter/nft_set_pipapo.c
+++ b/net/netfilter/nft_set_pipapo.c
@@ -203,7 +203,7 @@
* ::
*
* rule indices in last field: 0 1
- * map to elements: 0x42 0x66
+ * map to elements: 0x66 0x42
*
*
* Matching
@@ -298,7 +298,7 @@
* ::
*
* rule indices in last field: 0 1
- * map to elements: 0x42 0x66
+ * map to elements: 0x66 0x42
*
* the matching element is at 0x42.
*
@@ -503,7 +503,7 @@ static int pipapo_refill(unsigned long *map, int len, int rules,
return -1;
}
- if (unlikely(match_only)) {
+ if (match_only) {
bitmap_clear(map, i, 1);
return i;
}
@@ -1766,11 +1766,13 @@ static bool pipapo_match_field(struct nft_pipapo_field *f,
static void nft_pipapo_remove(const struct net *net, const struct nft_set *set,
const struct nft_set_elem *elem)
{
- const u8 *data = (const u8 *)elem->key.val.data;
struct nft_pipapo *priv = nft_set_priv(set);
struct nft_pipapo_match *m = priv->clone;
+ struct nft_pipapo_elem *e = elem->priv;
int rules_f0, first_rule = 0;
- struct nft_pipapo_elem *e;
+ const u8 *data;
+
+ data = (const u8 *)nft_set_ext_key(&e->ext);
e = pipapo_get(net, set, data, 0);
if (IS_ERR(e))
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index bccd47cd7190..8c835ad63729 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -36,6 +36,7 @@
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
+#include <linux/refcount.h>
#include <uapi/linux/netfilter/xt_hashlimit.h>
#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
@@ -114,7 +115,7 @@ struct dsthash_ent {
struct xt_hashlimit_htable {
struct hlist_node node; /* global list of all htables */
- int use;
+ refcount_t use;
u_int8_t family;
bool rnd_initialized;
@@ -315,7 +316,7 @@ static int htable_create(struct net *net, struct hashlimit_cfg3 *cfg,
for (i = 0; i < hinfo->cfg.size; i++)
INIT_HLIST_HEAD(&hinfo->hash[i]);
- hinfo->use = 1;
+ refcount_set(&hinfo->use, 1);
hinfo->count = 0;
hinfo->family = family;
hinfo->rnd_initialized = false;
@@ -401,15 +402,6 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
remove_proc_entry(hinfo->name, parent);
}
-static void htable_destroy(struct xt_hashlimit_htable *hinfo)
-{
- cancel_delayed_work_sync(&hinfo->gc_work);
- htable_remove_proc_entry(hinfo);
- htable_selective_cleanup(hinfo, true);
- kfree(hinfo->name);
- vfree(hinfo);
-}
-
static struct xt_hashlimit_htable *htable_find_get(struct net *net,
const char *name,
u_int8_t family)
@@ -420,7 +412,7 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
if (!strcmp(name, hinfo->name) &&
hinfo->family == family) {
- hinfo->use++;
+ refcount_inc(&hinfo->use);
return hinfo;
}
}
@@ -429,12 +421,16 @@ static struct xt_hashlimit_htable *htable_find_get(struct net *net,
static void htable_put(struct xt_hashlimit_htable *hinfo)
{
- mutex_lock(&hashlimit_mutex);
- if (--hinfo->use == 0) {
+ if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
hlist_del(&hinfo->node);
- htable_destroy(hinfo);
+ htable_remove_proc_entry(hinfo);
+ mutex_unlock(&hashlimit_mutex);
+
+ cancel_delayed_work_sync(&hinfo->gc_work);
+ htable_selective_cleanup(hinfo, true);
+ kfree(hinfo->name);
+ vfree(hinfo);
}
- mutex_unlock(&hashlimit_mutex);
}
/* The algorithm used is the Simple Token Bucket Filter (TBF)
@@ -837,6 +833,8 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par)
return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
}
+#define HASHLIMIT_MAX_SIZE 1048576
+
static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
struct xt_hashlimit_htable **hinfo,
struct hashlimit_cfg3 *cfg,
@@ -847,6 +845,14 @@ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
if (cfg->gc_interval == 0 || cfg->expire == 0)
return -EINVAL;
+ if (cfg->size > HASHLIMIT_MAX_SIZE) {
+ cfg->size = HASHLIMIT_MAX_SIZE;
+ pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
+ }
+ if (cfg->max > HASHLIMIT_MAX_SIZE) {
+ cfg->max = HASHLIMIT_MAX_SIZE;
+ pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
+ }
if (par->family == NFPROTO_IPV4) {
if (cfg->srcmask > 32 || cfg->dstmask > 32)
return -EINVAL;
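
The htable_put() rewrite follows the standard refcount_dec_and_mutex_lock() shape: only the final reference takes the mutex, the unlink happens under it, and the sleeping cleanup (cancel_delayed_work_sync() and friends) runs after the unlock. A minimal userspace sketch of the same primitive (pthread mutex and C11 atomics stand in for the kernel types):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

struct obj {
	atomic_int use;			/* stands in for refcount_t */
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors refcount_dec_and_mutex_lock(): returns 1 with the lock held
 * iff this call dropped the last reference.
 */
static int dec_and_mutex_lock(atomic_int *use, pthread_mutex_t *lock)
{
	int old = atomic_load(use);

	/* Fast path: not the last reference, no lock needed. */
	while (old > 1)
		if (atomic_compare_exchange_weak(use, &old, old - 1))
			return 0;

	/* Possibly last: decide under the lock, so no new reference
	 * can be handed out from the table while we drop to zero.
	 */
	pthread_mutex_lock(lock);
	if (atomic_fetch_sub(use, 1) == 1)
		return 1;	/* caller unlinks, then unlocks */
	pthread_mutex_unlock(lock);
	return 0;
}

static void put_obj(struct obj *o)
{
	if (dec_and_mutex_lock(&o->use, &table_lock)) {
		/* unlink o from the table here ... */
		pthread_mutex_unlock(&table_lock);
		/* sleeping cleanup is now safe: no lock held */
		free(o);
	}
}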
diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c
index f5d34da0646e..a1f2320ecc16 100644
--- a/net/netlabel/netlabel_domainhash.c
+++ b/net/netlabel/netlabel_domainhash.c
@@ -143,7 +143,8 @@ static struct netlbl_dom_map *netlbl_domhsh_search(const char *domain,
if (domain != NULL) {
bkt = netlbl_domhsh_hash(domain);
bkt_list = &netlbl_domhsh_rcu_deref(netlbl_domhsh)->tbl[bkt];
- list_for_each_entry_rcu(iter, bkt_list, list)
+ list_for_each_entry_rcu(iter, bkt_list, list,
+ lockdep_is_held(&netlbl_domhsh_lock))
if (iter->valid &&
netlbl_family_match(iter->family, family) &&
strcmp(iter->domain, domain) == 0)
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index d2e4ab8d1cb1..77bb1bb22c3b 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -207,7 +207,8 @@ static struct netlbl_unlhsh_iface *netlbl_unlhsh_search_iface(int ifindex)
bkt = netlbl_unlhsh_hash(ifindex);
bkt_list = &netlbl_unlhsh_rcu_deref(netlbl_unlhsh)->tbl[bkt];
- list_for_each_entry_rcu(iter, bkt_list, list)
+ list_for_each_entry_rcu(iter, bkt_list, list,
+ lockdep_is_held(&netlbl_unlhsh_lock))
if (iter->valid && iter->ifindex == ifindex)
return iter;
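
These netlabel hunks, and the openvswitch ones further down, all make the same mechanical change: the RCU list iterators take an optional lockdep expression so that walks done under the update-side lock, rather than rcu_read_lock(), stop triggering false RCU-lockdep splats. The calling pattern, as a kernel-context sketch (list, lock and type names are placeholders):

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct item {
	int key;
	struct list_head list;
};

static DEFINE_SPINLOCK(my_lock);	/* update-side lock */
static LIST_HEAD(my_list);

/* Legal from both a reader (under rcu_read_lock()) and the writer
 * (under my_lock); the fourth argument documents and checks the
 * writer-side case.
 */
static struct item *find_item(int key)
{
	struct item *it;

	list_for_each_entry_rcu(it, &my_list, list,
				lockdep_is_held(&my_lock)) {
		if (it->key == key)
			return it;
	}
	return NULL;
}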
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 4e31721e7293..edf3e285e242 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -1014,7 +1014,8 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
if (nlk->netlink_bind && groups) {
int group;
- for (group = 0; group < nlk->ngroups; group++) {
+ /* nl_groups is a u32, so cap the maximum groups we can bind */
+ for (group = 0; group < BITS_PER_TYPE(u32); group++) {
if (!test_bit(group, &groups))
continue;
err = nlk->netlink_bind(net, group + 1);
@@ -1033,7 +1034,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
netlink_insert(sk, nladdr->nl_pid) :
netlink_autobind(sock);
if (err) {
- netlink_undo_bind(nlk->ngroups, groups, sk);
+ netlink_undo_bind(BITS_PER_TYPE(u32), groups, sk);
goto unlock;
}
}
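
The 32-bit cap is forced by the uapi: bind() requests groups through sockaddr_nl's nl_groups bitmask, so only the first 32 groups of a family can ever be named this way, even when nlk->ngroups is larger. For reference (abbreviated from include/uapi/linux/netlink.h):

struct sockaddr_nl {
	__kernel_sa_family_t	nl_family;	/* AF_NETLINK */
	unsigned short		nl_pad;		/* zero       */
	__u32			nl_pid;		/* port ID    */
	__u32			nl_groups;	/* multicast groups mask */
};

/* BITS_PER_TYPE(u32) == 8 * sizeof(u32) == 32 */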
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 0522b2b1fd95..9f357aa22b94 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -497,8 +497,9 @@ genl_family_rcv_msg_attrs_parse(const struct genl_family *family,
err = __nlmsg_parse(nlh, hdrlen, attrbuf, family->maxattr,
family->policy, validate, extack);
- if (err && parallel) {
- kfree(attrbuf);
+ if (err) {
+ if (parallel)
+ kfree(attrbuf);
return ERR_PTR(err);
}
return attrbuf;
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 659c2a790fe7..c047afd12116 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -179,7 +179,8 @@ struct vport *ovs_lookup_vport(const struct datapath *dp, u16 port_no)
struct hlist_head *head;
head = vport_hash_bucket(dp, port_no);
- hlist_for_each_entry_rcu(vport, head, dp_hash_node) {
+ hlist_for_each_entry_rcu(vport, head, dp_hash_node,
+ lockdep_ovsl_is_held()) {
if (vport->port_no == port_no)
return vport;
}
@@ -2042,7 +2043,8 @@ static unsigned int ovs_get_max_headroom(struct datapath *dp)
int i;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++) {
- hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node) {
+ hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
+ lockdep_ovsl_is_held()) {
dev = vport->dev;
dev_headroom = netdev_get_fwd_headroom(dev);
if (dev_headroom > max_headroom)
@@ -2061,7 +2063,8 @@ static void ovs_update_headroom(struct datapath *dp, unsigned int new_headroom)
dp->max_headroom = new_headroom;
for (i = 0; i < DP_VPORT_HASH_BUCKETS; i++)
- hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node)
+ hlist_for_each_entry_rcu(vport, &dp->ports[i], dp_hash_node,
+ lockdep_ovsl_is_held())
netdev_set_rx_headroom(vport->dev, new_headroom);
}
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 7da4230627f5..288122eec7c8 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -2708,10 +2708,6 @@ static int validate_set(const struct nlattr *a,
return -EINVAL;
switch (key_type) {
- const struct ovs_key_ipv4 *ipv4_key;
- const struct ovs_key_ipv6 *ipv6_key;
- int err;
-
case OVS_KEY_ATTR_PRIORITY:
case OVS_KEY_ATTR_SKB_MARK:
case OVS_KEY_ATTR_CT_MARK:
@@ -2723,7 +2719,9 @@ static int validate_set(const struct nlattr *a,
return -EINVAL;
break;
- case OVS_KEY_ATTR_TUNNEL:
+ case OVS_KEY_ATTR_TUNNEL: {
+ int err;
+
if (masked)
return -EINVAL; /* Masked tunnel set not supported. */
@@ -2732,8 +2730,10 @@ static int validate_set(const struct nlattr *a,
if (err)
return err;
break;
+ }
+ case OVS_KEY_ATTR_IPV4: {
+ const struct ovs_key_ipv4 *ipv4_key;
- case OVS_KEY_ATTR_IPV4:
if (eth_type != htons(ETH_P_IP))
return -EINVAL;
@@ -2753,8 +2753,10 @@ static int validate_set(const struct nlattr *a,
return -EINVAL;
}
break;
+ }
+ case OVS_KEY_ATTR_IPV6: {
+ const struct ovs_key_ipv6 *ipv6_key;
- case OVS_KEY_ATTR_IPV6:
if (eth_type != htons(ETH_P_IPV6))
return -EINVAL;
@@ -2781,7 +2783,7 @@ static int validate_set(const struct nlattr *a,
return -EINVAL;
break;
-
+ }
case OVS_KEY_ATTR_TCP:
if ((eth_type != htons(ETH_P_IP) &&
eth_type != htons(ETH_P_IPV6)) ||
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 5904e93e5765..fd8a01ca7a2d 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -585,7 +585,8 @@ static struct sw_flow *masked_flow_lookup(struct table_instance *ti,
head = find_bucket(ti, hash);
(*n_mask_hit)++;
- hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver]) {
+ hlist_for_each_entry_rcu(flow, head, flow_table.node[ti->node_ver],
+ lockdep_ovsl_is_held()) {
if (flow->mask == mask && flow->flow_table.hash == hash &&
flow_cmp_masked_key(flow, &masked_key, &mask->range))
return flow;
@@ -769,7 +770,8 @@ struct sw_flow *ovs_flow_tbl_lookup_ufid(struct flow_table *tbl,
hash = ufid_hash(ufid);
head = find_bucket(ti, hash);
- hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver]) {
+ hlist_for_each_entry_rcu(flow, head, ufid_table.node[ti->node_ver],
+ lockdep_ovsl_is_held()) {
if (flow->ufid_table.hash == hash &&
ovs_flow_cmp_ufid(flow, ufid))
return flow;
diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
index 3323b79ff548..5010d1ddd4bd 100644
--- a/net/openvswitch/meter.c
+++ b/net/openvswitch/meter.c
@@ -61,7 +61,8 @@ static struct dp_meter *lookup_meter(const struct datapath *dp,
struct hlist_head *head;
head = meter_hash_bucket(dp, meter_id);
- hlist_for_each_entry_rcu(meter, head, dp_hash_node) {
+ hlist_for_each_entry_rcu(meter, head, dp_hash_node,
+ lockdep_ovsl_is_held()) {
if (meter->id == meter_id)
return meter;
}
diff --git a/net/openvswitch/vport.c b/net/openvswitch/vport.c
index 5da9392b03d6..47febb4504f0 100644
--- a/net/openvswitch/vport.c
+++ b/net/openvswitch/vport.c
@@ -96,7 +96,8 @@ struct vport *ovs_vport_locate(const struct net *net, const char *name)
struct hlist_head *bucket = hash_bucket(net, name);
struct vport *vport;
- hlist_for_each_entry_rcu(vport, bucket, hash_node)
+ hlist_for_each_entry_rcu(vport, bucket, hash_node,
+ lockdep_ovsl_is_held())
if (!strcmp(name, ovs_vport_name(vport)) &&
net_eq(ovs_dp_get_net(vport->dp), net))
return vport;
diff --git a/net/rds/rdma.c b/net/rds/rdma.c
index 3341eee87bf9..585e6b3b69ce 100644
--- a/net/rds/rdma.c
+++ b/net/rds/rdma.c
@@ -162,10 +162,9 @@ static int rds_pin_pages(unsigned long user_addr, unsigned int nr_pages,
if (write)
gup_flags |= FOLL_WRITE;
- ret = get_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
+ ret = pin_user_pages_fast(user_addr, nr_pages, gup_flags, pages);
if (ret >= 0 && ret < nr_pages) {
- while (ret--)
- put_page(pages[ret]);
+ unpin_user_pages(pages, ret);
ret = -EFAULT;
}
@@ -300,8 +299,7 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
* to release anything.
*/
if (!need_odp) {
- for (i = 0 ; i < nents; i++)
- put_page(sg_page(&sg[i]));
+ unpin_user_pages(pages, nr_pages);
kfree(sg);
}
ret = PTR_ERR(trans_private);
@@ -325,7 +323,12 @@ static int __rds_rdma_map(struct rds_sock *rs, struct rds_get_mr_args *args,
if (cookie_ret)
*cookie_ret = cookie;
- if (args->cookie_addr && put_user(cookie, (u64 __user *)(unsigned long) args->cookie_addr)) {
+ if (args->cookie_addr &&
+ put_user(cookie, (u64 __user *)(unsigned long)args->cookie_addr)) {
+ if (!need_odp) {
+ unpin_user_pages(pages, nr_pages);
+ kfree(sg);
+ }
ret = -EFAULT;
goto out;
}
@@ -496,9 +499,7 @@ void rds_rdma_free_op(struct rm_rdma_op *ro)
* is the case for a RDMA_READ which copies from remote
* to local memory
*/
- if (!ro->op_write)
- set_page_dirty(page);
- put_page(page);
+ unpin_user_pages_dirty_lock(&page, 1, !ro->op_write);
}
}
@@ -515,8 +516,7 @@ void rds_atomic_free_op(struct rm_atomic_op *ao)
/* Mark page dirty if it was possibly modified, which
* is the case for a RDMA_READ which copies from remote
* to local memory */
- set_page_dirty(page);
- put_page(page);
+ unpin_user_pages_dirty_lock(&page, 1, true);
kfree(ao->op_notifier);
ao->op_notifier = NULL;
@@ -944,7 +944,7 @@ int rds_cmsg_atomic(struct rds_sock *rs, struct rds_message *rm,
return ret;
err:
if (page)
- put_page(page);
+ unpin_user_page(page);
rm->atomic.op_active = 0;
kfree(rm->atomic.op_notifier);
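
The rds conversion is part of the tree-wide FOLL_PIN work: pages obtained with pin_user_pages*() must be dropped with the unpin_user_page*() family instead of put_page(), and the set_page_dirty() + put_page() pair collapses into unpin_user_pages_dirty_lock(). The paired calls, as a kernel-context sketch (error handling trimmed):

/* make_dirty is true when the device may have written to the pages,
 * e.g. an RDMA READ that lands in local memory.
 */
static int do_pinned_io(unsigned long uaddr, int nr_pages, bool write)
{
	struct page **pages;
	int got;

	pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	got = pin_user_pages_fast(uaddr, nr_pages,
				  write ? FOLL_WRITE : 0, pages);
	if (got < 0) {
		kfree(pages);
		return got;
	}

	/* ... hand pages to the device, wait for completion ... */

	unpin_user_pages_dirty_lock(pages, got, write);
	kfree(pages);
	return 0;
}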
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index 90a31b15585f..8c466a712cda 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -186,6 +186,7 @@ static size_t tcf_action_shared_attrs_size(const struct tc_action *act)
+ nla_total_size(IFNAMSIZ) /* TCA_ACT_KIND */
+ cookie_len /* TCA_ACT_COOKIE */
+ nla_total_size(0) /* TCA_ACT_STATS nested */
+ + nla_total_size(sizeof(struct nla_bitfield32)) /* TCA_ACT_FLAGS */
/* TCA_STATS_BASIC */
+ nla_total_size_64bit(sizeof(struct gnet_stats_basic))
/* TCA_STATS_PKT64 */
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index f9c0d1e8d380..d32d4233d337 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -305,6 +305,7 @@ static int fl_classify(struct sk_buff *skb, const struct tcf_proto *tp,
struct cls_fl_filter *f;
list_for_each_entry_rcu(mask, &head->masks, list) {
+ flow_dissector_init_keys(&skb_key.control, &skb_key.basic);
fl_clear_masked_range(&skb_key, mask);
skb_flow_dissect_meta(skb, &mask->dissector, &skb_key);
@@ -691,6 +692,7 @@ static const struct nla_policy fl_policy[TCA_FLOWER_MAX + 1] = {
.len = 128 / BITS_PER_BYTE },
[TCA_FLOWER_KEY_CT_LABELS_MASK] = { .type = NLA_BINARY,
.len = 128 / BITS_PER_BYTE },
+ [TCA_FLOWER_FLAGS] = { .type = NLA_U32 },
};
static const struct nla_policy
diff --git a/net/sched/cls_matchall.c b/net/sched/cls_matchall.c
index 039cc86974f4..610a0b728161 100644
--- a/net/sched/cls_matchall.c
+++ b/net/sched/cls_matchall.c
@@ -157,6 +157,7 @@ static void *mall_get(struct tcf_proto *tp, u32 handle)
static const struct nla_policy mall_policy[TCA_MATCHALL_MAX + 1] = {
[TCA_MATCHALL_UNSPEC] = { .type = NLA_UNSPEC },
[TCA_MATCHALL_CLASSID] = { .type = NLA_U32 },
+ [TCA_MATCHALL_FLAGS] = { .type = NLA_U32 },
};
static int mall_set_parms(struct net *net, struct tcf_proto *tp,
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 748e3b19ec1d..6a16af4b1ef6 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -170,6 +170,16 @@ static inline bool sctp_chunk_length_valid(struct sctp_chunk *chunk,
return true;
}
+/* Check for format error in an ABORT chunk */
+static inline bool sctp_err_chunk_valid(struct sctp_chunk *chunk)
+{
+ struct sctp_errhdr *err;
+
+ sctp_walk_errors(err, chunk->chunk_hdr);
+
+ return (void *)err == (void *)chunk->chunk_end;
+}
+
/**********************************************************
* These are the state functions for handling chunk events.
**********************************************************/
@@ -2255,6 +2265,9 @@ enum sctp_disposition sctp_sf_shutdown_pending_abort(
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
@@ -2298,6 +2311,9 @@ enum sctp_disposition sctp_sf_shutdown_sent_abort(
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
/* Stop the T2-shutdown timer. */
sctp_add_cmd_sf(commands, SCTP_CMD_TIMER_STOP,
SCTP_TO(SCTP_EVENT_TIMEOUT_T2_SHUTDOWN));
@@ -2565,6 +2581,9 @@ enum sctp_disposition sctp_sf_do_9_1_abort(
sctp_bind_addr_state(&asoc->base.bind_addr, &chunk->dest))
return sctp_sf_discard_chunk(net, ep, asoc, type, arg, commands);
+ if (!sctp_err_chunk_valid(chunk))
+ return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
+
return __sctp_sf_do_9_1_abort(net, ep, asoc, type, arg, commands);
}
@@ -2582,16 +2601,8 @@ static enum sctp_disposition __sctp_sf_do_9_1_abort(
/* See if we have an error cause code in the chunk. */
len = ntohs(chunk->chunk_hdr->length);
- if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr)) {
- struct sctp_errhdr *err;
-
- sctp_walk_errors(err, chunk->chunk_hdr);
- if ((void *)err != (void *)chunk->chunk_end)
- return sctp_sf_pdiscard(net, ep, asoc, type, arg,
- commands);
-
+ if (len >= sizeof(struct sctp_chunkhdr) + sizeof(struct sctp_errhdr))
error = ((struct sctp_errhdr *)chunk->skb->data)->cause;
- }
sctp_add_cmd_sf(commands, SCTP_CMD_SET_SK_ERR, SCTP_ERROR(ECONNRESET));
/* ASSOC_FAILED will DELETE_TCB. */
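
sctp_err_chunk_valid() leans on a property of sctp_walk_errors(): the walk stops at the first descriptor that would run past the chunk, so the cursor lands exactly on chunk_end only when every error cause was well formed. The same check in standalone TLV form (hypothetical types; lengths kept in host byte order for brevity):

#include <stddef.h>
#include <stdint.h>

struct tlv {
	uint16_t type;
	uint16_t length;	/* includes this 4-byte header */
};

/* Returns 1 iff buf is an exact sequence of well-formed TLVs. */
static int tlvs_valid(const uint8_t *buf, size_t len)
{
	const uint8_t *end = buf + len;

	while (buf + sizeof(struct tlv) <= end) {
		const struct tlv *t = (const struct tlv *)buf;
		size_t l = (t->length + 3u) & ~3u; /* pad to 4, like SCTP */

		if (t->length < sizeof(struct tlv) ||
		    l > (size_t)(end - buf))
			break;
		buf += l;
	}
	return buf == end; /* mirrors: (void *)err == chunk->chunk_end */
}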
diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c
index cee5bf4a9bb9..6fd44bdb0fc3 100644
--- a/net/smc/af_smc.c
+++ b/net/smc/af_smc.c
@@ -470,6 +470,8 @@ static void smc_switch_to_fallback(struct smc_sock *smc)
if (smc->sk.sk_socket && smc->sk.sk_socket->file) {
smc->clcsock->file = smc->sk.sk_socket->file;
smc->clcsock->file->private_data = smc->clcsock;
+ smc->clcsock->wq.fasync_list =
+ smc->sk.sk_socket->wq.fasync_list;
}
}
@@ -510,15 +512,18 @@ static int smc_connect_decline_fallback(struct smc_sock *smc, int reason_code)
static int smc_connect_abort(struct smc_sock *smc, int reason_code,
int local_contact)
{
+ bool is_smcd = smc->conn.lgr->is_smcd;
+
if (local_contact == SMC_FIRST_CONTACT)
- smc_lgr_forget(smc->conn.lgr);
- if (smc->conn.lgr->is_smcd)
+ smc_lgr_cleanup_early(&smc->conn);
+ else
+ smc_conn_free(&smc->conn);
+ if (is_smcd)
/* there is only one lgr role for SMC-D; use server lock */
mutex_unlock(&smc_server_lgr_pending);
else
mutex_unlock(&smc_client_lgr_pending);
- smc_conn_free(&smc->conn);
smc->connect_nonblock = 0;
return reason_code;
}
@@ -1089,7 +1094,6 @@ static void smc_listen_out_err(struct smc_sock *new_smc)
if (newsmcsk->sk_state == SMC_INIT)
sock_put(&new_smc->sk); /* passive closing */
newsmcsk->sk_state = SMC_CLOSED;
- smc_conn_free(&new_smc->conn);
smc_listen_out(new_smc);
}
@@ -1100,12 +1104,13 @@ static void smc_listen_decline(struct smc_sock *new_smc, int reason_code,
{
/* RDMA setup failed, switch back to TCP */
if (local_contact == SMC_FIRST_CONTACT)
- smc_lgr_forget(new_smc->conn.lgr);
+ smc_lgr_cleanup_early(&new_smc->conn);
+ else
+ smc_conn_free(&new_smc->conn);
if (reason_code < 0) { /* error, no fallback possible */
smc_listen_out_err(new_smc);
return;
}
- smc_conn_free(&new_smc->conn);
smc_switch_to_fallback(new_smc);
new_smc->fallback_rsn = reason_code;
if (reason_code && reason_code != SMC_CLC_DECL_PEERDECL) {
@@ -1168,16 +1173,18 @@ static int smc_listen_ism_init(struct smc_sock *new_smc,
new_smc->conn.lgr->vlan_id,
new_smc->conn.lgr->smcd)) {
if (ini->cln_first_contact == SMC_FIRST_CONTACT)
- smc_lgr_forget(new_smc->conn.lgr);
- smc_conn_free(&new_smc->conn);
+ smc_lgr_cleanup_early(&new_smc->conn);
+ else
+ smc_conn_free(&new_smc->conn);
return SMC_CLC_DECL_SMCDNOTALK;
}
/* Create send and receive buffers */
if (smc_buf_create(new_smc, true)) {
if (ini->cln_first_contact == SMC_FIRST_CONTACT)
- smc_lgr_forget(new_smc->conn.lgr);
- smc_conn_free(&new_smc->conn);
+ smc_lgr_cleanup_early(&new_smc->conn);
+ else
+ smc_conn_free(&new_smc->conn);
return SMC_CLC_DECL_MEM;
}
diff --git a/net/smc/smc_clc.c b/net/smc/smc_clc.c
index 0879f7bed967..86cccc24e52e 100644
--- a/net/smc/smc_clc.c
+++ b/net/smc/smc_clc.c
@@ -372,7 +372,9 @@ int smc_clc_send_decline(struct smc_sock *smc, u32 peer_diag_info)
dclc.hdr.length = htons(sizeof(struct smc_clc_msg_decline));
dclc.hdr.version = SMC_CLC_V1;
dclc.hdr.flag = (peer_diag_info == SMC_CLC_DECL_SYNCERR) ? 1 : 0;
- memcpy(dclc.id_for_peer, local_systemid, sizeof(local_systemid));
+ if (smc->conn.lgr && !smc->conn.lgr->is_smcd)
+ memcpy(dclc.id_for_peer, local_systemid,
+ sizeof(local_systemid));
dclc.peer_diagnosis = htonl(peer_diag_info);
memcpy(dclc.trl.eyecatcher, SMC_EYECATCHER, sizeof(SMC_EYECATCHER));
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 2249de5379ee..5b085efa3bce 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -162,6 +162,18 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
conn->lgr = NULL;
}
+void smc_lgr_cleanup_early(struct smc_connection *conn)
+{
+ struct smc_link_group *lgr = conn->lgr;
+
+ if (!lgr)
+ return;
+
+ smc_conn_free(conn);
+ smc_lgr_forget(lgr);
+ smc_lgr_schedule_free_work_fast(lgr);
+}
+
/* Send delete link, either as client to request the initiation
* of the DELETE LINK sequence from server; or as server to
* initiate the delete processing. See smc_llc_rx_delete_link().
diff --git a/net/smc/smc_core.h b/net/smc/smc_core.h
index c472e12951d1..234ae25f0025 100644
--- a/net/smc/smc_core.h
+++ b/net/smc/smc_core.h
@@ -296,6 +296,7 @@ struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
void smc_lgr_forget(struct smc_link_group *lgr);
+void smc_lgr_cleanup_early(struct smc_connection *conn);
void smc_lgr_terminate(struct smc_link_group *lgr, bool soft);
void smc_port_terminate(struct smc_ib_device *smcibdev, u8 ibport);
void smc_smcd_terminate(struct smcd_dev *dev, u64 peer_gid,
@@ -316,7 +317,6 @@ int smc_vlan_by_tcpsk(struct socket *clcsock, struct smc_init_info *ini);
void smc_conn_free(struct smc_connection *conn);
int smc_conn_create(struct smc_sock *smc, struct smc_init_info *ini);
-void smcd_conn_free(struct smc_connection *conn);
void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
int smc_core_init(void);
void smc_core_exit(void);
diff --git a/net/smc/smc_diag.c b/net/smc/smc_diag.c
index f38727ecf8b2..e1f64f4ba236 100644
--- a/net/smc/smc_diag.c
+++ b/net/smc/smc_diag.c
@@ -39,16 +39,15 @@ static void smc_diag_msg_common_fill(struct smc_diag_msg *r, struct sock *sk)
{
struct smc_sock *smc = smc_sk(sk);
+ memset(r, 0, sizeof(*r));
r->diag_family = sk->sk_family;
+ sock_diag_save_cookie(sk, r->id.idiag_cookie);
if (!smc->clcsock)
return;
r->id.idiag_sport = htons(smc->clcsock->sk->sk_num);
r->id.idiag_dport = smc->clcsock->sk->sk_dport;
r->id.idiag_if = smc->clcsock->sk->sk_bound_dev_if;
- sock_diag_save_cookie(sk, r->id.idiag_cookie);
if (sk->sk_protocol == SMCPROTO_SMC) {
- memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src));
- memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst));
r->id.idiag_src[0] = smc->clcsock->sk->sk_rcv_saddr;
r->id.idiag_dst[0] = smc->clcsock->sk->sk_daddr;
#if IS_ENABLED(CONFIG_IPV6)
diff --git a/net/smc/smc_ib.c b/net/smc/smc_ib.c
index 548632621f4b..d6ba186f67e2 100644
--- a/net/smc/smc_ib.c
+++ b/net/smc/smc_ib.c
@@ -573,6 +573,8 @@ static void smc_ib_remove_dev(struct ib_device *ibdev, void *client_data)
struct smc_ib_device *smcibdev;
smcibdev = ib_get_client_data(ibdev, &smc_ib_client);
+ if (!smcibdev || smcibdev->ibdev != ibdev)
+ return;
ib_set_client_data(ibdev, &smc_ib_client, NULL);
spin_lock(&smc_ib_devices.lock);
list_del_init(&smcibdev->list); /* remove from smc_ib_devices */
diff --git a/net/sunrpc/xprtrdma/frwr_ops.c b/net/sunrpc/xprtrdma/frwr_ops.c
index 095be887753e..125297c9aa3e 100644
--- a/net/sunrpc/xprtrdma/frwr_ops.c
+++ b/net/sunrpc/xprtrdma/frwr_ops.c
@@ -288,8 +288,8 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
{
struct rpcrdma_ia *ia = &r_xprt->rx_ia;
struct ib_reg_wr *reg_wr;
+ int i, n, dma_nents;
struct ib_mr *ibmr;
- int i, n;
u8 key;
if (nsegs > ia->ri_max_frwr_depth)
@@ -313,15 +313,16 @@ struct rpcrdma_mr_seg *frwr_map(struct rpcrdma_xprt *r_xprt,
break;
}
mr->mr_dir = rpcrdma_data_dir(writing);
+ mr->mr_nents = i;
- mr->mr_nents =
- ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, i, mr->mr_dir);
- if (!mr->mr_nents)
+ dma_nents = ib_dma_map_sg(ia->ri_id->device, mr->mr_sg, mr->mr_nents,
+ mr->mr_dir);
+ if (!dma_nents)
goto out_dmamap_err;
ibmr = mr->frwr.fr_mr;
- n = ib_map_mr_sg(ibmr, mr->mr_sg, mr->mr_nents, NULL, PAGE_SIZE);
- if (unlikely(n != mr->mr_nents))
+ n = ib_map_mr_sg(ibmr, mr->mr_sg, dma_nents, NULL, PAGE_SIZE);
+ if (n != dma_nents)
goto out_mapmr_err;
ibmr->iova &= 0x00000000ffffffff;
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 99b28b69fc17..0c88778c88b5 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -278,7 +278,7 @@ struct tipc_crypto *tipc_node_crypto_rx_by_list(struct list_head *pos)
}
#endif
-void tipc_node_free(struct rcu_head *rp)
+static void tipc_node_free(struct rcu_head *rp)
{
struct tipc_node *n = container_of(rp, struct tipc_node, rcu);
@@ -2798,7 +2798,7 @@ static int tipc_nl_retrieve_nodeid(struct nlattr **attrs, u8 **node_id)
return 0;
}
-int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
+static int __tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
{
struct nlattr *attrs[TIPC_NLA_NODE_MAX + 1];
struct net *net = sock_net(skb->sk);
@@ -2875,7 +2875,8 @@ int tipc_nl_node_set_key(struct sk_buff *skb, struct genl_info *info)
return err;
}
-int __tipc_nl_node_flush_key(struct sk_buff *skb, struct genl_info *info)
+static int __tipc_nl_node_flush_key(struct sk_buff *skb,
+ struct genl_info *info)
{
struct net *net = sock_net(skb->sk);
struct tipc_net *tn = tipc_net(net);
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index f9b4fb92c0b1..693e8902161e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -2441,6 +2441,8 @@ static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
return -ETIMEDOUT;
if (signal_pending(current))
return sock_intr_errno(*timeo_p);
+ if (sk->sk_state == TIPC_DISCONNECTING)
+ break;
add_wait_queue(sk_sleep(sk), &wait);
done = sk_wait_event(sk, timeo_p, tipc_sk_connected(sk),
diff --git a/net/tls/tls_device.c b/net/tls/tls_device.c
index 1ba5a92832bb..1c5574e2e058 100644
--- a/net/tls/tls_device.c
+++ b/net/tls/tls_device.c
@@ -593,7 +593,7 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn)
{
u64 record_sn = context->hint_record_sn;
- struct tls_record_info *info;
+ struct tls_record_info *info, *last;
info = context->retransmit_hint;
if (!info ||
@@ -605,6 +605,24 @@ struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
struct tls_record_info, list);
if (!info)
return NULL;
+ /* Send the start_marker record if the seq number is before the
+ * TLS offload start marker sequence number. This record is
+ * required to handle TCP packets sent before TLS offload
+ * started.
+ * If it's not the start marker, check whether this seq number
+ * belongs to the list.
+ */
+ if (likely(!tls_record_is_start_marker(info))) {
+ /* we have the first record, get the last record to see
+ * if this seq number belongs to the list.
+ */
+ last = list_last_entry(&context->records_list,
+ struct tls_record_info, list);
+
+ if (!between(seq, tls_record_start_seq(info),
+ last->end_seq))
+ return NULL;
+ }
record_sn = context->unacked_record_sn;
}
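
between() is the sequence-space helper from include/net/tcp.h; it works on the wrapping 32-bit circle, which is what makes the new guard safe across sequence-number wraparound:

/* True iff seq1 lies within [seq2, seq3] on the wrapping u32 circle
 * (as defined in include/net/tcp.h).
 */
static inline bool between(u32 seq1, u32 seq2, u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}

So the record list is only searched when seq falls between the first record's start and the last record's end_seq, instead of walking the whole retransmit list for stray sequence numbers.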
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 62c12cb5763e..68debcb28fa4 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -682,6 +682,7 @@ static int unix_set_peek_off(struct sock *sk, int val)
return 0;
}
+#ifdef CONFIG_PROC_FS
static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
{
struct sock *sk = sock->sk;
@@ -692,6 +693,9 @@ static void unix_show_fdinfo(struct seq_file *m, struct socket *sock)
seq_printf(m, "scm_fds: %u\n", READ_ONCE(u->scm_stat.nr_fds));
}
}
+#else
+#define unix_show_fdinfo NULL
+#endif
static const struct proto_ops unix_stream_ops = {
.family = PF_UNIX,
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 9c5b2a91baad..a5f28708e0e7 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -451,6 +451,12 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
if (vsk->transport == new_transport)
return 0;
+ /* transport->release() must be called with sock lock acquired.
+ * This path can only be taken during vsock_stream_connect(),
+ * where we already hold the sock lock.
+ * In the other cases, this function is called on a new socket
+ * which is not assigned to any transport.
+ */
vsk->transport->release(vsk);
vsock_deassign_transport(vsk);
}
@@ -753,20 +759,18 @@ static void __vsock_release(struct sock *sk, int level)
vsk = vsock_sk(sk);
pending = NULL; /* Compiler warning. */
- /* The release call is supposed to use lock_sock_nested()
- * rather than lock_sock(), if a sock lock should be acquired.
- */
- if (vsk->transport)
- vsk->transport->release(vsk);
- else if (sk->sk_type == SOCK_STREAM)
- vsock_remove_sock(vsk);
-
/* When "level" is SINGLE_DEPTH_NESTING, use the nested
* version to avoid the warning "possible recursive locking
* detected". When "level" is 0, lock_sock_nested(sk, level)
* is the same as lock_sock(sk).
*/
lock_sock_nested(sk, level);
+
+ if (vsk->transport)
+ vsk->transport->release(vsk);
+ else if (sk->sk_type == SOCK_STREAM)
+ vsock_remove_sock(vsk);
+
sock_orphan(sk);
sk->sk_shutdown = SHUTDOWN_MASK;
diff --git a/net/vmw_vsock/hyperv_transport.c b/net/vmw_vsock/hyperv_transport.c
index 3492c021925f..630b851f8150 100644
--- a/net/vmw_vsock/hyperv_transport.c
+++ b/net/vmw_vsock/hyperv_transport.c
@@ -526,12 +526,9 @@ static bool hvs_close_lock_held(struct vsock_sock *vsk)
static void hvs_release(struct vsock_sock *vsk)
{
- struct sock *sk = sk_vsock(vsk);
bool remove_sock;
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
remove_sock = hvs_close_lock_held(vsk);
- release_sock(sk);
if (remove_sock)
vsock_remove_sock(vsk);
}
diff --git a/net/vmw_vsock/virtio_transport_common.c b/net/vmw_vsock/virtio_transport_common.c
index d9f0c9c5425a..f3c4bab2f737 100644
--- a/net/vmw_vsock/virtio_transport_common.c
+++ b/net/vmw_vsock/virtio_transport_common.c
@@ -829,7 +829,6 @@ void virtio_transport_release(struct vsock_sock *vsk)
struct sock *sk = &vsk->sk;
bool remove_sock = true;
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
if (sk->sk_type == SOCK_STREAM)
remove_sock = virtio_transport_close(vsk);
@@ -837,7 +836,6 @@ void virtio_transport_release(struct vsock_sock *vsk)
list_del(&pkt->list);
virtio_transport_free_pkt(pkt);
}
- release_sock(sk);
if (remove_sock)
vsock_remove_sock(vsk);
diff --git a/net/wireless/ethtool.c b/net/wireless/ethtool.c
index a9c0f368db5d..24e18405cdb4 100644
--- a/net/wireless/ethtool.c
+++ b/net/wireless/ethtool.c
@@ -7,9 +7,13 @@
void cfg80211_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
struct wireless_dev *wdev = dev->ieee80211_ptr;
+ struct device *pdev = wiphy_dev(wdev->wiphy);
- strlcpy(info->driver, wiphy_dev(wdev->wiphy)->driver->name,
- sizeof(info->driver));
+ if (pdev->driver)
+ strlcpy(info->driver, pdev->driver->name,
+ sizeof(info->driver));
+ else
+ strlcpy(info->driver, "N/A", sizeof(info->driver));
strlcpy(info->version, init_utsname()->release, sizeof(info->version));
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index 123b8d720a59..5b19e9fac4aa 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -20,6 +20,7 @@
#include <linux/netlink.h>
#include <linux/nospec.h>
#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
#include <net/net_namespace.h>
#include <net/genetlink.h>
#include <net/cfg80211.h>
@@ -437,6 +438,7 @@ const struct nla_policy nl80211_policy[NUM_NL80211_ATTR] = {
[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG },
[NL80211_ATTR_CONTROL_PORT_OVER_NL80211] = { .type = NLA_FLAG },
[NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG },
+ [NL80211_ATTR_STATUS_CODE] = { .type = NLA_U16 },
[NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 },
[NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 },
[NL80211_ATTR_PID] = { .type = NLA_U32 },
@@ -4799,8 +4801,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
err = nl80211_parse_he_obss_pd(
info->attrs[NL80211_ATTR_HE_OBSS_PD],
&params.he_obss_pd);
- if (err)
- return err;
+ if (err)
+ goto out;
}
nl80211_calculate_ap_params(&params);
@@ -4822,6 +4823,7 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
}
wdev_unlock(wdev);
+out:
kfree(params.acl);
return err;
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index fff9a74891fc..1a8218f1bbe0 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -2276,7 +2276,7 @@ static void handle_channel_custom(struct wiphy *wiphy,
break;
}
- if (IS_ERR(reg_rule)) {
+ if (IS_ERR_OR_NULL(reg_rule)) {
pr_debug("Disabling freq %d MHz as custom regd has no rule that fits it\n",
chan->center_freq);
if (wiphy->regulatory_flags & REGULATORY_WIPHY_SELF_MANAGED) {
diff --git a/net/xdp/xsk.c b/net/xdp/xsk.c
index df600487a68d..356f90e4522b 100644
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -217,6 +217,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
static void xsk_flush(struct xdp_sock *xs)
{
xskq_prod_submit(xs->rx);
+ __xskq_cons_release(xs->umem->fq);
sock_def_readable(&xs->sk);
}
@@ -304,6 +305,7 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
rcu_read_lock();
list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
+ __xskq_cons_release(xs->tx);
xs->sk.sk_write_space(&xs->sk);
}
rcu_read_unlock();
diff --git a/net/xdp/xsk_queue.h b/net/xdp/xsk_queue.h
index bec2af11853a..89a01ac4e079 100644
--- a/net/xdp/xsk_queue.h
+++ b/net/xdp/xsk_queue.h
@@ -271,7 +271,8 @@ static inline void xskq_cons_release(struct xsk_queue *q)
{
/* To improve performance, only update local state here.
* Reflect this to global state when we get new entries
- * from the ring in xskq_cons_get_entries().
+ * from the ring in xskq_cons_get_entries() and whenever
+ * Rx or Tx processing is completed in the NAPI loop.
*/
q->cached_cons++;
}
diff --git a/net/xfrm/xfrm_interface.c b/net/xfrm/xfrm_interface.c
index dc651a628dcf..3361e3ac5714 100644
--- a/net/xfrm/xfrm_interface.c
+++ b/net/xfrm/xfrm_interface.c
@@ -300,10 +300,10 @@ xfrmi_xmit2(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
- icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+ icmpv6_ndo_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
} else {
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
- htonl(mtu));
+ icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+ htonl(mtu));
}
dst_release(dst);
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index bae62549e3d2..752ff0a225a9 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -300,15 +300,15 @@ DT_BINDING_DIR := Documentation/devicetree/bindings
DT_TMP_SCHEMA := $(objtree)/$(DT_BINDING_DIR)/processed-schema.yaml
quiet_cmd_dtb_check = CHECK $@
- cmd_dtb_check = $(DT_CHECKER) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@ ;
+ cmd_dtb_check = $(DT_CHECKER) -u $(srctree)/$(DT_BINDING_DIR) -p $(DT_TMP_SCHEMA) $@
-define rule_dtc_dt_yaml
+define rule_dtc
$(call cmd_and_fixdep,dtc,yaml)
$(call cmd,dtb_check)
endef
$(obj)/%.dt.yaml: $(src)/%.dts $(DTC) $(DT_TMP_SCHEMA) FORCE
- $(call if_changed_rule,dtc_dt_yaml)
+ $(call if_changed_rule,dtc)
dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp)
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 34085d146fa2..6cbcd1a3e113 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -932,10 +932,6 @@ sub get_maintainers {
}
}
- foreach my $fix (@fixes) {
- vcs_add_commit_signers($fix, "blamed_fixes");
- }
-
foreach my $email (@email_to, @list_to) {
$email->[0] = deduplicate_email($email->[0]);
}
@@ -974,6 +970,10 @@ sub get_maintainers {
}
}
+ foreach my $fix (@fixes) {
+ vcs_add_commit_signers($fix, "blamed_fixes");
+ }
+
my @to = ();
if ($email || $email_list) {
if ($email) {
@@ -1341,35 +1341,11 @@ sub add_categories {
}
}
} elsif ($ptype eq "M") {
- my ($name, $address) = parse_email($pvalue);
- if ($name eq "") {
- if ($i > 0) {
- my $tv = $typevalue[$i - 1];
- if ($tv =~ m/^([A-Z]):\s*(.*)/) {
- if ($1 eq "P") {
- $name = $2;
- $pvalue = format_email($name, $address, $email_usename);
- }
- }
- }
- }
if ($email_maintainer) {
my $role = get_maintainer_role($i);
push_email_addresses($pvalue, $role);
}
} elsif ($ptype eq "R") {
- my ($name, $address) = parse_email($pvalue);
- if ($name eq "") {
- if ($i > 0) {
- my $tv = $typevalue[$i - 1];
- if ($tv =~ m/^([A-Z]):\s*(.*)/) {
- if ($1 eq "P") {
- $name = $2;
- $pvalue = format_email($name, $address, $email_usename);
- }
- }
- }
- }
if ($email_reviewer) {
my $subsystem = get_subsystem_name($i);
push_email_addresses($pvalue, "reviewer:$subsystem");
diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c
index a566d8201b56..0133dfaaf352 100644
--- a/scripts/kallsyms.c
+++ b/scripts/kallsyms.c
@@ -210,7 +210,7 @@ static struct sym_entry *read_symbol(FILE *in)
len = strlen(name) + 1;
- sym = malloc(sizeof(*sym) + len);
+ sym = malloc(sizeof(*sym) + len + 1);
if (!sym) {
fprintf(stderr, "kallsyms failure: "
"unable to allocate required amount of memory\n");
@@ -219,7 +219,7 @@ static struct sym_entry *read_symbol(FILE *in)
sym->addr = addr;
sym->len = len;
sym->sym[0] = type;
- memcpy(sym_name(sym), name, len);
+ strcpy(sym_name(sym), name);
sym->percpu_absolute = 0;
return sym;
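
The extra byte matches the layout of the flexible array member: sym[0] stores the symbol type and the name starts at sym[1], so the buffer must hold one type byte plus the NUL-terminated name. Roughly (struct abbreviated; field layout as in scripts/kallsyms.c of this series):

struct sym_entry {			/* abbreviated */
	unsigned long long addr;
	unsigned int len;
	unsigned int percpu_absolute;
	unsigned char sym[];	/* [0] = type, [1..] = name + NUL */
};

/* sym_name() points at sym->sym + 1, hence:
 *   malloc(sizeof(*sym) + len + 1)
 *          header         name  type byte
 * with len = strlen(name) + 1 covering the name and its NUL.
 */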
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
index 1919c311c149..dd484e92752e 100755
--- a/scripts/link-vmlinux.sh
+++ b/scripts/link-vmlinux.sh
@@ -239,7 +239,7 @@ else
fi;
# final build of init/
-${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
+${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init need-builtin=1
#link vmlinux.o
info LD vmlinux.o
diff --git a/scripts/parse-maintainers.pl b/scripts/parse-maintainers.pl
index 255cef1b098d..255cef1b098d 100644..100755
--- a/scripts/parse-maintainers.pl
+++ b/scripts/parse-maintainers.pl
diff --git a/security/integrity/ima/Kconfig b/security/integrity/ima/Kconfig
index 711ff10fa36e..3f3ee4e2eb0d 100644
--- a/security/integrity/ima/Kconfig
+++ b/security/integrity/ima/Kconfig
@@ -112,6 +112,10 @@ choice
config IMA_DEFAULT_HASH_WP512
bool "WP512"
depends on CRYPTO_WP512=y && !IMA_TEMPLATE
+
+ config IMA_DEFAULT_HASH_SM3
+ bool "SM3"
+ depends on CRYPTO_SM3=y && !IMA_TEMPLATE
endchoice
config IMA_DEFAULT_HASH
@@ -121,6 +125,7 @@ config IMA_DEFAULT_HASH
default "sha256" if IMA_DEFAULT_HASH_SHA256
default "sha512" if IMA_DEFAULT_HASH_SHA512
default "wp512" if IMA_DEFAULT_HASH_WP512
+ default "sm3" if IMA_DEFAULT_HASH_SM3
config IMA_WRITE_POLICY
bool "Enable multiple writes to the IMA policy"
diff --git a/security/integrity/platform_certs/load_uefi.c b/security/integrity/platform_certs/load_uefi.c
index 111898aad56e..f0c908241966 100644
--- a/security/integrity/platform_certs/load_uefi.c
+++ b/security/integrity/platform_certs/load_uefi.c
@@ -35,16 +35,18 @@ static __init bool uefi_check_ignore_db(void)
* Get a certificate list blob from the named EFI variable.
*/
static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
- unsigned long *size)
+ unsigned long *size, efi_status_t *status)
{
- efi_status_t status;
unsigned long lsize = 4;
unsigned long tmpdb[4];
void *db;
- status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
- if (status != EFI_BUFFER_TOO_SMALL) {
- pr_err("Couldn't get size: 0x%lx\n", status);
+ *status = efi.get_variable(name, guid, NULL, &lsize, &tmpdb);
+ if (*status == EFI_NOT_FOUND)
+ return NULL;
+
+ if (*status != EFI_BUFFER_TOO_SMALL) {
+ pr_err("Couldn't get size: 0x%lx\n", *status);
return NULL;
}
@@ -52,10 +54,10 @@ static __init void *get_cert_list(efi_char16_t *name, efi_guid_t *guid,
if (!db)
return NULL;
- status = efi.get_variable(name, guid, NULL, &lsize, db);
- if (status != EFI_SUCCESS) {
+ *status = efi.get_variable(name, guid, NULL, &lsize, db);
+ if (*status != EFI_SUCCESS) {
kfree(db);
- pr_err("Error reading db var: 0x%lx\n", status);
+ pr_err("Error reading db var: 0x%lx\n", *status);
return NULL;
}
@@ -74,6 +76,7 @@ static int __init load_uefi_certs(void)
efi_guid_t mok_var = EFI_SHIM_LOCK_GUID;
void *db = NULL, *dbx = NULL, *mok = NULL;
unsigned long dbsize = 0, dbxsize = 0, moksize = 0;
+ efi_status_t status;
int rc = 0;
if (!efi.get_variable)
@@ -83,9 +86,12 @@ static int __init load_uefi_certs(void)
* an error if we can't get them.
*/
if (!uefi_check_ignore_db()) {
- db = get_cert_list(L"db", &secure_var, &dbsize);
+ db = get_cert_list(L"db", &secure_var, &dbsize, &status);
if (!db) {
- pr_err("MODSIGN: Couldn't get UEFI db list\n");
+ if (status == EFI_NOT_FOUND)
+ pr_debug("MODSIGN: db variable wasn't found\n");
+ else
+ pr_err("MODSIGN: Couldn't get UEFI db list\n");
} else {
rc = parse_efi_signature_list("UEFI:db",
db, dbsize, get_handler_for_db);
@@ -96,9 +102,12 @@ static int __init load_uefi_certs(void)
}
}
- mok = get_cert_list(L"MokListRT", &mok_var, &moksize);
+ mok = get_cert_list(L"MokListRT", &mok_var, &moksize, &status);
if (!mok) {
- pr_info("Couldn't get UEFI MokListRT\n");
+ if (status == EFI_NOT_FOUND)
+ pr_debug("MokListRT variable wasn't found\n");
+ else
+ pr_info("Couldn't get UEFI MokListRT\n");
} else {
rc = parse_efi_signature_list("UEFI:MokListRT",
mok, moksize, get_handler_for_db);
@@ -107,9 +116,12 @@ static int __init load_uefi_certs(void)
kfree(mok);
}
- dbx = get_cert_list(L"dbx", &secure_var, &dbxsize);
+ dbx = get_cert_list(L"dbx", &secure_var, &dbxsize, &status);
if (!dbx) {
- pr_info("Couldn't get UEFI dbx list\n");
+ if (status == EFI_NOT_FOUND)
+ pr_debug("dbx variable wasn't found\n");
+ else
+ pr_info("Couldn't get UEFI dbx list\n");
} else {
rc = parse_efi_signature_list("UEFI:dbx",
dbx, dbxsize,
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index 4b6991e178d3..1659b59fb5d7 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -698,7 +698,7 @@ static int selinux_set_mnt_opts(struct super_block *sb,
if (!strcmp(sb->s_type->name, "debugfs") ||
!strcmp(sb->s_type->name, "tracefs") ||
- !strcmp(sb->s_type->name, "binderfs") ||
+ !strcmp(sb->s_type->name, "binder") ||
!strcmp(sb->s_type->name, "pstore"))
sbsec->flags |= SE_SBGENFS;
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index a308ce1e6a13..f511ffccb131 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -518,19 +518,13 @@ void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
const char *str, u32 str_len)
{
struct sidtab_str_cache *cache, *victim = NULL;
+ unsigned long flags;
/* do not cache invalid contexts */
if (entry->context.len)
return;
- /*
- * Skip the put operation when in non-task context to avoid the need
- * to disable interrupts while holding s->cache_lock.
- */
- if (!in_task())
- return;
-
- spin_lock(&s->cache_lock);
+ spin_lock_irqsave(&s->cache_lock, flags);
cache = rcu_dereference_protected(entry->cache,
lockdep_is_held(&s->cache_lock));
@@ -561,7 +555,7 @@ void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
rcu_assign_pointer(entry->cache, cache);
out_unlock:
- spin_unlock(&s->cache_lock);
+ spin_unlock_irqrestore(&s->cache_lock, flags);
kfree_rcu(victim, rcu_member);
}
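
Dropping the in_task() bail-out means sidtab_sid2str_put() can now run in any context, so the plain spin_lock() had to become the irqsave variant: if an interrupt handler takes a lock that the interrupted task already holds, the CPU deadlocks. The standard shape, as a kernel-context sketch:

static DEFINE_SPINLOCK(cache_lock);

static void cache_update(void)
{
	unsigned long flags;

	/* Disabling local interrupts while holding the lock means an
	 * IRQ on this CPU can never spin on a lock owned by the task
	 * it just interrupted.
	 */
	spin_lock_irqsave(&cache_lock, flags);
	/* ... update the cache ... */
	spin_unlock_irqrestore(&cache_lock, flags);
}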
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 336406bcb59e..d5443eeb8b63 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2594,7 +2594,8 @@ void snd_pcm_release_substream(struct snd_pcm_substream *substream)
snd_pcm_drop(substream);
if (substream->hw_opened) {
- do_hw_free(substream);
+ if (substream->runtime->status->state != SNDRV_PCM_STATE_OPEN)
+ do_hw_free(substream);
substream->ops->close(substream);
substream->hw_opened = 0;
}
diff --git a/sound/core/seq/seq_clientmgr.c b/sound/core/seq/seq_clientmgr.c
index 6d9592f0ae1d..cc93157fa950 100644
--- a/sound/core/seq/seq_clientmgr.c
+++ b/sound/core/seq/seq_clientmgr.c
@@ -580,7 +580,7 @@ static int update_timestamp_of_queue(struct snd_seq_event *event,
event->queue = queue;
event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
if (real_time) {
- event->time.time = snd_seq_timer_get_cur_time(q->timer);
+ event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
} else {
event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
@@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_status(struct snd_seq_client *client,
tmr = queue->timer;
status->events = queue->tickq->cells + queue->timeq->cells;
- status->time = snd_seq_timer_get_cur_time(tmr);
+ status->time = snd_seq_timer_get_cur_time(tmr, true);
status->tick = snd_seq_timer_get_cur_tick(tmr);
status->running = tmr->running;
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c
index caf68bf42f13..71a6ea62c3be 100644
--- a/sound/core/seq/seq_queue.c
+++ b/sound/core/seq/seq_queue.c
@@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
{
unsigned long flags;
struct snd_seq_event_cell *cell;
+ snd_seq_tick_time_t cur_tick;
+ snd_seq_real_time_t cur_time;
if (q == NULL)
return;
@@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_queue *q, int atomic, int hop)
__again:
/* Process tick queue... */
+ cur_tick = snd_seq_timer_get_cur_tick(q->timer);
for (;;) {
- cell = snd_seq_prioq_cell_out(q->tickq,
- &q->timer->tick.cur_tick);
+ cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
if (!cell)
break;
snd_seq_dispatch_event(cell, atomic, hop);
}
/* Process time queue... */
+ cur_time = snd_seq_timer_get_cur_time(q->timer, false);
for (;;) {
- cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
+ cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
if (!cell)
break;
snd_seq_dispatch_event(cell, atomic, hop);
@@ -392,6 +395,7 @@ int snd_seq_queue_check_access(int queueid, int client)
int snd_seq_queue_set_owner(int queueid, int client, int locked)
{
struct snd_seq_queue *q = queueptr(queueid);
+ unsigned long flags;
if (q == NULL)
return -EINVAL;
@@ -401,8 +405,10 @@ int snd_seq_queue_set_owner(int queueid, int client, int locked)
return -EPERM;
}
+ spin_lock_irqsave(&q->owner_lock, flags);
q->locked = locked ? 1 : 0;
q->owner = client;
+ spin_unlock_irqrestore(&q->owner_lock, flags);
queue_access_unlock(q);
queuefree(q);
@@ -539,15 +545,17 @@ void snd_seq_queue_client_termination(int client)
unsigned long flags;
int i;
struct snd_seq_queue *q;
+ bool matched;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
if ((q = queueptr(i)) == NULL)
continue;
spin_lock_irqsave(&q->owner_lock, flags);
- if (q->owner == client)
+ matched = (q->owner == client);
+ if (matched)
q->klocked = 1;
spin_unlock_irqrestore(&q->owner_lock, flags);
- if (q->owner == client) {
+ if (matched) {
if (q->timer->running)
snd_seq_timer_stop(q->timer);
snd_seq_timer_reset(q->timer);
@@ -739,6 +747,8 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
int i, bpm;
struct snd_seq_queue *q;
struct snd_seq_timer *tmr;
+ bool locked;
+ int owner;
for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
if ((q = queueptr(i)) == NULL)
@@ -750,9 +760,14 @@ void snd_seq_info_queues_read(struct snd_info_entry *entry,
else
bpm = 0;
+ spin_lock_irq(&q->owner_lock);
+ locked = q->locked;
+ owner = q->owner;
+ spin_unlock_irq(&q->owner_lock);
+
snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
- snd_iprintf(buffer, "owned by client : %d\n", q->owner);
- snd_iprintf(buffer, "lock status : %s\n", q->locked ? "Locked" : "Free");
+ snd_iprintf(buffer, "owned by client : %d\n", owner);
+ snd_iprintf(buffer, "lock status : %s\n", locked ? "Locked" : "Free");
snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
snd_iprintf(buffer, "timer state : %s\n", tmr->running ? "Running" : "Stopped");
diff --git a/sound/core/seq/seq_timer.c b/sound/core/seq/seq_timer.c
index be59b59c9be4..1645e4142e30 100644
--- a/sound/core/seq/seq_timer.c
+++ b/sound/core/seq/seq_timer.c
@@ -428,14 +428,15 @@ int snd_seq_timer_continue(struct snd_seq_timer *tmr)
}
/* return current 'real' time. use timeofday() to get better granularity. */
-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
+snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
+ bool adjust_ktime)
{
snd_seq_real_time_t cur_time;
unsigned long flags;
spin_lock_irqsave(&tmr->lock, flags);
cur_time = tmr->cur_time;
- if (tmr->running) {
+ if (adjust_ktime && tmr->running) {
struct timespec64 tm;
ktime_get_ts64(&tm);
@@ -452,7 +453,13 @@ snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
high PPQ values) */
snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
{
- return tmr->tick.cur_tick;
+ snd_seq_tick_time_t cur_tick;
+ unsigned long flags;
+
+ spin_lock_irqsave(&tmr->lock, flags);
+ cur_tick = tmr->tick.cur_tick;
+ spin_unlock_irqrestore(&tmr->lock, flags);
+ return cur_tick;
}
diff --git a/sound/core/seq/seq_timer.h b/sound/core/seq/seq_timer.h
index 66c3e344eae3..4bec57df8158 100644
--- a/sound/core/seq/seq_timer.h
+++ b/sound/core/seq/seq_timer.h
@@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct snd_seq_timer *tmr, int tempo, int ppq);
int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
+snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
+ bool adjust_ktime);
snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
extern int seq_default_timer_class;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index a684f0520b4b..4d060d5b1db6 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -254,6 +254,7 @@ EXPORT_SYMBOL_GPL(snd_hdac_ext_bus_link_power_down_all);
int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
struct hdac_ext_link *link)
{
+ unsigned long codec_mask;
int ret = 0;
mutex_lock(&bus->lock);
@@ -280,9 +281,11 @@ int snd_hdac_ext_bus_link_get(struct hdac_bus *bus,
* HDA spec section 4.3 - Codec Discovery
*/
udelay(521);
- bus->codec_mask = snd_hdac_chip_readw(bus, STATESTS);
- dev_dbg(bus->dev, "codec_mask = 0x%lx\n", bus->codec_mask);
- snd_hdac_chip_writew(bus, STATESTS, bus->codec_mask);
+ codec_mask = snd_hdac_chip_readw(bus, STATESTS);
+ dev_dbg(bus->dev, "codec_mask = 0x%lx\n", codec_mask);
+ snd_hdac_chip_writew(bus, STATESTS, codec_mask);
+ if (!bus->codec_mask)
+ bus->codec_mask = codec_mask;
}
mutex_unlock(&bus->lock);
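The point of the local here is that STATESTS is re-read every time a link powers up, but the bus-level codec_mask discovered at the initial probe must not be clobbered by a later, possibly different, re-read; the status bits still get acknowledged, and the cache is filled only if it was empty. A plain-C sketch of that keep-first-value pattern (no real HDA register access, values invented):

    #include <stdio.h>

    struct demo_bus {
        unsigned long codec_mask;  /* mask discovered at first probe */
    };

    static unsigned long read_statests(void)
    {
        return 0x5;  /* pretend codecs 0 and 2 answered this time */
    }

    static void link_power_up(struct demo_bus *bus)
    {
        unsigned long codec_mask = read_statests();

        /* the status bits would be acknowledged here (write-one-to-clear
         * in real hardware); the cached mask is set only once */
        if (!bus->codec_mask)
            bus->codec_mask = codec_mask;
    }

    int main(void)
    {
        struct demo_bus bus = { 0 };

        link_power_up(&bus);
        printf("codec_mask = 0x%lx\n", bus.codec_mask);
        return 0;
    }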
diff --git a/sound/hda/hdmi_chmap.c b/sound/hda/hdmi_chmap.c
index 5fd6d575e123..aad5c4bf4d34 100644
--- a/sound/hda/hdmi_chmap.c
+++ b/sound/hda/hdmi_chmap.c
@@ -250,7 +250,7 @@ void snd_hdac_print_channel_allocation(int spk_alloc, char *buf, int buflen)
for (i = 0, j = 0; i < ARRAY_SIZE(cea_speaker_allocation_names); i++) {
if (spk_alloc & (1 << i))
- j += snprintf(buf + j, buflen - j, " %s",
+ j += scnprintf(buf + j, buflen - j, " %s",
cea_speaker_allocation_names[i]);
}
buf[j] = '\0'; /* necessary when j == 0 */
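This snprintf-to-scnprintf conversion recurs below in hda_codec.c, hda_eld.c, hda_sysfs.c, skl-debug.c and soc-pcm.c, and the reason is the same everywhere: snprintf() returns the length the output would have needed, so an accumulating offset can step past the buffer, after which the next call receives a bogus remaining size (huge, once it wraps through size_t), while scnprintf() returns what was actually stored. A self-contained userspace demonstration with a minimal stand-in for the kernel's scnprintf():

    #include <stdarg.h>
    #include <stdio.h>

    /* minimal stand-in for the kernel's scnprintf(): returns the number
     * of characters actually written, never more than size - 1 */
    static int scnprintf_demo(char *buf, size_t size, const char *fmt, ...)
    {
        va_list args;
        int i;

        if (size == 0)
            return 0;
        va_start(args, fmt);
        i = vsnprintf(buf, size, fmt, args);
        va_end(args);
        return i < (int)size ? i : (int)size - 1;
    }

    int main(void)
    {
        char buf[16];
        int j = 0, i;

        /* with snprintf() here, j would race past sizeof(buf) and the
         * size argument would go negative; with scnprintf() it cannot */
        for (i = 0; i < 8; i++)
            j += scnprintf_demo(buf + j, sizeof(buf) - j, " rate%d", i);
        buf[j] = '\0';  /* j is guaranteed to stay inside the buffer */
        printf("%s\n", buf);
        return 0;
    }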
diff --git a/sound/mips/sgio2audio.c b/sound/mips/sgio2audio.c
index 9f60a5037f8b..5bf1ea150f26 100644
--- a/sound/mips/sgio2audio.c
+++ b/sound/mips/sgio2audio.c
@@ -649,8 +649,6 @@ snd_sgio2audio_pcm_pointer(struct snd_pcm_substream *substream)
static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
.open = snd_sgio2audio_playback1_open,
.close = snd_sgio2audio_pcm_close,
- .hw_params = snd_sgio2audio_pcm_hw_params,
- .hw_free = snd_sgio2audio_pcm_hw_free,
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
@@ -659,8 +657,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback1_ops = {
static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
.open = snd_sgio2audio_playback2_open,
.close = snd_sgio2audio_pcm_close,
- .hw_params = snd_sgio2audio_pcm_hw_params,
- .hw_free = snd_sgio2audio_pcm_hw_free,
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
@@ -669,8 +665,6 @@ static const struct snd_pcm_ops snd_sgio2audio_playback2_ops = {
static const struct snd_pcm_ops snd_sgio2audio_capture_ops = {
.open = snd_sgio2audio_capture_open,
.close = snd_sgio2audio_pcm_close,
- .hw_params = snd_sgio2audio_pcm_hw_params,
- .hw_free = snd_sgio2audio_pcm_hw_free,
.prepare = snd_sgio2audio_pcm_prepare,
.trigger = snd_sgio2audio_pcm_trigger,
.pointer = snd_sgio2audio_pcm_pointer,
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 5dc42f932739..53e7732ef752 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4022,7 +4022,7 @@ void snd_print_pcm_bits(int pcm, char *buf, int buflen)
for (i = 0, j = 0; i < ARRAY_SIZE(bits); i++)
if (pcm & (AC_SUPPCM_BITS_8 << i))
- j += snprintf(buf + j, buflen - j, " %d", bits[i]);
+ j += scnprintf(buf + j, buflen - j, " %d", bits[i]);
buf[j] = '\0'; /* necessary when j == 0 */
}
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c
index bb46c89b7f63..136477ed46ae 100644
--- a/sound/pci/hda/hda_eld.c
+++ b/sound/pci/hda/hda_eld.c
@@ -360,7 +360,7 @@ static void hdmi_print_pcm_rates(int pcm, char *buf, int buflen)
for (i = 0, j = 0; i < ARRAY_SIZE(alsa_rates); i++)
if (pcm & (1 << i))
- j += snprintf(buf + j, buflen - j, " %d",
+ j += scnprintf(buf + j, buflen - j, " %d",
alsa_rates[i]);
buf[j] = '\0'; /* necessary when j == 0 */
diff --git a/sound/pci/hda/hda_sysfs.c b/sound/pci/hda/hda_sysfs.c
index 0607ed5d1959..eb8ec109d7ad 100644
--- a/sound/pci/hda/hda_sysfs.c
+++ b/sound/pci/hda/hda_sysfs.c
@@ -222,7 +222,7 @@ static ssize_t init_verbs_show(struct device *dev,
int i, len = 0;
mutex_lock(&codec->user_mutex);
snd_array_for_each(&codec->init_verbs, i, v) {
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"0x%02x 0x%03x 0x%04x\n",
v->nid, v->verb, v->param);
}
@@ -272,7 +272,7 @@ static ssize_t hints_show(struct device *dev,
int i, len = 0;
mutex_lock(&codec->user_mutex);
snd_array_for_each(&codec->hints, i, hint) {
- len += snprintf(buf + len, PAGE_SIZE - len,
+ len += scnprintf(buf + len, PAGE_SIZE - len,
"%s = %s\n", hint->key, hint->val);
}
mutex_unlock(&codec->user_mutex);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 4770fb3f51fb..0ac06ff1a17c 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2447,6 +2447,10 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
+ SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950),
+ SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950),
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
@@ -5701,8 +5705,11 @@ static void alc_fixup_headset_jack(struct hda_codec *codec,
break;
case HDA_FIXUP_ACT_INIT:
switch (codec->core.vendor_id) {
+ case 0x10ec0215:
case 0x10ec0225:
+ case 0x10ec0285:
case 0x10ec0295:
+ case 0x10ec0289:
case 0x10ec0299:
alc_write_coef_idx(codec, 0x48, 0xd011);
alc_update_coef_idx(codec, 0x49, 0x007f, 0x0045);
@@ -5914,7 +5921,8 @@ enum {
ALC289_FIXUP_DUAL_SPK,
ALC294_FIXUP_SPK2_TO_DAC1,
ALC294_FIXUP_ASUS_DUAL_SPK,
-
+ ALC285_FIXUP_THINKPAD_HEADSET_JACK,
+ ALC294_FIXUP_ASUS_HPE,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6678,6 +6686,8 @@ static const struct hda_fixup alc269_fixups[] = {
[ALC285_FIXUP_SPEAKER2_TO_DAC1] = {
.type = HDA_FIXUP_FUNC,
.v.func = alc285_fixup_speaker2_to_dac1,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_THINKPAD_ACPI
},
[ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER] = {
.type = HDA_FIXUP_PINS,
@@ -7034,7 +7044,23 @@ static const struct hda_fixup alc269_fixups[] = {
.chained = true,
.chain_id = ALC294_FIXUP_SPK2_TO_DAC1
},
-
+ [ALC285_FIXUP_THINKPAD_HEADSET_JACK] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_headset_jack,
+ .chained = true,
+ .chain_id = ALC285_FIXUP_SPEAKER2_TO_DAC1
+ },
+ [ALC294_FIXUP_ASUS_HPE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Set EAPD high */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x0f },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x7774 },
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC294_FIXUP_ASUS_HEADSET_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -7109,6 +7135,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1028, 0x0935, "Dell", ALC274_FIXUP_DELL_AIO_LINEOUT_VERB),
SND_PCI_QUIRK(0x1028, 0x097e, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
SND_PCI_QUIRK(0x1028, 0x097d, "Dell Precision", ALC289_FIXUP_DUAL_SPK),
+ SND_PCI_QUIRK(0x1028, 0x098d, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1028, 0x09bf, "Dell Precision", ALC233_FIXUP_ASUS_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -7198,6 +7226,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x17d1, "ASUS UX431FL", ALC294_FIXUP_ASUS_DUAL_SPK),
SND_PCI_QUIRK(0x1043, 0x18b1, "Asus MJ401TA", ALC256_FIXUP_ASUS_HEADSET_MIC),
+ SND_PCI_QUIRK(0x1043, 0x19ce, "ASUS B9450FA", ALC294_FIXUP_ASUS_HPE),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
SND_PCI_QUIRK(0x1043, 0x1a30, "ASUS X705UD", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1b13, "Asus U41SV", ALC269_FIXUP_INV_DMIC),
@@ -7268,8 +7297,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
SND_PCI_QUIRK(0x17aa, 0x224c, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x224d, "Thinkpad", ALC298_FIXUP_TPT470_DOCK),
SND_PCI_QUIRK(0x17aa, 0x225d, "Thinkpad T480", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
- SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
- SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_SPEAKER2_TO_DAC1),
+ SND_PCI_QUIRK(0x17aa, 0x2292, "Thinkpad X1 Yoga 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
+ SND_PCI_QUIRK(0x17aa, 0x2293, "Thinkpad X1 Carbon 7th", ALC285_FIXUP_THINKPAD_HEADSET_JACK),
SND_PCI_QUIRK(0x17aa, 0x30bb, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x30e2, "ThinkCentre AIO", ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY),
SND_PCI_QUIRK(0x17aa, 0x310c, "ThinkCentre Station", ALC294_FIXUP_LENOVO_MIC_LOCATION),
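For context on how these tables act: each SND_PCI_QUIRK() entry pairs a PCI subsystem vendor/device ID with a fixup index, the first matching entry selects the fixup, and entries whose fixup sets .chained/.chain_id (like ALC285_FIXUP_THINKPAD_HEADSET_JACK above) pull in a second fixup after their own. A hedged stand-in sketch of the lookup, not the real snd_pci_quirk_lookup():

    #include <stdio.h>

    struct demo_quirk {
        unsigned short subvendor, subdevice;
        const char *name;
        int fixup_id;
    };

    enum { FIXUP_NONE, FIXUP_CLEVO_P950 };

    static const struct demo_quirk table[] = {
        { 0x1462, 0x1228, "MSI-GP63", FIXUP_CLEVO_P950 },
        { 0x1462, 0x1293, "MSI-GP65", FIXUP_CLEVO_P950 },
        { 0, 0, NULL, FIXUP_NONE },  /* terminator */
    };

    static int lookup(unsigned short sv, unsigned short sd)
    {
        const struct demo_quirk *q;

        for (q = table; q->name; q++)
            if (q->subvendor == sv && q->subdevice == sd)
                return q->fixup_id;  /* first match wins */
        return FIXUP_NONE;
    }

    int main(void)
    {
        printf("fixup = %d\n", lookup(0x1462, 0x1293));
        return 0;
    }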
diff --git a/sound/soc/amd/raven/acp3x-i2s.c b/sound/soc/amd/raven/acp3x-i2s.c
index 31cd4008e33f..91a388184e52 100644
--- a/sound/soc/amd/raven/acp3x-i2s.c
+++ b/sound/soc/amd/raven/acp3x-i2s.c
@@ -170,6 +170,7 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
struct snd_soc_card *card;
struct acp3x_platform_info *pinfo;
u32 ret, val, period_bytes, reg_val, ier_val, water_val;
+ u32 buf_size, buf_reg;
prtd = substream->private_data;
rtd = substream->runtime->private_data;
@@ -183,6 +184,8 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
}
period_bytes = frames_to_bytes(substream->runtime,
substream->runtime->period_size);
+ buf_size = frames_to_bytes(substream->runtime,
+ substream->runtime->buffer_size);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
@@ -196,6 +199,7 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
mmACP_BT_TX_INTR_WATERMARK_SIZE;
reg_val = mmACP_BTTDM_ITER;
ier_val = mmACP_BTTDM_IER;
+ buf_reg = mmACP_BT_TX_RINGBUFSIZE;
break;
case I2S_SP_INSTANCE:
default:
@@ -203,6 +207,7 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
mmACP_I2S_TX_INTR_WATERMARK_SIZE;
reg_val = mmACP_I2STDM_ITER;
ier_val = mmACP_I2STDM_IER;
+ buf_reg = mmACP_I2S_TX_RINGBUFSIZE;
}
} else {
switch (rtd->i2s_instance) {
@@ -211,6 +216,7 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
mmACP_BT_RX_INTR_WATERMARK_SIZE;
reg_val = mmACP_BTTDM_IRER;
ier_val = mmACP_BTTDM_IER;
+ buf_reg = mmACP_BT_RX_RINGBUFSIZE;
break;
case I2S_SP_INSTANCE:
default:
@@ -218,9 +224,11 @@ static int acp3x_i2s_trigger(struct snd_pcm_substream *substream,
mmACP_I2S_RX_INTR_WATERMARK_SIZE;
reg_val = mmACP_I2STDM_IRER;
ier_val = mmACP_I2STDM_IER;
+ buf_reg = mmACP_I2S_RX_RINGBUFSIZE;
}
}
rv_writel(period_bytes, rtd->acp3x_base + water_val);
+ rv_writel(buf_size, rtd->acp3x_base + buf_reg);
val = rv_readl(rtd->acp3x_base + reg_val);
val = val | BIT(0);
rv_writel(val, rtd->acp3x_base + reg_val);
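The new writes size the ring-buffer register per stream at trigger time from frames_to_bytes(runtime, buffer_size), i.e. from what userspace actually negotiated, instead of the fixed MAX_BUFFER previously written in config_acp3x_dma() (removed in the next hunk). The arithmetic, for illustration only:

    #include <stdio.h>

    /* frames_to_bytes() is frames * channels * bytes-per-sample */
    static unsigned int frames_to_bytes_demo(unsigned int frames,
                                             unsigned int channels,
                                             unsigned int bytes_per_sample)
    {
        return frames * channels * bytes_per_sample;
    }

    int main(void)
    {
        /* e.g. 2 ch, S16_LE, 4096-frame buffer -> 16384-byte ring */
        printf("%u\n", frames_to_bytes_demo(4096, 2, 2));
        return 0;
    }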
diff --git a/sound/soc/amd/raven/acp3x-pcm-dma.c b/sound/soc/amd/raven/acp3x-pcm-dma.c
index aecc3c061679..d62c0d90c41e 100644
--- a/sound/soc/amd/raven/acp3x-pcm-dma.c
+++ b/sound/soc/amd/raven/acp3x-pcm-dma.c
@@ -110,7 +110,7 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
{
u16 page_idx;
u32 low, high, val, acp_fifo_addr, reg_fifo_addr;
- u32 reg_ringbuf_size, reg_dma_size, reg_fifo_size;
+ u32 reg_dma_size, reg_fifo_size;
dma_addr_t addr;
addr = rtd->dma_addr;
@@ -157,7 +157,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
if (direction == SNDRV_PCM_STREAM_PLAYBACK) {
switch (rtd->i2s_instance) {
case I2S_BT_INSTANCE:
- reg_ringbuf_size = mmACP_BT_TX_RINGBUFSIZE;
reg_dma_size = mmACP_BT_TX_DMA_SIZE;
acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
BT_PB_FIFO_ADDR_OFFSET;
@@ -169,7 +168,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
case I2S_SP_INSTANCE:
default:
- reg_ringbuf_size = mmACP_I2S_TX_RINGBUFSIZE;
reg_dma_size = mmACP_I2S_TX_DMA_SIZE;
acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
SP_PB_FIFO_ADDR_OFFSET;
@@ -181,7 +179,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
} else {
switch (rtd->i2s_instance) {
case I2S_BT_INSTANCE:
- reg_ringbuf_size = mmACP_BT_RX_RINGBUFSIZE;
reg_dma_size = mmACP_BT_RX_DMA_SIZE;
acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
BT_CAPT_FIFO_ADDR_OFFSET;
@@ -193,7 +190,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
case I2S_SP_INSTANCE:
default:
- reg_ringbuf_size = mmACP_I2S_RX_RINGBUFSIZE;
reg_dma_size = mmACP_I2S_RX_DMA_SIZE;
acp_fifo_addr = ACP_SRAM_PTE_OFFSET +
SP_CAPT_FIFO_ADDR_OFFSET;
@@ -203,7 +199,6 @@ static void config_acp3x_dma(struct i2s_stream_instance *rtd, int direction)
rtd->acp3x_base + mmACP_I2S_RX_RINGBUFADDR);
}
}
- rv_writel(MAX_BUFFER, rtd->acp3x_base + reg_ringbuf_size);
rv_writel(DMA_SIZE, rtd->acp3x_base + reg_dma_size);
rv_writel(acp_fifo_addr, rtd->acp3x_base + reg_fifo_addr);
rv_writel(FIFO_SIZE, rtd->acp3x_base + reg_fifo_size);
diff --git a/sound/soc/amd/raven/pci-acp3x.c b/sound/soc/amd/raven/pci-acp3x.c
index 65330bb50e74..da60e2ec5535 100644
--- a/sound/soc/amd/raven/pci-acp3x.c
+++ b/sound/soc/amd/raven/pci-acp3x.c
@@ -45,23 +45,6 @@ static int acp3x_power_on(void __iomem *acp3x_base)
return -ETIMEDOUT;
}
-static int acp3x_power_off(void __iomem *acp3x_base)
-{
- u32 val;
- int timeout;
-
- rv_writel(ACP_PGFSM_CNTL_POWER_OFF_MASK,
- acp3x_base + mmACP_PGFSM_CONTROL);
- timeout = 0;
- while (++timeout < 500) {
- val = rv_readl(acp3x_base + mmACP_PGFSM_STATUS);
- if ((val & ACP_PGFSM_STATUS_MASK) == ACP_POWERED_OFF)
- return 0;
- udelay(1);
- }
- return -ETIMEDOUT;
-}
-
static int acp3x_reset(void __iomem *acp3x_base)
{
u32 val;
@@ -115,12 +98,6 @@ static int acp3x_deinit(void __iomem *acp3x_base)
pr_err("ACP3x reset failed\n");
return ret;
}
- /* power off */
- ret = acp3x_power_off(acp3x_base);
- if (ret) {
- pr_err("ACP3x power off failed\n");
- return ret;
- }
return 0;
}
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index d1dc8e6366dc..71f2d42188c4 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -10,11 +10,11 @@ config SND_ATMEL_SOC
if SND_ATMEL_SOC
config SND_ATMEL_SOC_PDC
- tristate
+ bool
depends on HAS_DMA
config SND_ATMEL_SOC_DMA
- tristate
+ bool
select SND_SOC_GENERIC_DMAENGINE_PCM
config SND_ATMEL_SOC_SSC
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index 1f6890ed3738..c7d2989791be 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -6,8 +6,14 @@ snd-soc-atmel_ssc_dai-objs := atmel_ssc_dai.o
snd-soc-atmel-i2s-objs := atmel-i2s.o
snd-soc-mchp-i2s-mcc-objs := mchp-i2s-mcc.o
-obj-$(CONFIG_SND_ATMEL_SOC_PDC) += snd-soc-atmel-pcm-pdc.o
-obj-$(CONFIG_SND_ATMEL_SOC_DMA) += snd-soc-atmel-pcm-dma.o
+# pdc and dma need to both be built-in if any user of
+# ssc is built-in.
+ifdef CONFIG_SND_ATMEL_SOC_PDC
+obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-pdc.o
+endif
+ifdef CONFIG_SND_ATMEL_SOC_DMA
+obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel-pcm-dma.o
+endif
obj-$(CONFIG_SND_ATMEL_SOC_SSC) += snd-soc-atmel_ssc_dai.o
obj-$(CONFIG_SND_ATMEL_SOC_I2S) += snd-soc-atmel-i2s.o
obj-$(CONFIG_SND_MCHP_SOC_I2S_MCC) += snd-soc-mchp-i2s-mcc.o
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 7e90f5d83097..ea912439e446 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -1406,7 +1406,7 @@ config SND_SOC_WM8737
depends on SND_SOC_I2C_AND_SPI
config SND_SOC_WM8741
- tristate "Wolfson Microelectronics WM8737 DAC"
+ tristate "Wolfson Microelectronics WM8741 DAC"
depends on SND_SOC_I2C_AND_SPI
config SND_SOC_WM8750
diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c
index 444cc4e3374e..f005751da2cc 100644
--- a/sound/soc/codecs/hdmi-codec.c
+++ b/sound/soc/codecs/hdmi-codec.c
@@ -779,7 +779,17 @@ static int hdmi_of_xlate_dai_id(struct snd_soc_component *component,
return ret;
}
+static void hdmi_remove(struct snd_soc_component *component)
+{
+ struct hdmi_codec_priv *hcp = snd_soc_component_get_drvdata(component);
+
+ if (hcp->hcd.ops->hook_plugged_cb)
+ hcp->hcd.ops->hook_plugged_cb(component->dev->parent,
+ hcp->hcd.data, NULL, NULL);
+}
+
static const struct snd_soc_component_driver hdmi_driver = {
+ .remove = hdmi_remove,
.dapm_widgets = hdmi_widgets,
.num_dapm_widgets = ARRAY_SIZE(hdmi_widgets),
.of_xlate_dai_id = hdmi_of_xlate_dai_id,
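hdmi_remove() unregisters the plugged callback by handing the ops NULL function and context pointers, so the video-side driver cannot call back into codec private data that is about to be freed. A sketch of that unhook-on-remove discipline with stand-in types (not the hdmi_codec_ops API):

    #include <stddef.h>
    #include <stdio.h>

    typedef void (*plugged_cb)(void *ctx, int plugged);

    struct demo_sink {
        plugged_cb cb;
        void *ctx;
    };

    static void hook_plugged_cb(struct demo_sink *s, plugged_cb cb, void *ctx)
    {
        s->cb = cb;
        s->ctx = ctx;
    }

    static void report_plug(struct demo_sink *s, int plugged)
    {
        if (s->cb)  /* nothing fires once the owner has unhooked */
            s->cb(s->ctx, plugged);
    }

    static void owner_remove(struct demo_sink *s)
    {
        hook_plugged_cb(s, NULL, NULL);  /* mirror of hdmi_remove() above */
    }

    int main(void)
    {
        struct demo_sink s = { NULL, NULL };

        owner_remove(&s);
        report_plug(&s, 1);  /* safely a no-op after removal */
        return 0;
    }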
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index 5bc2c6411b33..032adc14562d 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -5,150 +5,24 @@
* Copyright 2011-2012 Maxim Integrated Products
*/
-#include <linux/acpi.h>
-#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
-#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/regmap.h>
#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/clk.h>
#include <sound/jack.h>
-#include <sound/max98090.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/tlv.h>
+#include <sound/max98090.h>
#include "max98090.h"
-static void max98090_shdn_save_locked(struct max98090_priv *max98090)
-{
- int shdn = 0;
-
- /* saved_shdn, saved_count, SHDN are protected by card->dapm_mutex */
- regmap_read(max98090->regmap, M98090_REG_DEVICE_SHUTDOWN, &shdn);
- max98090->saved_shdn |= shdn;
- ++max98090->saved_count;
-
- if (shdn)
- regmap_write(max98090->regmap, M98090_REG_DEVICE_SHUTDOWN, 0x0);
-}
-
-static void max98090_shdn_restore_locked(struct max98090_priv *max98090)
-{
- /* saved_shdn, saved_count, SHDN are protected by card->dapm_mutex */
- if (--max98090->saved_count == 0) {
- if (max98090->saved_shdn) {
- regmap_write(max98090->regmap,
- M98090_REG_DEVICE_SHUTDOWN,
- M98090_SHDNN_MASK);
- max98090->saved_shdn = 0;
- }
- }
-}
-
-static void max98090_shdn_save(struct max98090_priv *max98090)
-{
- mutex_lock_nested(&max98090->component->card->dapm_mutex,
- SND_SOC_DAPM_CLASS_RUNTIME);
- max98090_shdn_save_locked(max98090);
-}
-
-static void max98090_shdn_restore(struct max98090_priv *max98090)
-{
- max98090_shdn_restore_locked(max98090);
- mutex_unlock(&max98090->component->card->dapm_mutex);
-}
-
-static int max98090_put_volsw(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- struct max98090_priv *max98090 =
- snd_soc_component_get_drvdata(component);
- int ret;
-
- max98090_shdn_save(max98090);
- ret = snd_soc_put_volsw(kcontrol, ucontrol);
- max98090_shdn_restore(max98090);
-
- return ret;
-}
-
-static int max98090_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_kcontrol_component(kcontrol);
- struct max98090_priv *max98090 =
- snd_soc_component_get_drvdata(component);
- int ret;
-
- max98090_shdn_save(max98090);
- ret = snd_soc_dapm_put_enum_double_locked(kcontrol, ucontrol);
- max98090_shdn_restore(max98090);
-
- return ret;
-}
-
-static int max98090_put_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- struct max98090_priv *max98090 =
- snd_soc_component_get_drvdata(component);
- int ret;
-
- max98090_shdn_save(max98090);
- ret = snd_soc_put_enum_double(kcontrol, ucontrol);
- max98090_shdn_restore(max98090);
-
- return ret;
-}
-
-static int max98090_bytes_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- struct snd_soc_component *component =
- snd_soc_kcontrol_component(kcontrol);
- struct max98090_priv *max98090 =
- snd_soc_component_get_drvdata(component);
- int ret;
-
- max98090_shdn_save(max98090);
- ret = snd_soc_bytes_put(kcontrol, ucontrol);
- max98090_shdn_restore(max98090);
-
- return ret;
-}
-
-static int max98090_dapm_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_component *component =
- snd_soc_dapm_to_component(w->dapm);
- struct max98090_priv *max98090 =
- snd_soc_component_get_drvdata(component);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- case SND_SOC_DAPM_PRE_PMD:
- max98090_shdn_save_locked(max98090);
- break;
- case SND_SOC_DAPM_POST_PMU:
- case SND_SOC_DAPM_POST_PMD:
- max98090_shdn_restore_locked(max98090);
- break;
- }
-
- return 0;
-}
-
/* Allows for sparsely populated register maps */
static const struct reg_default max98090_reg[] = {
{ 0x00, 0x00 }, /* 00 Software Reset */
@@ -632,13 +506,10 @@ static SOC_ENUM_SINGLE_DECL(max98090_adchp_enum,
max98090_pwr_perf_text);
static const struct snd_kcontrol_new max98090_snd_controls[] = {
- SOC_ENUM_EXT("MIC Bias VCM Bandgap", max98090_vcmbandgap_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
+ SOC_ENUM("MIC Bias VCM Bandgap", max98090_vcmbandgap_enum),
- SOC_SINGLE_EXT("DMIC MIC Comp Filter Config",
- M98090_REG_DIGITAL_MIC_CONFIG,
- M98090_DMIC_COMP_SHIFT, M98090_DMIC_COMP_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
+ SOC_SINGLE("DMIC MIC Comp Filter Config", M98090_REG_DIGITAL_MIC_CONFIG,
+ M98090_DMIC_COMP_SHIFT, M98090_DMIC_COMP_NUM - 1, 0),
SOC_SINGLE_EXT_TLV("MIC1 Boost Volume",
M98090_REG_MIC1_INPUT_LEVEL, M98090_MIC_PA1EN_SHIFT,
@@ -693,34 +564,24 @@ static const struct snd_kcontrol_new max98090_snd_controls[] = {
M98090_AVR_SHIFT, M98090_AVR_NUM - 1, 1,
max98090_av_tlv),
- SOC_ENUM_EXT("ADC Oversampling Rate", max98090_osr128_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_SINGLE_EXT("ADC Quantizer Dither", M98090_REG_ADC_CONTROL,
- M98090_ADCDITHER_SHIFT, M98090_ADCDITHER_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_ENUM_EXT("ADC High Performance Mode", max98090_adchp_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
-
- SOC_SINGLE_EXT("DAC Mono Mode", M98090_REG_IO_CONFIGURATION,
- M98090_DMONO_SHIFT, M98090_DMONO_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_SINGLE_EXT("SDIN Mode", M98090_REG_IO_CONFIGURATION,
- M98090_SDIEN_SHIFT, M98090_SDIEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_SINGLE_EXT("SDOUT Mode", M98090_REG_IO_CONFIGURATION,
- M98090_SDOEN_SHIFT, M98090_SDOEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_SINGLE_EXT("SDOUT Hi-Z Mode", M98090_REG_IO_CONFIGURATION,
- M98090_HIZOFF_SHIFT, M98090_HIZOFF_NUM - 1, 1,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_ENUM_EXT("Filter Mode", max98090_mode_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_SINGLE_EXT("Record Path DC Blocking", M98090_REG_FILTER_CONFIG,
- M98090_AHPF_SHIFT, M98090_AHPF_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_SINGLE_EXT("Playback Path DC Blocking", M98090_REG_FILTER_CONFIG,
- M98090_DHPF_SHIFT, M98090_DHPF_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
+ SOC_ENUM("ADC Oversampling Rate", max98090_osr128_enum),
+ SOC_SINGLE("ADC Quantizer Dither", M98090_REG_ADC_CONTROL,
+ M98090_ADCDITHER_SHIFT, M98090_ADCDITHER_NUM - 1, 0),
+ SOC_ENUM("ADC High Performance Mode", max98090_adchp_enum),
+
+ SOC_SINGLE("DAC Mono Mode", M98090_REG_IO_CONFIGURATION,
+ M98090_DMONO_SHIFT, M98090_DMONO_NUM - 1, 0),
+ SOC_SINGLE("SDIN Mode", M98090_REG_IO_CONFIGURATION,
+ M98090_SDIEN_SHIFT, M98090_SDIEN_NUM - 1, 0),
+ SOC_SINGLE("SDOUT Mode", M98090_REG_IO_CONFIGURATION,
+ M98090_SDOEN_SHIFT, M98090_SDOEN_NUM - 1, 0),
+ SOC_SINGLE("SDOUT Hi-Z Mode", M98090_REG_IO_CONFIGURATION,
+ M98090_HIZOFF_SHIFT, M98090_HIZOFF_NUM - 1, 1),
+ SOC_ENUM("Filter Mode", max98090_mode_enum),
+ SOC_SINGLE("Record Path DC Blocking", M98090_REG_FILTER_CONFIG,
+ M98090_AHPF_SHIFT, M98090_AHPF_NUM - 1, 0),
+ SOC_SINGLE("Playback Path DC Blocking", M98090_REG_FILTER_CONFIG,
+ M98090_DHPF_SHIFT, M98090_DHPF_NUM - 1, 0),
SOC_SINGLE_TLV("Digital BQ Volume", M98090_REG_ADC_BIQUAD_LEVEL,
M98090_AVBQ_SHIFT, M98090_AVBQ_NUM - 1, 1, max98090_dv_tlv),
SOC_SINGLE_EXT_TLV("Digital Sidetone Volume",
@@ -733,17 +594,13 @@ static const struct snd_kcontrol_new max98090_snd_controls[] = {
SOC_SINGLE_TLV("Digital Volume", M98090_REG_DAI_PLAYBACK_LEVEL,
M98090_DV_SHIFT, M98090_DV_NUM - 1, 1,
max98090_dv_tlv),
- SND_SOC_BYTES_E("EQ Coefficients", M98090_REG_EQUALIZER_BASE, 105,
- snd_soc_bytes_get, max98090_bytes_put),
- SOC_SINGLE_EXT("Digital EQ 3 Band Switch", M98090_REG_DSP_FILTER_ENABLE,
- M98090_EQ3BANDEN_SHIFT, M98090_EQ3BANDEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_SINGLE_EXT("Digital EQ 5 Band Switch", M98090_REG_DSP_FILTER_ENABLE,
- M98090_EQ5BANDEN_SHIFT, M98090_EQ5BANDEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_SINGLE_EXT("Digital EQ 7 Band Switch", M98090_REG_DSP_FILTER_ENABLE,
- M98090_EQ7BANDEN_SHIFT, M98090_EQ7BANDEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
+ SND_SOC_BYTES("EQ Coefficients", M98090_REG_EQUALIZER_BASE, 105),
+ SOC_SINGLE("Digital EQ 3 Band Switch", M98090_REG_DSP_FILTER_ENABLE,
+ M98090_EQ3BANDEN_SHIFT, M98090_EQ3BANDEN_NUM - 1, 0),
+ SOC_SINGLE("Digital EQ 5 Band Switch", M98090_REG_DSP_FILTER_ENABLE,
+ M98090_EQ5BANDEN_SHIFT, M98090_EQ5BANDEN_NUM - 1, 0),
+ SOC_SINGLE("Digital EQ 7 Band Switch", M98090_REG_DSP_FILTER_ENABLE,
+ M98090_EQ7BANDEN_SHIFT, M98090_EQ7BANDEN_NUM - 1, 0),
SOC_SINGLE("Digital EQ Clipping Detection", M98090_REG_DAI_PLAYBACK_LEVEL_EQ,
M98090_EQCLPN_SHIFT, M98090_EQCLPN_NUM - 1,
1),
@@ -751,34 +608,25 @@ static const struct snd_kcontrol_new max98090_snd_controls[] = {
M98090_DVEQ_SHIFT, M98090_DVEQ_NUM - 1, 1,
max98090_dv_tlv),
- SOC_SINGLE_EXT("ALC Enable", M98090_REG_DRC_TIMING,
- M98090_DRCEN_SHIFT, M98090_DRCEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
- SOC_ENUM_EXT("ALC Attack Time", max98090_drcatk_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_ENUM_EXT("ALC Release Time", max98090_drcrls_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
+ SOC_SINGLE("ALC Enable", M98090_REG_DRC_TIMING,
+ M98090_DRCEN_SHIFT, M98090_DRCEN_NUM - 1, 0),
+ SOC_ENUM("ALC Attack Time", max98090_drcatk_enum),
+ SOC_ENUM("ALC Release Time", max98090_drcrls_enum),
SOC_SINGLE_TLV("ALC Make Up Volume", M98090_REG_DRC_GAIN,
M98090_DRCG_SHIFT, M98090_DRCG_NUM - 1, 0,
max98090_alcmakeup_tlv),
- SOC_ENUM_EXT("ALC Compression Ratio", max98090_alccmp_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_ENUM_EXT("ALC Expansion Ratio", max98090_drcexp_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_SINGLE_EXT_TLV("ALC Compression Threshold Volume",
+ SOC_ENUM("ALC Compression Ratio", max98090_alccmp_enum),
+ SOC_ENUM("ALC Expansion Ratio", max98090_drcexp_enum),
+ SOC_SINGLE_TLV("ALC Compression Threshold Volume",
M98090_REG_DRC_COMPRESSOR, M98090_DRCTHC_SHIFT,
- M98090_DRCTHC_NUM - 1, 1,
- snd_soc_get_volsw, max98090_put_volsw, max98090_alccomp_tlv),
- SOC_SINGLE_EXT_TLV("ALC Expansion Threshold Volume",
+ M98090_DRCTHC_NUM - 1, 1, max98090_alccomp_tlv),
+ SOC_SINGLE_TLV("ALC Expansion Threshold Volume",
M98090_REG_DRC_EXPANDER, M98090_DRCTHE_SHIFT,
- M98090_DRCTHE_NUM - 1, 1,
- snd_soc_get_volsw, max98090_put_volsw, max98090_drcexp_tlv),
+ M98090_DRCTHE_NUM - 1, 1, max98090_drcexp_tlv),
- SOC_ENUM_EXT("DAC HP Playback Performance Mode",
- max98090_dac_perfmode_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_ENUM_EXT("DAC High Performance Mode", max98090_dachp_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
+ SOC_ENUM("DAC HP Playback Performance Mode",
+ max98090_dac_perfmode_enum),
+ SOC_ENUM("DAC High Performance Mode", max98090_dachp_enum),
SOC_SINGLE_TLV("Headphone Left Mixer Volume",
M98090_REG_HP_CONTROL, M98090_MIXHPLG_SHIFT,
@@ -836,12 +684,9 @@ static const struct snd_kcontrol_new max98090_snd_controls[] = {
SOC_SINGLE("Volume Adjustment Smoothing", M98090_REG_LEVEL_CONTROL,
M98090_VSENN_SHIFT, M98090_VSENN_NUM - 1, 1),
- SND_SOC_BYTES_E("Biquad Coefficients",
- M98090_REG_RECORD_BIQUAD_BASE, 15,
- snd_soc_bytes_get, max98090_bytes_put),
- SOC_SINGLE_EXT("Biquad Switch", M98090_REG_DSP_FILTER_ENABLE,
- M98090_ADCBQEN_SHIFT, M98090_ADCBQEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
+ SND_SOC_BYTES("Biquad Coefficients", M98090_REG_RECORD_BIQUAD_BASE, 15),
+ SOC_SINGLE("Biquad Switch", M98090_REG_DSP_FILTER_ENABLE,
+ M98090_ADCBQEN_SHIFT, M98090_ADCBQEN_NUM - 1, 0),
};
static const struct snd_kcontrol_new max98091_snd_controls[] = {
@@ -850,12 +695,10 @@ static const struct snd_kcontrol_new max98091_snd_controls[] = {
M98090_DMIC34_ZEROPAD_SHIFT,
M98090_DMIC34_ZEROPAD_NUM - 1, 0),
- SOC_ENUM_EXT("Filter DMIC34 Mode", max98090_filter_dmic34mode_enum,
- snd_soc_get_enum_double, max98090_put_enum_double),
- SOC_SINGLE_EXT("DMIC34 DC Blocking", M98090_REG_FILTER_CONFIG,
+ SOC_ENUM("Filter DMIC34 Mode", max98090_filter_dmic34mode_enum),
+ SOC_SINGLE("DMIC34 DC Blocking", M98090_REG_FILTER_CONFIG,
M98090_FLT_DMIC34HPF_SHIFT,
- M98090_FLT_DMIC34HPF_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
+ M98090_FLT_DMIC34HPF_NUM - 1, 0),
SOC_SINGLE_TLV("DMIC3 Boost Volume", M98090_REG_DMIC3_VOLUME,
M98090_DMIC_AV3G_SHIFT, M98090_DMIC_AV3G_NUM - 1, 0,
@@ -873,9 +716,8 @@ static const struct snd_kcontrol_new max98091_snd_controls[] = {
SND_SOC_BYTES("DMIC34 Biquad Coefficients",
M98090_REG_DMIC34_BIQUAD_BASE, 15),
- SOC_SINGLE_EXT("DMIC34 Biquad Switch", M98090_REG_DSP_FILTER_ENABLE,
- M98090_DMIC34BQEN_SHIFT, M98090_DMIC34BQEN_NUM - 1, 0,
- snd_soc_get_volsw, max98090_put_volsw),
+ SOC_SINGLE("DMIC34 Biquad Switch", M98090_REG_DSP_FILTER_ENABLE,
+ M98090_DMIC34BQEN_SHIFT, M98090_DMIC34BQEN_NUM - 1, 0),
SOC_SINGLE_TLV("DMIC34 BQ PreAttenuation Volume",
M98090_REG_DMIC34_BQ_PREATTEN, M98090_AV34BQ_SHIFT,
@@ -929,6 +771,19 @@ static int max98090_micinput_event(struct snd_soc_dapm_widget *w,
return 0;
}
+static int max98090_shdn_event(struct snd_soc_dapm_widget *w,
+ struct snd_kcontrol *kcontrol, int event)
+{
+ struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm);
+ struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
+
+ if (event & SND_SOC_DAPM_POST_PMU)
+ max98090->shdn_pending = true;
+
+ return 0;
+
+}
+
static const char *mic1_mux_text[] = { "IN12", "IN56" };
static SOC_ENUM_SINGLE_DECL(mic1_mux_enum,
@@ -1029,14 +884,10 @@ static SOC_ENUM_SINGLE_DECL(ltenr_mux_enum,
lten_mux_text);
static const struct snd_kcontrol_new max98090_ltenl_mux =
- SOC_DAPM_ENUM_EXT("LTENL Mux", ltenl_mux_enum,
- snd_soc_dapm_get_enum_double,
- max98090_dapm_put_enum_double);
+ SOC_DAPM_ENUM("LTENL Mux", ltenl_mux_enum);
static const struct snd_kcontrol_new max98090_ltenr_mux =
- SOC_DAPM_ENUM_EXT("LTENR Mux", ltenr_mux_enum,
- snd_soc_dapm_get_enum_double,
- max98090_dapm_put_enum_double);
+ SOC_DAPM_ENUM("LTENR Mux", ltenr_mux_enum);
static const char *lben_mux_text[] = { "Normal", "Loopback" };
@@ -1051,14 +902,10 @@ static SOC_ENUM_SINGLE_DECL(lbenr_mux_enum,
lben_mux_text);
static const struct snd_kcontrol_new max98090_lbenl_mux =
- SOC_DAPM_ENUM_EXT("LBENL Mux", lbenl_mux_enum,
- snd_soc_dapm_get_enum_double,
- max98090_dapm_put_enum_double);
+ SOC_DAPM_ENUM("LBENL Mux", lbenl_mux_enum);
static const struct snd_kcontrol_new max98090_lbenr_mux =
- SOC_DAPM_ENUM_EXT("LBENR Mux", lbenr_mux_enum,
- snd_soc_dapm_get_enum_double,
- max98090_dapm_put_enum_double);
+ SOC_DAPM_ENUM("LBENR Mux", lbenr_mux_enum);
static const char *stenl_mux_text[] = { "Normal", "Sidetone Left" };
@@ -1225,25 +1072,21 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("IN56"),
SND_SOC_DAPM_SUPPLY("MICBIAS", M98090_REG_INPUT_ENABLE,
- M98090_MBEN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_MBEN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("SHDN", M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("SDIEN", M98090_REG_IO_CONFIGURATION,
- M98090_SDIEN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_SDIEN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("SDOEN", M98090_REG_IO_CONFIGURATION,
- M98090_SDOEN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_SDOEN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DMICL_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
- M98090_DIGMICL_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_DIGMICL_SHIFT, 0, max98090_shdn_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("DMICR_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
- M98090_DIGMICR_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_DIGMICR_SHIFT, 0, max98090_shdn_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_SUPPLY("AHPF", M98090_REG_FILTER_CONFIG,
- M98090_AHPF_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_AHPF_SHIFT, 0, NULL, 0),
/*
* Note: Sysclk and misc power supplies are taken care of by SHDN
@@ -1273,12 +1116,10 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
&max98090_lineb_mixer_controls[0],
ARRAY_SIZE(max98090_lineb_mixer_controls)),
- SND_SOC_DAPM_PGA_E("LINEA Input", M98090_REG_INPUT_ENABLE,
- M98090_LINEAEN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
- SND_SOC_DAPM_PGA_E("LINEB Input", M98090_REG_INPUT_ENABLE,
- M98090_LINEBEN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PGA("LINEA Input", M98090_REG_INPUT_ENABLE,
+ M98090_LINEAEN_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("LINEB Input", M98090_REG_INPUT_ENABLE,
+ M98090_LINEBEN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_MIXER("Left ADC Mixer", SND_SOC_NOPM, 0, 0,
&max98090_left_adc_mixer_controls[0],
@@ -1289,11 +1130,11 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
ARRAY_SIZE(max98090_right_adc_mixer_controls)),
SND_SOC_DAPM_ADC_E("ADCL", NULL, M98090_REG_INPUT_ENABLE,
- M98090_ADLEN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_ADLEN_SHIFT, 0, max98090_shdn_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_ADC_E("ADCR", NULL, M98090_REG_INPUT_ENABLE,
- M98090_ADREN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_ADREN_SHIFT, 0, max98090_shdn_event,
+ SND_SOC_DAPM_POST_PMU),
SND_SOC_DAPM_AIF_OUT("AIFOUTL", "HiFi Capture", 0,
SND_SOC_NOPM, 0, 0),
@@ -1321,12 +1162,10 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
SND_SOC_DAPM_AIF_IN("AIFINL", "HiFi Playback", 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_AIF_IN("AIFINR", "HiFi Playback", 1, SND_SOC_NOPM, 0, 0),
- SND_SOC_DAPM_DAC_E("DACL", NULL, M98090_REG_OUTPUT_ENABLE,
- M98090_DALEN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
- SND_SOC_DAPM_DAC_E("DACR", NULL, M98090_REG_OUTPUT_ENABLE,
- M98090_DAREN_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_DAC("DACL", NULL, M98090_REG_OUTPUT_ENABLE,
+ M98090_DALEN_SHIFT, 0),
+ SND_SOC_DAPM_DAC("DACR", NULL, M98090_REG_OUTPUT_ENABLE,
+ M98090_DAREN_SHIFT, 0),
SND_SOC_DAPM_MIXER("Left Headphone Mixer", SND_SOC_NOPM, 0, 0,
&max98090_left_hp_mixer_controls[0],
@@ -1361,26 +1200,20 @@ static const struct snd_soc_dapm_widget max98090_dapm_widgets[] = {
SND_SOC_DAPM_MUX("MIXHPRSEL Mux", SND_SOC_NOPM, 0, 0,
&max98090_mixhprsel_mux),
- SND_SOC_DAPM_PGA_E("HP Left Out", M98090_REG_OUTPUT_ENABLE,
- M98090_HPLEN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
- SND_SOC_DAPM_PGA_E("HP Right Out", M98090_REG_OUTPUT_ENABLE,
- M98090_HPREN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
-
- SND_SOC_DAPM_PGA_E("SPK Left Out", M98090_REG_OUTPUT_ENABLE,
- M98090_SPLEN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
- SND_SOC_DAPM_PGA_E("SPK Right Out", M98090_REG_OUTPUT_ENABLE,
- M98090_SPREN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
-
- SND_SOC_DAPM_PGA_E("RCV Left Out", M98090_REG_OUTPUT_ENABLE,
- M98090_RCVLEN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
- SND_SOC_DAPM_PGA_E("RCV Right Out", M98090_REG_OUTPUT_ENABLE,
- M98090_RCVREN_SHIFT, 0, NULL, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ SND_SOC_DAPM_PGA("HP Left Out", M98090_REG_OUTPUT_ENABLE,
+ M98090_HPLEN_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("HP Right Out", M98090_REG_OUTPUT_ENABLE,
+ M98090_HPREN_SHIFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("SPK Left Out", M98090_REG_OUTPUT_ENABLE,
+ M98090_SPLEN_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("SPK Right Out", M98090_REG_OUTPUT_ENABLE,
+ M98090_SPREN_SHIFT, 0, NULL, 0),
+
+ SND_SOC_DAPM_PGA("RCV Left Out", M98090_REG_OUTPUT_ENABLE,
+ M98090_RCVLEN_SHIFT, 0, NULL, 0),
+ SND_SOC_DAPM_PGA("RCV Right Out", M98090_REG_OUTPUT_ENABLE,
+ M98090_RCVREN_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_OUTPUT("HPL"),
SND_SOC_DAPM_OUTPUT("HPR"),
@@ -1395,11 +1228,9 @@ static const struct snd_soc_dapm_widget max98091_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("DMIC4"),
SND_SOC_DAPM_SUPPLY("DMIC3_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
- M98090_DIGMIC3_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_DIGMIC3_SHIFT, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY("DMIC4_ENA", M98090_REG_DIGITAL_MIC_ENABLE,
- M98090_DIGMIC4_SHIFT, 0, max98090_dapm_event,
- SND_SOC_DAPM_PRE_POST_PMU | SND_SOC_DAPM_PRE_POST_PMD),
+ M98090_DIGMIC4_SHIFT, 0, NULL, 0),
};
static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
@@ -1670,11 +1501,6 @@ static void max98090_configure_bclk(struct snd_soc_component *component)
return;
}
- /*
- * Master mode: no need to save and restore SHDN for the following
- * sensitive registers.
- */
-
/* Check for supported PCLK to LRCLK ratios */
for (i = 0; i < ARRAY_SIZE(pclk_rates); i++) {
if ((pclk_rates[i] == max98090->sysclk) &&
@@ -1761,14 +1587,12 @@ static int max98090_dai_set_fmt(struct snd_soc_dai *codec_dai,
switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
case SND_SOC_DAIFMT_CBS_CFS:
/* Set to slave mode PLL - MAS mode off */
- max98090_shdn_save(max98090);
snd_soc_component_write(component,
M98090_REG_CLOCK_RATIO_NI_MSB, 0x00);
snd_soc_component_write(component,
M98090_REG_CLOCK_RATIO_NI_LSB, 0x00);
snd_soc_component_update_bits(component, M98090_REG_CLOCK_MODE,
M98090_USE_M1_MASK, 0);
- max98090_shdn_restore(max98090);
max98090->master = false;
break;
case SND_SOC_DAIFMT_CBM_CFM:
@@ -1794,9 +1618,7 @@ static int max98090_dai_set_fmt(struct snd_soc_dai *codec_dai,
dev_err(component->dev, "DAI clock mode unsupported");
return -EINVAL;
}
- max98090_shdn_save(max98090);
snd_soc_component_write(component, M98090_REG_MASTER_MODE, regval);
- max98090_shdn_restore(max98090);
regval = 0;
switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
@@ -1841,10 +1663,8 @@ static int max98090_dai_set_fmt(struct snd_soc_dai *codec_dai,
if (max98090->tdm_slots > 1)
regval ^= M98090_BCI_MASK;
- max98090_shdn_save(max98090);
snd_soc_component_write(component,
M98090_REG_INTERFACE_FORMAT, regval);
- max98090_shdn_restore(max98090);
}
return 0;
@@ -1856,7 +1676,6 @@ static int max98090_set_tdm_slot(struct snd_soc_dai *codec_dai,
struct snd_soc_component *component = codec_dai->component;
struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
struct max98090_cdata *cdata;
-
cdata = &max98090->dai[0];
if (slots < 0 || slots > 4)
@@ -1866,7 +1685,6 @@ static int max98090_set_tdm_slot(struct snd_soc_dai *codec_dai,
max98090->tdm_width = slot_width;
if (max98090->tdm_slots > 1) {
- max98090_shdn_save(max98090);
/* SLOTL SLOTR SLOTDLY */
snd_soc_component_write(component, M98090_REG_TDM_FORMAT,
0 << M98090_TDM_SLOTL_SHIFT |
@@ -1877,7 +1695,6 @@ static int max98090_set_tdm_slot(struct snd_soc_dai *codec_dai,
snd_soc_component_update_bits(component, M98090_REG_TDM_CONTROL,
M98090_TDM_MASK,
M98090_TDM_MASK);
- max98090_shdn_restore(max98090);
}
/*
@@ -2077,7 +1894,6 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
dmic_freq = dmic_table[pclk_index].settings[micclk_index].freq;
dmic_comp = dmic_table[pclk_index].settings[micclk_index].comp[i];
- max98090_shdn_save(max98090);
regmap_update_bits(max98090->regmap, M98090_REG_DIGITAL_MIC_ENABLE,
M98090_MICCLK_MASK,
micclk_index << M98090_MICCLK_SHIFT);
@@ -2086,7 +1902,6 @@ static int max98090_configure_dmic(struct max98090_priv *max98090,
M98090_DMIC_COMP_MASK | M98090_DMIC_FREQ_MASK,
dmic_comp << M98090_DMIC_COMP_SHIFT |
dmic_freq << M98090_DMIC_FREQ_SHIFT);
- max98090_shdn_restore(max98090);
return 0;
}
@@ -2123,10 +1938,8 @@ static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
switch (params_width(params)) {
case 16:
- max98090_shdn_save(max98090);
snd_soc_component_update_bits(component, M98090_REG_INTERFACE_FORMAT,
M98090_WS_MASK, 0);
- max98090_shdn_restore(max98090);
break;
default:
return -EINVAL;
@@ -2137,7 +1950,6 @@ static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
cdata->rate = max98090->lrclk;
- max98090_shdn_save(max98090);
/* Update filter mode */
if (max98090->lrclk < 24000)
snd_soc_component_update_bits(component, M98090_REG_FILTER_CONFIG,
@@ -2153,7 +1965,6 @@ static int max98090_dai_hw_params(struct snd_pcm_substream *substream,
else
snd_soc_component_update_bits(component, M98090_REG_FILTER_CONFIG,
M98090_DHF_MASK, M98090_DHF_MASK);
- max98090_shdn_restore(max98090);
max98090_configure_dmic(max98090, max98090->dmic_freq, max98090->pclk,
max98090->lrclk);
@@ -2184,7 +1995,6 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
* 0x02 (when master clk is 20MHz to 40MHz)..
* 0x03 (when master clk is 40MHz to 60MHz)..
*/
- max98090_shdn_save(max98090);
if ((freq >= 10000000) && (freq <= 20000000)) {
snd_soc_component_write(component, M98090_REG_SYSTEM_CLOCK,
M98090_PSCLK_DIV1);
@@ -2199,10 +2009,8 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
max98090->pclk = freq >> 2;
} else {
dev_err(component->dev, "Invalid master clock frequency\n");
- max98090_shdn_restore(max98090);
return -EINVAL;
}
- max98090_shdn_restore(max98090);
max98090->sysclk = freq;
@@ -2314,12 +2122,10 @@ static void max98090_pll_work(struct max98090_priv *max98090)
*/
/* Toggle shutdown OFF then ON */
- mutex_lock(&component->card->dapm_mutex);
snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_MASK, 0);
snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
M98090_SHDNN_MASK, M98090_SHDNN_MASK);
- mutex_unlock(&component->card->dapm_mutex);
for (i = 0; i < 10; ++i) {
/* Give PLL time to lock */
@@ -2642,12 +2448,7 @@ static int max98090_probe(struct snd_soc_component *component)
*/
snd_soc_component_read32(component, M98090_REG_DEVICE_STATUS);
- /*
- * SHDN should be 0 at the point, no need to save/restore for the
- * following registers.
- *
- * High Performance is default
- */
+ /* High Performance is default */
snd_soc_component_update_bits(component, M98090_REG_DAC_CONTROL,
M98090_DACHP_MASK,
1 << M98090_DACHP_SHIFT);
@@ -2658,12 +2459,7 @@ static int max98090_probe(struct snd_soc_component *component)
M98090_ADCHP_MASK,
1 << M98090_ADCHP_SHIFT);
- /*
- * SHDN should be 0 at the point, no need to save/restore for the
- * following registers.
- *
- * Turn on VCM bandgap reference
- */
+ /* Turn on VCM bandgap reference */
snd_soc_component_write(component, M98090_REG_BIAS_CONTROL,
M98090_VCM_MODE_MASK);
@@ -2695,9 +2491,25 @@ static void max98090_remove(struct snd_soc_component *component)
max98090->component = NULL;
}
+static void max98090_seq_notifier(struct snd_soc_component *component,
+ enum snd_soc_dapm_type event, int subseq)
+{
+ struct max98090_priv *max98090 = snd_soc_component_get_drvdata(component);
+
+ if (max98090->shdn_pending) {
+ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, 0);
+ msleep(40);
+ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+ max98090->shdn_pending = false;
+ }
+}
+
static const struct snd_soc_component_driver soc_component_dev_max98090 = {
.probe = max98090_probe,
.remove = max98090_remove,
+ .seq_notifier = max98090_seq_notifier,
.set_bias_level = max98090_set_bias_level,
.idle_bias_on = 1,
.use_pmdown_time = 1,
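The whole max98090 rework above replaces per-control SHDN save/restore (and its dapm_mutex gymnastics) with one deferred toggle: widgets whose register writes require a codec restart merely set shdn_pending from their POST_PMU event, and the component seq_notifier, which the DAPM core invokes at the end of a power sequence, performs a single shutdown/enable cycle with the 40 ms settle delay. A minimal sketch of that accumulate-then-toggle shape, names illustrative:

    #include <stdbool.h>
    #include <stdio.h>

    struct demo_codec {
        bool shdn_pending;
    };

    static void widget_post_pmu(struct demo_codec *c)
    {
        c->shdn_pending = true;  /* cheap: only record that a restart is due */
    }

    /* called once per DAPM sequence, like .seq_notifier above */
    static void seq_notifier(struct demo_codec *c)
    {
        if (!c->shdn_pending)
            return;
        printf("SHDN off -> settle delay -> SHDN on (one toggle)\n");
        c->shdn_pending = false;
    }

    int main(void)
    {
        struct demo_codec c = { false };

        widget_post_pmu(&c);  /* several widgets may mark it ... */
        widget_post_pmu(&c);
        seq_notifier(&c);     /* ... the restart still happens once */
        return 0;
    }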
diff --git a/sound/soc/codecs/max98090.h b/sound/soc/codecs/max98090.h
index 0a31708b7df7..a197114b0dad 100644
--- a/sound/soc/codecs/max98090.h
+++ b/sound/soc/codecs/max98090.h
@@ -1539,8 +1539,7 @@ struct max98090_priv {
unsigned int pa2en;
unsigned int sidetone;
bool master;
- int saved_count;
- int saved_shdn;
+ bool shdn_pending;
};
int max98090_mic_detect(struct snd_soc_component *component,
diff --git a/sound/soc/codecs/pcm512x.c b/sound/soc/codecs/pcm512x.c
index 861210f6bf4f..4cbef9affffd 100644
--- a/sound/soc/codecs/pcm512x.c
+++ b/sound/soc/codecs/pcm512x.c
@@ -1564,13 +1564,15 @@ int pcm512x_probe(struct device *dev, struct regmap *regmap)
}
pcm512x->sclk = devm_clk_get(dev, NULL);
- if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER)
- return -EPROBE_DEFER;
+ if (PTR_ERR(pcm512x->sclk) == -EPROBE_DEFER) {
+ ret = -EPROBE_DEFER;
+ goto err;
+ }
if (!IS_ERR(pcm512x->sclk)) {
ret = clk_prepare_enable(pcm512x->sclk);
if (ret != 0) {
dev_err(dev, "Failed to enable SCLK: %d\n", ret);
- return ret;
+ goto err;
}
}
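Returning straight out of probe here leaked whatever the earlier steps had enabled; routing failures through one unwind label keeps acquire and release balanced. (The err: label body sits outside this hunk; in pcm512x it is assumed to disable the supplies enabled earlier in probe.) A generic sketch of the single-exit unwind:

    #include <stdio.h>
    #include <stdlib.h>

    static int demo_probe(int fail_late)
    {
        char *res = malloc(64);  /* stands in for the already-enabled supplies */
        int ret = 0;

        if (!res)
            return -1;  /* nothing acquired yet, plain return is fine */

        if (fail_late) {  /* e.g. devm_clk_get() deferring, or enable failing */
            ret = -1;
            goto err;
        }

        printf("probe ok\n");
        return 0;

    err:
        free(res);  /* release everything acquired before the failure */
        return ret;
    }

    int main(void)
    {
        return demo_probe(1) ? 1 : 0;
    }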
diff --git a/sound/soc/codecs/rt1015.c b/sound/soc/codecs/rt1015.c
index 6d490e2dbc25..66eb55b4ffd4 100644
--- a/sound/soc/codecs/rt1015.c
+++ b/sound/soc/codecs/rt1015.c
@@ -664,7 +664,7 @@ static int rt1015_hw_params(struct snd_pcm_substream *substream,
snd_soc_component_update_bits(component, RT1015_TDM_MASTER,
RT1015_I2S_DL_MASK, val_len);
snd_soc_component_update_bits(component, RT1015_CLK2,
- RT1015_FS_PD_MASK, pre_div);
+ RT1015_FS_PD_MASK, pre_div << RT1015_FS_PD_SFT);
return 0;
}
@@ -857,6 +857,7 @@ struct snd_soc_dai_driver rt1015_dai[] = {
.rates = RT1015_STEREO_RATES,
.formats = RT1015_FORMATS,
},
+ .ops = &rt1015_aif_dai_ops,
}
};
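The first rt1015 fix is a classic mask/shift slip: snd_soc_component_update_bits() ANDs the value with the mask, so an unshifted divider lands outside RT1015_FS_PD_MASK and the field is silently zeroed; shifting by RT1015_FS_PD_SFT first puts it in place (the second fix simply wires up the DAI ops). A tiny stand-in showing the difference:

    #include <stdio.h>

    #define FS_PD_SFT   4
    #define FS_PD_MASK  (0x7 << FS_PD_SFT)

    /* read-modify-write, as snd_soc_component_update_bits() does */
    static unsigned int update_bits(unsigned int reg, unsigned int mask,
                                    unsigned int val)
    {
        return (reg & ~mask) | (val & mask);
    }

    int main(void)
    {
        unsigned int reg = 0xffff, pre_div = 3;

        /* unshifted: 0x3 & FS_PD_MASK == 0, the field is silently cleared */
        printf("wrong: 0x%x\n", update_bits(reg, FS_PD_MASK, pre_div));
        /* shifted into position, the divider actually lands in the field */
        printf("right: 0x%x\n",
               update_bits(reg, FS_PD_MASK, pre_div << FS_PD_SFT));
        return 0;
    }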
diff --git a/sound/soc/codecs/tas2562.c b/sound/soc/codecs/tas2562.c
index 729acd874c48..be52886a5edb 100644
--- a/sound/soc/codecs/tas2562.c
+++ b/sound/soc/codecs/tas2562.c
@@ -215,7 +215,8 @@ static int tas2562_set_bitwidth(struct tas2562_data *tas2562, int bitwidth)
break;
default:
- dev_info(tas2562->dev, "Not supported params format\n");
+ dev_info(tas2562->dev, "Unsupported bitwidth format\n");
+ return -EINVAL;
}
ret = snd_soc_component_update_bits(tas2562->component,
@@ -251,7 +252,7 @@ static int tas2562_hw_params(struct snd_pcm_substream *substream,
ret = tas2562_set_samplerate(tas2562, params_rate(params));
if (ret)
- dev_err(tas2562->dev, "set bitwidth failed, %d\n", ret);
+ dev_err(tas2562->dev, "set sample rate failed, %d\n", ret);
return ret;
}
diff --git a/sound/soc/fsl/fsl_sai.c b/sound/soc/fsl/fsl_sai.c
index 8c3ea7300972..9d436b0c5718 100644
--- a/sound/soc/fsl/fsl_sai.c
+++ b/sound/soc/fsl/fsl_sai.c
@@ -1020,12 +1020,24 @@ static int fsl_sai_probe(struct platform_device *pdev)
ret = devm_snd_soc_register_component(&pdev->dev, &fsl_component,
&fsl_sai_dai, 1);
if (ret)
- return ret;
+ goto err_pm_disable;
- if (sai->soc_data->use_imx_pcm)
- return imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
- else
- return devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (sai->soc_data->use_imx_pcm) {
+ ret = imx_pcm_dma_init(pdev, IMX_SAI_DMABUF_SIZE);
+ if (ret)
+ goto err_pm_disable;
+ } else {
+ ret = devm_snd_dmaengine_pcm_register(&pdev->dev, NULL, 0);
+ if (ret)
+ goto err_pm_disable;
+ }
+
+ return ret;
+
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
+
+ return ret;
}
static int fsl_sai_remove(struct platform_device *pdev)
diff --git a/sound/soc/intel/skylake/skl-debug.c b/sound/soc/intel/skylake/skl-debug.c
index 3466675f2678..a15aa2ffa681 100644
--- a/sound/soc/intel/skylake/skl-debug.c
+++ b/sound/soc/intel/skylake/skl-debug.c
@@ -34,8 +34,8 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
int i;
ssize_t ret = 0;
- for (i = 0; i < max_pin; i++)
- ret += snprintf(buf + size, MOD_BUF - size,
+ for (i = 0; i < max_pin; i++) {
+ ret += scnprintf(buf + size, MOD_BUF - size,
"%s %d\n\tModule %d\n\tInstance %d\n\t"
"In-used %s\n\tType %s\n"
"\tState %d\n\tIndex %d\n",
@@ -45,13 +45,15 @@ static ssize_t skl_print_pins(struct skl_module_pin *m_pin, char *buf,
m_pin[i].in_use ? "Used" : "Unused",
m_pin[i].is_dynamic ? "Dynamic" : "Static",
m_pin[i].pin_state, i);
+ size += ret;
+ }
return ret;
}
static ssize_t skl_print_fmt(struct skl_module_fmt *fmt, char *buf,
ssize_t size, bool direction)
{
- return snprintf(buf + size, MOD_BUF - size,
+ return scnprintf(buf + size, MOD_BUF - size,
"%s\n\tCh %d\n\tFreq %d\n\tBit depth %d\n\t"
"Valid bit depth %d\n\tCh config %#x\n\tInterleaving %d\n\t"
"Sample Type %d\n\tCh Map %#x\n",
@@ -75,16 +77,16 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
if (!buf)
return -ENOMEM;
- ret = snprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
+ ret = scnprintf(buf, MOD_BUF, "Module:\n\tUUID %pUL\n\tModule id %d\n"
"\tInstance id %d\n\tPvt_id %d\n", mconfig->guid,
mconfig->id.module_id, mconfig->id.instance_id,
mconfig->id.pvt_id);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Resources:\n\tCPC %#x\n\tIBS %#x\n\tOBS %#x\t\n",
res->cpc, res->ibs, res->obs);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Module data:\n\tCore %d\n\tIn queue %d\n\t"
"Out queue %d\n\tType %s\n",
mconfig->core_id, mconfig->max_in_queue,
@@ -94,38 +96,38 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
ret += skl_print_fmt(mconfig->in_fmt, buf, ret, true);
ret += skl_print_fmt(mconfig->out_fmt, buf, ret, false);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Fixup:\n\tParams %#x\n\tConverter %#x\n",
mconfig->params_fixup, mconfig->converter);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Module Gateway:\n\tType %#x\n\tVbus %#x\n\tHW conn %#x\n\tSlot %#x\n",
mconfig->dev_type, mconfig->vbus_id,
mconfig->hw_conn_type, mconfig->time_slot);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Pipeline:\n\tID %d\n\tPriority %d\n\tConn Type %d\n\t"
"Pages %#x\n", mconfig->pipe->ppl_id,
mconfig->pipe->pipe_priority, mconfig->pipe->conn_type,
mconfig->pipe->memory_pages);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tParams:\n\t\tHost DMA %d\n\t\tLink DMA %d\n",
mconfig->pipe->p_params->host_dma_id,
mconfig->pipe->p_params->link_dma_id);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tPCM params:\n\t\tCh %d\n\t\tFreq %d\n\t\tFormat %d\n",
mconfig->pipe->p_params->ch,
mconfig->pipe->p_params->s_freq,
mconfig->pipe->p_params->s_fmt);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tLink %#x\n\tStream %#x\n",
mconfig->pipe->p_params->linktype,
mconfig->pipe->p_params->stream);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"\tState %d\n\tPassthru %s\n",
mconfig->pipe->state,
mconfig->pipe->passthru ? "true" : "false");
@@ -135,7 +137,7 @@ static ssize_t module_read(struct file *file, char __user *user_buf,
ret += skl_print_pins(mconfig->m_out_pin, buf,
mconfig->max_out_queue, ret, false);
- ret += snprintf(buf + ret, MOD_BUF - ret,
+ ret += scnprintf(buf + ret, MOD_BUF - ret,
"Other:\n\tDomain %d\n\tHomogeneous Input %s\n\t"
"Homogeneous Output %s\n\tIn Queue Mask %d\n\t"
"Out Queue Mask %d\n\tDMA ID %d\n\tMem Pages %d\n\t"
@@ -191,7 +193,7 @@ static ssize_t fw_softreg_read(struct file *file, char __user *user_buf,
__ioread32_copy(d->fw_read_buff, fw_reg_addr, w0_stat_sz >> 2);
for (offset = 0; offset < FW_REG_SIZE; offset += 16) {
- ret += snprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
+ ret += scnprintf(tmp + ret, FW_REG_BUF - ret, "%#.4x: ", offset);
hex_dump_to_buffer(d->fw_read_buff + offset, 16, 16, 4,
tmp + ret, FW_REG_BUF - ret, 0);
ret += strlen(tmp + ret);
diff --git a/sound/soc/intel/skylake/skl-ssp-clk.c b/sound/soc/intel/skylake/skl-ssp-clk.c
index 1c0e5226cb5b..bd43885f3805 100644
--- a/sound/soc/intel/skylake/skl-ssp-clk.c
+++ b/sound/soc/intel/skylake/skl-ssp-clk.c
@@ -384,9 +384,11 @@ static int skl_clk_dev_probe(struct platform_device *pdev)
&clks[i], clk_pdata, i);
if (IS_ERR(data->clk[data->avail_clk_cnt])) {
- ret = PTR_ERR(data->clk[data->avail_clk_cnt++]);
+ ret = PTR_ERR(data->clk[data->avail_clk_cnt]);
goto err_unreg_skl_clk;
}
+
+ data->avail_clk_cnt++;
}
platform_set_drvdata(pdev, data);
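The counter fix: avail_clk_cnt now advances only after a clock registers successfully, so the err_unreg_skl_clk path, which presumably unwinds the first avail_clk_cnt entries, no longer touches the slot that just failed. The shape of that rule in miniature:

    #include <stdio.h>

    #define N 4

    static int register_clk(int i)
    {
        return i == 2 ? -1 : 0;  /* pretend the third one fails */
    }

    int main(void)
    {
        int avail = 0, i;

        for (i = 0; i < N; i++) {
            if (register_clk(i) < 0)
                goto err;
            avail++;  /* counts only fully registered entries */
        }
        return 0;

    err:
        while (avail--)  /* unwind exactly the ones that succeeded */
            printf("unregister clk %d\n", avail);
        return 1;
    }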
diff --git a/sound/soc/meson/g12a-tohdmitx.c b/sound/soc/meson/g12a-tohdmitx.c
index 9cfbd343a00c..8a0db28a6a40 100644
--- a/sound/soc/meson/g12a-tohdmitx.c
+++ b/sound/soc/meson/g12a-tohdmitx.c
@@ -8,6 +8,7 @@
#include <linux/module.h>
#include <sound/pcm_params.h>
#include <linux/regmap.h>
+#include <linux/reset.h>
#include <sound/soc.h>
#include <sound/soc-dai.h>
@@ -378,6 +379,11 @@ static int g12a_tohdmitx_probe(struct platform_device *pdev)
struct device *dev = &pdev->dev;
void __iomem *regs;
struct regmap *map;
+ int ret;
+
+ ret = device_reset(dev);
+ if (ret)
+ return ret;
regs = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(regs))
diff --git a/sound/soc/soc-component.c b/sound/soc/soc-component.c
index 14e175cdeeb8..785a0385cc7f 100644
--- a/sound/soc/soc-component.c
+++ b/sound/soc/soc-component.c
@@ -451,7 +451,7 @@ int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream)
int i, ret;
for_each_rtd_components(rtd, i, component) {
- if (component->driver->ioctl) {
+ if (component->driver->sync_stop) {
ret = component->driver->sync_stop(component,
substream);
if (ret < 0)
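A one-character class of bug worth naming: the guard tested ->ioctl while the body called ->sync_stop, so a component implementing only one of the two could oops or be silently skipped; the pointer you test must be the pointer you invoke. In miniature:

    #include <stdio.h>

    struct demo_ops {
        int (*ioctl)(void);
        int (*sync_stop)(void);
    };

    static int call_sync_stop(const struct demo_ops *ops)
    {
        if (ops->sync_stop)  /* test the same member that is invoked */
            return ops->sync_stop();
        return 0;
    }

    int main(void)
    {
        static const struct demo_ops ops = { NULL, NULL };

        return call_sync_stop(&ops);  /* safe no-op, no NULL deref */
    }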
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index 223cd045719e..392a1c5b15d3 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -299,7 +299,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
for_each_dpcm_be(fe, stream, dpcm)
dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
- snd_soc_dapm_stream_stop(fe, stream);
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index bc20ad9abf8b..9fb54e6fe254 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3441,8 +3441,17 @@ int snd_soc_dapm_get_enum_double(struct snd_kcontrol *kcontrol,
}
EXPORT_SYMBOL_GPL(snd_soc_dapm_get_enum_double);
-static int __snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol, int locked)
+/**
+ * snd_soc_dapm_put_enum_double - dapm enumerated double mixer set callback
+ * @kcontrol: mixer control
+ * @ucontrol: control element information
+ *
+ * Callback to set the value of a dapm enumerated double mixer control.
+ *
+ * Returns 0 for success.
+ */
+int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
{
struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
struct snd_soc_card *card = dapm->card;
@@ -3465,9 +3474,7 @@ static int __snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
mask |= e->mask << e->shift_r;
}
- if (!locked)
- mutex_lock_nested(&card->dapm_mutex,
- SND_SOC_DAPM_CLASS_RUNTIME);
+ mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
change = dapm_kcontrol_set_value(kcontrol, val);
@@ -3489,51 +3496,16 @@ static int __snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
card->update = NULL;
}
- if (!locked)
- mutex_unlock(&card->dapm_mutex);
+ mutex_unlock(&card->dapm_mutex);
if (ret > 0)
soc_dpcm_runtime_update(card);
return change;
}
-
-/**
- * snd_soc_dapm_put_enum_double - dapm enumerated double mixer set callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value of a dapm enumerated double mixer control.
- *
- * Returns 0 for success.
- */
-int snd_soc_dapm_put_enum_double(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- return __snd_soc_dapm_put_enum_double(kcontrol, ucontrol, 0);
-}
EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double);
/**
- * snd_soc_dapm_put_enum_double_locked - dapm enumerated double mixer set
- * callback
- * @kcontrol: mixer control
- * @ucontrol: control element information
- *
- * Callback to set the value of a dapm enumerated double mixer control.
- * Must acquire dapm_mutex before calling the function.
- *
- * Returns 0 for success.
- */
-int snd_soc_dapm_put_enum_double_locked(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- dapm_assert_locked(snd_soc_dapm_kcontrol_dapm(kcontrol));
- return __snd_soc_dapm_put_enum_double(kcontrol, ucontrol, 1);
-}
-EXPORT_SYMBOL_GPL(snd_soc_dapm_put_enum_double_locked);
-
-/**
* snd_soc_dapm_info_pin_switch - Info for a pin switch
*
* @kcontrol: mixer control
@@ -3916,9 +3888,6 @@ snd_soc_dai_link_event_pre_pmu(struct snd_soc_dapm_widget *w,
runtime->rate = params_rate(params);
out:
- if (ret < 0)
- kfree(runtime);
-
kfree(params);
return ret;
}
@@ -4803,7 +4772,7 @@ static void soc_dapm_shutdown_dapm(struct snd_soc_dapm_context *dapm)
continue;
if (w->power) {
dapm_seq_insert(w, &down_list, false);
- w->power = 0;
+ w->new_power = 0;
powerdown = 1;
}
}
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index ff1b7c7078e5..2c59b3688ca0 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -2006,7 +2006,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
soc_pcm_close(substream);
/* run the stream event for each BE */
- snd_soc_dapm_stream_stop(fe, stream);
+ dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
@@ -3171,16 +3171,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
unsigned long flags;
/* FE state */
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"[%s - %s]\n", fe->dai_link->name,
stream ? "Capture" : "Playback");
- offset += snprintf(buf + offset, size - offset, "State: %s\n",
+ offset += scnprintf(buf + offset, size - offset, "State: %s\n",
dpcm_state_string(fe->dpcm[stream].state));
if ((fe->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
(fe->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"Hardware Params: "
"Format = %s, Channels = %d, Rate = %d\n",
snd_pcm_format_name(params_format(params)),
@@ -3188,10 +3188,10 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
params_rate(params));
/* BEs state */
- offset += snprintf(buf + offset, size - offset, "Backends:\n");
+ offset += scnprintf(buf + offset, size - offset, "Backends:\n");
if (list_empty(&fe->dpcm[stream].be_clients)) {
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" No active DSP links\n");
goto out;
}
@@ -3201,16 +3201,16 @@ static ssize_t dpcm_show_state(struct snd_soc_pcm_runtime *fe,
struct snd_soc_pcm_runtime *be = dpcm->be;
params = &dpcm->hw_params;
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
"- %s\n", be->dai_link->name);
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" State: %s\n",
dpcm_state_string(be->dpcm[stream].state));
if ((be->dpcm[stream].state >= SND_SOC_DPCM_STATE_HW_PARAMS) &&
(be->dpcm[stream].state <= SND_SOC_DPCM_STATE_STOP))
- offset += snprintf(buf + offset, size - offset,
+ offset += scnprintf(buf + offset, size - offset,
" Hardware Params: "
"Format = %s, Channels = %d, Rate = %d\n",
snd_pcm_format_name(params_format(params)),
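
The snprintf()-to-scnprintf() conversions above all follow one rule worth spelling out: snprintf() returns the length that would have been written had the buffer been big enough, so once the buffer fills up an accumulated offset can exceed size, and the next "size - offset" underflows to a huge size_t. scnprintf() (the kernel helper from <linux/kernel.h>) returns the bytes actually stored, excluding the NUL, so the offset can never pass the end. A self-contained sketch of the failure mode, not the driver code itself:

    char buf[16];
    size_t size = sizeof(buf), offset = 0;

    /* with snprintf() the first call could return e.g. 40 for a long
     * string, making offset > size; "size - offset" then underflows and
     * the second call is handed an enormous bound -- possible overrun. */
    offset += scnprintf(buf + offset, size - offset, "State: %s\n", "RUNNING");
    offset += scnprintf(buf + offset, size - offset, "Backends: %d\n", 4);
    /* scnprintf() caps each return at the space left, so offset <= size - 1 */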
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d2ee6ad20e83..575da6aba807 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -2377,8 +2377,11 @@ static int soc_tplg_link_elems_load(struct soc_tplg *tplg,
}
ret = soc_tplg_link_config(tplg, _link);
- if (ret < 0)
+ if (ret < 0) {
+ if (!abi_match)
+ kfree(_link);
return ret;
+ }
/* offset by version-specific struct size and
* real priv data size
@@ -2542,7 +2545,7 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
{
struct snd_soc_tplg_manifest *manifest, *_manifest;
bool abi_match;
- int err;
+ int ret = 0;
if (tplg->pass != SOC_TPLG_PASS_MANIFEST)
return 0;
@@ -2555,19 +2558,19 @@ static int soc_tplg_manifest_load(struct soc_tplg *tplg,
_manifest = manifest;
} else {
abi_match = false;
- err = manifest_new_ver(tplg, manifest, &_manifest);
- if (err < 0)
- return err;
+ ret = manifest_new_ver(tplg, manifest, &_manifest);
+ if (ret < 0)
+ return ret;
}
/* pass control to component driver for optional further init */
if (tplg->comp && tplg->ops && tplg->ops->manifest)
- return tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
+ ret = tplg->ops->manifest(tplg->comp, tplg->index, _manifest);
if (!abi_match) /* free the duplicated one */
kfree(_manifest);
- return 0;
+ return ret;
}
/* validate header magic, size and type */
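
Both hunks above close the same kind of hole: once a topology blob has been duplicated to upgrade its ABI version, the duplicate must be freed on every exit path, and a callback's status is propagated rather than returned early past the kfree(). A hypothetical sketch of that shape (upgrade_abi() and ops->consume() are stand-ins, not the topology API):

    struct blob *dup = orig;
    bool abi_match = blob_abi_matches(orig);
    int ret = 0;

    if (!abi_match) {
            ret = upgrade_abi(orig, &dup);  /* allocates a converted copy */
            if (ret < 0)
                    return ret;             /* nothing was duplicated */
    }

    if (ops && ops->consume)
            ret = ops->consume(dup);        /* keep ret, fall through */

    if (!abi_match)
            kfree(dup);     /* freed on success and on callback failure alike */
    return ret;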
diff --git a/sound/soc/sof/intel/hda-codec.c b/sound/soc/sof/intel/hda-codec.c
index 9106ab8dac6f..ff45075ef720 100644
--- a/sound/soc/sof/intel/hda-codec.c
+++ b/sound/soc/sof/intel/hda-codec.c
@@ -174,8 +174,10 @@ void hda_codec_i915_display_power(struct snd_sof_dev *sdev, bool enable)
{
struct hdac_bus *bus = sof_to_bus(sdev);
- dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
- snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
+ if (HDA_IDISP_CODEC(bus->codec_mask)) {
+ dev_dbg(bus->dev, "Turning i915 HDAC power %d\n", enable);
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, enable);
+ }
}
EXPORT_SYMBOL_NS(hda_codec_i915_display_power, SND_SOC_SOF_HDA_AUDIO_CODEC_I915);
@@ -189,7 +191,8 @@ int hda_codec_i915_init(struct snd_sof_dev *sdev)
if (ret < 0)
return ret;
- hda_codec_i915_display_power(sdev, true);
+ /* codec_mask not yet known, power up for probe */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, true);
return 0;
}
@@ -200,7 +203,8 @@ int hda_codec_i915_exit(struct snd_sof_dev *sdev)
struct hdac_bus *bus = sof_to_bus(sdev);
int ret;
- hda_codec_i915_display_power(sdev, false);
+ /* power down unconditionally */
+ snd_hdac_display_power(bus, HDA_CODEC_IDX_CONTROLLER, false);
ret = snd_hdac_i915_exit(bus);
diff --git a/sound/soc/sof/intel/hda-dsp.c b/sound/soc/sof/intel/hda-dsp.c
index 4a4d318f97ff..0848b79967a9 100644
--- a/sound/soc/sof/intel/hda-dsp.c
+++ b/sound/soc/sof/intel/hda-dsp.c
@@ -428,6 +428,9 @@ static int hda_suspend(struct snd_sof_dev *sdev, bool runtime_suspend)
return ret;
}
+ /* display codec can be powered off after link reset */
+ hda_codec_i915_display_power(sdev, false);
+
return 0;
}
@@ -439,6 +442,9 @@ static int hda_resume(struct snd_sof_dev *sdev, bool runtime_resume)
#endif
int ret;
+ /* display codec must be powered before link reset */
+ hda_codec_i915_display_power(sdev, true);
+
/*
* clear TCSEL to clear playback on some HD Audio
* codecs. PCI TCSEL is defined in the Intel manuals.
@@ -482,6 +488,8 @@ int hda_dsp_resume(struct snd_sof_dev *sdev)
struct pci_dev *pci = to_pci_dev(sdev->dev);
if (sdev->s0_suspend) {
+ hda_codec_i915_display_power(sdev, true);
+
/* restore L1SEN bit */
if (hda->l1_support_changed)
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
@@ -531,6 +539,9 @@ int hda_dsp_suspend(struct snd_sof_dev *sdev)
int ret;
if (sdev->s0_suspend) {
+ /* we can't keep a wakeref to the display driver across suspend */
+ hda_codec_i915_display_power(sdev, false);
+
/* enable L1SEN to make sure the system can enter S0Ix */
hda->l1_support_changed =
snd_sof_dsp_update_bits(sdev, HDA_DSP_HDA_BAR,
diff --git a/sound/soc/sof/intel/hda.c b/sound/soc/sof/intel/hda.c
index 65b86dd044f1..25946a1c2822 100644
--- a/sound/soc/sof/intel/hda.c
+++ b/sound/soc/sof/intel/hda.c
@@ -286,6 +286,13 @@ static int hda_init(struct snd_sof_dev *sdev)
/* HDA base */
sdev->bar[HDA_DSP_HDA_BAR] = bus->remap_addr;
+ /* init i915 and HDMI codecs */
+ ret = hda_codec_i915_init(sdev);
+ if (ret < 0) {
+ dev_err(sdev->dev, "error: init i915 and HDMI codec failed\n");
+ return ret;
+ }
+
/* get controller capabilities */
ret = hda_dsp_ctrl_get_caps(sdev);
if (ret < 0)
@@ -353,15 +360,6 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
if (bus->ppcap)
dev_dbg(sdev->dev, "PP capability, will probe DSP later.\n");
-#if IS_ENABLED(CONFIG_SND_SOC_SOF_HDA)
- /* init i915 and HDMI codecs */
- ret = hda_codec_i915_init(sdev);
- if (ret < 0) {
- dev_err(sdev->dev, "error: init i915 and HDMI codec failed\n");
- return ret;
- }
-#endif
-
/* Init HDA controller after i915 init */
ret = hda_dsp_ctrl_init_chip(sdev, true);
if (ret < 0) {
@@ -381,7 +379,7 @@ static int hda_init_caps(struct snd_sof_dev *sdev)
hda_codec_probe_bus(sdev, hda_codec_use_common_hdmi);
if (!HDA_IDISP_CODEC(bus->codec_mask))
- hda_codec_i915_display_power(sdev, false);
+ hda_codec_i915_exit(sdev);
/*
* we are done probing so decrement link counts
@@ -611,6 +609,7 @@ free_streams:
iounmap(sdev->bar[HDA_DSP_BAR]);
hdac_bus_unmap:
iounmap(bus->remap_addr);
+ hda_codec_i915_exit(sdev);
err:
return ret;
}
diff --git a/sound/soc/sof/ipc.c b/sound/soc/sof/ipc.c
index b63fc529b456..78aa1da7c7a9 100644
--- a/sound/soc/sof/ipc.c
+++ b/sound/soc/sof/ipc.c
@@ -499,7 +499,7 @@ int snd_sof_ipc_stream_posn(struct snd_soc_component *scomp,
/* send IPC to the DSP */
err = sof_ipc_tx_message(sdev->ipc,
- stream.hdr.cmd, &stream, sizeof(stream), &posn,
+ stream.hdr.cmd, &stream, sizeof(stream), posn,
sizeof(*posn));
if (err < 0) {
dev_err(sdev->dev, "error: failed to get stream %d position\n",
diff --git a/sound/soc/stm/stm32_sai_sub.c b/sound/soc/stm/stm32_sai_sub.c
index 30bcd5d3a32a..10eb4b8e8e7e 100644
--- a/sound/soc/stm/stm32_sai_sub.c
+++ b/sound/soc/stm/stm32_sai_sub.c
@@ -1543,20 +1543,20 @@ static int stm32_sai_sub_probe(struct platform_device *pdev)
return ret;
}
- ret = devm_snd_soc_register_component(&pdev->dev, &stm32_component,
- &sai->cpu_dai_drv, 1);
+ ret = snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
+ if (ret) {
+ dev_err(&pdev->dev, "Could not register pcm dma\n");
+ return ret;
+ }
+
+ ret = snd_soc_register_component(&pdev->dev, &stm32_component,
+ &sai->cpu_dai_drv, 1);
if (ret)
return ret;
if (STM_SAI_PROTOCOL_IS_SPDIF(sai))
conf = &stm32_sai_pcm_config_spdif;
- ret = devm_snd_dmaengine_pcm_register(&pdev->dev, conf, 0);
- if (ret) {
- dev_err(&pdev->dev, "Could not register pcm dma\n");
- return ret;
- }
-
return 0;
}
@@ -1565,6 +1565,8 @@ static int stm32_sai_sub_remove(struct platform_device *pdev)
struct stm32_sai_sub_data *sai = dev_get_drvdata(&pdev->dev);
clk_unprepare(sai->pdata->pclk);
+ snd_dmaengine_pcm_unregister(&pdev->dev);
+ snd_soc_unregister_component(&pdev->dev);
return 0;
}
diff --git a/sound/soc/sunxi/sun8i-codec.c b/sound/soc/sunxi/sun8i-codec.c
index 55798bc8eae2..686561df8e13 100644
--- a/sound/soc/sunxi/sun8i-codec.c
+++ b/sound/soc/sunxi/sun8i-codec.c
@@ -80,6 +80,7 @@
#define SUN8I_SYS_SR_CTRL_AIF1_FS_MASK GENMASK(15, 12)
#define SUN8I_SYS_SR_CTRL_AIF2_FS_MASK GENMASK(11, 8)
+#define SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK GENMASK(3, 2)
#define SUN8I_AIF1CLK_CTRL_AIF1_WORD_SIZ_MASK GENMASK(5, 4)
#define SUN8I_AIF1CLK_CTRL_AIF1_LRCK_DIV_MASK GENMASK(8, 6)
#define SUN8I_AIF1CLK_CTRL_AIF1_BCLK_DIV_MASK GENMASK(12, 9)
@@ -241,7 +242,7 @@ static int sun8i_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
return -EINVAL;
}
regmap_update_bits(scodec->regmap, SUN8I_AIF1CLK_CTRL,
- BIT(SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT),
+ SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT_MASK,
value << SUN8I_AIF1CLK_CTRL_AIF1_DATA_FMT);
return 0;
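
The mask change is more than cosmetic: the data-format field spans bits 3:2, and regmap_update_bits() only clears the bits covered by the mask before OR-ing in the value, so BIT(2) left a stale bit 3 behind whenever the format changed. A short sketch of the semantics (register and field names hypothetical; GENMASK() is from <linux/bits.h>):

    /* regmap_update_bits(map, reg, mask, val) effectively does
     *     new = (old & ~mask) | (val & mask);
     * so the mask must cover the WHOLE field being rewritten.
     */
    #define FMT_SHIFT   2
    #define FMT_MASK    GENMASK(3, 2)       /* 0b1100: the full 2-bit field */

    regmap_update_bits(map, AIF1CLK_CTRL_REG, FMT_MASK, fmt << FMT_SHIFT);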
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 018b1ecb5404..a48313dfa967 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -151,8 +151,34 @@ static int uac_clock_selector_set_val(struct snd_usb_audio *chip, int selector_i
return ret;
}
+/*
+ * Assume the clock is valid if the clock source supports only a single
+ * sample rate, the terminal is connected directly to it (there is no clock
+ * selector) and the clock type is internal. This deals with some Denon DJ
+ * controllers that always report that the clock is invalid.
+ */
+static bool uac_clock_source_is_valid_quirk(struct snd_usb_audio *chip,
+ struct audioformat *fmt,
+ int source_id)
+{
+ if (fmt->protocol == UAC_VERSION_2) {
+ struct uac_clock_source_descriptor *cs_desc =
+ snd_usb_find_clock_source(chip->ctrl_intf, source_id);
+
+ if (!cs_desc)
+ return false;
+
+ return (fmt->nr_rates == 1 &&
+ (fmt->clock & 0xff) == cs_desc->bClockID &&
+ (cs_desc->bmAttributes & 0x3) !=
+ UAC_CLOCK_SOURCE_TYPE_EXT);
+ }
+
+ return false;
+}
+
static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
- int protocol,
+ struct audioformat *fmt,
int source_id)
{
int err;
@@ -160,7 +186,7 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
struct usb_device *dev = chip->dev;
u32 bmControls;
- if (protocol == UAC_VERSION_3) {
+ if (fmt->protocol == UAC_VERSION_3) {
struct uac3_clock_source_descriptor *cs_desc =
snd_usb_find_clock_source_v3(chip->ctrl_intf, source_id);
@@ -194,10 +220,14 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip,
return false;
}
- return data ? true : false;
+ if (data)
+ return true;
+ else
+ return uac_clock_source_is_valid_quirk(chip, fmt, source_id);
}
-static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
+static int __uac_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, int entity_id,
unsigned long *visited, bool validate)
{
struct uac_clock_source_descriptor *source;
@@ -217,7 +247,7 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
source = snd_usb_find_clock_source(chip->ctrl_intf, entity_id);
if (source) {
entity_id = source->bClockID;
- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_2,
+ if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) {
usb_audio_err(chip,
"clock source %d is not valid, cannot use\n",
@@ -248,8 +278,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
}
cur = ret;
- ret = __uac_clock_find_source(chip, selector->baCSourceID[ret - 1],
- visited, validate);
+ ret = __uac_clock_find_source(chip, fmt,
+ selector->baCSourceID[ret - 1],
+ visited, validate);
if (!validate || ret > 0 || !chip->autoclock)
return ret;
@@ -260,8 +291,9 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
if (i == cur)
continue;
- ret = __uac_clock_find_source(chip, selector->baCSourceID[i - 1],
- visited, true);
+ ret = __uac_clock_find_source(chip, fmt,
+ selector->baCSourceID[i - 1],
+ visited, true);
if (ret < 0)
continue;
@@ -281,14 +313,16 @@ static int __uac_clock_find_source(struct snd_usb_audio *chip, int entity_id,
/* FIXME: multipliers only act as pass-thru element for now */
multiplier = snd_usb_find_clock_multiplier(chip->ctrl_intf, entity_id);
if (multiplier)
- return __uac_clock_find_source(chip, multiplier->bCSourceID,
- visited, validate);
+ return __uac_clock_find_source(chip, fmt,
+ multiplier->bCSourceID,
+ visited, validate);
return -EINVAL;
}
-static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
- unsigned long *visited, bool validate)
+static int __uac3_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, int entity_id,
+ unsigned long *visited, bool validate)
{
struct uac3_clock_source_descriptor *source;
struct uac3_clock_selector_descriptor *selector;
@@ -307,7 +341,7 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
source = snd_usb_find_clock_source_v3(chip->ctrl_intf, entity_id);
if (source) {
entity_id = source->bClockID;
- if (validate && !uac_clock_source_is_valid(chip, UAC_VERSION_3,
+ if (validate && !uac_clock_source_is_valid(chip, fmt,
entity_id)) {
usb_audio_err(chip,
"clock source %d is not valid, cannot use\n",
@@ -338,7 +372,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
}
cur = ret;
- ret = __uac3_clock_find_source(chip, selector->baCSourceID[ret - 1],
+ ret = __uac3_clock_find_source(chip, fmt,
+ selector->baCSourceID[ret - 1],
visited, validate);
if (!validate || ret > 0 || !chip->autoclock)
return ret;
@@ -350,8 +385,9 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
if (i == cur)
continue;
- ret = __uac3_clock_find_source(chip, selector->baCSourceID[i - 1],
- visited, true);
+ ret = __uac3_clock_find_source(chip, fmt,
+ selector->baCSourceID[i - 1],
+ visited, true);
if (ret < 0)
continue;
@@ -372,7 +408,8 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
multiplier = snd_usb_find_clock_multiplier_v3(chip->ctrl_intf,
entity_id);
if (multiplier)
- return __uac3_clock_find_source(chip, multiplier->bCSourceID,
+ return __uac3_clock_find_source(chip, fmt,
+ multiplier->bCSourceID,
visited, validate);
return -EINVAL;
@@ -389,18 +426,18 @@ static int __uac3_clock_find_source(struct snd_usb_audio *chip, int entity_id,
*
* Returns the clock source UnitID (>=0) on success, or an error.
*/
-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
- int entity_id, bool validate)
+int snd_usb_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, bool validate)
{
DECLARE_BITMAP(visited, 256);
memset(visited, 0, sizeof(visited));
- switch (protocol) {
+ switch (fmt->protocol) {
case UAC_VERSION_2:
- return __uac_clock_find_source(chip, entity_id, visited,
+ return __uac_clock_find_source(chip, fmt, fmt->clock, visited,
validate);
case UAC_VERSION_3:
- return __uac3_clock_find_source(chip, entity_id, visited,
+ return __uac3_clock_find_source(chip, fmt, fmt->clock, visited,
validate);
default:
return -EINVAL;
@@ -501,8 +538,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
* automatic clock selection if the current clock is not
* valid.
*/
- clock = snd_usb_clock_find_source(chip, fmt->protocol,
- fmt->clock, true);
+ clock = snd_usb_clock_find_source(chip, fmt, true);
if (clock < 0) {
/* We did not find a valid clock, but that might be
* because the current sample rate does not match an
@@ -510,8 +546,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
* and we will do another validation after setting the
* rate.
*/
- clock = snd_usb_clock_find_source(chip, fmt->protocol,
- fmt->clock, false);
+ clock = snd_usb_clock_find_source(chip, fmt, false);
if (clock < 0)
return clock;
}
@@ -577,7 +612,7 @@ static int set_sample_rate_v2v3(struct snd_usb_audio *chip, int iface,
validation:
/* validate clock after rate change */
- if (!uac_clock_source_is_valid(chip, fmt->protocol, clock))
+ if (!uac_clock_source_is_valid(chip, fmt, clock))
return -ENXIO;
return 0;
}
diff --git a/sound/usb/clock.h b/sound/usb/clock.h
index 076e31b79ee0..68df0fbe09d0 100644
--- a/sound/usb/clock.h
+++ b/sound/usb/clock.h
@@ -6,7 +6,7 @@ int snd_usb_init_sample_rate(struct snd_usb_audio *chip, int iface,
struct usb_host_interface *alts,
struct audioformat *fmt, int rate);
-int snd_usb_clock_find_source(struct snd_usb_audio *chip, int protocol,
- int entity_id, bool validate);
+int snd_usb_clock_find_source(struct snd_usb_audio *chip,
+ struct audioformat *fmt, bool validate);
#endif /* __USBAUDIO_CLOCK_H */
diff --git a/sound/usb/format.c b/sound/usb/format.c
index 9260136e4c9b..9f5cb4ed3a0c 100644
--- a/sound/usb/format.c
+++ b/sound/usb/format.c
@@ -151,6 +151,19 @@ static u64 parse_audio_format_i_type(struct snd_usb_audio *chip,
return pcm_formats;
}
+static int set_fixed_rate(struct audioformat *fp, int rate, int rate_bits)
+{
+ kfree(fp->rate_table);
+ fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
+ if (!fp->rate_table)
+ return -ENOMEM;
+ fp->nr_rates = 1;
+ fp->rate_min = rate;
+ fp->rate_max = rate;
+ fp->rates = rate_bits;
+ fp->rate_table[0] = rate;
+ return 0;
+}
/*
* parse the format descriptor and stores the possible sample rates
@@ -223,6 +236,14 @@ static int parse_audio_format_rates_v1(struct snd_usb_audio *chip, struct audiof
fp->rate_min = combine_triple(&fmt[offset + 1]);
fp->rate_max = combine_triple(&fmt[offset + 4]);
}
+
+ /* Jabra Evolve 65 headset */
+ if (chip->usb_id == USB_ID(0x0b0e, 0x030b)) {
+ /* only 48kHz for playback while keeping 16kHz for capture */
+ if (fp->nr_rates != 1)
+ return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
+ }
+
return 0;
}
@@ -299,17 +320,7 @@ static int line6_parse_audio_format_rates_quirk(struct snd_usb_audio *chip,
case USB_ID(0x0e41, 0x4248): /* Line6 Helix >= fw 2.82 */
case USB_ID(0x0e41, 0x4249): /* Line6 Helix Rack >= fw 2.82 */
case USB_ID(0x0e41, 0x424a): /* Line6 Helix LT >= fw 2.82 */
- /* supported rates: 48Khz */
- kfree(fp->rate_table);
- fp->rate_table = kmalloc(sizeof(int), GFP_KERNEL);
- if (!fp->rate_table)
- return -ENOMEM;
- fp->nr_rates = 1;
- fp->rate_min = 48000;
- fp->rate_max = 48000;
- fp->rates = SNDRV_PCM_RATE_48000;
- fp->rate_table[0] = 48000;
- return 0;
+ return set_fixed_rate(fp, 48000, SNDRV_PCM_RATE_48000);
}
return -ENODEV;
@@ -325,8 +336,7 @@ static int parse_audio_format_rates_v2v3(struct snd_usb_audio *chip,
struct usb_device *dev = chip->dev;
unsigned char tmp[2], *data;
int nr_triplets, data_size, ret = 0, ret_l6;
- int clock = snd_usb_clock_find_source(chip, fp->protocol,
- fp->clock, false);
+ int clock = snd_usb_clock_find_source(chip, fp, false);
if (clock < 0) {
dev_err(&dev->dev,
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index d659fdb475e2..81b2db0edd5f 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -897,6 +897,15 @@ static int parse_term_proc_unit(struct mixer_build *state,
return 0;
}
+static int parse_term_effect_unit(struct mixer_build *state,
+ struct usb_audio_term *term,
+ void *p1, int id)
+{
+ term->type = UAC3_EFFECT_UNIT << 16; /* virtual type */
+ term->id = id;
+ return 0;
+}
+
static int parse_term_uac2_clock_source(struct mixer_build *state,
struct usb_audio_term *term,
void *p1, int id)
@@ -981,8 +990,7 @@ static int __check_input_term(struct mixer_build *state, int id,
UAC3_PROCESSING_UNIT);
case PTYPE(UAC_VERSION_2, UAC2_EFFECT_UNIT):
case PTYPE(UAC_VERSION_3, UAC3_EFFECT_UNIT):
- return parse_term_proc_unit(state, term, p1, id,
- UAC3_EFFECT_UNIT);
+ return parse_term_effect_unit(state, term, p1, id);
case PTYPE(UAC_VERSION_1, UAC1_EXTENSION_UNIT):
case PTYPE(UAC_VERSION_2, UAC2_EXTENSION_UNIT_V2):
case PTYPE(UAC_VERSION_3, UAC3_EXTENSION_UNIT):
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 3a5242e383b2..7f558f4b4520 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1440,6 +1440,7 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
case USB_ID(0x1395, 0x740a): /* Sennheiser DECT */
case USB_ID(0x1901, 0x0191): /* GE B850V3 CP2114 audio interface */
case USB_ID(0x21b4, 0x0081): /* AudioQuest DragonFly */
+ case USB_ID(0x2912, 0x30c8): /* Audioengine D1 */
return true;
}
diff --git a/tools/arch/arm64/include/uapi/asm/kvm.h b/tools/arch/arm64/include/uapi/asm/kvm.h
index 820e5751ada7..ba85bb23f060 100644
--- a/tools/arch/arm64/include/uapi/asm/kvm.h
+++ b/tools/arch/arm64/include/uapi/asm/kvm.h
@@ -220,10 +220,18 @@ struct kvm_vcpu_events {
#define KVM_REG_ARM_PTIMER_CVAL ARM64_SYS_REG(3, 3, 14, 2, 2)
#define KVM_REG_ARM_PTIMER_CNT ARM64_SYS_REG(3, 3, 14, 0, 1)
-/* EL0 Virtual Timer Registers */
+/*
+ * EL0 Virtual Timer Registers
+ *
+ * WARNING:
+ * KVM_REG_ARM_TIMER_CVAL and KVM_REG_ARM_TIMER_CNT are not defined
+ * with the appropriate register encodings. Their values have been
+ * accidentally swapped. As this is established API, the definitions here
+ * must be used, rather than ones derived from the encodings.
+ */
#define KVM_REG_ARM_TIMER_CTL ARM64_SYS_REG(3, 3, 14, 3, 1)
-#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
#define KVM_REG_ARM_TIMER_CVAL ARM64_SYS_REG(3, 3, 14, 0, 2)
+#define KVM_REG_ARM_TIMER_CNT ARM64_SYS_REG(3, 3, 14, 3, 2)
/* KVM-as-firmware specific pseudo-registers */
#define KVM_REG_ARM_FW (0x0014 << KVM_REG_ARM_COPROC_SHIFT)
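
Since the swap shipped long ago, the literals above are the ABI and must not be recomputed from the architectural encodings. A hedged sketch of how a VMM reads the virtual counter through KVM_GET_ONE_REG (assumes an already-created vCPU fd; error handling elided):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>      /* pulls in asm/kvm.h for the reg IDs */

    uint64_t read_vtimer_cnt(int vcpu_fd)
    {
            uint64_t val = 0;
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_ARM_TIMER_CNT,  /* use the UAPI constant */
                    .addr = (uintptr_t)&val,
            };

            ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
            return val;
    }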
diff --git a/tools/arch/arm64/include/uapi/asm/unistd.h b/tools/arch/arm64/include/uapi/asm/unistd.h
index 4703d218663a..f83a70e07df8 100644
--- a/tools/arch/arm64/include/uapi/asm/unistd.h
+++ b/tools/arch/arm64/include/uapi/asm/unistd.h
@@ -19,5 +19,6 @@
#define __ARCH_WANT_NEW_STAT
#define __ARCH_WANT_SET_GET_RLIMIT
#define __ARCH_WANT_TIME32_SYSCALLS
+#define __ARCH_WANT_SYS_CLONE3
#include <asm-generic/unistd.h>
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index e9b62498fe75..f3327cb56edf 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -220,6 +220,7 @@
#define X86_FEATURE_ZEN ( 7*32+28) /* "" CPU is AMD family 0x17 (Zen) */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
+#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
@@ -357,6 +358,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
diff --git a/tools/arch/x86/include/asm/disabled-features.h b/tools/arch/x86/include/asm/disabled-features.h
index 8e1d0bb46361..4ea8584682f9 100644
--- a/tools/arch/x86/include/asm/disabled-features.h
+++ b/tools/arch/x86/include/asm/disabled-features.h
@@ -10,12 +10,6 @@
* cpu_feature_enabled().
*/
-#ifdef CONFIG_X86_INTEL_MPX
-# define DISABLE_MPX 0
-#else
-# define DISABLE_MPX (1<<(X86_FEATURE_MPX & 31))
-#endif
-
#ifdef CONFIG_X86_SMAP
# define DISABLE_SMAP 0
#else
@@ -74,7 +68,7 @@
#define DISABLED_MASK6 0
#define DISABLED_MASK7 (DISABLE_PTI)
#define DISABLED_MASK8 0
-#define DISABLED_MASK9 (DISABLE_MPX|DISABLE_SMAP)
+#define DISABLED_MASK9 (DISABLE_SMAP)
#define DISABLED_MASK10 0
#define DISABLED_MASK11 0
#define DISABLED_MASK12 0
diff --git a/tools/arch/x86/include/asm/msr-index.h b/tools/arch/x86/include/asm/msr-index.h
index ebe1685e92dd..d5e517d1c3dd 100644
--- a/tools/arch/x86/include/asm/msr-index.h
+++ b/tools/arch/x86/include/asm/msr-index.h
@@ -512,6 +512,8 @@
#define MSR_K7_HWCR 0xc0010015
#define MSR_K7_HWCR_SMMLOCK_BIT 0
#define MSR_K7_HWCR_SMMLOCK BIT_ULL(MSR_K7_HWCR_SMMLOCK_BIT)
+#define MSR_K7_HWCR_IRPERF_EN_BIT 30
+#define MSR_K7_HWCR_IRPERF_EN BIT_ULL(MSR_K7_HWCR_IRPERF_EN_BIT)
#define MSR_K7_FID_VID_CTL 0xc0010041
#define MSR_K7_FID_VID_STATUS 0xc0010042
diff --git a/tools/arch/x86/include/uapi/asm/kvm.h b/tools/arch/x86/include/uapi/asm/kvm.h
index 503d3f42da16..3f3f780c8c65 100644
--- a/tools/arch/x86/include/uapi/asm/kvm.h
+++ b/tools/arch/x86/include/uapi/asm/kvm.h
@@ -390,6 +390,7 @@ struct kvm_sync_regs {
#define KVM_STATE_NESTED_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_RUN_PENDING 0x00000002
#define KVM_STATE_NESTED_EVMCS 0x00000004
+#define KVM_STATE_NESTED_MTF_PENDING 0x00000008
#define KVM_STATE_NESTED_SMM_GUEST_MODE 0x00000001
#define KVM_STATE_NESTED_SMM_VMXON 0x00000002
diff --git a/tools/bootconfig/include/linux/memblock.h b/tools/bootconfig/include/linux/memblock.h
new file mode 100644
index 000000000000..7862f217d85d
--- /dev/null
+++ b/tools/bootconfig/include/linux/memblock.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _XBC_LINUX_MEMBLOCK_H
+#define _XBC_LINUX_MEMBLOCK_H
+
+#include <stdlib.h>
+
+#define __pa(addr) (addr)
+#define SMP_CACHE_BYTES 0
+#define memblock_alloc(size, align) malloc(size)
+#define memblock_free(paddr, size) free(paddr)
+
+#endif
diff --git a/tools/bootconfig/include/linux/printk.h b/tools/bootconfig/include/linux/printk.h
index 017bcd6912a5..036e667596eb 100644
--- a/tools/bootconfig/include/linux/printk.h
+++ b/tools/bootconfig/include/linux/printk.h
@@ -4,10 +4,7 @@
#include <stdio.h>
-/* controllable printf */
-extern int pr_output;
-#define printk(fmt, ...) \
- (pr_output ? printf(fmt, __VA_ARGS__) : 0)
+#define printk(fmt, ...) printf(fmt, ##__VA_ARGS__)
#define pr_err printk
#define pr_warn printk
diff --git a/tools/bootconfig/main.c b/tools/bootconfig/main.c
index 47f488458328..a9b97814d1a9 100644
--- a/tools/bootconfig/main.c
+++ b/tools/bootconfig/main.c
@@ -14,8 +14,6 @@
#include <linux/kernel.h>
#include <linux/bootconfig.h>
-int pr_output = 1;
-
static int xbc_show_array(struct xbc_node *node)
{
const char *val;
@@ -131,16 +129,27 @@ int load_xbc_from_initrd(int fd, char **buf)
struct stat stat;
int ret;
u32 size = 0, csum = 0, rcsum;
+ char magic[BOOTCONFIG_MAGIC_LEN];
ret = fstat(fd, &stat);
if (ret < 0)
return -errno;
- if (stat.st_size < 8)
+ if (stat.st_size < 8 + BOOTCONFIG_MAGIC_LEN)
return 0;
- if (lseek(fd, -8, SEEK_END) < 0) {
- printf("Failed to lseek: %d\n", -errno);
+ if (lseek(fd, -BOOTCONFIG_MAGIC_LEN, SEEK_END) < 0) {
+ pr_err("Failed to lseek: %d\n", -errno);
+ return -errno;
+ }
+ if (read(fd, magic, BOOTCONFIG_MAGIC_LEN) < 0)
+ return -errno;
+ /* Check the bootconfig magic bytes */
+ if (memcmp(magic, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN) != 0)
+ return 0;
+
+ if (lseek(fd, -(8 + BOOTCONFIG_MAGIC_LEN), SEEK_END) < 0) {
+ pr_err("Failed to lseek: %d\n", -errno);
return -errno;
}
@@ -150,12 +159,15 @@ int load_xbc_from_initrd(int fd, char **buf)
if (read(fd, &csum, sizeof(u32)) < 0)
return -errno;
- /* Wrong size, maybe no boot config here */
- if (stat.st_size < size + 8)
- return 0;
+ /* Wrong size error */
+ if (stat.st_size < size + 8 + BOOTCONFIG_MAGIC_LEN) {
+ pr_err("bootconfig size is too big\n");
+ return -E2BIG;
+ }
- if (lseek(fd, stat.st_size - 8 - size, SEEK_SET) < 0) {
- printf("Failed to lseek: %d\n", -errno);
+ if (lseek(fd, stat.st_size - (size + 8 + BOOTCONFIG_MAGIC_LEN),
+ SEEK_SET) < 0) {
+ pr_err("Failed to lseek: %d\n", -errno);
return -errno;
}
@@ -163,17 +175,17 @@ int load_xbc_from_initrd(int fd, char **buf)
if (ret < 0)
return ret;
- /* Wrong Checksum, maybe no boot config here */
+ /* Wrong Checksum */
rcsum = checksum((unsigned char *)*buf, size);
if (csum != rcsum) {
- printf("checksum error: %d != %d\n", csum, rcsum);
- return 0;
+ pr_err("checksum error: %d != %d\n", csum, rcsum);
+ return -EINVAL;
}
ret = xbc_init(*buf);
- /* Wrong data, maybe no boot config here */
+ /* Wrong data */
if (ret < 0)
- return 0;
+ return ret;
return size;
}
@@ -185,13 +197,13 @@ int show_xbc(const char *path)
fd = open(path, O_RDONLY);
if (fd < 0) {
- printf("Failed to open initrd %s: %d\n", path, fd);
+ pr_err("Failed to open initrd %s: %d\n", path, fd);
return -errno;
}
ret = load_xbc_from_initrd(fd, &buf);
if (ret < 0)
- printf("Failed to load a boot config from initrd: %d\n", ret);
+ pr_err("Failed to load a boot config from initrd: %d\n", ret);
else
xbc_show_compact_tree();
@@ -209,24 +221,19 @@ int delete_xbc(const char *path)
fd = open(path, O_RDWR);
if (fd < 0) {
- printf("Failed to open initrd %s: %d\n", path, fd);
+ pr_err("Failed to open initrd %s: %d\n", path, fd);
return -errno;
}
- /*
- * Suppress error messages in xbc_init() because it can be just a
- * data which concidentally matches the size and checksum footer.
- */
- pr_output = 0;
size = load_xbc_from_initrd(fd, &buf);
- pr_output = 1;
if (size < 0) {
ret = size;
- printf("Failed to load a boot config from initrd: %d\n", ret);
+ pr_err("Failed to load a boot config from initrd: %d\n", ret);
} else if (size > 0) {
ret = fstat(fd, &stat);
if (!ret)
- ret = ftruncate(fd, stat.st_size - size - 8);
+ ret = ftruncate(fd, stat.st_size
+ - size - 8 - BOOTCONFIG_MAGIC_LEN);
if (ret)
ret = -errno;
} /* Ignore if there is no boot config in initrd */
@@ -245,7 +252,7 @@ int apply_xbc(const char *path, const char *xbc_path)
ret = load_xbc_file(xbc_path, &buf);
if (ret < 0) {
- printf("Failed to load %s : %d\n", xbc_path, ret);
+ pr_err("Failed to load %s : %d\n", xbc_path, ret);
return ret;
}
size = strlen(buf) + 1;
@@ -262,7 +269,7 @@ int apply_xbc(const char *path, const char *xbc_path)
/* Check the data format */
ret = xbc_init(buf);
if (ret < 0) {
- printf("Failed to parse %s: %d\n", xbc_path, ret);
+ pr_err("Failed to parse %s: %d\n", xbc_path, ret);
free(data);
free(buf);
return ret;
@@ -279,20 +286,26 @@ int apply_xbc(const char *path, const char *xbc_path)
/* Remove old boot config if exists */
ret = delete_xbc(path);
if (ret < 0) {
- printf("Failed to delete previous boot config: %d\n", ret);
+ pr_err("Failed to delete previous boot config: %d\n", ret);
return ret;
}
/* Apply new one */
fd = open(path, O_RDWR | O_APPEND);
if (fd < 0) {
- printf("Failed to open %s: %d\n", path, fd);
+ pr_err("Failed to open %s: %d\n", path, fd);
return fd;
}
/* TODO: Ensure the @path is initramfs/initrd image */
ret = write(fd, data, size + 8);
if (ret < 0) {
- printf("Failed to apply a boot config: %d\n", ret);
+ pr_err("Failed to apply a boot config: %d\n", ret);
+ return ret;
+ }
+ /* Write a magic word of the bootconfig */
+ ret = write(fd, BOOTCONFIG_MAGIC, BOOTCONFIG_MAGIC_LEN);
+ if (ret < 0) {
+ pr_err("Failed to apply a boot config magic: %d\n", ret);
return ret;
}
close(fd);
@@ -334,12 +347,12 @@ int main(int argc, char **argv)
}
if (apply && delete) {
- printf("Error: You can not specify both -a and -d at once.\n");
+ pr_err("Error: You can not specify both -a and -d at once.\n");
return usage();
}
if (optind >= argc) {
- printf("Error: No initrd is specified.\n");
+ pr_err("Error: No initrd is specified.\n");
return usage();
}
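
After this change an initrd carrying a bootconfig always ends in a fixed footer: the config text including its NUL terminator, a 32-bit size, a 32-bit checksum, and the 12-byte magic string last. That is also why the test script below expects an applied config to grow the file by bconf_size + 9 + 12 bytes (data plus NUL, two u32s, magic). A hedged userspace sketch of the probe, assuming the magic is "#BOOTCONFIG\n" and the footer fields are native-endian, as the tool itself assumes:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    #define MAGIC      "#BOOTCONFIG\n"      /* assumed BOOTCONFIG_MAGIC */
    #define MAGIC_LEN  12

    /* returns the config size, or 0 if no bootconfig footer is present */
    static long probe_bootconfig(FILE *fp)
    {
            char magic[MAGIC_LEN];
            uint32_t size, csum;

            if (fseek(fp, -MAGIC_LEN, SEEK_END) != 0 ||
                fread(magic, 1, MAGIC_LEN, fp) != MAGIC_LEN ||
                memcmp(magic, MAGIC, MAGIC_LEN) != 0)
                    return 0;       /* cheap check first: magic before csum */

            fseek(fp, -(MAGIC_LEN + 8), SEEK_END);
            fread(&size, sizeof(size), 1, fp);
            fread(&csum, sizeof(csum), 1, fp);
            /* csum would next be checked against a recomputed sum */
            return size;    /* data begins size + 8 + MAGIC_LEN from EOF */
    }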
diff --git a/tools/bootconfig/samples/bad-mixed-kv1.bconf b/tools/bootconfig/samples/bad-mixed-kv1.bconf
new file mode 100644
index 000000000000..1761547dd05c
--- /dev/null
+++ b/tools/bootconfig/samples/bad-mixed-kv1.bconf
@@ -0,0 +1,3 @@
+# value -> subkey pattern
+key = value
+key.subkey = another-value
diff --git a/tools/bootconfig/samples/bad-mixed-kv2.bconf b/tools/bootconfig/samples/bad-mixed-kv2.bconf
new file mode 100644
index 000000000000..6b32e0c3878c
--- /dev/null
+++ b/tools/bootconfig/samples/bad-mixed-kv2.bconf
@@ -0,0 +1,3 @@
+# subkey -> value pattern
+key.subkey = value
+key = another-value
diff --git a/tools/bootconfig/samples/bad-samekey.bconf b/tools/bootconfig/samples/bad-samekey.bconf
new file mode 100644
index 000000000000..e8d983a4563c
--- /dev/null
+++ b/tools/bootconfig/samples/bad-samekey.bconf
@@ -0,0 +1,6 @@
+# Same key value is not allowed
+key {
+ foo = value
+ bar = value2
+}
+key.foo = value
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index 87725e8723f8..1411f4c3454f 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -9,7 +9,7 @@ TEMPCONF=`mktemp temp-XXXX.bconf`
NG=0
cleanup() {
- rm -f $INITRD $TEMPCONF
+ rm -f $INITRD $TEMPCONF $OUTFILE
exit $NG
}
@@ -49,7 +49,7 @@ xpass $BOOTCONF -a $TEMPCONF $INITRD
new_size=$(stat -c %s $INITRD)
echo "File size check"
-xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9)
+xpass test $new_size -eq $(expr $bconf_size + $initrd_size + 9 + 12)
echo "Apply command repeat test"
xpass $BOOTCONF -a $TEMPCONF $INITRD
@@ -64,6 +64,14 @@ echo "File size check"
new_size=$(stat -c %s $INITRD)
xpass test $new_size -eq $initrd_size
+echo "No error messge while applying"
+OUTFILE=`mktemp tempout-XXXX`
+dd if=/dev/zero of=$INITRD bs=4096 count=1
+printf " \0\0\0 \0\0\0" >> $INITRD
+$BOOTCONF -a $TEMPCONF $INITRD > $OUTFILE 2>&1
+xfail grep -i "failed" $OUTFILE
+xfail grep -i "error" $OUTFILE
+
echo "Max node number check"
echo -n > $TEMPCONF
@@ -87,6 +95,19 @@ truncate -s 32764 $TEMPCONF
echo "\"" >> $TEMPCONF # add 2 bytes + terminal ('\"\n\0')
xpass $BOOTCONF -a $TEMPCONF $INITRD
+echo "Adding same-key values"
+cat > $TEMPCONF << EOF
+key = bar, baz
+key += qux
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xpass grep -q "bar" $OUTFILE
+xpass grep -q "baz" $OUTFILE
+xpass grep -q "qux" $OUTFILE
+
echo "=== expected failure cases ==="
for i in samples/bad-* ; do
xfail $BOOTCONF -a $i $INITRD
diff --git a/tools/include/uapi/asm-generic/mman-common.h b/tools/include/uapi/asm-generic/mman-common.h
index c160a5354eb6..f94f65d429be 100644
--- a/tools/include/uapi/asm-generic/mman-common.h
+++ b/tools/include/uapi/asm-generic/mman-common.h
@@ -11,6 +11,8 @@
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
+/* 0x10 reserved for arch-specific use */
+/* 0x20 reserved for arch-specific use */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
diff --git a/tools/include/uapi/asm-generic/unistd.h b/tools/include/uapi/asm-generic/unistd.h
index 1fc8faa6e973..3a3201e4618e 100644
--- a/tools/include/uapi/asm-generic/unistd.h
+++ b/tools/include/uapi/asm-generic/unistd.h
@@ -851,8 +851,13 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+#define __NR_openat2 437
+__SYSCALL(__NR_openat2, sys_openat2)
+#define __NR_pidfd_getfd 438
+__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+
#undef __NR_syscalls
-#define __NR_syscalls 436
+#define __NR_syscalls 439
/*
* 32 bit systems traditionally used different
diff --git a/tools/include/uapi/drm/i915_drm.h b/tools/include/uapi/drm/i915_drm.h
index 5400d7e057f1..829c0a48577f 100644
--- a/tools/include/uapi/drm/i915_drm.h
+++ b/tools/include/uapi/drm/i915_drm.h
@@ -395,6 +395,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -793,6 +794,37 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+struct drm_i915_gem_mmap_offset {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * It is mandatory that one of the MMAP_OFFSET types
+ * (GTT, WC, WB, UC, etc) be included.
+ */
+ __u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+
+ /*
+ * Zero-terminated chain of extensions.
+ *
+ * No current extensions defined; mbz.
+ */
+ __u64 extensions;
+};
+
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
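
The new ioctl generalizes the old GTT-only fake-offset flow to WC/WB/UC mappings, reusing the MMAP_GTT ioctl number as the define above shows. A hedged usage sketch, assuming the updated UAPI header is on the include path and a GEM handle already exists; error handling is elided:

    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/mman.h>
    #include <drm/i915_drm.h>

    void *map_gem_wc(int drm_fd, uint32_t handle, size_t size)
    {
            struct drm_i915_gem_mmap_offset arg;

            memset(&arg, 0, sizeof(arg));   /* extensions chain must be zero */
            arg.handle = handle;
            arg.flags  = I915_MMAP_OFFSET_WC;

            if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
                    return NULL;

            /* the fake offset is consumed by mmap() on the DRM fd itself */
            return mmap(NULL, size, PROT_READ | PROT_WRITE,
                        MAP_SHARED, drm_fd, arg.offset);
    }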
diff --git a/tools/include/uapi/linux/bpf.h b/tools/include/uapi/linux/bpf.h
index f1d74a2bd234..22f235260a3a 100644
--- a/tools/include/uapi/linux/bpf.h
+++ b/tools/include/uapi/linux/bpf.h
@@ -1045,9 +1045,9 @@ union bpf_attr {
* supports redirection to the egress interface, and accepts no
* flag at all.
*
- * The same effect can be attained with the more generic
- * **bpf_redirect_map**\ (), which requires specific maps to be
- * used but offers better performance.
+ * The same effect can also be attained with the more generic
+ * **bpf_redirect_map**\ (), which uses a BPF map to store the
+ * redirect target instead of providing it directly to the helper.
* Return
* For XDP, the helper returns **XDP_REDIRECT** on success or
* **XDP_ABORTED** on error. For other program types, the values
@@ -1611,13 +1611,11 @@ union bpf_attr {
* the caller. Any higher bits in the *flags* argument must be
* unset.
*
- * When used to redirect packets to net devices, this helper
- * provides a high performance increase over **bpf_redirect**\ ().
- * This is due to various implementation details of the underlying
- * mechanisms, one of which is the fact that **bpf_redirect_map**\
- * () tries to send packet as a "bulk" to the device.
+ * See also bpf_redirect(), which only supports redirecting to an
+ * ifindex, but doesn't require a map to do so.
* Return
- * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
+ * **XDP_REDIRECT** on success, or the value of the two lower bits
+ * of the *flags* argument on error.
*
* int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
diff --git a/tools/include/uapi/linux/fcntl.h b/tools/include/uapi/linux/fcntl.h
index 1f97b33c840e..ca88b7bce553 100644
--- a/tools/include/uapi/linux/fcntl.h
+++ b/tools/include/uapi/linux/fcntl.h
@@ -3,6 +3,7 @@
#define _UAPI_LINUX_FCNTL_H
#include <asm/fcntl.h>
+#include <linux/openat2.h>
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
@@ -100,5 +101,4 @@
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
-
#endif /* _UAPI_LINUX_FCNTL_H */
diff --git a/tools/include/uapi/linux/fscrypt.h b/tools/include/uapi/linux/fscrypt.h
index 1beb174ad950..0d8a6f47711c 100644
--- a/tools/include/uapi/linux/fscrypt.h
+++ b/tools/include/uapi/linux/fscrypt.h
@@ -8,6 +8,7 @@
#ifndef _UAPI_LINUX_FSCRYPT_H
#define _UAPI_LINUX_FSCRYPT_H
+#include <linux/ioctl.h>
#include <linux/types.h>
/* Encryption policy flags */
@@ -109,11 +110,22 @@ struct fscrypt_key_specifier {
} u;
};
+/*
+ * Payload of Linux keyring key of type "fscrypt-provisioning", referenced by
+ * fscrypt_add_key_arg::key_id as an alternative to fscrypt_add_key_arg::raw.
+ */
+struct fscrypt_provisioning_key_payload {
+ __u32 type;
+ __u32 __reserved;
+ __u8 raw[];
+};
+
/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */
struct fscrypt_add_key_arg {
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
- __u32 __reserved[9];
+ __u32 key_id;
+ __u32 __reserved[8];
__u8 raw[];
};
diff --git a/tools/include/uapi/linux/kvm.h b/tools/include/uapi/linux/kvm.h
index f0a16b4adbbd..4b95f9a31a2f 100644
--- a/tools/include/uapi/linux/kvm.h
+++ b/tools/include/uapi/linux/kvm.h
@@ -1009,6 +1009,7 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176
#define KVM_CAP_ARM_NISV_TO_USER 177
#define KVM_CAP_ARM_INJECT_EXT_DABT 178
+#define KVM_CAP_S390_VCPU_RESETS 179
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1473,6 +1474,10 @@ struct kvm_enc_region {
/* Available with KVM_CAP_ARM_SVE */
#define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int)
+/* Available with KVM_CAP_S390_VCPU_RESETS */
+#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
+#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
diff --git a/tools/include/uapi/linux/openat2.h b/tools/include/uapi/linux/openat2.h
new file mode 100644
index 000000000000..58b1eb711360
--- /dev/null
+++ b/tools/include/uapi/linux/openat2.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_OPENAT2_H
+#define _UAPI_LINUX_OPENAT2_H
+
+#include <linux/types.h>
+
+/*
+ * Arguments for how openat2(2) should open the target path. If only @flags and
+ * @mode are non-zero, then openat2(2) operates very similarly to openat(2).
+ *
+ * However, unlike openat(2), unknown or invalid bits in @flags result in
+ * -EINVAL rather than being silently ignored. @mode must be zero unless one of
+ * {O_CREAT, O_TMPFILE} is set.
+ *
+ * @flags: O_* flags.
+ * @mode: O_CREAT/O_TMPFILE file mode.
+ * @resolve: RESOLVE_* flags.
+ */
+struct open_how {
+ __u64 flags;
+ __u64 mode;
+ __u64 resolve;
+};
+
+/* how->resolve flags for openat2(2). */
+#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings
+ (includes bind-mounts). */
+#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style
+ "magic-links". */
+#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks
(implies RESOLVE_NO_MAGICLINKS) */
+#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like
+ "..", symlinks, and absolute
+ paths which escape the dirfd. */
+#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
+ be scoped inside the dirfd
+ (similar to chroot(2)). */
+
+#endif /* _UAPI_LINUX_OPENAT2_H */
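
No libc wrapper exists for openat2(2) at this point, so callers go through syscall(2) using the __NR_openat2 number added to unistd.h earlier in this patch. A hedged sketch that opens a path strictly inside a directory, refusing escapes via "..", absolute symlinks, or mount-point crossings:

    #include <fcntl.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/openat2.h>

    int open_in_root(int dirfd, const char *path)
    {
            struct open_how how;

            memset(&how, 0, sizeof(how));   /* unknown fields must be zero */
            how.flags   = O_RDONLY;
            how.resolve = RESOLVE_IN_ROOT | RESOLVE_NO_XDEV;

            return syscall(__NR_openat2, dirfd, path, &how, sizeof(how));
    }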
diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h
index 7da1b37b27aa..07b4f8131e36 100644
--- a/tools/include/uapi/linux/prctl.h
+++ b/tools/include/uapi/linux/prctl.h
@@ -234,4 +234,8 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* Control reclaim behavior when allocating memory */
+#define PR_SET_IO_FLUSHER 57
+#define PR_GET_IO_FLUSHER 58
+
#endif /* _LINUX_PRCTL_H */
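
PR_SET_IO_FLUSHER is aimed at userspace daemons on the block-I/O cleaning path (FUSE servers, iSCSI/NBD daemons and the like): once set, the task's allocations avoid recursing back into filesystem and I/O reclaim, which could otherwise deadlock against the very I/O the daemon must complete. A hedged sketch; the unused prctl() arguments must be zero, and setting the flag is expected to require CAP_SYS_RESOURCE:

    #include <stdio.h>
    #include <sys/prctl.h>

    static int become_io_flusher(void)
    {
            if (prctl(PR_SET_IO_FLUSHER, 1, 0, 0, 0) < 0) {
                    perror("PR_SET_IO_FLUSHER");
                    return -1;
            }
            /* read back: 1 when the flag is set, 0 when clear */
            return prctl(PR_GET_IO_FLUSHER, 0, 0, 0, 0);
    }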
diff --git a/tools/include/uapi/linux/sched.h b/tools/include/uapi/linux/sched.h
index 4a0217832464..2e3bc22c6f20 100644
--- a/tools/include/uapi/linux/sched.h
+++ b/tools/include/uapi/linux/sched.h
@@ -36,6 +36,12 @@
/* Flags for the clone3() syscall. */
#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+/*
+ * These cloning flags intersect with CSIGNAL, so they can be used only with
+ * the unshare and clone3 syscalls:
+ */
+#define CLONE_NEWTIME 0x00000080 /* New time namespace */
+
#ifndef __ASSEMBLY__
/**
* struct clone_args - arguments for the clone3 syscall
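
Because CLONE_NEWTIME overlaps the CSIGNAL bits, plain clone()/fork() can never take it; only unshare(2) and clone3(2) accept the flag, and with unshare() the new time namespace applies to children created afterwards. A hedged sketch:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>

    #ifndef CLONE_NEWTIME
    #define CLONE_NEWTIME 0x00000080        /* from the header above */
    #endif

    int main(void)
    {
            /* needs CAP_SYS_ADMIN over the user namespace */
            if (unshare(CLONE_NEWTIME) < 0) {
                    perror("unshare(CLONE_NEWTIME)");
                    return 1;
            }
            /* clock offsets can now be written to
             * /proc/self/timens_offsets; children forked after this
             * point start in the new time namespace. */
            return 0;
    }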
diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h
index df1153cea0b7..535a7229e1d9 100644
--- a/tools/include/uapi/sound/asound.h
+++ b/tools/include/uapi/sound/asound.h
@@ -26,7 +26,9 @@
#if defined(__KERNEL__) || defined(__linux__)
#include <linux/types.h>
+#include <asm/byteorder.h>
#else
+#include <endian.h>
#include <sys/ioctl.h>
#endif
@@ -154,7 +156,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 14)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 15)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -301,7 +303,9 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
-
+#if (__BITS_PER_LONG == 32 && defined(__USE_TIME_BITS64)) || defined __KERNEL__
+#define __SND_STRUCT_TIME64
+#endif
typedef int __bitwise snd_pcm_state_t;
#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
@@ -317,8 +321,17 @@ typedef int __bitwise snd_pcm_state_t;
enum {
SNDRV_PCM_MMAP_OFFSET_DATA = 0x00000000,
- SNDRV_PCM_MMAP_OFFSET_STATUS = 0x80000000,
- SNDRV_PCM_MMAP_OFFSET_CONTROL = 0x81000000,
+ SNDRV_PCM_MMAP_OFFSET_STATUS_OLD = 0x80000000,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD = 0x81000000,
+ SNDRV_PCM_MMAP_OFFSET_STATUS_NEW = 0x82000000,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW = 0x83000000,
+#ifdef __SND_STRUCT_TIME64
+ SNDRV_PCM_MMAP_OFFSET_STATUS = SNDRV_PCM_MMAP_OFFSET_STATUS_NEW,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL = SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW,
+#else
+ SNDRV_PCM_MMAP_OFFSET_STATUS = SNDRV_PCM_MMAP_OFFSET_STATUS_OLD,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL = SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD,
+#endif
};
union snd_pcm_sync_id {
@@ -456,8 +469,13 @@ enum {
SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED
};
+#ifndef __KERNEL__
+/* explicit padding avoids incompatibility between i386 and x86-64 */
+typedef struct { unsigned char pad[sizeof(time_t) - sizeof(int)]; } __time_pad;
+
struct snd_pcm_status {
snd_pcm_state_t state; /* stream state */
+ __time_pad pad1; /* align to timespec */
struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
struct timespec tstamp; /* reference timestamp */
snd_pcm_uframes_t appl_ptr; /* appl ptr */
@@ -473,17 +491,48 @@ struct snd_pcm_status {
__u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
unsigned char reserved[52-2*sizeof(struct timespec)]; /* must be filled with zero */
};
+#endif
+
+/*
+ * For mmap operations, we need the 64-bit layout, both for compat mode,
+ * and for y2038 compatibility. For 64-bit applications, the two definitions
+ * are identical, so we keep the traditional version.
+ */
+#ifdef __SND_STRUCT_TIME64
+#define __snd_pcm_mmap_status64 snd_pcm_mmap_status
+#define __snd_pcm_mmap_control64 snd_pcm_mmap_control
+#define __snd_pcm_sync_ptr64 snd_pcm_sync_ptr
+#ifdef __KERNEL__
+#define __snd_timespec64 __kernel_timespec
+#else
+#define __snd_timespec64 timespec
+#endif
+struct __snd_timespec {
+ __s32 tv_sec;
+ __s32 tv_nsec;
+};
+#else
+#define __snd_pcm_mmap_status snd_pcm_mmap_status
+#define __snd_pcm_mmap_control snd_pcm_mmap_control
+#define __snd_pcm_sync_ptr snd_pcm_sync_ptr
+#define __snd_timespec timespec
+struct __snd_timespec64 {
+ __s64 tv_sec;
+ __s64 tv_nsec;
+};
-struct snd_pcm_mmap_status {
+#endif
+
+struct __snd_pcm_mmap_status {
snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
int pad1; /* Needed for 64 bit alignment */
snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
- struct timespec tstamp; /* Timestamp */
+ struct __snd_timespec tstamp; /* Timestamp */
snd_pcm_state_t suspended_state; /* RO: suspended stream state */
- struct timespec audio_tstamp; /* from sample counter or wall clock */
+ struct __snd_timespec audio_tstamp; /* from sample counter or wall clock */
};
-struct snd_pcm_mmap_control {
+struct __snd_pcm_mmap_control {
snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
};
@@ -492,14 +541,59 @@ struct snd_pcm_mmap_control {
#define SNDRV_PCM_SYNC_PTR_APPL (1<<1) /* get appl_ptr from driver (r/w op) */
#define SNDRV_PCM_SYNC_PTR_AVAIL_MIN (1<<2) /* get avail_min from driver */
-struct snd_pcm_sync_ptr {
+struct __snd_pcm_sync_ptr {
unsigned int flags;
union {
- struct snd_pcm_mmap_status status;
+ struct __snd_pcm_mmap_status status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct __snd_pcm_mmap_control control;
+ unsigned char reserved[64];
+ } c;
+};
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
+typedef char __pad_before_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
+typedef char __pad_after_uframe[0];
+#endif
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
+typedef char __pad_before_uframe[0];
+typedef char __pad_after_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
+#endif
+
+struct __snd_pcm_mmap_status64 {
+ snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
+ __u32 pad1; /* Needed for 64 bit alignment */
+ __pad_before_uframe __pad1;
+ snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
+ __pad_after_uframe __pad2;
+ struct __snd_timespec64 tstamp; /* Timestamp */
+ snd_pcm_state_t suspended_state;/* RO: suspended stream state */
+ __u32 pad3; /* Needed for 64 bit alignment */
+ struct __snd_timespec64 audio_tstamp; /* sample counter or wall clock */
+};
+
+struct __snd_pcm_mmap_control64 {
+ __pad_before_uframe __pad1;
+ snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
+ __pad_before_uframe __pad2;
+
+ __pad_before_uframe __pad3;
+ snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
+ __pad_after_uframe __pad4;
+};
+
+struct __snd_pcm_sync_ptr64 {
+ __u32 flags;
+ __u32 pad1;
+ union {
+ struct __snd_pcm_mmap_status64 status;
unsigned char reserved[64];
} s;
union {
- struct snd_pcm_mmap_control control;
+ struct __snd_pcm_mmap_control64 control;
unsigned char reserved[64];
} c;
};
@@ -584,6 +678,8 @@ enum {
#define SNDRV_PCM_IOCTL_STATUS _IOR('A', 0x20, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
+#define __SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct __snd_pcm_sync_ptr)
+#define __SNDRV_PCM_IOCTL_SYNC_PTR64 _IOWR('A', 0x23, struct __snd_pcm_sync_ptr64)
#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
#define SNDRV_PCM_IOCTL_STATUS_EXT _IOWR('A', 0x24, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
@@ -614,7 +710,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 0)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -648,13 +744,16 @@ struct snd_rawmidi_params {
unsigned char reserved[16]; /* reserved for future use */
};
+#ifndef __KERNEL__
struct snd_rawmidi_status {
int stream;
+ __time_pad pad1;
struct timespec tstamp; /* Timestamp */
size_t avail; /* available bytes */
size_t xruns; /* count of overruns since last status (in bytes) */
unsigned char reserved[16]; /* reserved for future use */
};
+#endif
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
@@ -667,7 +766,7 @@ struct snd_rawmidi_status {
* Timer section - /dev/snd/timer
*/
-#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 6)
+#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
enum {
SNDRV_TIMER_CLASS_NONE = -1,
@@ -761,6 +860,7 @@ struct snd_timer_params {
unsigned char reserved[60]; /* reserved */
};
+#ifndef __KERNEL__
struct snd_timer_status {
struct timespec tstamp; /* Timestamp - last update */
unsigned int resolution; /* current period resolution in ns */
@@ -769,10 +869,11 @@ struct snd_timer_status {
unsigned int queue; /* used queue size */
unsigned char reserved[64]; /* reserved */
};
+#endif
#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
-#define SNDRV_TIMER_IOCTL_TREAD _IOW('T', 0x02, int)
+#define SNDRV_TIMER_IOCTL_TREAD_OLD _IOW('T', 0x02, int)
#define SNDRV_TIMER_IOCTL_GINFO _IOWR('T', 0x03, struct snd_timer_ginfo)
#define SNDRV_TIMER_IOCTL_GPARAMS _IOW('T', 0x04, struct snd_timer_gparams)
#define SNDRV_TIMER_IOCTL_GSTATUS _IOWR('T', 0x05, struct snd_timer_gstatus)
@@ -785,6 +886,15 @@ struct snd_timer_status {
#define SNDRV_TIMER_IOCTL_STOP _IO('T', 0xa1)
#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
+#define SNDRV_TIMER_IOCTL_TREAD64 _IOW('T', 0xa4, int)
+
+#if __BITS_PER_LONG == 64
+#define SNDRV_TIMER_IOCTL_TREAD SNDRV_TIMER_IOCTL_TREAD_OLD
+#else
+#define SNDRV_TIMER_IOCTL_TREAD ((sizeof(__kernel_long_t) >= sizeof(time_t)) ? \
+ SNDRV_TIMER_IOCTL_TREAD_OLD : \
+ SNDRV_TIMER_IOCTL_TREAD64)
+#endif
struct snd_timer_read {
unsigned int resolution;
@@ -810,11 +920,15 @@ enum {
SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
};
+#ifndef __KERNEL__
struct snd_timer_tread {
int event;
+ __time_pad pad1;
struct timespec tstamp;
unsigned int val;
+ __time_pad pad2;
};
+#endif
/****************************************************************************
* *
@@ -822,7 +936,7 @@ struct snd_timer_tread {
* *
****************************************************************************/
-#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
+#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 8)
struct snd_ctl_card_info {
int card; /* card number */
@@ -860,7 +974,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-#define SNDRV_CTL_ELEM_ACCESS_TIMESTAMP (1<<3) /* when was control changed */
+// (1 << 3) is unused.
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
@@ -926,11 +1040,7 @@ struct snd_ctl_elem_info {
} enumerated;
unsigned char reserved[128];
} value;
- union {
- unsigned short d[4]; /* dimensions */
- unsigned short *d_ptr; /* indirect - obsoleted */
- } dimen;
- unsigned char reserved[64-4*sizeof(unsigned short)];
+ unsigned char reserved[64];
};
struct snd_ctl_elem_value {
@@ -955,8 +1065,7 @@ struct snd_ctl_elem_value {
} bytes;
struct snd_aes_iec958 iec958;
} value; /* RO */
- struct timespec tstamp;
- unsigned char reserved[128-sizeof(struct timespec)];
+ unsigned char reserved[128];
};
struct snd_ctl_tlv {
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 514b1a524abb..7469c7dcc15e 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -24,6 +24,7 @@
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
+#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
@@ -1283,7 +1284,7 @@ static size_t bpf_map_mmap_sz(const struct bpf_map *map)
static char *internal_map_name(struct bpf_object *obj,
enum libbpf_map_type type)
{
- char map_name[BPF_OBJ_NAME_LEN];
+ char map_name[BPF_OBJ_NAME_LEN], *p;
const char *sfx = libbpf_type_to_btf_name[type];
int sfx_len = max((size_t)7, strlen(sfx));
int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
@@ -1292,6 +1293,11 @@ static char *internal_map_name(struct bpf_object *obj,
snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
sfx_len, libbpf_type_to_btf_name[type]);
+ /* sanitise map name to characters allowed by kernel */
+ for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
+ if (!isalnum(*p) && *p != '_' && *p != '.')
+ *p = '_';
+
return strdup(map_name);
}
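A minimal standalone sketch of the sanitisation rule added above (names are illustrative): the kernel accepts only alphanumerics, '_' and '.' in map names, so an object called "my-prog" would otherwise yield an invalid internal map name:

	char name[] = "my-prog.bss";	/* hypothetical snprintf() result */
	char *p;

	for (p = name; *p; p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';	/* yields "my_prog.bss" */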
diff --git a/tools/perf/Documentation/perf-config.txt b/tools/perf/Documentation/perf-config.txt
index c4dd23c4b478..8ead55593984 100644
--- a/tools/perf/Documentation/perf-config.txt
+++ b/tools/perf/Documentation/perf-config.txt
@@ -239,7 +239,6 @@ buildid.*::
set buildid.dir to /dev/null. The default is $HOME/.debug
annotate.*::
- These options work only for TUI.
These options control the display of addresses, jump functions, and
source code in the lines of assembly code from a specific program.
@@ -269,6 +268,8 @@ annotate.*::
│ mov (%rdi),%rdx
│ return n;
+ This option works with the tui and stdio2 browsers.
+
annotate.use_offset::
Based on the first address of a loaded function, an offset can be used.
Instead of using original addresses of assembly code,
@@ -287,6 +288,8 @@ annotate.*::
368:│ mov 0x8(%r14),%rdi
+ This option works with the tui and stdio2 browsers.
+
annotate.jump_arrows::
There can be jump instructions among the assembly code.
Depending on the boolean value of jump_arrows,
@@ -306,6 +309,8 @@ annotate.*::
│1330: mov %r15,%r10
│1333: cmp %r15,%r14
+ This option works with the tui browser.
+
annotate.show_linenr::
When showing source code, if this option is 'true',
line numbers are printed as below.
@@ -325,6 +330,8 @@ annotate.*::
│ array++;
│ }
+ This option works with the tui and stdio2 browsers.
+
annotate.show_nr_jumps::
Let's see a part of assembly code.
@@ -335,6 +342,8 @@ annotate.*::
│1 1382: movb $0x1,-0x270(%rbp)
+ This option works with the tui and stdio2 browsers.
+
annotate.show_total_period::
To compare two records on an instruction basis, with this option
provided, display the total number of samples that belong to a line
@@ -348,11 +357,30 @@ annotate.*::
99.93 │ mov %eax,%eax
+ This option works with the tui, stdio2, and stdio browsers.
+
+ annotate.show_nr_samples::
+ By default, perf annotate shows the percentage of samples. This
+ option can be used to print the absolute number of samples instead.
+ For example, when set to 'false':
+
+ Percent│
+ 74.03 │ mov %fs:0x28,%rax
+
+ When set to 'true':
+
+ Samples│
+ 6 │ mov %fs:0x28,%rax
+
+ This option works with the tui, stdio2, and stdio browsers.
+
annotate.offset_level::
Default is '1', meaning only jump targets will have their offsets shown right
beside the instruction. When set to '2', 'call' instructions will also have
their offsets shown; '3' or higher will show offsets for all instructions.
+ This option works with the tui and stdio2 browsers.
+
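For instance, the annotate.* options documented above could be set in a ~/.perfconfig like this (values purely illustrative):

	[annotate]
		hide_src_code = false
		use_offset = true
		show_nr_samples = true
		offset_level = 2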
hist.*::
hist.percentage::
This option controls the way the overhead of filtered entries is calculated -
@@ -490,6 +518,12 @@ top.*::
column by default.
The default is 'true'.
+ top.call-graph::
+ This is identical to 'call-graph.record-mode', except it is
+ applicable only to the 'top' subcommand. This option only sets up
+ the unwind method; to make 'perf top' actually use it,
+ the command-line option -g must be specified.
+
man.*::
man.viewer::
This option can assign a tool to view manual pages when 'help'
@@ -517,6 +551,16 @@ record.*::
But if this option is 'no-cache', it will not update the build-id cache.
'skip' skips post-processing and does not update the cache.
+ record.call-graph::
+ This is identical to 'call-graph.record-mode', except it is
+ applicable only to the 'record' subcommand. This option only sets up
+ the unwind method; to make 'perf record' actually use it,
+ the command-line option -g must be specified.
+
+ record.aio::
+ Use 'n' control blocks in asynchronous (POSIX AIO) trace writing
+ mode ('n' default: 1, max: 4).
+
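An illustrative combination of the record.call-graph option with the -g switch it requires ('dwarf' is one of the call-graph.record-mode values; the workload name is hypothetical):

	[record]
		call-graph = dwarf

	$ perf record -g ./workload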
diff.*::
diff.order::
This option sets the number of columns to sort the result.
@@ -566,6 +610,11 @@ trace.*::
"libbeauty", the default, to use the same argument beautifiers used in the
strace-like sys_enter+sys_exit lines.
+ftrace.*::
+ ftrace.tracer::
+ Can be used to select the default tracer. Possible values are
+ 'function' and 'function_graph'.
+
llvm.*::
llvm.clang-path::
Path to clang. If omitted, it is searched for in $PATH.
@@ -610,6 +659,29 @@ scripts.*::
The script gets the same options passed as a full perf script,
in particular -i perfdata file, --cpu, --tid
+convert.*::
+
+ convert.queue-size::
+ Limit the size of the ordered_events queue, so we can control
+ the allocation size for perf data files without properly
+ finished round events.
+
+intel-pt.*::
+
+ intel-pt.cache-divisor::
+
+ intel-pt.mispred-all::
+ If set, the Intel PT decoder will set the mispred flag on all
+ branches.
+
+auxtrace.*::
+
+ auxtrace.dumpdir::
+ s390 only. The directory in which to save the auxiliary trace buffer
+ can be changed with this option, e.g. auxtrace.dumpdir=/tmp.
+ If the directory does not exist or has the wrong file type,
+ the current directory is used.
+
SEE ALSO
--------
linkperf:perf[1]
diff --git a/tools/perf/arch/arm/util/cs-etm.c b/tools/perf/arch/arm/util/cs-etm.c
index 2898cfdf8fe1..941f814820b8 100644
--- a/tools/perf/arch/arm/util/cs-etm.c
+++ b/tools/perf/arch/arm/util/cs-etm.c
@@ -858,21 +858,6 @@ static void cs_etm_recording_free(struct auxtrace_record *itr)
free(ptr);
}
-static int cs_etm_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct cs_etm_recording *ptr =
- container_of(itr, struct cs_etm_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(ptr->evlist, evsel) {
- if (evsel->core.attr.type == ptr->cs_etm_pmu->type)
- return perf_evlist__enable_event_idx(ptr->evlist,
- evsel, idx);
- }
-
- return -EINVAL;
-}
-
struct auxtrace_record *cs_etm_record_init(int *err)
{
struct perf_pmu *cs_etm_pmu;
@@ -892,6 +877,7 @@ struct auxtrace_record *cs_etm_record_init(int *err)
}
ptr->cs_etm_pmu = cs_etm_pmu;
+ ptr->itr.pmu = cs_etm_pmu;
ptr->itr.parse_snapshot_options = cs_etm_parse_snapshot_options;
ptr->itr.recording_options = cs_etm_recording_options;
ptr->itr.info_priv_size = cs_etm_info_priv_size;
@@ -901,7 +887,7 @@ struct auxtrace_record *cs_etm_record_init(int *err)
ptr->itr.snapshot_finish = cs_etm_snapshot_finish;
ptr->itr.reference = cs_etm_reference;
ptr->itr.free = cs_etm_recording_free;
- ptr->itr.read_finish = cs_etm_read_finish;
+ ptr->itr.read_finish = auxtrace_record__read_finish;
*err = 0;
return &ptr->itr;
diff --git a/tools/perf/arch/arm64/util/arm-spe.c b/tools/perf/arch/arm64/util/arm-spe.c
index eba6541ec0f1..8d6821d9c3f6 100644
--- a/tools/perf/arch/arm64/util/arm-spe.c
+++ b/tools/perf/arch/arm64/util/arm-spe.c
@@ -158,20 +158,6 @@ static void arm_spe_recording_free(struct auxtrace_record *itr)
free(sper);
}
-static int arm_spe_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct arm_spe_recording *sper =
- container_of(itr, struct arm_spe_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(sper->evlist, evsel) {
- if (evsel->core.attr.type == sper->arm_spe_pmu->type)
- return perf_evlist__enable_event_idx(sper->evlist,
- evsel, idx);
- }
- return -EINVAL;
-}
-
struct auxtrace_record *arm_spe_recording_init(int *err,
struct perf_pmu *arm_spe_pmu)
{
@@ -189,12 +175,13 @@ struct auxtrace_record *arm_spe_recording_init(int *err,
}
sper->arm_spe_pmu = arm_spe_pmu;
+ sper->itr.pmu = arm_spe_pmu;
sper->itr.recording_options = arm_spe_recording_options;
sper->itr.info_priv_size = arm_spe_info_priv_size;
sper->itr.info_fill = arm_spe_info_fill;
sper->itr.free = arm_spe_recording_free;
sper->itr.reference = arm_spe_reference;
- sper->itr.read_finish = arm_spe_read_finish;
+ sper->itr.read_finish = auxtrace_record__read_finish;
sper->itr.alignment = 0;
*err = 0;
diff --git a/tools/perf/arch/arm64/util/header.c b/tools/perf/arch/arm64/util/header.c
index a32e4b72a98f..d730666ab95d 100644
--- a/tools/perf/arch/arm64/util/header.c
+++ b/tools/perf/arch/arm64/util/header.c
@@ -1,8 +1,10 @@
#include <stdio.h>
#include <stdlib.h>
#include <perf/cpumap.h>
+#include <util/cpumap.h>
#include <internal/cpumap.h>
#include <api/fs/fs.h>
+#include <errno.h>
#include "debug.h"
#include "header.h"
@@ -12,26 +14,21 @@
#define MIDR_VARIANT_SHIFT 20
#define MIDR_VARIANT_MASK (0xf << MIDR_VARIANT_SHIFT)
-char *get_cpuid_str(struct perf_pmu *pmu)
+static int _get_cpuid(char *buf, size_t sz, struct perf_cpu_map *cpus)
{
- char *buf = NULL;
- char path[PATH_MAX];
const char *sysfs = sysfs__mountpoint();
- int cpu;
u64 midr = 0;
- struct perf_cpu_map *cpus;
- FILE *file;
+ int cpu;
- if (!sysfs || !pmu || !pmu->cpus)
- return NULL;
+ if (!sysfs || sz < MIDR_SIZE)
+ return EINVAL;
- buf = malloc(MIDR_SIZE);
- if (!buf)
- return NULL;
+ cpus = perf_cpu_map__get(cpus);
- /* read midr from list of cpus mapped to this pmu */
- cpus = perf_cpu_map__get(pmu->cpus);
for (cpu = 0; cpu < perf_cpu_map__nr(cpus); cpu++) {
+ char path[PATH_MAX];
+ FILE *file;
+
scnprintf(path, PATH_MAX, "%s/devices/system/cpu/cpu%d"MIDR,
sysfs, cpus->map[cpu]);
@@ -57,12 +54,48 @@ char *get_cpuid_str(struct perf_pmu *pmu)
break;
}
- if (!midr) {
+ perf_cpu_map__put(cpus);
+
+ if (!midr)
+ return EINVAL;
+
+ return 0;
+}
+
+int get_cpuid(char *buf, size_t sz)
+{
+ struct perf_cpu_map *cpus = perf_cpu_map__new(NULL);
+ int ret;
+
+ if (!cpus)
+ return EINVAL;
+
+ ret = _get_cpuid(buf, sz, cpus);
+
+ perf_cpu_map__put(cpus);
+
+ return ret;
+}
+
+char *get_cpuid_str(struct perf_pmu *pmu)
+{
+ char *buf = NULL;
+ int res;
+
+ if (!pmu || !pmu->cpus)
+ return NULL;
+
+ buf = malloc(MIDR_SIZE);
+ if (!buf)
+ return NULL;
+
+ /* read midr from list of cpus mapped to this pmu */
+ res = _get_cpuid(buf, MIDR_SIZE, pmu->cpus);
+ if (res) {
pr_err("failed to get cpuid string for PMU %s\n", pmu->name);
free(buf);
buf = NULL;
}
- perf_cpu_map__put(cpus);
return buf;
}
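A hedged usage sketch of the new arm64 get_cpuid() above; the only assumption is a caller-supplied buffer of at least MIDR_SIZE bytes:

	char midr[64];	/* assumed >= MIDR_SIZE */

	if (!get_cpuid(midr, sizeof(midr)))
		pr_debug("cpuid: %s\n", midr);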
diff --git a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
index 43f736ed47f2..35b61bfc1b1a 100644
--- a/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
+++ b/tools/perf/arch/powerpc/entry/syscalls/syscall.tbl
@@ -517,3 +517,5 @@
433 common fspick sys_fspick
434 common pidfd_open sys_pidfd_open
435 nospu clone3 ppc_clone3
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
diff --git a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
index c29976eca4a8..44d510bc9b78 100644
--- a/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
+++ b/tools/perf/arch/x86/entry/syscalls/syscall_64.tbl
@@ -357,6 +357,8 @@
433 common fspick __x64_sys_fspick
434 common pidfd_open __x64_sys_pidfd_open
435 common clone3 __x64_sys_clone3/ptregs
+437 common openat2 __x64_sys_openat2
+438 common pidfd_getfd __x64_sys_pidfd_getfd
#
# x32-specific system call numbers start at 512 to avoid cache impact
diff --git a/tools/perf/arch/x86/util/intel-bts.c b/tools/perf/arch/x86/util/intel-bts.c
index 27d9e214d068..26cee1052179 100644
--- a/tools/perf/arch/x86/util/intel-bts.c
+++ b/tools/perf/arch/x86/util/intel-bts.c
@@ -413,20 +413,6 @@ out_err:
return err;
}
-static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct intel_bts_recording *btsr =
- container_of(itr, struct intel_bts_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(btsr->evlist, evsel) {
- if (evsel->core.attr.type == btsr->intel_bts_pmu->type)
- return perf_evlist__enable_event_idx(btsr->evlist,
- evsel, idx);
- }
- return -EINVAL;
-}
-
struct auxtrace_record *intel_bts_recording_init(int *err)
{
struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
@@ -447,6 +433,7 @@ struct auxtrace_record *intel_bts_recording_init(int *err)
}
btsr->intel_bts_pmu = intel_bts_pmu;
+ btsr->itr.pmu = intel_bts_pmu;
btsr->itr.recording_options = intel_bts_recording_options;
btsr->itr.info_priv_size = intel_bts_info_priv_size;
btsr->itr.info_fill = intel_bts_info_fill;
@@ -456,7 +443,7 @@ struct auxtrace_record *intel_bts_recording_init(int *err)
btsr->itr.find_snapshot = intel_bts_find_snapshot;
btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
btsr->itr.reference = intel_bts_reference;
- btsr->itr.read_finish = intel_bts_read_finish;
+ btsr->itr.read_finish = auxtrace_record__read_finish;
btsr->itr.alignment = sizeof(struct branch);
return &btsr->itr;
}
diff --git a/tools/perf/arch/x86/util/intel-pt.c b/tools/perf/arch/x86/util/intel-pt.c
index 20df442fdf36..7eea4fd7ce58 100644
--- a/tools/perf/arch/x86/util/intel-pt.c
+++ b/tools/perf/arch/x86/util/intel-pt.c
@@ -1166,20 +1166,6 @@ static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
return rdtsc();
}
-static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
-{
- struct intel_pt_recording *ptr =
- container_of(itr, struct intel_pt_recording, itr);
- struct evsel *evsel;
-
- evlist__for_each_entry(ptr->evlist, evsel) {
- if (evsel->core.attr.type == ptr->intel_pt_pmu->type)
- return perf_evlist__enable_event_idx(ptr->evlist, evsel,
- idx);
- }
- return -EINVAL;
-}
-
struct auxtrace_record *intel_pt_recording_init(int *err)
{
struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
@@ -1200,6 +1186,7 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
}
ptr->intel_pt_pmu = intel_pt_pmu;
+ ptr->itr.pmu = intel_pt_pmu;
ptr->itr.recording_options = intel_pt_recording_options;
ptr->itr.info_priv_size = intel_pt_info_priv_size;
ptr->itr.info_fill = intel_pt_info_fill;
@@ -1209,7 +1196,7 @@ struct auxtrace_record *intel_pt_recording_init(int *err)
ptr->itr.find_snapshot = intel_pt_find_snapshot;
ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
ptr->itr.reference = intel_pt_reference;
- ptr->itr.read_finish = intel_pt_read_finish;
+ ptr->itr.read_finish = auxtrace_record__read_finish;
/*
* Decoding starts at a PSB packet. Minimum PSB period is 2K so 4K
* should give at least 1 PSB per sample.
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index ff61795a4d13..6c0a0412502e 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -566,6 +566,8 @@ int cmd_annotate(int argc, const char **argv)
if (ret < 0)
return ret;
+ annotation_config__init(&annotate.opts);
+
argc = parse_options(argc, argv, options, annotate_usage, 0);
if (argc) {
/*
@@ -605,8 +607,6 @@ int cmd_annotate(int argc, const char **argv)
if (ret < 0)
goto out_delete;
- annotation_config__init();
-
symbol_conf.try_vmlinux_path = true;
ret = symbol__init(&annotate.session->header.env);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 26bc5923e6b5..70548df2abb9 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -449,7 +449,8 @@ static int perf_del_probe_events(struct strfilter *filter)
ret = probe_file__del_strlist(kfd, klist);
if (ret < 0)
goto error;
- }
+ } else if (ret == -ENOMEM)
+ goto error;
ret2 = probe_file__get_events(ufd, filter, ulist);
if (ret2 == 0) {
@@ -459,7 +460,8 @@ static int perf_del_probe_events(struct strfilter *filter)
ret2 = probe_file__del_strlist(ufd, ulist);
if (ret2 < 0)
goto error;
- }
+ } else if (ret2 == -ENOMEM)
+ goto error;
if (ret == -ENOENT && ret2 == -ENOENT)
pr_warning("\"%s\" does not hit any event.\n", str);
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 9483b3f0cae3..72a12b69f120 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1507,7 +1507,7 @@ repeat:
symbol_conf.priv_size += sizeof(u32);
symbol_conf.sort_by_name = true;
}
- annotation_config__init();
+ annotation_config__init(&report.annotation_opts);
}
if (symbol__init(&session->header.env) < 0)
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 8affcab75604..f6dd1a63f159 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -143,7 +143,7 @@ static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
return err;
}
- err = symbol__annotate(&he->ms, evsel, 0, &top->annotation_opts, NULL);
+ err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
if (err == 0) {
top->sym_filter_entry = he;
} else {
@@ -1683,7 +1683,7 @@ int cmd_top(int argc, const char **argv)
if (status < 0)
goto out_delete_evlist;
- annotation_config__init();
+ annotation_config__init(&top.annotation_opts);
symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
status = symbol__init(NULL);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 46a72ecac427..01d542007c8b 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -1065,7 +1065,9 @@ static struct syscall_fmt syscall_fmts[] = {
{ .name = "poll", .timeout = true, },
{ .name = "ppoll", .timeout = true, },
{ .name = "prctl",
- .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */ },
+ .arg = { [0] = { .scnprintf = SCA_PRCTL_OPTION, /* option */
+ .strtoul = STUL_STRARRAY,
+ .parm = &strarray__prctl_options, },
[1] = { .scnprintf = SCA_PRCTL_ARG2, /* arg2 */ },
[2] = { .scnprintf = SCA_PRCTL_ARG3, /* arg3 */ }, }, },
{ .name = "pread", .alias = "pread64", },
diff --git a/tools/perf/check-headers.sh b/tools/perf/check-headers.sh
index 68039a96c1dc..bfb21d049e6c 100755
--- a/tools/perf/check-headers.sh
+++ b/tools/perf/check-headers.sh
@@ -13,6 +13,7 @@ include/uapi/linux/kcmp.h
include/uapi/linux/kvm.h
include/uapi/linux/in.h
include/uapi/linux/mount.h
+include/uapi/linux/openat2.h
include/uapi/linux/perf_event.h
include/uapi/linux/prctl.h
include/uapi/linux/sched.h
diff --git a/tools/perf/include/bpf/pid_filter.h b/tools/perf/include/bpf/pid_filter.h
index 607189a315b2..6e61c4bdf548 100644
--- a/tools/perf/include/bpf/pid_filter.h
+++ b/tools/perf/include/bpf/pid_filter.h
@@ -3,7 +3,7 @@
#ifndef _PERF_BPF_PID_FILTER_
#define _PERF_BPF_PID_FILTER_
-#include <bpf/bpf.h>
+#include <bpf.h>
#define pid_filter(name) pid_map(name, bool)
diff --git a/tools/perf/include/bpf/stdio.h b/tools/perf/include/bpf/stdio.h
index 7ca6fa5463ee..316af5b2ff35 100644
--- a/tools/perf/include/bpf/stdio.h
+++ b/tools/perf/include/bpf/stdio.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
-#include <bpf/bpf.h>
+#include <bpf.h>
struct bpf_map SEC("maps") __bpf_stdout__ = {
.type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
diff --git a/tools/perf/include/bpf/unistd.h b/tools/perf/include/bpf/unistd.h
index d1a35b6c649d..ca7877f9a976 100644
--- a/tools/perf/include/bpf/unistd.h
+++ b/tools/perf/include/bpf/unistd.h
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: LGPL-2.1
-#include <bpf/bpf.h>
+#include <bpf.h>
static int (*bpf_get_current_pid_tgid)(void) = (void *)BPF_FUNC_get_current_pid_tgid;
diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
index 7cb99b433888..c2cc42daf924 100644
--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
if [ $had_vfs_getname -eq 1 ] ; then
line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
+ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
fi
}
diff --git a/tools/perf/trace/beauty/beauty.h b/tools/perf/trace/beauty/beauty.h
index 5a61043c2ff7..d6dfe68a7612 100644
--- a/tools/perf/trace/beauty/beauty.h
+++ b/tools/perf/trace/beauty/beauty.h
@@ -213,6 +213,8 @@ size_t syscall_arg__scnprintf_x86_arch_prctl_code(char *bf, size_t size, struct
size_t syscall_arg__scnprintf_prctl_option(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_OPTION syscall_arg__scnprintf_prctl_option
+extern struct strarray strarray__prctl_options;
+
size_t syscall_arg__scnprintf_prctl_arg2(char *bf, size_t size, struct syscall_arg *arg);
#define SCA_PRCTL_ARG2 syscall_arg__scnprintf_prctl_arg2
diff --git a/tools/perf/trace/beauty/prctl.c b/tools/perf/trace/beauty/prctl.c
index ba2179abed00..6fe5ad5f5d3a 100644
--- a/tools/perf/trace/beauty/prctl.c
+++ b/tools/perf/trace/beauty/prctl.c
@@ -11,9 +11,10 @@
#include "trace/beauty/generated/prctl_option_array.c"
+DEFINE_STRARRAY(prctl_options, "PR_");
+
static size_t prctl__scnprintf_option(int option, char *bf, size_t size, bool show_prefix)
{
- static DEFINE_STRARRAY(prctl_options, "PR_");
return strarray__scnprintf(&strarray__prctl_options, bf, size, "%d", show_prefix, option);
}
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index badbddbb30f8..9023267e5643 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -754,10 +754,9 @@ static int annotate_browser__run(struct annotate_browser *browser,
"? Search string backwards\n");
continue;
case 'r':
- {
- script_browse(NULL, NULL);
- continue;
- }
+ script_browse(NULL, NULL);
+ annotate_browser__show(&browser->b, title, help);
+ continue;
case 'k':
notes->options->show_linenr = !notes->options->show_linenr;
break;
@@ -834,13 +833,13 @@ show_sup_ins:
map_symbol__annotation_dump(ms, evsel, browser->opts);
continue;
case 't':
- if (notes->options->show_total_period) {
- notes->options->show_total_period = false;
- notes->options->show_nr_samples = true;
- } else if (notes->options->show_nr_samples)
- notes->options->show_nr_samples = false;
+ if (symbol_conf.show_total_period) {
+ symbol_conf.show_total_period = false;
+ symbol_conf.show_nr_samples = true;
+ } else if (symbol_conf.show_nr_samples)
+ symbol_conf.show_nr_samples = false;
else
- notes->options->show_total_period = true;
+ symbol_conf.show_total_period = true;
annotation__update_column_widths(notes);
continue;
case 'c':
diff --git a/tools/perf/ui/gtk/annotate.c b/tools/perf/ui/gtk/annotate.c
index 22cc240f7371..35f9641bf670 100644
--- a/tools/perf/ui/gtk/annotate.c
+++ b/tools/perf/ui/gtk/annotate.c
@@ -174,7 +174,7 @@ static int symbol__gtk_annotate(struct map_symbol *ms, struct evsel *evsel,
if (ms->map->dso->annotate_warned)
return -1;
- err = symbol__annotate(ms, evsel, 0, &annotation__default_options, NULL);
+ err = symbol__annotate(ms, evsel, &annotation__default_options, NULL);
if (err) {
char msg[BUFSIZ];
symbol__strerror_disassemble(ms, err, msg, sizeof(msg));
diff --git a/tools/perf/util/annotate.c b/tools/perf/util/annotate.c
index ca73fb74ad03..0ea95be84b3b 100644
--- a/tools/perf/util/annotate.c
+++ b/tools/perf/util/annotate.c
@@ -1143,93 +1143,70 @@ out:
}
struct annotate_args {
- size_t privsize;
- struct arch *arch;
- struct map_symbol ms;
- struct evsel *evsel;
+ struct arch *arch;
+ struct map_symbol ms;
+ struct evsel *evsel;
struct annotation_options *options;
- s64 offset;
- char *line;
- int line_nr;
+ s64 offset;
+ char *line;
+ int line_nr;
};
-static void annotation_line__delete(struct annotation_line *al)
+static void annotation_line__init(struct annotation_line *al,
+ struct annotate_args *args,
+ int nr)
{
- void *ptr = (void *) al - al->privsize;
+ al->offset = args->offset;
+ al->line = strdup(args->line);
+ al->line_nr = args->line_nr;
+ al->data_nr = nr;
+}
+static void annotation_line__exit(struct annotation_line *al)
+{
free_srcline(al->path);
zfree(&al->line);
- free(ptr);
}
-/*
- * Allocating the annotation line data with following
- * structure:
- *
- * --------------------------------------
- * private space | struct annotation_line
- * --------------------------------------
- *
- * Size of the private space is stored in 'struct annotation_line'.
- *
- */
-static struct annotation_line *
-annotation_line__new(struct annotate_args *args, size_t privsize)
+static size_t disasm_line_size(int nr)
{
struct annotation_line *al;
- struct evsel *evsel = args->evsel;
- size_t size = privsize + sizeof(*al);
- int nr = 1;
-
- if (perf_evsel__is_group_event(evsel))
- nr = evsel->core.nr_members;
- size += sizeof(al->data[0]) * nr;
-
- al = zalloc(size);
- if (al) {
- al = (void *) al + privsize;
- al->privsize = privsize;
- al->offset = args->offset;
- al->line = strdup(args->line);
- al->line_nr = args->line_nr;
- al->data_nr = nr;
- }
-
- return al;
+ return (sizeof(struct disasm_line) + (sizeof(al->data[0]) * nr));
}
/*
* Allocating the disasm annotation line data with
* following structure:
*
- * ------------------------------------------------------------
- * privsize space | struct disasm_line | struct annotation_line
- * ------------------------------------------------------------
+ * -------------------------------------------
+ * struct disasm_line | struct annotation_line
+ * -------------------------------------------
*
* We have 'struct annotation_line' member as last member
* of 'struct disasm_line' to have an easy access.
- *
*/
static struct disasm_line *disasm_line__new(struct annotate_args *args)
{
struct disasm_line *dl = NULL;
- struct annotation_line *al;
- size_t privsize = args->privsize + offsetof(struct disasm_line, al);
+ int nr = 1;
- al = annotation_line__new(args, privsize);
- if (al != NULL) {
- dl = disasm_line(al);
+ if (perf_evsel__is_group_event(args->evsel))
+ nr = args->evsel->core.nr_members;
- if (dl->al.line == NULL)
- goto out_delete;
+ dl = zalloc(disasm_line_size(nr));
+ if (!dl)
+ return NULL;
- if (args->offset != -1) {
- if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
- goto out_free_line;
+ annotation_line__init(&dl->al, args, nr);
+ if (dl->al.line == NULL)
+ goto out_delete;
- disasm_line__init_ins(dl, args->arch, &args->ms);
- }
+ if (args->offset != -1) {
+ if (disasm_line__parse(dl->al.line, &dl->ins.name, &dl->ops.raw) < 0)
+ goto out_free_line;
+
+ disasm_line__init_ins(dl, args->arch, &args->ms);
}
return dl;
@@ -1248,7 +1225,8 @@ void disasm_line__free(struct disasm_line *dl)
else
ins__delete(&dl->ops);
zfree(&dl->ins.name);
- annotation_line__delete(&dl->al);
+ annotation_line__exit(&dl->al);
+ free(dl);
}
int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw, int max_ins_name)
@@ -2149,13 +2127,12 @@ void symbol__calc_percent(struct symbol *sym, struct evsel *evsel)
annotation__calc_percent(notes, evsel, symbol__size(sym));
}
-int symbol__annotate(struct map_symbol *ms, struct evsel *evsel, size_t privsize,
+int symbol__annotate(struct map_symbol *ms, struct evsel *evsel,
struct annotation_options *options, struct arch **parch)
{
struct symbol *sym = ms->sym;
struct annotation *notes = symbol__annotation(sym);
struct annotate_args args = {
- .privsize = privsize,
.evsel = evsel,
.options = options,
};
@@ -2644,6 +2621,8 @@ void annotation__set_offsets(struct annotation *notes, s64 size)
struct annotation_line *al;
notes->max_line_len = 0;
+ notes->nr_entries = 0;
+ notes->nr_asm_entries = 0;
list_for_each_entry(al, &notes->src->source, node) {
size_t line_len = strlen(al->line);
@@ -2790,7 +2769,7 @@ int symbol__tty_annotate(struct map_symbol *ms, struct evsel *evsel,
struct symbol *sym = ms->sym;
struct rb_root source_line = RB_ROOT;
- if (symbol__annotate(ms, evsel, 0, opts, NULL) < 0)
+ if (symbol__annotate(ms, evsel, opts, NULL) < 0)
return -1;
symbol__calc_percent(sym, evsel);
@@ -2915,9 +2894,9 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
percent = annotation_data__percent(&al->data[i], percent_type);
obj__set_percent_color(obj, percent, current_entry);
- if (notes->options->show_total_period) {
+ if (symbol_conf.show_total_period) {
obj__printf(obj, "%11" PRIu64 " ", al->data[i].he.period);
- } else if (notes->options->show_nr_samples) {
+ } else if (symbol_conf.show_nr_samples) {
obj__printf(obj, "%6" PRIu64 " ",
al->data[i].he.nr_samples);
} else {
@@ -2931,8 +2910,8 @@ static void __annotation_line__write(struct annotation_line *al, struct annotati
obj__printf(obj, "%-*s", pcnt_width, " ");
else {
obj__printf(obj, "%-*s", pcnt_width,
- notes->options->show_total_period ? "Period" :
- notes->options->show_nr_samples ? "Samples" : "Percent");
+ symbol_conf.show_total_period ? "Period" :
+ symbol_conf.show_nr_samples ? "Samples" : "Percent");
}
}
@@ -3070,7 +3049,7 @@ int symbol__annotate2(struct map_symbol *ms, struct evsel *evsel,
if (perf_evsel__is_group_event(evsel))
nr_pcnt = evsel->core.nr_members;
- err = symbol__annotate(ms, evsel, 0, options, parch);
+ err = symbol__annotate(ms, evsel, options, parch);
if (err)
goto out_free_offsets;
@@ -3094,69 +3073,46 @@ out_free_offsets:
return err;
}
-#define ANNOTATION__CFG(n) \
- { .name = #n, .value = &annotation__default_options.n, }
-
-/*
- * Keep the entries sorted, they are bsearch'ed
- */
-static struct annotation_config {
- const char *name;
- void *value;
-} annotation__configs[] = {
- ANNOTATION__CFG(hide_src_code),
- ANNOTATION__CFG(jump_arrows),
- ANNOTATION__CFG(offset_level),
- ANNOTATION__CFG(show_linenr),
- ANNOTATION__CFG(show_nr_jumps),
- ANNOTATION__CFG(show_nr_samples),
- ANNOTATION__CFG(show_total_period),
- ANNOTATION__CFG(use_offset),
-};
-
-#undef ANNOTATION__CFG
-
-static int annotation_config__cmp(const void *name, const void *cfgp)
-{
- const struct annotation_config *cfg = cfgp;
-
- return strcmp(name, cfg->name);
-}
-
-static int annotation__config(const char *var, const char *value,
- void *data __maybe_unused)
+static int annotation__config(const char *var, const char *value, void *data)
{
- struct annotation_config *cfg;
- const char *name;
+ struct annotation_options *opt = data;
if (!strstarts(var, "annotate."))
return 0;
- name = var + 9;
- cfg = bsearch(name, annotation__configs, ARRAY_SIZE(annotation__configs),
- sizeof(struct annotation_config), annotation_config__cmp);
-
- if (cfg == NULL)
- pr_debug("%s variable unknown, ignoring...", var);
- else if (strcmp(var, "annotate.offset_level") == 0) {
- perf_config_int(cfg->value, name, value);
-
- if (*(int *)cfg->value > ANNOTATION__MAX_OFFSET_LEVEL)
- *(int *)cfg->value = ANNOTATION__MAX_OFFSET_LEVEL;
- else if (*(int *)cfg->value < ANNOTATION__MIN_OFFSET_LEVEL)
- *(int *)cfg->value = ANNOTATION__MIN_OFFSET_LEVEL;
+ if (!strcmp(var, "annotate.offset_level")) {
+ perf_config_u8(&opt->offset_level, "offset_level", value);
+
+ if (opt->offset_level > ANNOTATION__MAX_OFFSET_LEVEL)
+ opt->offset_level = ANNOTATION__MAX_OFFSET_LEVEL;
+ else if (opt->offset_level < ANNOTATION__MIN_OFFSET_LEVEL)
+ opt->offset_level = ANNOTATION__MIN_OFFSET_LEVEL;
+ } else if (!strcmp(var, "annotate.hide_src_code")) {
+ opt->hide_src_code = perf_config_bool("hide_src_code", value);
+ } else if (!strcmp(var, "annotate.jump_arrows")) {
+ opt->jump_arrows = perf_config_bool("jump_arrows", value);
+ } else if (!strcmp(var, "annotate.show_linenr")) {
+ opt->show_linenr = perf_config_bool("show_linenr", value);
+ } else if (!strcmp(var, "annotate.show_nr_jumps")) {
+ opt->show_nr_jumps = perf_config_bool("show_nr_jumps", value);
+ } else if (!strcmp(var, "annotate.show_nr_samples")) {
+ symbol_conf.show_nr_samples = perf_config_bool("show_nr_samples",
+ value);
+ } else if (!strcmp(var, "annotate.show_total_period")) {
+ symbol_conf.show_total_period = perf_config_bool("show_total_period",
+ value);
+ } else if (!strcmp(var, "annotate.use_offset")) {
+ opt->use_offset = perf_config_bool("use_offset", value);
} else {
- *(bool *)cfg->value = perf_config_bool(name, value);
+ pr_debug("%s variable unknown, ignoring...", var);
}
+
return 0;
}
-void annotation_config__init(void)
+void annotation_config__init(struct annotation_options *opt)
{
- perf_config(annotation__config, NULL);
-
- annotation__default_options.show_total_period = symbol_conf.show_total_period;
- annotation__default_options.show_nr_samples = symbol_conf.show_nr_samples;
+ perf_config(annotation__config, opt);
}
static unsigned int parse_percent_type(char *str1, char *str2)
diff --git a/tools/perf/util/annotate.h b/tools/perf/util/annotate.h
index 455403e8fede..001258601a37 100644
--- a/tools/perf/util/annotate.h
+++ b/tools/perf/util/annotate.h
@@ -83,8 +83,6 @@ struct annotation_options {
full_path,
show_linenr,
show_nr_jumps,
- show_nr_samples,
- show_total_period,
show_minmax_cycle,
show_asm_raw,
annotate_src;
@@ -141,7 +139,6 @@ struct annotation_line {
u64 cycles;
u64 cycles_max;
u64 cycles_min;
- size_t privsize;
char *path;
u32 idx;
int idx_asm;
@@ -309,7 +306,7 @@ static inline int annotation__cycles_width(struct annotation *notes)
static inline int annotation__pcnt_width(struct annotation *notes)
{
- return (notes->options->show_total_period ? 12 : 7) * notes->nr_events;
+ return (symbol_conf.show_total_period ? 12 : 7) * notes->nr_events;
}
static inline bool annotation_line__filter(struct annotation_line *al, struct annotation *notes)
@@ -352,7 +349,7 @@ struct annotated_source *symbol__hists(struct symbol *sym, int nr_hists);
void symbol__annotate_zero_histograms(struct symbol *sym);
int symbol__annotate(struct map_symbol *ms,
- struct evsel *evsel, size_t privsize,
+ struct evsel *evsel,
struct annotation_options *options,
struct arch **parch);
int symbol__annotate2(struct map_symbol *ms,
@@ -413,7 +410,7 @@ static inline int symbol__tui_annotate(struct map_symbol *ms __maybe_unused,
}
#endif
-void annotation_config__init(void);
+void annotation_config__init(struct annotation_options *opt);
int annotate_parse_percent_type(const struct option *opt, const char *_str,
int unset);
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
index eb087e7df6f4..3571ce72ca28 100644
--- a/tools/perf/util/auxtrace.c
+++ b/tools/perf/util/auxtrace.c
@@ -629,8 +629,10 @@ int auxtrace_record__options(struct auxtrace_record *itr,
struct evlist *evlist,
struct record_opts *opts)
{
- if (itr)
+ if (itr) {
+ itr->evlist = evlist;
return itr->recording_options(itr, evlist, opts);
+ }
return 0;
}
@@ -664,6 +666,24 @@ int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
return -EINVAL;
}
+int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx)
+{
+ struct evsel *evsel;
+
+ if (!itr->evlist || !itr->pmu)
+ return -EINVAL;
+
+ evlist__for_each_entry(itr->evlist, evsel) {
+ if (evsel->core.attr.type == itr->pmu->type) {
+ if (evsel->disabled)
+ return 0;
+ return perf_evlist__enable_event_idx(itr->evlist, evsel,
+ idx);
+ }
+ }
+ return -EINVAL;
+}
+
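With this shared helper in place, each AUX-area driver init reduces to the two assignments visible in the cs-etm, arm-spe, intel-bts and intel-pt hunks above ('rec' stands for the driver's recording struct):

	rec->itr.pmu = pmu;	/* lets the helper match evsels by PMU type */
	rec->itr.read_finish = auxtrace_record__read_finish;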
/*
* Event record size is 16-bit which results in a maximum size of about 64KiB.
* Allow about 4KiB for the rest of the sample record, to give a maximum
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
index 749d72cd9c7b..e58ef160b599 100644
--- a/tools/perf/util/auxtrace.h
+++ b/tools/perf/util/auxtrace.h
@@ -29,6 +29,7 @@ struct record_opts;
struct perf_record_auxtrace_error;
struct perf_record_auxtrace_info;
struct events_stats;
+struct perf_pmu;
enum auxtrace_error_type {
PERF_AUXTRACE_ERROR_ITRACE = 1,
@@ -322,6 +323,8 @@ struct auxtrace_mmap_params {
* @read_finish: called after reading from an auxtrace mmap
* @alignment: alignment (if any) for AUX area data
* @default_aux_sample_size: default sample size for --aux sample option
+ * @pmu: associated pmu
+ * @evlist: selected events list
*/
struct auxtrace_record {
int (*recording_options)(struct auxtrace_record *itr,
@@ -346,6 +349,8 @@ struct auxtrace_record {
int (*read_finish)(struct auxtrace_record *itr, int idx);
unsigned int alignment;
unsigned int default_aux_sample_size;
+ struct perf_pmu *pmu;
+ struct evlist *evlist;
};
/**
@@ -537,6 +542,7 @@ int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
struct auxtrace_mmap *mm,
unsigned char *data, u64 *head, u64 *old);
u64 auxtrace_record__reference(struct auxtrace_record *itr);
+int auxtrace_record__read_finish(struct auxtrace_record *itr, int idx);
int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
off_t file_offset);
diff --git a/tools/perf/util/config.c b/tools/perf/util/config.c
index 0bc9c4d7fdc5..ef38eba56ed0 100644
--- a/tools/perf/util/config.c
+++ b/tools/perf/util/config.c
@@ -374,6 +374,18 @@ int perf_config_int(int *dest, const char *name, const char *value)
return 0;
}
+int perf_config_u8(u8 *dest, const char *name, const char *value)
+{
+ long ret = 0;
+
+ if (!perf_parse_long(value, &ret)) {
+ bad_config(name);
+ return -1;
+ }
+ *dest = ret;
+ return 0;
+}
+
static int perf_config_bool_or_int(const char *name, const char *value, int *is_bool)
{
int ret;
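A hypothetical call into the new perf_config_u8() helper, mirroring how annotation__config() uses it for annotate.offset_level:

	u8 level = 0;

	if (perf_config_u8(&level, "offset_level", "2") < 0)
		level = 1;	/* keep the documented default on bad input */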
diff --git a/tools/perf/util/config.h b/tools/perf/util/config.h
index bd0a5897c76a..c10b66dde2f3 100644
--- a/tools/perf/util/config.h
+++ b/tools/perf/util/config.h
@@ -29,6 +29,7 @@ typedef int (*config_fn_t)(const char *, const char *, void *);
int perf_default_config(const char *, const char *, void *);
int perf_config(config_fn_t fn, void *);
int perf_config_int(int *dest, const char *, const char *);
+int perf_config_u8(u8 *dest, const char *name, const char *value);
int perf_config_u64(u64 *dest, const char *, const char *);
int perf_config_bool(const char *, const char *);
int config_error_nonbool(const char *);
diff --git a/tools/perf/util/llvm-utils.c b/tools/perf/util/llvm-utils.c
index eae47c2509eb..b5af680fc667 100644
--- a/tools/perf/util/llvm-utils.c
+++ b/tools/perf/util/llvm-utils.c
@@ -288,6 +288,7 @@ static const char *kinc_fetch_script =
"obj-y := dummy.o\n"
"\\$(obj)/%.o: \\$(src)/%.c\n"
"\t@echo -n \"\\$(NOSTDINC_FLAGS) \\$(LINUXINCLUDE) \\$(EXTRA_CFLAGS)\"\n"
+"\t\\$(CC) -c -o \\$@ \\$<\n"
"EOF\n"
"touch $TMPDIR/dummy.c\n"
"make -s -C $KBUILD_DIR M=$TMPDIR $KBUILD_OPTS dummy.o 2>/dev/null\n"
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index c8c5410315e8..fb5c2cd44d30 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -686,6 +686,7 @@ static struct dso *machine__findnew_module_dso(struct machine *machine,
dso__set_module_info(dso, m, machine);
dso__set_long_name(dso, strdup(filename), true);
+ dso->kernel = DSO_TYPE_KERNEL;
}
dso__get(dso);
@@ -726,9 +727,17 @@ static int machine__process_ksymbol_register(struct machine *machine,
struct map *map = maps__find(&machine->kmaps, event->ksymbol.addr);
if (!map) {
- map = dso__new_map(event->ksymbol.name);
- if (!map)
+ struct dso *dso = dso__new(event->ksymbol.name);
+
+ if (dso) {
+ dso->kernel = DSO_TYPE_KERNEL;
+ map = map__new2(0, dso);
+ }
+
+ if (!dso || !map) {
+ dso__put(dso);
return -ENOMEM;
+ }
map->start = event->ksymbol.addr;
map->end = map->start + event->ksymbol.len;
@@ -972,7 +981,6 @@ int machine__create_extra_kernel_map(struct machine *machine,
kmap = map__kmap(map);
- kmap->kmaps = &machine->kmaps;
strlcpy(kmap->name, xm->name, KMAP_NAME_LEN);
maps__insert(&machine->kmaps, map);
@@ -1082,9 +1090,6 @@ int __weak machine__create_extra_kernel_maps(struct machine *machine __maybe_unu
static int
__machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
{
- struct kmap *kmap;
- struct map *map;
-
/* In case of renewal the kernel map, destroy previous one */
machine__destroy_kernel_maps(machine);
@@ -1093,14 +1098,7 @@ __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
return -1;
machine->vmlinux_map->map_ip = machine->vmlinux_map->unmap_ip = identity__map_ip;
- map = machine__kernel_map(machine);
- kmap = map__kmap(map);
- if (!kmap)
- return -1;
-
- kmap->kmaps = &machine->kmaps;
- maps__insert(&machine->kmaps, map);
-
+ maps__insert(&machine->kmaps, machine->vmlinux_map);
return 0;
}
diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c
index f67960bedebb..a08ca276098e 100644
--- a/tools/perf/util/map.c
+++ b/tools/perf/util/map.c
@@ -375,8 +375,13 @@ struct symbol *map__find_symbol_by_name(struct map *map, const char *name)
struct map *map__clone(struct map *from)
{
- struct map *map = memdup(from, sizeof(*map));
+ size_t size = sizeof(struct map);
+ struct map *map;
+
+ if (from->dso && from->dso->kernel)
+ size += sizeof(struct kmap);
+ map = memdup(from, size);
if (map != NULL) {
refcount_set(&map->refcnt, 1);
RB_CLEAR_NODE(&map->rb_node);
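The sizing above relies on the perf convention that kernel maps carry a struct kmap immediately behind the struct map, roughly:

	/*
	 * layout assumed for kernel dsos (what map__kmap() dereferences):
	 *   [ struct map | struct kmap ]
	 * so a clone must copy sizeof(struct map) + sizeof(struct kmap) bytes.
	 */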
@@ -538,6 +543,16 @@ void maps__insert(struct maps *maps, struct map *map)
__maps__insert(maps, map);
++maps->nr_maps;
+ if (map->dso && map->dso->kernel) {
+ struct kmap *kmap = map__kmap(map);
+
+ if (kmap)
+ kmap->kmaps = maps;
+ else
+ pr_err("Internal error: kernel dso with non kernel map\n");
+ }
+
/*
* If we already performed some search by name, then we need to add the just
* inserted map and resort.
diff --git a/tools/perf/util/probe-file.c b/tools/perf/util/probe-file.c
index 5003ba403345..0f5fda11675f 100644
--- a/tools/perf/util/probe-file.c
+++ b/tools/perf/util/probe-file.c
@@ -301,10 +301,15 @@ int probe_file__get_events(int fd, struct strfilter *filter,
p = strchr(ent->s, ':');
if ((p && strfilter__compare(filter, p + 1)) ||
strfilter__compare(filter, ent->s)) {
- strlist__add(plist, ent->s);
+ ret = strlist__add(plist, ent->s);
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ goto out;
+ }
ret = 0;
}
}
+out:
strlist__delete(namelist);
return ret;
@@ -511,7 +516,11 @@ static int probe_cache__load(struct probe_cache *pcache)
ret = -EINVAL;
goto out;
}
- strlist__add(entry->tevlist, buf);
+ ret = strlist__add(entry->tevlist, buf);
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ goto out;
+ }
}
}
out:
@@ -672,7 +681,12 @@ int probe_cache__add_entry(struct probe_cache *pcache,
command = synthesize_probe_trace_command(&tevs[i]);
if (!command)
goto out_err;
- strlist__add(entry->tevlist, command);
+ ret = strlist__add(entry->tevlist, command);
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ goto out_err;
+ }
+
free(command);
}
list_add_tail(&entry->node, &pcache->entries);
@@ -853,9 +867,15 @@ int probe_cache__scan_sdt(struct probe_cache *pcache, const char *pathname)
break;
}
- strlist__add(entry->tevlist, buf);
+ ret = strlist__add(entry->tevlist, buf);
+
free(buf);
entry = NULL;
+
+ if (ret == -ENOMEM) {
+ pr_err("strlist__add failed with -ENOMEM\n");
+ break;
+ }
}
if (entry) {
list_del_init(&entry->node);
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 2c41d47f6f83..90d23cc3c8d4 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -18,7 +18,6 @@
* AGGR_NONE: Use matching CPU
* AGGR_THREAD: Not supported?
*/
-static bool have_frontend_stalled;
struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
@@ -144,7 +143,6 @@ void runtime_stat__exit(struct runtime_stat *st)
void perf_stat__init_shadow_stats(void)
{
- have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
runtime_stat__init(&rt_stat);
}
@@ -853,10 +851,6 @@ void perf_stat__print_shadow_stats(struct perf_stat_config *config,
print_metric(config, ctxp, NULL, "%7.2f ",
"stalled cycles per insn",
ratio);
- } else if (have_frontend_stalled) {
- out->new_line(config, ctxp);
- print_metric(config, ctxp, NULL, "%7.2f ",
- "stalled cycles per insn", 0);
}
} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 3b379b1296f1..1077013d8ce2 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -635,9 +635,12 @@ out:
static bool symbol__is_idle(const char *name)
{
const char * const idle_symbols[] = {
+ "acpi_idle_do_entry",
+ "acpi_processor_ffh_cstate_enter",
"arch_cpu_idle",
"cpu_idle",
"cpu_startup_entry",
+ "idle_cpu",
"intel_idle",
"default_idle",
"native_safe_halt",
@@ -651,13 +654,17 @@ static bool symbol__is_idle(const char *name)
NULL
};
int i;
+ static struct strlist *idle_symbols_list;
- for (i = 0; idle_symbols[i]; i++) {
- if (!strcmp(idle_symbols[i], name))
- return true;
- }
+ if (idle_symbols_list)
+ return strlist__has_entry(idle_symbols_list, name);
- return false;
+ idle_symbols_list = strlist__new(NULL, NULL);
+
+ for (i = 0; idle_symbols[i]; i++)
+ strlist__add(idle_symbols_list, idle_symbols[i]);
+
+ return strlist__has_entry(idle_symbols_list, name);
}
static int map__process_kallsym_symbol(void *arg, const char *name,
diff --git a/tools/testing/kunit/kunit.py b/tools/testing/kunit/kunit.py
index e59eb9e7f923..180ad1e1b04f 100755
--- a/tools/testing/kunit/kunit.py
+++ b/tools/testing/kunit/kunit.py
@@ -24,6 +24,8 @@ KunitResult = namedtuple('KunitResult', ['status','result'])
KunitRequest = namedtuple('KunitRequest', ['raw_output','timeout', 'jobs', 'build_dir', 'defconfig'])
+KernelDirectoryPath = sys.argv[0].split('tools/testing/kunit/')[0]
+
class KunitStatus(Enum):
SUCCESS = auto()
CONFIG_FAILURE = auto()
@@ -35,6 +37,13 @@ def create_default_kunitconfig():
shutil.copyfile('arch/um/configs/kunit_defconfig',
kunit_kernel.kunitconfig_path)
+def get_kernel_root_path():
+ parts = sys.argv[0] if not __file__ else __file__
+ parts = os.path.realpath(parts).split('tools/testing/kunit')
+ if len(parts) != 2:
+ sys.exit(1)
+ return parts[0]
+
def run_tests(linux: kunit_kernel.LinuxSourceTree,
request: KunitRequest) -> KunitResult:
config_start = time.time()
@@ -114,6 +123,9 @@ def main(argv, linux=None):
cli_args = parser.parse_args(argv)
if cli_args.subcommand == 'run':
+ if get_kernel_root_path():
+ os.chdir(get_kernel_root_path())
+
if cli_args.build_dir:
if not os.path.exists(cli_args.build_dir):
os.mkdir(cli_args.build_dir)
diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py
index cc5d844ecca1..d99ae75ef72f 100644
--- a/tools/testing/kunit/kunit_kernel.py
+++ b/tools/testing/kunit/kunit_kernel.py
@@ -93,6 +93,20 @@ class LinuxSourceTree(object):
return False
return True
+ def validate_config(self, build_dir):
+ kconfig_path = get_kconfig_path(build_dir)
+ validated_kconfig = kunit_config.Kconfig()
+ validated_kconfig.read_from_file(kconfig_path)
+ if not self._kconfig.is_subset_of(validated_kconfig):
+ invalid = self._kconfig.entries() - validated_kconfig.entries()
+ message = 'Provided Kconfig is not contained in validated .config. Following fields found in kunitconfig, ' \
+ 'but not in .config: %s' % (
+ ', '.join([str(e) for e in invalid])
+ )
+ logging.error(message)
+ return False
+ return True
+
def build_config(self, build_dir):
kconfig_path = get_kconfig_path(build_dir)
if build_dir and not os.path.exists(build_dir):
@@ -103,12 +117,7 @@ class LinuxSourceTree(object):
except ConfigError as e:
logging.error(e)
return False
- validated_kconfig = kunit_config.Kconfig()
- validated_kconfig.read_from_file(kconfig_path)
- if not self._kconfig.is_subset_of(validated_kconfig):
- logging.error('Provided Kconfig is not contained in validated .config!')
- return False
- return True
+ return self.validate_config(build_dir)
def build_reconfig(self, build_dir):
"""Creates a new .config if it is not a subset of the .kunitconfig."""
@@ -133,12 +142,7 @@ class LinuxSourceTree(object):
except (ConfigError, BuildError) as e:
logging.error(e)
return False
- used_kconfig = kunit_config.Kconfig()
- used_kconfig.read_from_file(get_kconfig_path(build_dir))
- if not self._kconfig.is_subset_of(used_kconfig):
- logging.error('Provided Kconfig is not contained in final config!')
- return False
- return True
+ return self.validate_config(build_dir)
def run_kernel(self, args=[], timeout=None, build_dir=''):
args.extend(['mem=256M'])
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 63430e2664c2..6ec503912bea 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -77,6 +77,12 @@ ifneq ($(SKIP_TARGETS),)
override TARGETS := $(TMP)
endif
+# User can set FORCE_TARGETS to 1 to require all targets to be successfully
+# built; make will fail if any of the targets cannot be built. If
+# FORCE_TARGETS is not set (the default), make will succeed if at least one
+# of the targets gets built.
+FORCE_TARGETS ?=
+
# Clear LDFLAGS and MAKEFLAGS if called from main
# Makefile to avoid test build failures when test
# Makefile doesn't have explicit build rules.
@@ -151,7 +157,8 @@ all: khdr
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
mkdir $$BUILD_TARGET -p; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET; \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET \
+ $(if $(FORCE_TARGETS),|| exit); \
ret=$$((ret * $$?)); \
done; exit $$ret;
@@ -205,7 +212,8 @@ ifdef INSTALL_PATH
@ret=1; \
for TARGET in $(TARGETS); do \
BUILD_TARGET=$$BUILD/$$TARGET; \
- $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install; \
+ $(MAKE) OUTPUT=$$BUILD_TARGET -C $$TARGET INSTALL_PATH=$(INSTALL_PATH)/$$TARGET install \
+ $(if $(FORCE_TARGETS),|| exit); \
ret=$$((ret * $$?)); \
done; exit $$ret;
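For example, a hypothetical invocation that makes the selftests build stop at the first target that fails to build:

	$ make -C tools/testing/selftests FORCE_TARGETS=1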
diff --git a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
index 098bcae5f827..0800036ed654 100644
--- a/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
+++ b/tools/testing/selftests/bpf/prog_tests/select_reuseport.c
@@ -506,8 +506,10 @@ static void test_syncookie(int type, sa_family_t family)
.pass_on_failure = 0,
};
- if (type != SOCK_STREAM)
+ if (type != SOCK_STREAM) {
+ test__skip();
return;
+ }
/*
* +1 for TCP-SYN and
@@ -822,8 +824,10 @@ void test_select_reuseport(void)
goto out;
saved_tcp_fo = read_int_sysctl(TCP_FO_SYSCTL);
+ if (saved_tcp_fo < 0)
+ goto out;
saved_tcp_syncookie = read_int_sysctl(TCP_SYNCOOKIE_SYSCTL);
- if (saved_tcp_syncookie < 0 || saved_tcp_syncookie < 0)
+ if (saved_tcp_syncookie < 0)
goto out;
if (enable_fastopen())
diff --git a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
index 07f5b462c2ef..aa43e0bd210c 100644
--- a/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
+++ b/tools/testing/selftests/bpf/prog_tests/sockmap_basic.c
@@ -3,6 +3,11 @@
#include "test_progs.h"
+#define TCP_REPAIR 19 /* TCP sock is under repair right now */
+
+#define TCP_REPAIR_ON 1
+#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
+
static int connected_socket_v4(void)
{
struct sockaddr_in addr = {
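A hedged sketch of how the TCP_REPAIR constants above are typically used ('fd' is an assumed established TCP socket; error handling elided):

	int val = TCP_REPAIR_ON;

	setsockopt(fd, SOL_TCP, TCP_REPAIR, &val, sizeof(val));
	/* ... adjust the quiescent socket ... */
	val = TCP_REPAIR_OFF_NO_WP;	/* turn repair off without window probes */
	setsockopt(fd, SOL_TCP, TCP_REPAIR, &val, sizeof(val));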
diff --git a/tools/testing/selftests/ftrace/Makefile b/tools/testing/selftests/ftrace/Makefile
index cd1f5b3a7774..d6e106fbce11 100644
--- a/tools/testing/selftests/ftrace/Makefile
+++ b/tools/testing/selftests/ftrace/Makefile
@@ -2,7 +2,7 @@
all:
TEST_PROGS := ftracetest
-TEST_FILES := test.d
+TEST_FILES := test.d settings
EXTRA_CLEAN := $(OUTPUT)/logs/*
include ../lib.mk
diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
index 64cfcc75e3c1..f2ee1e889e13 100644
--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
+++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-pid.tc
@@ -1,6 +1,7 @@
#!/bin/sh
# SPDX-License-Identifier: GPL-2.0
# description: ftrace - function pid filters
+# flags: instance
# Make sure that function pid matching filter works.
# Also test it on an instance directory
@@ -96,13 +97,6 @@ do_test() {
}
do_test
-
-mkdir instances/foo
-cd instances/foo
-do_test
-cd ../../
-rmdir instances/foo
-
do_reset
exit 0
diff --git a/tools/testing/selftests/futex/functional/Makefile b/tools/testing/selftests/futex/functional/Makefile
index 30996306cabc..23207829ec75 100644
--- a/tools/testing/selftests/futex/functional/Makefile
+++ b/tools/testing/selftests/futex/functional/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
INCLUDES := -I../include -I../../
CFLAGS := $(CFLAGS) -g -O2 -Wall -D_GNU_SOURCE -pthread $(INCLUDES)
-LDFLAGS := $(LDFLAGS) -pthread -lrt
+LDLIBS := -lpthread -lrt
HEADERS := \
../include/futextest.h \
diff --git a/tools/testing/selftests/kvm/Makefile b/tools/testing/selftests/kvm/Makefile
index 67abc1dd50ee..d91c53b726e6 100644
--- a/tools/testing/selftests/kvm/Makefile
+++ b/tools/testing/selftests/kvm/Makefile
@@ -8,7 +8,7 @@ KSFT_KHDR_INSTALL := 1
UNAME_M := $(shell uname -m)
LIBKVM = lib/assert.c lib/elf.c lib/io.c lib/kvm_util.c lib/sparsebit.c
-LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/ucall.c
+LIBKVM_x86_64 = lib/x86_64/processor.c lib/x86_64/vmx.c lib/x86_64/svm.c lib/x86_64/ucall.c
LIBKVM_aarch64 = lib/aarch64/processor.c lib/aarch64/ucall.c
LIBKVM_s390x = lib/s390x/processor.c lib/s390x/ucall.c
@@ -26,6 +26,7 @@ TEST_GEN_PROGS_x86_64 += x86_64/vmx_dirty_log_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_set_nested_state_test
TEST_GEN_PROGS_x86_64 += x86_64/vmx_tsc_adjust_test
TEST_GEN_PROGS_x86_64 += x86_64/xss_msr_test
+TEST_GEN_PROGS_x86_64 += x86_64/svm_vmcall_test
TEST_GEN_PROGS_x86_64 += clear_dirty_log_test
TEST_GEN_PROGS_x86_64 += dirty_log_test
TEST_GEN_PROGS_x86_64 += kvm_create_max_vcpus
diff --git a/tools/testing/selftests/kvm/include/x86_64/processor.h b/tools/testing/selftests/kvm/include/x86_64/processor.h
index aa6451b3f740..7428513a4c68 100644
--- a/tools/testing/selftests/kvm/include/x86_64/processor.h
+++ b/tools/testing/selftests/kvm/include/x86_64/processor.h
@@ -36,24 +36,24 @@
#define X86_CR4_SMAP (1ul << 21)
#define X86_CR4_PKE (1ul << 22)
-/* The enum values match the intruction encoding of each register */
-enum x86_register {
- RAX = 0,
- RCX,
- RDX,
- RBX,
- RSP,
- RBP,
- RSI,
- RDI,
- R8,
- R9,
- R10,
- R11,
- R12,
- R13,
- R14,
- R15,
+/* General Registers in 64-Bit Mode */
+struct gpr64_regs {
+ u64 rax;
+ u64 rcx;
+ u64 rdx;
+ u64 rbx;
+ u64 rsp;
+ u64 rbp;
+ u64 rsi;
+ u64 rdi;
+ u64 r8;
+ u64 r9;
+ u64 r10;
+ u64 r11;
+ u64 r12;
+ u64 r13;
+ u64 r14;
+ u64 r15;
};
struct desc64 {
@@ -220,20 +220,20 @@ static inline void set_cr4(uint64_t val)
__asm__ __volatile__("mov %0, %%cr4" : : "r" (val) : "memory");
}
-static inline uint64_t get_gdt_base(void)
+static inline struct desc_ptr get_gdt(void)
{
struct desc_ptr gdt;
__asm__ __volatile__("sgdt %[gdt]"
: /* output */ [gdt]"=m"(gdt));
- return gdt.address;
+ return gdt;
}
-static inline uint64_t get_idt_base(void)
+static inline struct desc_ptr get_idt(void)
{
struct desc_ptr idt;
__asm__ __volatile__("sidt %[idt]"
: /* output */ [idt]"=m"(idt));
- return idt.address;
+ return idt;
}
#define SET_XMM(__var, __xmm) \
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm.h b/tools/testing/selftests/kvm/include/x86_64/svm.h
new file mode 100644
index 000000000000..f4ea2355dbc2
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/svm.h
@@ -0,0 +1,297 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/svm.h
+ * This is a copy of arch/x86/include/asm/svm.h
+ *
+ */
+
+#ifndef SELFTEST_KVM_SVM_H
+#define SELFTEST_KVM_SVM_H
+
+enum {
+ INTERCEPT_INTR,
+ INTERCEPT_NMI,
+ INTERCEPT_SMI,
+ INTERCEPT_INIT,
+ INTERCEPT_VINTR,
+ INTERCEPT_SELECTIVE_CR0,
+ INTERCEPT_STORE_IDTR,
+ INTERCEPT_STORE_GDTR,
+ INTERCEPT_STORE_LDTR,
+ INTERCEPT_STORE_TR,
+ INTERCEPT_LOAD_IDTR,
+ INTERCEPT_LOAD_GDTR,
+ INTERCEPT_LOAD_LDTR,
+ INTERCEPT_LOAD_TR,
+ INTERCEPT_RDTSC,
+ INTERCEPT_RDPMC,
+ INTERCEPT_PUSHF,
+ INTERCEPT_POPF,
+ INTERCEPT_CPUID,
+ INTERCEPT_RSM,
+ INTERCEPT_IRET,
+ INTERCEPT_INTn,
+ INTERCEPT_INVD,
+ INTERCEPT_PAUSE,
+ INTERCEPT_HLT,
+ INTERCEPT_INVLPG,
+ INTERCEPT_INVLPGA,
+ INTERCEPT_IOIO_PROT,
+ INTERCEPT_MSR_PROT,
+ INTERCEPT_TASK_SWITCH,
+ INTERCEPT_FERR_FREEZE,
+ INTERCEPT_SHUTDOWN,
+ INTERCEPT_VMRUN,
+ INTERCEPT_VMMCALL,
+ INTERCEPT_VMLOAD,
+ INTERCEPT_VMSAVE,
+ INTERCEPT_STGI,
+ INTERCEPT_CLGI,
+ INTERCEPT_SKINIT,
+ INTERCEPT_RDTSCP,
+ INTERCEPT_ICEBP,
+ INTERCEPT_WBINVD,
+ INTERCEPT_MONITOR,
+ INTERCEPT_MWAIT,
+ INTERCEPT_MWAIT_COND,
+ INTERCEPT_XSETBV,
+ INTERCEPT_RDPRU,
+};
+
+
+struct __attribute__ ((__packed__)) vmcb_control_area {
+ u32 intercept_cr;
+ u32 intercept_dr;
+ u32 intercept_exceptions;
+ u64 intercept;
+ u8 reserved_1[40];
+ u16 pause_filter_thresh;
+ u16 pause_filter_count;
+ u64 iopm_base_pa;
+ u64 msrpm_base_pa;
+ u64 tsc_offset;
+ u32 asid;
+ u8 tlb_ctl;
+ u8 reserved_2[3];
+ u32 int_ctl;
+ u32 int_vector;
+ u32 int_state;
+ u8 reserved_3[4];
+ u32 exit_code;
+ u32 exit_code_hi;
+ u64 exit_info_1;
+ u64 exit_info_2;
+ u32 exit_int_info;
+ u32 exit_int_info_err;
+ u64 nested_ctl;
+ u64 avic_vapic_bar;
+ u8 reserved_4[8];
+ u32 event_inj;
+ u32 event_inj_err;
+ u64 nested_cr3;
+ u64 virt_ext;
+ u32 clean;
+ u32 reserved_5;
+ u64 next_rip;
+ u8 insn_len;
+ u8 insn_bytes[15];
+ u64 avic_backing_page; /* Offset 0xe0 */
+ u8 reserved_6[8]; /* Offset 0xe8 */
+ u64 avic_logical_id; /* Offset 0xf0 */
+ u64 avic_physical_id; /* Offset 0xf8 */
+ u8 reserved_7[768];
+};
+
+
+#define TLB_CONTROL_DO_NOTHING 0
+#define TLB_CONTROL_FLUSH_ALL_ASID 1
+#define TLB_CONTROL_FLUSH_ASID 3
+#define TLB_CONTROL_FLUSH_ASID_LOCAL 7
+
+#define V_TPR_MASK 0x0f
+
+#define V_IRQ_SHIFT 8
+#define V_IRQ_MASK (1 << V_IRQ_SHIFT)
+
+#define V_GIF_SHIFT 9
+#define V_GIF_MASK (1 << V_GIF_SHIFT)
+
+#define V_INTR_PRIO_SHIFT 16
+#define V_INTR_PRIO_MASK (0x0f << V_INTR_PRIO_SHIFT)
+
+#define V_IGN_TPR_SHIFT 20
+#define V_IGN_TPR_MASK (1 << V_IGN_TPR_SHIFT)
+
+#define V_INTR_MASKING_SHIFT 24
+#define V_INTR_MASKING_MASK (1 << V_INTR_MASKING_SHIFT)
+
+#define V_GIF_ENABLE_SHIFT 25
+#define V_GIF_ENABLE_MASK (1 << V_GIF_ENABLE_SHIFT)
+
+#define AVIC_ENABLE_SHIFT 31
+#define AVIC_ENABLE_MASK (1 << AVIC_ENABLE_SHIFT)
+
+#define LBR_CTL_ENABLE_MASK BIT_ULL(0)
+#define VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK BIT_ULL(1)
+
+#define SVM_INTERRUPT_SHADOW_MASK 1
+
+#define SVM_IOIO_STR_SHIFT 2
+#define SVM_IOIO_REP_SHIFT 3
+#define SVM_IOIO_SIZE_SHIFT 4
+#define SVM_IOIO_ASIZE_SHIFT 7
+
+#define SVM_IOIO_TYPE_MASK 1
+#define SVM_IOIO_STR_MASK (1 << SVM_IOIO_STR_SHIFT)
+#define SVM_IOIO_REP_MASK (1 << SVM_IOIO_REP_SHIFT)
+#define SVM_IOIO_SIZE_MASK (7 << SVM_IOIO_SIZE_SHIFT)
+#define SVM_IOIO_ASIZE_MASK (7 << SVM_IOIO_ASIZE_SHIFT)
+
+#define SVM_VM_CR_VALID_MASK 0x001fULL
+#define SVM_VM_CR_SVM_LOCK_MASK 0x0008ULL
+#define SVM_VM_CR_SVM_DIS_MASK 0x0010ULL
+
+#define SVM_NESTED_CTL_NP_ENABLE BIT(0)
+#define SVM_NESTED_CTL_SEV_ENABLE BIT(1)
+
+struct __attribute__ ((__packed__)) vmcb_seg {
+ u16 selector;
+ u16 attrib;
+ u32 limit;
+ u64 base;
+};
+
+struct __attribute__ ((__packed__)) vmcb_save_area {
+ struct vmcb_seg es;
+ struct vmcb_seg cs;
+ struct vmcb_seg ss;
+ struct vmcb_seg ds;
+ struct vmcb_seg fs;
+ struct vmcb_seg gs;
+ struct vmcb_seg gdtr;
+ struct vmcb_seg ldtr;
+ struct vmcb_seg idtr;
+ struct vmcb_seg tr;
+ u8 reserved_1[43];
+ u8 cpl;
+ u8 reserved_2[4];
+ u64 efer;
+ u8 reserved_3[112];
+ u64 cr4;
+ u64 cr3;
+ u64 cr0;
+ u64 dr7;
+ u64 dr6;
+ u64 rflags;
+ u64 rip;
+ u8 reserved_4[88];
+ u64 rsp;
+ u8 reserved_5[24];
+ u64 rax;
+ u64 star;
+ u64 lstar;
+ u64 cstar;
+ u64 sfmask;
+ u64 kernel_gs_base;
+ u64 sysenter_cs;
+ u64 sysenter_esp;
+ u64 sysenter_eip;
+ u64 cr2;
+ u8 reserved_6[32];
+ u64 g_pat;
+ u64 dbgctl;
+ u64 br_from;
+ u64 br_to;
+ u64 last_excp_from;
+ u64 last_excp_to;
+};
+
+struct __attribute__ ((__packed__)) vmcb {
+ struct vmcb_control_area control;
+ struct vmcb_save_area save;
+};
+
+#define SVM_CPUID_FUNC 0x8000000a
+
+#define SVM_VM_CR_SVM_DISABLE 4
+
+#define SVM_SELECTOR_S_SHIFT 4
+#define SVM_SELECTOR_DPL_SHIFT 5
+#define SVM_SELECTOR_P_SHIFT 7
+#define SVM_SELECTOR_AVL_SHIFT 8
+#define SVM_SELECTOR_L_SHIFT 9
+#define SVM_SELECTOR_DB_SHIFT 10
+#define SVM_SELECTOR_G_SHIFT 11
+
+#define SVM_SELECTOR_TYPE_MASK (0xf)
+#define SVM_SELECTOR_S_MASK (1 << SVM_SELECTOR_S_SHIFT)
+#define SVM_SELECTOR_DPL_MASK (3 << SVM_SELECTOR_DPL_SHIFT)
+#define SVM_SELECTOR_P_MASK (1 << SVM_SELECTOR_P_SHIFT)
+#define SVM_SELECTOR_AVL_MASK (1 << SVM_SELECTOR_AVL_SHIFT)
+#define SVM_SELECTOR_L_MASK (1 << SVM_SELECTOR_L_SHIFT)
+#define SVM_SELECTOR_DB_MASK (1 << SVM_SELECTOR_DB_SHIFT)
+#define SVM_SELECTOR_G_MASK (1 << SVM_SELECTOR_G_SHIFT)
+
+#define SVM_SELECTOR_WRITE_MASK (1 << 1)
+#define SVM_SELECTOR_READ_MASK SVM_SELECTOR_WRITE_MASK
+#define SVM_SELECTOR_CODE_MASK (1 << 3)
+
+#define INTERCEPT_CR0_READ 0
+#define INTERCEPT_CR3_READ 3
+#define INTERCEPT_CR4_READ 4
+#define INTERCEPT_CR8_READ 8
+#define INTERCEPT_CR0_WRITE (16 + 0)
+#define INTERCEPT_CR3_WRITE (16 + 3)
+#define INTERCEPT_CR4_WRITE (16 + 4)
+#define INTERCEPT_CR8_WRITE (16 + 8)
+
+#define INTERCEPT_DR0_READ 0
+#define INTERCEPT_DR1_READ 1
+#define INTERCEPT_DR2_READ 2
+#define INTERCEPT_DR3_READ 3
+#define INTERCEPT_DR4_READ 4
+#define INTERCEPT_DR5_READ 5
+#define INTERCEPT_DR6_READ 6
+#define INTERCEPT_DR7_READ 7
+#define INTERCEPT_DR0_WRITE (16 + 0)
+#define INTERCEPT_DR1_WRITE (16 + 1)
+#define INTERCEPT_DR2_WRITE (16 + 2)
+#define INTERCEPT_DR3_WRITE (16 + 3)
+#define INTERCEPT_DR4_WRITE (16 + 4)
+#define INTERCEPT_DR5_WRITE (16 + 5)
+#define INTERCEPT_DR6_WRITE (16 + 6)
+#define INTERCEPT_DR7_WRITE (16 + 7)
+
+#define SVM_EVTINJ_VEC_MASK 0xff
+
+#define SVM_EVTINJ_TYPE_SHIFT 8
+#define SVM_EVTINJ_TYPE_MASK (7 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_TYPE_INTR (0 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_NMI (2 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_EXEPT (3 << SVM_EVTINJ_TYPE_SHIFT)
+#define SVM_EVTINJ_TYPE_SOFT (4 << SVM_EVTINJ_TYPE_SHIFT)
+
+#define SVM_EVTINJ_VALID (1 << 31)
+#define SVM_EVTINJ_VALID_ERR (1 << 11)
+
+#define SVM_EXITINTINFO_VEC_MASK SVM_EVTINJ_VEC_MASK
+#define SVM_EXITINTINFO_TYPE_MASK SVM_EVTINJ_TYPE_MASK
+
+#define SVM_EXITINTINFO_TYPE_INTR SVM_EVTINJ_TYPE_INTR
+#define SVM_EXITINTINFO_TYPE_NMI SVM_EVTINJ_TYPE_NMI
+#define SVM_EXITINTINFO_TYPE_EXEPT SVM_EVTINJ_TYPE_EXEPT
+#define SVM_EXITINTINFO_TYPE_SOFT SVM_EVTINJ_TYPE_SOFT
+
+#define SVM_EXITINTINFO_VALID SVM_EVTINJ_VALID
+#define SVM_EXITINTINFO_VALID_ERR SVM_EVTINJ_VALID_ERR
+
+#define SVM_EXITINFOSHIFT_TS_REASON_IRET 36
+#define SVM_EXITINFOSHIFT_TS_REASON_JMP 38
+#define SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE 44
+
+#define SVM_EXITINFO_REG_MASK 0x0F
+
+#define SVM_CR0_SELECTIVE_MASK (X86_CR0_TS | X86_CR0_MP)
+
+#endif /* SELFTEST_KVM_SVM_H */
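
The INTERCEPT_* enum above enumerates bit positions within the 64-bit intercept word of vmcb_control_area, so enabling an intercept is a single shifted-bit OR. A hedged sketch, matching how generic_svm_setup() below consumes it:

    struct vmcb_control_area *ctrl = &vmcb->control;

    /* each enum value is a bit index into the intercept vector */
    ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
                      (1ULL << INTERCEPT_VMMCALL);
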
diff --git a/tools/testing/selftests/kvm/include/x86_64/svm_util.h b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
new file mode 100644
index 000000000000..cd037917fece
--- /dev/null
+++ b/tools/testing/selftests/kvm/include/x86_64/svm_util.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * tools/testing/selftests/kvm/include/x86_64/svm_util.h
+ * Header for nested SVM testing
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+
+#ifndef SELFTEST_KVM_SVM_UTILS_H
+#define SELFTEST_KVM_SVM_UTILS_H
+
+#include <stdint.h>
+#include "svm.h"
+#include "processor.h"
+
+#define CPUID_SVM_BIT 2
+#define CPUID_SVM BIT_ULL(CPUID_SVM_BIT)
+
+#define SVM_EXIT_VMMCALL 0x081
+
+struct svm_test_data {
+ /* VMCB */
+ struct vmcb *vmcb; /* gva */
+ void *vmcb_hva;
+ uint64_t vmcb_gpa;
+
+ /* host state-save area */
+ struct vmcb_save_area *save_area; /* gva */
+ void *save_area_hva;
+ uint64_t save_area_gpa;
+};
+
+struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
+void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
+void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
+void nested_svm_check_supported(void);
+
+#endif /* SELFTEST_KVM_SVM_UTILS_H */
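
Taken together, the declarations above imply a two-stage flow: the host allocates and maps the svm_test_data, and the L1 guest then enables SVM and launches L2. A compressed sketch (mirroring svm_vmcall_test.c further down; l2_stack_top is a placeholder name, not from the patch):

    /* host side: carve out the SVM pages and hand the gva to L1 */
    vm_vaddr_t svm_gva;
    vcpu_alloc_svm(vm, &svm_gva);
    vcpu_args_set(vm, VCPU_ID, 1, svm_gva);

    /* L1 guest side: enable SVME, fill the VMCB, then VMRUN into L2 */
    generic_svm_setup(svm, l2_guest_code, l2_stack_top /* placeholder */);
    run_guest(svm->vmcb, svm->vmcb_gpa);
    GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
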
diff --git a/tools/testing/selftests/kvm/lib/x86_64/svm.c b/tools/testing/selftests/kvm/lib/x86_64/svm.c
new file mode 100644
index 000000000000..6e05a8fc3fe0
--- /dev/null
+++ b/tools/testing/selftests/kvm/lib/x86_64/svm.c
@@ -0,0 +1,161 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * tools/testing/selftests/kvm/lib/x86_64/svm.c
+ * Helpers used for nested SVM testing
+ * Largely inspired by the KVM unit test svm.c
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "../kvm_util_internal.h"
+#include "processor.h"
+#include "svm_util.h"
+
+struct gpr64_regs guest_regs;
+u64 rflags;
+
+/* Allocate memory regions for nested SVM tests.
+ *
+ * Input Args:
+ * vm - The VM to allocate guest-virtual addresses in.
+ *
+ * Output Args:
+ * p_svm_gva - The guest virtual address for the struct svm_test_data.
+ *
+ * Return:
+ * Pointer to structure with the addresses of the SVM areas.
+ */
+struct svm_test_data *
+vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
+{
+ vm_vaddr_t svm_gva = vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);
+
+ svm->vmcb = (void *)vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
+ svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);
+
+ svm->save_area = (void *)vm_vaddr_alloc(vm, getpagesize(),
+ 0x10000, 0, 0);
+ svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
+ svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);
+
+ *p_svm_gva = svm_gva;
+ return svm;
+}
+
+static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
+ u64 base, u32 limit, u32 attr)
+{
+ seg->selector = selector;
+ seg->attrib = attr;
+ seg->limit = limit;
+ seg->base = base;
+}
+
+void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
+{
+ struct vmcb *vmcb = svm->vmcb;
+ uint64_t vmcb_gpa = svm->vmcb_gpa;
+ struct vmcb_save_area *save = &vmcb->save;
+ struct vmcb_control_area *ctrl = &vmcb->control;
+ u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
+ | SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
+ u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
+ | SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
+ uint64_t efer;
+
+ efer = rdmsr(MSR_EFER);
+ wrmsr(MSR_EFER, efer | EFER_SVME);
+ wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);
+
+ memset(vmcb, 0, sizeof(*vmcb));
+ asm volatile ("vmsave\n\t" : : "a" (vmcb_gpa) : "memory");
+ vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
+ vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
+ vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
+ vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr);
+ vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0);
+ vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0);
+
+ ctrl->asid = 1;
+ save->cpl = 0;
+ save->efer = rdmsr(MSR_EFER);
+ asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory");
+ asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory");
+ asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory");
+ asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory");
+ asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory");
+ asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory");
+ save->g_pat = rdmsr(MSR_IA32_CR_PAT);
+ save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
+ ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
+ (1ULL << INTERCEPT_VMMCALL);
+
+ vmcb->save.rip = (u64)guest_rip;
+ vmcb->save.rsp = (u64)guest_rsp;
+ guest_regs.rdi = (u64)svm;
+}
+
+/*
+ * save/restore 64-bit general registers except rax, rip, rsp
+ * which are directly handed through the VMCB guest processor state
+ */
+#define SAVE_GPR_C \
+ "xchg %%rbx, guest_regs+0x20\n\t" \
+ "xchg %%rcx, guest_regs+0x10\n\t" \
+ "xchg %%rdx, guest_regs+0x18\n\t" \
+ "xchg %%rbp, guest_regs+0x30\n\t" \
+ "xchg %%rsi, guest_regs+0x38\n\t" \
+ "xchg %%rdi, guest_regs+0x40\n\t" \
+ "xchg %%r8, guest_regs+0x48\n\t" \
+ "xchg %%r9, guest_regs+0x50\n\t" \
+ "xchg %%r10, guest_regs+0x58\n\t" \
+ "xchg %%r11, guest_regs+0x60\n\t" \
+ "xchg %%r12, guest_regs+0x68\n\t" \
+ "xchg %%r13, guest_regs+0x70\n\t" \
+ "xchg %%r14, guest_regs+0x78\n\t" \
+ "xchg %%r15, guest_regs+0x80\n\t"
+
+#define LOAD_GPR_C SAVE_GPR_C
+
+/*
+ * Selftests do not use interrupts, so clgi/sti/cli/stgi are dropped
+ * for now. The registers involved in LOAD/SAVE_GPR_C end up
+ * unmodified, so they do not need to be in the clobber list.
+ */
+void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
+{
+ asm volatile (
+ "vmload\n\t"
+ "mov rflags, %%r15\n\t" // rflags
+ "mov %%r15, 0x170(%[vmcb])\n\t"
+ "mov guest_regs, %%r15\n\t" // rax
+ "mov %%r15, 0x1f8(%[vmcb])\n\t"
+ LOAD_GPR_C
+ "vmrun\n\t"
+ SAVE_GPR_C
+ "mov 0x170(%[vmcb]), %%r15\n\t" // rflags
+ "mov %%r15, rflags\n\t"
+ "mov 0x1f8(%[vmcb]), %%r15\n\t" // rax
+ "mov %%r15, guest_regs\n\t"
+ "vmsave\n\t"
+ : : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
+ : "r15", "memory");
+}
+
+void nested_svm_check_supported(void)
+{
+ struct kvm_cpuid_entry2 *entry =
+ kvm_get_supported_cpuid_entry(0x80000001);
+
+ if (!(entry->ecx & CPUID_SVM)) {
+ fprintf(stderr, "nested SVM not enabled, skipping test\n");
+ exit(KSFT_SKIP);
+ }
+}
+
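
Worth calling out from generic_svm_setup() above: before the first VMRUN the code performs the two architectural prerequisites for SVM, and both are plain MSR writes. Restated in isolation (a sketch, using only symbols from this patch):

    /* 1. turn on SVM by setting EFER.SVME */
    wrmsr(MSR_EFER, rdmsr(MSR_EFER) | EFER_SVME);
    /* 2. point MSR_VM_HSAVE_PA at a guest-physical host state-save area */
    wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);
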
diff --git a/tools/testing/selftests/kvm/lib/x86_64/vmx.c b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
index 85064baf5e97..7aaa99ca4dbc 100644
--- a/tools/testing/selftests/kvm/lib/x86_64/vmx.c
+++ b/tools/testing/selftests/kvm/lib/x86_64/vmx.c
@@ -288,9 +288,9 @@ static inline void init_vmcs_host_state(void)
vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
vmwrite(HOST_TR_BASE,
- get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr())));
- vmwrite(HOST_GDTR_BASE, get_gdt_base());
- vmwrite(HOST_IDTR_BASE, get_idt_base());
+ get_desc64_base((struct desc64 *)(get_gdt().address + get_tr())));
+ vmwrite(HOST_GDTR_BASE, get_gdt().address);
+ vmwrite(HOST_IDTR_BASE, get_idt().address);
vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}
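
The vmx.c hunk is the mechanical fallout of the accessor change: resolving the host TR base still indexes the GDT by the task-register selector, only now through the returned structure. As a sketch with the same names:

    struct desc_ptr gdt = get_gdt();

    /* the TSS descriptor lives at GDT base + TR selector */
    vmwrite(HOST_TR_BASE,
            get_desc64_base((struct desc64 *)(gdt.address + get_tr())));
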
diff --git a/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
new file mode 100644
index 000000000000..e280f68f6365
--- /dev/null
+++ b/tools/testing/selftests/kvm/x86_64/svm_vmcall_test.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * svm_vmcall_test
+ *
+ * Copyright (C) 2020, Red Hat, Inc.
+ *
+ * Nested SVM testing: VMCALL
+ */
+
+#include "test_util.h"
+#include "kvm_util.h"
+#include "processor.h"
+#include "svm_util.h"
+
+#define VCPU_ID 5
+
+static struct kvm_vm *vm;
+
+static void l2_guest_code(struct svm_test_data *svm)
+{
+ __asm__ __volatile__("vmcall");
+}
+
+static void l1_guest_code(struct svm_test_data *svm)
+{
+ #define L2_GUEST_STACK_SIZE 64
+ unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
+ struct vmcb *vmcb = svm->vmcb;
+
+ /* Prepare for L2 execution. */
+ generic_svm_setup(svm, l2_guest_code,
+ &l2_guest_stack[L2_GUEST_STACK_SIZE]);
+
+ run_guest(vmcb, svm->vmcb_gpa);
+
+ GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
+ GUEST_DONE();
+}
+
+int main(int argc, char *argv[])
+{
+ vm_vaddr_t svm_gva;
+
+ nested_svm_check_supported();
+
+ vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
+ vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
+
+ vcpu_alloc_svm(vm, &svm_gva);
+ vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
+
+ for (;;) {
+ volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
+ struct ucall uc;
+
+ vcpu_run(vm, VCPU_ID);
+ TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
+ "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
+ run->exit_reason,
+ exit_reason_str(run->exit_reason));
+
+ switch (get_ucall(vm, VCPU_ID, &uc)) {
+ case UCALL_ABORT:
+ TEST_ASSERT(false, "%s",
+ (const char *)uc.args[0]);
+ /* NOT REACHED */
+ case UCALL_SYNC:
+ break;
+ case UCALL_DONE:
+ goto done;
+ default:
+ TEST_ASSERT(false,
+ "Unknown ucall 0x%x.", uc.cmd);
+ }
+ }
+done:
+ kvm_vm_free(vm);
+ return 0;
+}
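
The run loop above leans on the selftest ucall convention: GUEST_ASSERT()/GUEST_DONE() in guest code trigger a port I/O exit (hence the KVM_EXIT_IO assertion), which the host side decodes with get_ucall(). Trimmed to the essential contract (a sketch, not additional test code):

    vcpu_run(vm, VCPU_ID);                  /* run L1 until it ucalls out */
    switch (get_ucall(vm, VCPU_ID, &uc)) {
    case UCALL_ABORT:                       /* a GUEST_ASSERT() failed */
        TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
        /* NOT REACHED */
    case UCALL_DONE:                        /* GUEST_DONE(): success */
        break;
    }
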
diff --git a/tools/testing/selftests/lib.mk b/tools/testing/selftests/lib.mk
index 1c8a1963d03f..3ed0134a764d 100644
--- a/tools/testing/selftests/lib.mk
+++ b/tools/testing/selftests/lib.mk
@@ -83,17 +83,20 @@ else
$(call RUN_TESTS, $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_PROGS))
endif
+define INSTALL_SINGLE_RULE
+ $(if $(INSTALL_LIST),@mkdir -p $(INSTALL_PATH))
+ $(if $(INSTALL_LIST),@echo rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
+ $(if $(INSTALL_LIST),@rsync -a $(INSTALL_LIST) $(INSTALL_PATH)/)
+endef
+
define INSTALL_RULE
- @if [ "X$(TEST_PROGS)$(TEST_PROGS_EXTENDED)$(TEST_FILES)" != "X" ]; then \
- mkdir -p ${INSTALL_PATH}; \
- echo "rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/"; \
- rsync -a $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES) $(INSTALL_PATH)/; \
- fi
- @if [ "X$(TEST_GEN_PROGS)$(TEST_CUSTOM_PROGS)$(TEST_GEN_PROGS_EXTENDED)$(TEST_GEN_FILES)" != "X" ]; then \
- mkdir -p ${INSTALL_PATH}; \
- echo "rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/"; \
- rsync -a $(TEST_GEN_PROGS) $(TEST_CUSTOM_PROGS) $(TEST_GEN_PROGS_EXTENDED) $(TEST_GEN_FILES) $(INSTALL_PATH)/; \
- fi
+ $(eval INSTALL_LIST = $(TEST_PROGS)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(TEST_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(TEST_FILES)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(TEST_GEN_PROGS)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(TEST_CUSTOM_PROGS)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(TEST_GEN_PROGS_EXTENDED)) $(INSTALL_SINGLE_RULE)
+ $(eval INSTALL_LIST = $(TEST_GEN_FILES)) $(INSTALL_SINGLE_RULE)
endef
install: all
diff --git a/tools/testing/selftests/livepatch/Makefile b/tools/testing/selftests/livepatch/Makefile
index 3876d8d62494..1acc9e1fa3fb 100644
--- a/tools/testing/selftests/livepatch/Makefile
+++ b/tools/testing/selftests/livepatch/Makefile
@@ -8,4 +8,6 @@ TEST_PROGS := \
test-state.sh \
test-ftrace.sh
+TEST_FILES := settings
+
include ../lib.mk
diff --git a/tools/testing/selftests/lkdtm/.gitignore b/tools/testing/selftests/lkdtm/.gitignore
new file mode 100644
index 000000000000..f26212605b6b
--- /dev/null
+++ b/tools/testing/selftests/lkdtm/.gitignore
@@ -0,0 +1,2 @@
+*.sh
+!run.sh
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index b5694196430a..287ae916ec0b 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -27,5 +27,5 @@ KSFT_KHDR_INSTALL := 1
include ../lib.mk
$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
-$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
-$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
+$(OUTPUT)/tcp_mmap: LDLIBS += -lpthread
+$(OUTPUT)/tcp_inq: LDLIBS += -lpthread
diff --git a/tools/testing/selftests/net/fib_tests.sh b/tools/testing/selftests/net/fib_tests.sh
index 6dd403103800..60273f1bc7d9 100755
--- a/tools/testing/selftests/net/fib_tests.sh
+++ b/tools/testing/selftests/net/fib_tests.sh
@@ -910,6 +910,12 @@ ipv6_rt_replace_mpath()
check_route6 "2001:db8:104::/64 via 2001:db8:101::3 dev veth1 metric 1024"
log_test $? 0 "Multipath with single path via multipath attribute"
+ # multipath with dev-only
+ add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
+ run_cmd "$IP -6 ro replace 2001:db8:104::/64 dev veth1"
+ check_route6 "2001:db8:104::/64 dev veth1 metric 1024"
+ log_test $? 0 "Multipath with dev-only"
+
# route replace fails - invalid nexthop 1
add_initial_route6 "nexthop via 2001:db8:101::2 nexthop via 2001:db8:103::2"
run_cmd "$IP -6 ro replace 2001:db8:104::/64 nexthop via 2001:db8:111::3 nexthop via 2001:db8:103::3"
diff --git a/tools/testing/selftests/net/forwarding/mirror_gre.sh b/tools/testing/selftests/net/forwarding/mirror_gre.sh
index e6fd7a18c655..0266443601bc 100755
--- a/tools/testing/selftests/net/forwarding/mirror_gre.sh
+++ b/tools/testing/selftests/net/forwarding/mirror_gre.sh
@@ -63,22 +63,23 @@ test_span_gre_mac()
{
local tundev=$1; shift
local direction=$1; shift
- local prot=$1; shift
local what=$1; shift
- local swp3mac=$(mac_get $swp3)
- local h3mac=$(mac_get $h3)
+ case "$direction" in
+ ingress) local src_mac=$(mac_get $h1); local dst_mac=$(mac_get $h2)
+ ;;
+ egress) local src_mac=$(mac_get $h2); local dst_mac=$(mac_get $h1)
+ ;;
+ esac
RET=0
mirror_install $swp1 $direction $tundev "matchall $tcflags"
- tc filter add dev $h3 ingress pref 77 prot $prot \
- flower ip_proto 0x2f src_mac $swp3mac dst_mac $h3mac \
- action pass
+ icmp_capture_install h3-${tundev} "src_mac $src_mac dst_mac $dst_mac"
- mirror_test v$h1 192.0.2.1 192.0.2.2 $h3 77 10
+ mirror_test v$h1 192.0.2.1 192.0.2.2 h3-${tundev} 100 10
- tc filter del dev $h3 ingress pref 77
+ icmp_capture_uninstall h3-${tundev}
mirror_uninstall $swp1 $direction
log_test "$direction $what: envelope MAC ($tcflags)"
@@ -120,14 +121,14 @@ test_ip6gretap()
test_gretap_mac()
{
- test_span_gre_mac gt4 ingress ip "mirror to gretap"
- test_span_gre_mac gt4 egress ip "mirror to gretap"
+ test_span_gre_mac gt4 ingress "mirror to gretap"
+ test_span_gre_mac gt4 egress "mirror to gretap"
}
test_ip6gretap_mac()
{
- test_span_gre_mac gt6 ingress ipv6 "mirror to ip6gretap"
- test_span_gre_mac gt6 egress ipv6 "mirror to ip6gretap"
+ test_span_gre_mac gt6 ingress "mirror to ip6gretap"
+ test_span_gre_mac gt6 egress "mirror to ip6gretap"
}
test_all()
diff --git a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
index bb10e33690b2..ce6bea9675c0 100755
--- a/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
+++ b/tools/testing/selftests/net/forwarding/vxlan_bridge_1d.sh
@@ -516,9 +516,9 @@ test_tos()
RET=0
tc filter add dev v1 egress pref 77 prot ip \
- flower ip_tos 0x40 action pass
- vxlan_ping_test $h1 192.0.2.3 "-Q 0x40" v1 egress 77 10
- vxlan_ping_test $h1 192.0.2.3 "-Q 0x30" v1 egress 77 0
+ flower ip_tos 0x14 action pass
+ vxlan_ping_test $h1 192.0.2.3 "-Q 0x14" v1 egress 77 10
+ vxlan_ping_test $h1 192.0.2.3 "-Q 0x18" v1 egress 77 0
tc filter del dev v1 egress pref 77 prot ip
log_test "VXLAN: envelope TOS inheritance"
diff --git a/tools/testing/selftests/net/mptcp/Makefile b/tools/testing/selftests/net/mptcp/Makefile
index 93de52016dde..ba450e62dc5b 100644
--- a/tools/testing/selftests/net/mptcp/Makefile
+++ b/tools/testing/selftests/net/mptcp/Makefile
@@ -8,6 +8,8 @@ TEST_PROGS := mptcp_connect.sh
TEST_GEN_FILES = mptcp_connect
+TEST_FILES := settings
+
EXTRA_CLEAN := *.pcap
include ../../lib.mk
diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh
index aca21dde102a..5a4938d6dcf2 100755
--- a/tools/testing/selftests/netfilter/nft_concat_range.sh
+++ b/tools/testing/selftests/netfilter/nft_concat_range.sh
@@ -13,11 +13,12 @@
KSELFTEST_SKIP=4
# Available test groups:
+# - reported_issues: check for issues that were reported in the past
# - correctness: check that packets match given entries, and only those
# - concurrency: attempt races between insertion, deletion and lookup
# - timeout: check that packets match entries until they expire
# - performance: estimate matching rate, compare with rbtree and hash baselines
-TESTS="correctness concurrency timeout"
+TESTS="reported_issues correctness concurrency timeout"
[ "${quicktest}" != "1" ] && TESTS="${TESTS} performance"
# Set types, defined by TYPE_ variables below
@@ -25,6 +26,9 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto
net_port_net net_mac net_mac_icmp net6_mac_icmp net6_port_net6_port
net_port_mac_proto_net"
+# Reported bugs, also described by TYPE_ variables below
+BUGS="flush_remove_add"
+
# List of possible paths to pktgen script from kernel tree for performance tests
PKTGEN_SCRIPT_PATHS="
../../../samples/pktgen/pktgen_bench_xmit_mode_netif_receive.sh
@@ -327,6 +331,12 @@ flood_spec ip daddr . tcp dport . meta l4proto . ip saddr
perf_duration 0
"
+# Definition of tests for bugs reported in the past:
+# display display text for test report
+TYPE_flush_remove_add="
+display Add two elements, flush, re-add
+"
+
# Set template for all tests, types and rules are filled in depending on test
set_template='
flush ruleset
@@ -440,6 +450,8 @@ setup_set() {
# Check that at least one of the needed tools is available
check_tools() {
+ [ -z "${tools}" ] && return 0
+
__tools=
for tool in ${tools}; do
if [ "${tool}" = "nc" ] && [ "${proto}" = "udp6" ] && \
@@ -1025,7 +1037,7 @@ format_noconcat() {
add() {
if ! nft add element inet filter test "${1}"; then
err "Failed to add ${1} given ruleset:"
- err "$(nft list ruleset -a)"
+ err "$(nft -a list ruleset)"
return 1
fi
}
@@ -1045,7 +1057,7 @@ add_perf() {
add_perf_norange() {
if ! nft add element netdev perf norange "${1}"; then
err "Failed to add ${1} given ruleset:"
- err "$(nft list ruleset -a)"
+ err "$(nft -a list ruleset)"
return 1
fi
}
@@ -1054,7 +1066,7 @@ add_perf_norange() {
add_perf_noconcat() {
if ! nft add element netdev perf noconcat "${1}"; then
err "Failed to add ${1} given ruleset:"
- err "$(nft list ruleset -a)"
+ err "$(nft -a list ruleset)"
return 1
fi
}
@@ -1063,7 +1075,7 @@ add_perf_noconcat() {
del() {
if ! nft delete element inet filter test "${1}"; then
err "Failed to delete ${1} given ruleset:"
- err "$(nft list ruleset -a)"
+ err "$(nft -a list ruleset)"
return 1
fi
}
@@ -1134,7 +1146,7 @@ send_match() {
err " $(for f in ${src}; do
eval format_\$f "${2}"; printf ' '; done)"
err "should have matched ruleset:"
- err "$(nft list ruleset -a)"
+ err "$(nft -a list ruleset)"
return 1
fi
nft reset counter inet filter test >/dev/null
@@ -1160,7 +1172,7 @@ send_nomatch() {
err " $(for f in ${src}; do
eval format_\$f "${2}"; printf ' '; done)"
err "should not have matched ruleset:"
- err "$(nft list ruleset -a)"
+ err "$(nft -a list ruleset)"
return 1
fi
}
@@ -1430,6 +1442,23 @@ test_performance() {
kill "${perf_pid}"
}
+test_bug_flush_remove_add() {
+ set_cmd='{ set s { type ipv4_addr . inet_service; flags interval; }; }'
+ elem1='{ 10.0.0.1 . 22-25, 10.0.0.1 . 10-20 }'
+ elem2='{ 10.0.0.1 . 10-20, 10.0.0.1 . 22-25 }'
+ for i in `seq 1 100`; do
+ nft add table t ${set_cmd} || return ${KSELFTEST_SKIP}
+ nft add element t s ${elem1} 2>/dev/null || return 1
+ nft flush set t s 2>/dev/null || return 1
+ nft add element t s ${elem2} 2>/dev/null || return 1
+ done
+ nft flush ruleset
+}
+
+test_reported_issues() {
+ eval test_bug_"${subtest}"
+}
+
# Run everything in a separate network namespace
[ "${1}" != "run" ] && { unshare -n "${0}" run; exit $?; }
tmp="$(mktemp)"
@@ -1438,9 +1467,15 @@ trap cleanup EXIT
# Entry point for test runs
passed=0
for name in ${TESTS}; do
- printf "TEST: %s\n" "${name}"
- for type in ${TYPES}; do
- eval desc=\$TYPE_"${type}"
+ printf "TEST: %s\n" "$(echo ${name} | tr '_' ' ')"
+ if [ "${name}" = "reported_issues" ]; then
+ SUBTESTS="${BUGS}"
+ else
+ SUBTESTS="${TYPES}"
+ fi
+
+ for subtest in ${SUBTESTS}; do
+ eval desc=\$TYPE_"${subtest}"
IFS='
'
for __line in ${desc}; do
diff --git a/tools/testing/selftests/openat2/helpers.c b/tools/testing/selftests/openat2/helpers.c
index e9a6557ab16f..5074681ffdc9 100644
--- a/tools/testing/selftests/openat2/helpers.c
+++ b/tools/testing/selftests/openat2/helpers.c
@@ -46,7 +46,7 @@ int sys_renameat2(int olddirfd, const char *oldpath,
int touchat(int dfd, const char *path)
{
- int fd = openat(dfd, path, O_CREAT);
+ int fd = openat(dfd, path, O_CREAT, 0700);
if (fd >= 0)
close(fd);
return fd;
diff --git a/tools/testing/selftests/openat2/resolve_test.c b/tools/testing/selftests/openat2/resolve_test.c
index 7a94b1da8e7b..bbafad440893 100644
--- a/tools/testing/selftests/openat2/resolve_test.c
+++ b/tools/testing/selftests/openat2/resolve_test.c
@@ -230,7 +230,7 @@ void test_openat2_opath_tests(void)
{ .name = "[in_root] garbage link to /root",
.path = "cheeky/garbageself", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
- { .name = "[in_root] chainged garbage links to /root",
+ { .name = "[in_root] chained garbage links to /root",
.path = "abscheeky/garbageself", .how.resolve = RESOLVE_IN_ROOT,
.out.path = "root", .pass = true },
{ .name = "[in_root] relative path to 'root'",
diff --git a/tools/testing/selftests/pidfd/.gitignore b/tools/testing/selftests/pidfd/.gitignore
index 3a779c084d96..39559d723c41 100644
--- a/tools/testing/selftests/pidfd/.gitignore
+++ b/tools/testing/selftests/pidfd/.gitignore
@@ -2,4 +2,5 @@ pidfd_open_test
pidfd_poll_test
pidfd_test
pidfd_wait
+pidfd_fdinfo_test
pidfd_getfd_test
diff --git a/tools/testing/selftests/rseq/Makefile b/tools/testing/selftests/rseq/Makefile
index d6469535630a..2af9d39a9716 100644
--- a/tools/testing/selftests/rseq/Makefile
+++ b/tools/testing/selftests/rseq/Makefile
@@ -4,7 +4,7 @@ ifneq ($(shell $(CC) --version 2>&1 | head -n 1 | grep clang),)
CLANG_FLAGS += -no-integrated-as
endif
-CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L./ -Wl,-rpath=./ \
+CFLAGS += -O2 -Wall -g -I./ -I../../../../usr/include/ -L$(OUTPUT) -Wl,-rpath=./ \
$(CLANG_FLAGS)
LDLIBS += -lpthread
@@ -19,6 +19,8 @@ TEST_GEN_PROGS_EXTENDED = librseq.so
TEST_PROGS = run_param_test.sh
+TEST_FILES := settings
+
include ../lib.mk
$(OUTPUT)/librseq.so: rseq.c rseq.h rseq-*.h
diff --git a/tools/testing/selftests/rtc/Makefile b/tools/testing/selftests/rtc/Makefile
index de9c8566672a..55198ecc04db 100644
--- a/tools/testing/selftests/rtc/Makefile
+++ b/tools/testing/selftests/rtc/Makefile
@@ -1,9 +1,11 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -O3 -Wl,-no-as-needed -Wall
-LDFLAGS += -lrt -lpthread -lm
+LDLIBS += -lrt -lpthread -lm
TEST_GEN_PROGS = rtctest
TEST_GEN_PROGS_EXTENDED = setdate
+TEST_FILES := settings
+
include ../lib.mk
diff --git a/tools/testing/selftests/timens/Makefile b/tools/testing/selftests/timens/Makefile
index e9fb30bd8aeb..b4fd9a934654 100644
--- a/tools/testing/selftests/timens/Makefile
+++ b/tools/testing/selftests/timens/Makefile
@@ -2,6 +2,6 @@ TEST_GEN_PROGS := timens timerfd timer clock_nanosleep procfs exec
TEST_GEN_PROGS_EXTENDED := gettime_perf
CFLAGS := -Wall -Werror -pthread
-LDFLAGS := -lrt -ldl
+LDLIBS := -lrt -ldl
include ../lib.mk
diff --git a/tools/testing/selftests/tpm2/test_smoke.sh b/tools/testing/selftests/tpm2/test_smoke.sh
index 8155c2ea7ccb..b630c7b5950a 100755
--- a/tools/testing/selftests/tpm2/test_smoke.sh
+++ b/tools/testing/selftests/tpm2/test_smoke.sh
@@ -1,8 +1,17 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-python -m unittest -v tpm2_tests.SmokeTest
-python -m unittest -v tpm2_tests.AsyncTest
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+
+if [ -f /dev/tpm0 ] ; then
+ python -m unittest -v tpm2_tests.SmokeTest
+ python -m unittest -v tpm2_tests.AsyncTest
+else
+ exit $ksft_skip
+fi
CLEAR_CMD=$(which tpm2_clear)
if [ -n $CLEAR_CMD ]; then
diff --git a/tools/testing/selftests/tpm2/test_space.sh b/tools/testing/selftests/tpm2/test_space.sh
index a6f5e346635e..180b469c53b4 100755
--- a/tools/testing/selftests/tpm2/test_space.sh
+++ b/tools/testing/selftests/tpm2/test_space.sh
@@ -1,4 +1,11 @@
#!/bin/bash
# SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause)
-python -m unittest -v tpm2_tests.SpaceTest
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if [ -f /dev/tpmrm0 ] ; then
+ python -m unittest -v tpm2_tests.SpaceTest
+else
+ exit $ksft_skip
+fi
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index a692ea828317..f33714843198 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -112,6 +112,17 @@ echo "NOTE: The above hugetlb tests provide minimal coverage. Use"
echo " https://github.com/libhugetlbfs/libhugetlbfs.git for"
echo " hugetlb regression testing."
+echo "---------------------------"
+echo "running map_fixed_noreplace"
+echo "---------------------------"
+./map_fixed_noreplace
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
echo "-------------------"
echo "running userfaultfd"
echo "-------------------"
@@ -186,6 +197,17 @@ else
echo "[PASS]"
fi
+echo "-------------------------"
+echo "running mlock-random-test"
+echo "-------------------------"
+./mlock-random-test
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
echo "--------------------"
echo "running mlock2-tests"
echo "--------------------"
@@ -197,6 +219,17 @@ else
echo "[PASS]"
fi
+echo "-----------------"
+echo "running thuge-gen"
+echo "-----------------"
+./thuge-gen
+if [ $? -ne 0 ]; then
+ echo "[FAIL]"
+ exitcode=1
+else
+ echo "[PASS]"
+fi
+
if [ $VADDR64 -ne 0 ]; then
echo "-----------------------------"
echo "running virtual_address_range"
diff --git a/tools/testing/selftests/wireguard/netns.sh b/tools/testing/selftests/wireguard/netns.sh
index f5ab1cda8bb5..138d46b3f330 100755
--- a/tools/testing/selftests/wireguard/netns.sh
+++ b/tools/testing/selftests/wireguard/netns.sh
@@ -24,6 +24,7 @@
set -e
exec 3>&1
+export LANG=C
export WG_HIDE_KEYS=never
netns0="wg-test-$$-0"
netns1="wg-test-$$-1"
@@ -297,7 +298,17 @@ ip1 -4 rule add table main suppress_prefixlength 0
n1 ping -W 1 -c 100 -f 192.168.99.7
n1 ping -W 1 -c 100 -f abab::1111
+# Have ns2 NAT into wg0 packets from ns0, but return an icmp error along the right route.
+n2 iptables -t nat -A POSTROUTING -s 10.0.0.0/24 -d 192.168.241.0/24 -j SNAT --to 192.168.241.2
+n0 iptables -t filter -A INPUT \! -s 10.0.0.0/24 -i vethrs -j DROP # Manual rpfilter just to be explicit.
+n2 bash -c 'printf 1 > /proc/sys/net/ipv4/ip_forward'
+ip0 -4 route add 192.168.241.1 via 10.0.0.100
+n2 wg set wg0 peer "$pub1" remove
+[[ $(! n0 ping -W 1 -c 1 192.168.241.1 || false) == *"From 10.0.0.100 icmp_seq=1 Destination Host Unreachable"* ]]
+
n0 iptables -t nat -F
+n0 iptables -t filter -F
+n2 iptables -t nat -F
ip0 link del vethrc
ip0 link del vethrs
ip1 link del wg0
diff --git a/tools/testing/selftests/wireguard/qemu/Makefile b/tools/testing/selftests/wireguard/qemu/Makefile
index f10aa3590adc..28d477683e8a 100644
--- a/tools/testing/selftests/wireguard/qemu/Makefile
+++ b/tools/testing/selftests/wireguard/qemu/Makefile
@@ -38,19 +38,17 @@ endef
define file_download =
$(DISTFILES_PATH)/$(1):
mkdir -p $(DISTFILES_PATH)
- flock -x [email protected] -c '[ -f $$@ ] && exit 0; wget -O [email protected] $(MIRROR)$(1) || wget -O [email protected] $(2)$(1) || rm -f [email protected]'
- if echo "$(3) [email protected]" | sha256sum -c -; then mv [email protected] $$@; else rm -f [email protected]; exit 71; fi
+ flock -x [email protected] -c '[ -f $$@ ] && exit 0; wget -O [email protected] $(MIRROR)$(1) || wget -O [email protected] $(2)$(1) || rm -f [email protected]; [ -f [email protected] ] || exit 1; if echo "$(3) [email protected]" | sha256sum -c -; then mv [email protected] $$@; else rm -f [email protected]; exit 71; fi'
endef
$(eval $(call tar_download,MUSL,musl,1.1.24,.tar.gz,https://www.musl-libc.org/releases/,1370c9a812b2cf2a7d92802510cca0058cc37e66a7bedd70051f0a34015022a3))
-$(eval $(call tar_download,LIBMNL,libmnl,1.0.4,.tar.bz2,https://www.netfilter.org/projects/libmnl/files/,171f89699f286a5854b72b91d06e8f8e3683064c5901fb09d954a9ab6f551f81))
$(eval $(call tar_download,IPERF,iperf,3.7,.tar.gz,https://downloads.es.net/pub/iperf/,d846040224317caf2f75c843d309a950a7db23f9b44b94688ccbe557d6d1710c))
$(eval $(call tar_download,BASH,bash,5.0,.tar.gz,https://ftp.gnu.org/gnu/bash/,b4a80f2ac66170b2913efbfb9f2594f1f76c7b1afd11f799e22035d63077fb4d))
$(eval $(call tar_download,IPROUTE2,iproute2,5.4.0,.tar.xz,https://www.kernel.org/pub/linux/utils/net/iproute2/,fe97aa60a0d4c5ac830be18937e18dc3400ca713a33a89ad896ff1e3d46086ae))
$(eval $(call tar_download,IPTABLES,iptables,1.8.4,.tar.bz2,https://www.netfilter.org/projects/iptables/files/,993a3a5490a544c2cbf2ef15cf7e7ed21af1845baf228318d5c36ef8827e157c))
$(eval $(call tar_download,NMAP,nmap,7.80,.tar.bz2,https://nmap.org/dist/,fcfa5a0e42099e12e4bf7a68ebe6fde05553383a682e816a7ec9256ab4773faa))
$(eval $(call tar_download,IPUTILS,iputils,s20190709,.tar.gz,https://github.com/iputils/iputils/archive/s20190709.tar.gz/#,a15720dd741d7538dd2645f9f516d193636ae4300ff7dbc8bfca757bf166490a))
-$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20191226,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,aa8af0fdc9872d369d8c890a84dbc2a2466b55795dccd5b47721b2d97644b04f))
+$(eval $(call tar_download,WIREGUARD_TOOLS,wireguard-tools,1.0.20200206,.tar.xz,https://git.zx2c4.com/wireguard-tools/snapshot/,f5207248c6a3c3e3bfc9ab30b91c1897b00802ed861e1f9faaed873366078c64))
KERNEL_BUILD_PATH := $(BUILD_PATH)/kernel$(if $(findstring yes,$(DEBUG_KERNEL)),-debug)
rwildcard=$(foreach d,$(wildcard $1*),$(call rwildcard,$d/,$2) $(filter $(subst *,%,$2),$d))
@@ -295,21 +293,13 @@ $(IPERF_PATH)/src/iperf3: | $(IPERF_PATH)/.installed $(USERSPACE_DEPS)
$(MAKE) -C $(IPERF_PATH)
$(STRIP) -s $@
-$(LIBMNL_PATH)/.installed: $(LIBMNL_TAR)
- flock -s $<.lock tar -C $(BUILD_PATH) -xf $<
- touch $@
-
-$(LIBMNL_PATH)/src/.libs/libmnl.a: | $(LIBMNL_PATH)/.installed $(USERSPACE_DEPS)
- cd $(LIBMNL_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared
- $(MAKE) -C $(LIBMNL_PATH)
- sed -i 's:prefix=.*:prefix=$(LIBMNL_PATH):' $(LIBMNL_PATH)/libmnl.pc
-
$(WIREGUARD_TOOLS_PATH)/.installed: $(WIREGUARD_TOOLS_TAR)
+ mkdir -p $(BUILD_PATH)
flock -s $<.lock tar -C $(BUILD_PATH) -xf $<
touch $@
-$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS)
- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src LIBMNL_CFLAGS="-I$(LIBMNL_PATH)/include" LIBMNL_LDLIBS="-lmnl" wg
+$(WIREGUARD_TOOLS_PATH)/src/wg: | $(WIREGUARD_TOOLS_PATH)/.installed $(USERSPACE_DEPS)
+ $(MAKE) -C $(WIREGUARD_TOOLS_PATH)/src wg
$(STRIP) -s $@
$(BUILD_PATH)/init: init.c | $(USERSPACE_DEPS)
@@ -340,17 +330,17 @@ $(BASH_PATH)/bash: | $(BASH_PATH)/.installed $(USERSPACE_DEPS)
$(IPROUTE2_PATH)/.installed: $(IPROUTE2_TAR)
mkdir -p $(BUILD_PATH)
flock -s $<.lock tar -C $(BUILD_PATH) -xf $<
- printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=y\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS -DHAVE_LIBMNL -I$(LIBMNL_PATH)/include\nLDLIBS+=-lmnl' > $(IPROUTE2_PATH)/config.mk
+ printf 'CC:=$(CC)\nPKG_CONFIG:=pkg-config\nTC_CONFIG_XT:=n\nTC_CONFIG_ATM:=n\nTC_CONFIG_IPSET:=n\nIP_CONFIG_SETNS:=y\nHAVE_ELF:=n\nHAVE_MNL:=n\nHAVE_BERKELEY_DB:=n\nHAVE_LATEX:=n\nHAVE_PDFLATEX:=n\nCFLAGS+=-DHAVE_SETNS\n' > $(IPROUTE2_PATH)/config.mk
printf 'lib: snapshot\n\t$$(MAKE) -C lib\nip/ip: lib\n\t$$(MAKE) -C ip ip\nmisc/ss: lib\n\t$$(MAKE) -C misc ss\n' >> $(IPROUTE2_PATH)/Makefile
touch $@
-$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS)
- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip
- $(STRIP) -s $(IPROUTE2_PATH)/ip/ip
+$(IPROUTE2_PATH)/ip/ip: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS)
+ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ ip/ip
+ $(STRIP) -s $@
-$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS)
- LDFLAGS="$(LDFLAGS) -L$(LIBMNL_PATH)/src/.libs" PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss
- $(STRIP) -s $(IPROUTE2_PATH)/misc/ss
+$(IPROUTE2_PATH)/misc/ss: | $(IPROUTE2_PATH)/.installed $(USERSPACE_DEPS)
+ $(MAKE) -C $(IPROUTE2_PATH) PREFIX=/ misc/ss
+ $(STRIP) -s $@
$(IPTABLES_PATH)/.installed: $(IPTABLES_TAR)
mkdir -p $(BUILD_PATH)
@@ -358,8 +348,8 @@ $(IPTABLES_PATH)/.installed: $(IPTABLES_TAR)
sed -i -e "/nfnetlink=[01]/s:=[01]:=0:" -e "/nfconntrack=[01]/s:=[01]:=0:" $(IPTABLES_PATH)/configure
touch $@
-$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(LIBMNL_PATH)/src/.libs/libmnl.a $(USERSPACE_DEPS)
- cd $(IPTABLES_PATH) && PKG_CONFIG_LIBDIR="$(LIBMNL_PATH)" ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --with-kernel=$(BUILD_PATH)/include
+$(IPTABLES_PATH)/iptables/xtables-legacy-multi: | $(IPTABLES_PATH)/.installed $(USERSPACE_DEPS)
+ cd $(IPTABLES_PATH) && ./configure --prefix=/ $(CROSS_COMPILE_FLAG) --enable-static --disable-shared --disable-nftables --disable-bpf-compiler --disable-nfsynproxy --disable-libipq --disable-connlabel --with-kernel=$(BUILD_PATH)/include
$(MAKE) -C $(IPTABLES_PATH)
$(STRIP) -s $@
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index d65a0faa46d8..eda7b624eab8 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -742,9 +742,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
guest_enter_irqoff();
if (has_vhe()) {
- kvm_arm_vhe_guest_enter();
ret = kvm_vcpu_run_vhe(vcpu);
- kvm_arm_vhe_guest_exit();
} else {
ret = kvm_call_hyp_ret(__kvm_vcpu_run_nvhe, vcpu);
}
diff --git a/virt/kvm/arm/trace.h b/virt/kvm/arm/trace.h
index 204d210d01c2..cc94ccc68821 100644
--- a/virt/kvm/arm/trace.h
+++ b/virt/kvm/arm/trace.h
@@ -4,6 +4,7 @@
#include <kvm/arm_arch_timer.h>
#include <linux/tracepoint.h>
+#include <asm/kvm_arm.h>
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvm
diff --git a/virt/kvm/arm/vgic/vgic-mmio.c b/virt/kvm/arm/vgic/vgic-mmio.c
index d656ebd5f9d4..97fb2a40e6ba 100644
--- a/virt/kvm/arm/vgic/vgic-mmio.c
+++ b/virt/kvm/arm/vgic/vgic-mmio.c
@@ -179,18 +179,6 @@ unsigned long vgic_mmio_read_pending(struct kvm_vcpu *vcpu,
return value;
}
-/*
- * This function will return the VCPU that performed the MMIO access and
- * trapped from within the VM, and will return NULL if this is a userspace
- * access.
- *
- * We can disable preemption locally around accessing the per-CPU variable,
- * and use the resolved vcpu pointer after enabling preemption again, because
- * even if the current thread is migrated to another CPU, reading the per-CPU
- * value later will give us the same value as we update the per-CPU variable
- * in the preempt notifier handlers.
- */
-
/* Must be called with irq->irq_lock held */
static void vgic_hw_irq_spending(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
bool is_uaccess)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 67ae2d5c37b2..70f03ce0e5c1 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -4409,12 +4409,22 @@ static void kvm_sched_out(struct preempt_notifier *pn,
/**
* kvm_get_running_vcpu - get the vcpu running on the current CPU.
- * Thanks to preempt notifiers, this can also be called from
- * preemptible context.
+ *
+ * We can disable preemption locally around accessing the per-CPU variable,
+ * and use the resolved vcpu pointer after enabling preemption again,
+ * because even if the current thread is migrated to another CPU, reading
+ * the per-CPU value later will give us the same value as we update the
+ * per-CPU variable in the preempt notifier handlers.
*/
struct kvm_vcpu *kvm_get_running_vcpu(void)
{
- return __this_cpu_read(kvm_running_vcpu);
+ struct kvm_vcpu *vcpu;
+
+ preempt_disable();
+ vcpu = __this_cpu_read(kvm_running_vcpu);
+ preempt_enable();
+
+ return vcpu;
}
/**